| code (string, 5-1M chars) | repo_name (string, 5-109 chars) | path (string, 6-208 chars) | language (1 class) | license (15 classes) | size (int64, 5-1M) |
|---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import java.util.Arrays
import org.apache.spark._
import org.apache.spark.rdd.RDD
import org.apache.spark.shuffle.sort.SortShuffleManager
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.metric.{SQLMetric, SQLShuffleReadMetricsReporter}
import org.apache.spark.sql.internal.SQLConf
sealed trait ShufflePartitionSpec
// A partition that reads data of one or more reducers, from `startReducerIndex` (inclusive) to
// `endReducerIndex` (exclusive).
case class CoalescedPartitionSpec(
startReducerIndex: Int,
endReducerIndex: Int) extends ShufflePartitionSpec
// A partition that reads partial data of one reducer, from `startMapIndex` (inclusive) to
// `endMapIndex` (exclusive).
case class PartialReducerPartitionSpec(
reducerIndex: Int,
startMapIndex: Int,
endMapIndex: Int,
@transient dataSize: Long) extends ShufflePartitionSpec
// A partition that reads partial data of one mapper, from `startReducerIndex` (inclusive) to
// `endReducerIndex` (exclusive).
case class PartialMapperPartitionSpec(
mapIndex: Int,
startReducerIndex: Int,
endReducerIndex: Int) extends ShufflePartitionSpec
/**
* The [[Partition]] used by [[ShuffledRowRDD]].
*/
private final case class ShuffledRowRDDPartition(
index: Int, spec: ShufflePartitionSpec) extends Partition
/**
* A dummy partitioner for use with records whose partition ids have been pre-computed (i.e. for
* use on RDDs of (Int, Row) pairs where the Int is a partition id in the expected range).
*/
private class PartitionIdPassthrough(override val numPartitions: Int) extends Partitioner {
override def getPartition(key: Any): Int = key.asInstanceOf[Int]
}
/**
* A Partitioner that might group together one or more partitions from the parent.
*
* @param parent a parent partitioner
* @param partitionStartIndices indices of partitions in parent that should create new partitions
* in child (this should be an array of increasing partition IDs). For example, if we have a
* parent with 5 partitions, and partitionStartIndices is [0, 2, 4], we get three output
* partitions, corresponding to partition ranges [0, 1], [2, 3] and [4] of the parent partitioner.
*/
class CoalescedPartitioner(val parent: Partitioner, val partitionStartIndices: Array[Int])
extends Partitioner {
@transient private lazy val parentPartitionMapping: Array[Int] = {
val n = parent.numPartitions
val result = new Array[Int](n)
for (i <- 0 until partitionStartIndices.length) {
val start = partitionStartIndices(i)
val end = if (i < partitionStartIndices.length - 1) partitionStartIndices(i + 1) else n
for (j <- start until end) {
result(j) = i
}
}
result
}
override def numPartitions: Int = partitionStartIndices.length
override def getPartition(key: Any): Int = {
parentPartitionMapping(parent.getPartition(key))
}
override def equals(other: Any): Boolean = other match {
case c: CoalescedPartitioner =>
c.parent == parent && Arrays.equals(c.partitionStartIndices, partitionStartIndices)
case _ =>
false
}
override def hashCode(): Int = 31 * parent.hashCode() + Arrays.hashCode(partitionStartIndices)
}
/**
* This is a specialized version of [[org.apache.spark.rdd.ShuffledRDD]] that is optimized for
* shuffling rows instead of Java key-value pairs. Note that something like this should eventually
* be implemented in Spark core, but that is blocked by some more general refactorings to shuffle
* interfaces / internals.
*
* This RDD takes a [[ShuffleDependency]] (`dependency`),
* and an array of [[ShufflePartitionSpec]] as input arguments.
*
* The `dependency` has the parent RDD of this RDD, which represents the dataset before shuffle
* (i.e. map output). Elements of this RDD are (partitionId, Row) pairs.
* Partition ids should be in the range [0, numPartitions - 1].
* `dependency.partitioner` is the original partitioner used to partition
* map output, and `dependency.partitioner.numPartitions` is the number of pre-shuffle partitions
* (i.e. the number of partitions of the map output).
*/
class ShuffledRowRDD(
var dependency: ShuffleDependency[Int, InternalRow, InternalRow],
metrics: Map[String, SQLMetric],
partitionSpecs: Array[ShufflePartitionSpec])
extends RDD[InternalRow](dependency.rdd.context, Nil) {
def this(
dependency: ShuffleDependency[Int, InternalRow, InternalRow],
metrics: Map[String, SQLMetric]) = {
this(dependency, metrics,
Array.tabulate(dependency.partitioner.numPartitions)(i => CoalescedPartitionSpec(i, i + 1)))
}
if (SQLConf.get.fetchShuffleBlocksInBatch) {
dependency.rdd.context.setLocalProperty(
SortShuffleManager.FETCH_SHUFFLE_BLOCKS_IN_BATCH_ENABLED_KEY, "true")
}
override def getDependencies: Seq[Dependency[_]] = List(dependency)
override val partitioner: Option[Partitioner] =
if (partitionSpecs.forall(_.isInstanceOf[CoalescedPartitionSpec])) {
val indices = partitionSpecs.map(_.asInstanceOf[CoalescedPartitionSpec].startReducerIndex)
// TODO this check is based on assumptions of callers' behavior but is sufficient for now.
if (indices.toSet.size == partitionSpecs.length) {
Some(new CoalescedPartitioner(dependency.partitioner, indices))
} else {
None
}
} else {
None
}
override def getPartitions: Array[Partition] = {
Array.tabulate[Partition](partitionSpecs.length) { i =>
ShuffledRowRDDPartition(i, partitionSpecs(i))
}
}
override def getPreferredLocations(partition: Partition): Seq[String] = {
val tracker = SparkEnv.get.mapOutputTracker.asInstanceOf[MapOutputTrackerMaster]
partition.asInstanceOf[ShuffledRowRDDPartition].spec match {
case CoalescedPartitionSpec(startReducerIndex, endReducerIndex) =>
// TODO order by partition size.
startReducerIndex.until(endReducerIndex).flatMap { reducerIndex =>
tracker.getPreferredLocationsForShuffle(dependency, reducerIndex)
}
case PartialReducerPartitionSpec(_, startMapIndex, endMapIndex, _) =>
tracker.getMapLocation(dependency, startMapIndex, endMapIndex)
case PartialMapperPartitionSpec(mapIndex, _, _) =>
tracker.getMapLocation(dependency, mapIndex, mapIndex + 1)
}
}
override def compute(split: Partition, context: TaskContext): Iterator[InternalRow] = {
val tempMetrics = context.taskMetrics().createTempShuffleReadMetrics()
// `SQLShuffleReadMetricsReporter` will update its own metrics for SQL exchange operator,
// as well as the `tempMetrics` for basic shuffle metrics.
val sqlMetricsReporter = new SQLShuffleReadMetricsReporter(tempMetrics, metrics)
val reader = split.asInstanceOf[ShuffledRowRDDPartition].spec match {
case CoalescedPartitionSpec(startReducerIndex, endReducerIndex) =>
SparkEnv.get.shuffleManager.getReader(
dependency.shuffleHandle,
startReducerIndex,
endReducerIndex,
context,
sqlMetricsReporter)
case PartialReducerPartitionSpec(reducerIndex, startMapIndex, endMapIndex, _) =>
SparkEnv.get.shuffleManager.getReader(
dependency.shuffleHandle,
startMapIndex,
endMapIndex,
reducerIndex,
reducerIndex + 1,
context,
sqlMetricsReporter)
case PartialMapperPartitionSpec(mapIndex, startReducerIndex, endReducerIndex) =>
SparkEnv.get.shuffleManager.getReader(
dependency.shuffleHandle,
mapIndex,
mapIndex + 1,
startReducerIndex,
endReducerIndex,
context,
sqlMetricsReporter)
}
reader.read().asInstanceOf[Iterator[Product2[Int, InternalRow]]].map(_._2)
}
override def clearDependencies(): Unit = {
super.clearDependencies()
dependency = null
}
}
| dbtsai/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/ShuffledRowRDD.scala | Scala | apache-2.0 | 8,769 |
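A minimal, editor-added sketch (not part of the dataset row above) of the mapping described in the `CoalescedPartitioner` doc comment: a parent with 5 partitions and start indices [0, 2, 4] yields 3 child partitions covering ranges [0, 1], [2, 3] and [4]. `HashPartitioner` is used only because it maps small non-negative Int keys to themselves; the object name is hypothetical.

import org.apache.spark.HashPartitioner
import org.apache.spark.sql.execution.CoalescedPartitioner

object CoalescedPartitionerSketch {
  def main(args: Array[String]): Unit = {
    val parent = new HashPartitioner(5)                       // parent.getPartition(i) == i for i in 0..4
    val coalesced = new CoalescedPartitioner(parent, Array(0, 2, 4))
    assert(coalesced.numPartitions == 3)
    // Parent partitions 0 and 1 collapse into child 0, 2 and 3 into child 1, 4 into child 2.
    assert((0 to 4).map(i => coalesced.getPartition(i)) == Seq(0, 0, 1, 1, 2))
  }
}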
package de.choffmeister.secpwd.utils
import java.io.{File, PrintWriter, FileInputStream, FileOutputStream}
import scala.io.Source
import scala.io.Codec
import scala.language.implicitConversions
class RichFile(val file: File) {
def text: String = Source.fromFile(file)(Codec.UTF8).mkString
def text_=(s: String) {
val out = new PrintWriter(file, "UTF-8")
try {
out.print(s)
}
finally {
out.close()
}
}
def bytes: Array[Byte] = {
val fs = new FileInputStream(file)
try {
val length = file.length().toInt
val b = new Array[Byte](length)
fs.read(b, 0, length)
b
} finally {
fs.close()
}
}
def bytes_=(b: Array[Byte]) {
val fs = new FileOutputStream(file)
try {
fs.write(b, 0, b.length)
} finally {
fs.close()
}
}
}
object RichFile {
implicit def fileToRichFile(file: File) = new RichFile(file)
}
| choffmeister/secpwd | src/main/scala/de/choffmeister/secpwd/utils/RichFile.scala | Scala | apache-2.0 | 922 |
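A hypothetical usage sketch of the enrichment above: importing the implicit conversion from the companion object lets a plain java.io.File be read and written through the text and bytes accessors. The temp file and object name are illustrative.

import java.io.File
import de.choffmeister.secpwd.utils.RichFile._

object RichFileSketch {
  def main(args: Array[String]): Unit = {
    val f = File.createTempFile("richfile", ".txt")
    f.text = "hello, world"                      // writes UTF-8 text via PrintWriter
    assert(f.text == "hello, world")             // reads it back with scala.io.Source
    f.bytes = Array[Byte](1, 2, 3)               // overwrites the file with raw bytes
    assert(f.bytes.toSeq == Seq[Byte](1, 2, 3))
    f.delete()
  }
}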
package nl.malienkolders.htm.admin.worker
import nl.malienkolders.htm.lib.util.Helpers
import java.net.MulticastSocket
import java.net.DatagramPacket
import java.net.InetAddress
import java.nio.ByteBuffer
import nl.malienkolders.htm.lib.model.Viewer
import net.liftweb.mapper.By
import net.liftweb.common.Loggable
object BroadcastListener extends Loggable {
val socket = new MulticastSocket(4446)
def run: Unit = {
val group = Helpers.getMulticastGroup
logger.info("JOINING the multicast group " + group.toString())
socket.joinGroup(group)
logger.info("JOINED the multicast group")
while (true) {
val buffer = new Array[Byte](256)
val packet = new DatagramPacket(buffer, buffer.length)
socket.receive(packet)
val ip = InetAddress.getByAddress(buffer.take(4))
val port = ByteBuffer.wrap(buffer.drop(4).take(4)).getInt()
val name = new String(buffer.drop(8), "UTF-8").trim()
val url = ip.toString().drop(1) + ":" + port
logger.debug("RECEIVED: %s (%s)" format (url, name))
val viewer = Viewer.find(By(Viewer.url, url)).map(viewer =>
viewer.alias(name)).openOr(
Viewer.create.alias(name).url(url))
viewer.save
}
}
}
| hema-tournament-manager/htm | htm-admin/src/main/scala/nl/malienkolders/htm/admin/worker/BroadcastListener.scala | Scala | apache-2.0 | 1,226 |
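A hypothetical sender sketch for the 256-byte announcement packet the listener above expects: 4 bytes of IPv4 address, a 4-byte big-endian port, then the viewer name as UTF-8 (the zero padding is stripped by trim on the receiving side). The multicast group, viewer address, and port values are made up; the real group comes from Helpers.getMulticastGroup.

import java.net.{DatagramPacket, InetAddress, MulticastSocket}
import java.nio.ByteBuffer

object BroadcastSenderSketch {
  def main(args: Array[String]): Unit = {
    val group = InetAddress.getByName("230.0.0.1")             // assumed group address
    val buffer = new Array[Byte](256)
    val bb = ByteBuffer.wrap(buffer)
    bb.put(InetAddress.getByName("192.168.1.10").getAddress)   // 4-byte IPv4 address
    bb.putInt(8080)                                            // 4-byte big-endian port
    bb.put("viewer-1".getBytes("UTF-8"))                       // name; rest of the buffer stays zero
    val socket = new MulticastSocket()
    socket.send(new DatagramPacket(buffer, buffer.length, group, 4446))
    socket.close()
  }
}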
package org.zalando.jsonapi.json
import org.zalando.jsonapi.JsonapiRootObjectWriter
import org.zalando.jsonapi.model.JsonApiObject.StringValue
import org.zalando.jsonapi.model.{ Attribute, RootObject }
import org.zalando.jsonapi.model.RootObject.ResourceObject
/**
* Class Person is used during testing
* @param id
* @param name
*/
case class Person(id: Int, name: String)
object Person {
implicit val personJsonapiRootObjectWriter: JsonapiRootObjectWriter[Person] = new JsonapiRootObjectWriter[Person] {
override def toJsonapi(person: Person) = {
RootObject(data = Some(ResourceObject(
`type` = "person",
id = Some(person.id.toString),
attributes = Some(List(
Attribute("name", StringValue(person.name))
)), links = None)))
}
}
}
| texvex/scala-jsonapi | src/test/scala/org/zalando/jsonapi/json/Person.scala | Scala | mit | 797 |
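A small, hypothetical sketch showing what the writer above produces when invoked directly, outside the rest of the scala-jsonapi serialization stack.

import org.zalando.jsonapi.json.Person

object PersonWriterSketch {
  def main(args: Array[String]): Unit = {
    val root = Person.personJsonapiRootObjectWriter.toJsonapi(Person(1, "Alice"))
    // data holds a ResourceObject of type "person" with id "1" and a single "name" attribute
    println(root.data)
  }
}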
package defw.http
import org.scalatest._
class HttpTest extends FlatSpec {
"Http.get" should "request to http://rss.dailynews.yahoo.co.jp/fc/rss.xml in HTTP/GET" in {
val uri = "http://rss.dailynews.yahoo.co.jp/fc/rss.xml"
val result = Http.get(uri)
assert(result.get._1 === 200)
}
"Http.post" should "request to http://rss.dailynews.yahoo.co.jp/fc/rss.xml in HTTP/POST" in {
val uri = "http://rss.dailynews.yahoo.co.jp/fc/rss.xml"
val parameter = Map("key" -> "value")
val result = Http.post(uri, parameter)
assert(result.get._1 === 200)
}
}
| takahish0306/scala-defw | util/src/test/scala/defw/http/HttpTest.scala | Scala | apache-2.0 | 584 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.yarn
import java.io.{File, IOException}
import java.lang.reflect.{InvocationTargetException, Modifier}
import java.net.{URI, URL}
import java.security.PrivilegedExceptionAction
import java.util.concurrent.{TimeoutException, TimeUnit}
import scala.collection.mutable.HashMap
import scala.concurrent.Promise
import scala.concurrent.duration.Duration
import scala.util.control.NonFatal
import org.apache.commons.lang3.{StringUtils => ComStrUtils}
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.util.StringUtils
import org.apache.hadoop.yarn.api._
import org.apache.hadoop.yarn.api.records._
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException
import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils
import org.apache.hadoop.yarn.util.{ConverterUtils, Records}
import org.apache.spark._
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.deploy.history.HistoryServer
import org.apache.spark.deploy.security.HadoopDelegationTokenManager
import org.apache.spark.deploy.yarn.config._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Streaming.STREAMING_DYN_ALLOCATION_MAX_EXECUTORS
import org.apache.spark.internal.config.UI._
import org.apache.spark.metrics.{MetricsSystem, MetricsSystemInstances}
import org.apache.spark.resource.ResourceProfile
import org.apache.spark.rpc._
import org.apache.spark.scheduler.cluster.{CoarseGrainedSchedulerBackend, YarnSchedulerBackend}
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages._
import org.apache.spark.util._
/**
* Common application master functionality for Spark on Yarn.
*/
private[spark] class ApplicationMaster(
args: ApplicationMasterArguments,
sparkConf: SparkConf,
yarnConf: YarnConfiguration) extends Logging {
// TODO: Currently, task to container is computed once (TaskSetManager) - which need not be
// optimal as more containers are available. Might need to handle this better.
private val appAttemptId =
if (System.getenv(ApplicationConstants.Environment.CONTAINER_ID.name()) != null) {
YarnSparkHadoopUtil.getContainerId.getApplicationAttemptId()
} else {
null
}
private val isClusterMode = args.userClass != null
private val securityMgr = new SecurityManager(sparkConf)
private var metricsSystem: Option[MetricsSystem] = None
private val userClassLoader = {
val classpath = Client.getUserClasspath(sparkConf)
val urls = classpath.map { entry =>
new URL("file:" + new File(entry.getPath()).getAbsolutePath())
}
if (isClusterMode) {
if (Client.isUserClassPathFirst(sparkConf, isDriver = true)) {
new ChildFirstURLClassLoader(urls, Utils.getContextOrSparkClassLoader)
} else {
new MutableURLClassLoader(urls, Utils.getContextOrSparkClassLoader)
}
} else {
new MutableURLClassLoader(urls, Utils.getContextOrSparkClassLoader)
}
}
private val client = new YarnRMClient()
// Default to twice the number of executors (twice the maximum number of executors if dynamic
// allocation is enabled), with a minimum of 3.
private val maxNumExecutorFailures = {
val effectiveNumExecutors =
if (Utils.isStreamingDynamicAllocationEnabled(sparkConf)) {
sparkConf.get(STREAMING_DYN_ALLOCATION_MAX_EXECUTORS)
} else if (Utils.isDynamicAllocationEnabled(sparkConf)) {
sparkConf.get(DYN_ALLOCATION_MAX_EXECUTORS)
} else {
sparkConf.get(EXECUTOR_INSTANCES).getOrElse(0)
}
// By default, effectiveNumExecutors is Int.MaxValue if dynamic allocation is enabled. We need
// to avoid integer overflow here.
val defaultMaxNumExecutorFailures = math.max(3,
if (effectiveNumExecutors > Int.MaxValue / 2) Int.MaxValue else (2 * effectiveNumExecutors))
sparkConf.get(MAX_EXECUTOR_FAILURES).getOrElse(defaultMaxNumExecutorFailures)
}
@volatile private var exitCode = 0
@volatile private var unregistered = false
@volatile private var finished = false
@volatile private var finalStatus = getDefaultFinalStatus
@volatile private var finalMsg: String = ""
@volatile private var userClassThread: Thread = _
@volatile private var reporterThread: Thread = _
@volatile private var allocator: YarnAllocator = _
// A flag to check whether user has initialized spark context
@volatile private var registered = false
// Lock for controlling the allocator (heartbeat) thread.
private val allocatorLock = new Object()
// Steady state heartbeat interval. We want to be reasonably responsive without causing too many
// requests to RM.
private val heartbeatInterval = {
// Ensure that progress is sent before YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS elapses.
val expiryInterval = yarnConf.getInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 120000)
math.max(0, math.min(expiryInterval / 2, sparkConf.get(RM_HEARTBEAT_INTERVAL)))
}
// Initial wait interval before allocator poll, to allow for quicker ramp up when executors are
// being requested.
private val initialAllocationInterval = math.min(heartbeatInterval,
sparkConf.get(INITIAL_HEARTBEAT_INTERVAL))
// Next wait interval before allocator poll.
private var nextAllocationInterval = initialAllocationInterval
// In cluster mode, used to tell the AM when the user's SparkContext has been initialized.
private val sparkContextPromise = Promise[SparkContext]()
/**
* Load the list of localized files set by the client, used when launching executors. This should
* be called in a context where the needed credentials to access HDFS are available.
*/
private def prepareLocalResources(distCacheConf: SparkConf): Map[String, LocalResource] = {
logInfo("Preparing Local resources")
val resources = HashMap[String, LocalResource]()
def setupDistributedCache(
file: String,
rtype: LocalResourceType,
timestamp: String,
size: String,
vis: String): Unit = {
val uri = new URI(file)
val amJarRsrc = Records.newRecord(classOf[LocalResource])
amJarRsrc.setType(rtype)
amJarRsrc.setVisibility(LocalResourceVisibility.valueOf(vis))
amJarRsrc.setResource(ConverterUtils.getYarnUrlFromURI(uri))
amJarRsrc.setTimestamp(timestamp.toLong)
amJarRsrc.setSize(size.toLong)
val fileName = Option(uri.getFragment()).getOrElse(new Path(uri).getName())
resources(fileName) = amJarRsrc
}
val distFiles = distCacheConf.get(CACHED_FILES)
val fileSizes = distCacheConf.get(CACHED_FILES_SIZES)
val timeStamps = distCacheConf.get(CACHED_FILES_TIMESTAMPS)
val visibilities = distCacheConf.get(CACHED_FILES_VISIBILITIES)
val resTypes = distCacheConf.get(CACHED_FILES_TYPES)
for (i <- 0 to distFiles.size - 1) {
val resType = LocalResourceType.valueOf(resTypes(i))
setupDistributedCache(distFiles(i), resType, timeStamps(i).toString, fileSizes(i).toString,
visibilities(i))
}
// Distribute the conf archive to executors.
distCacheConf.get(CACHED_CONF_ARCHIVE).foreach { path =>
val uri = new URI(path)
val fs = FileSystem.get(uri, yarnConf)
val status = fs.getFileStatus(new Path(uri))
// SPARK-16080: Make sure to use the correct name for the destination when distributing the
// conf archive to executors.
val destUri = new URI(uri.getScheme(), uri.getRawSchemeSpecificPart(),
Client.LOCALIZED_CONF_DIR)
setupDistributedCache(destUri.toString(), LocalResourceType.ARCHIVE,
status.getModificationTime().toString, status.getLen.toString,
LocalResourceVisibility.PRIVATE.name())
}
resources.toMap
}
final def run(): Int = {
try {
val attemptID = if (isClusterMode) {
// Set the web ui port to be ephemeral for yarn so we don't conflict with
// other spark processes running on the same box
System.setProperty(UI_PORT.key, "0")
// Set the master and deploy mode property to match the requested mode.
System.setProperty("spark.master", "yarn")
System.setProperty(SUBMIT_DEPLOY_MODE.key, "cluster")
// Set this internal configuration if it is running on cluster mode, this
// configuration will be checked in SparkContext to avoid misuse of yarn cluster mode.
System.setProperty("spark.yarn.app.id", appAttemptId.getApplicationId().toString())
Option(appAttemptId.getAttemptId.toString)
} else {
None
}
new CallerContext(
"APPMASTER", sparkConf.get(APP_CALLER_CONTEXT),
Option(appAttemptId.getApplicationId.toString), attemptID).setCurrentContext()
logInfo("ApplicationAttemptId: " + appAttemptId)
// This shutdown hook should run *after* the SparkContext is shut down.
val priority = ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY - 1
ShutdownHookManager.addShutdownHook(priority) { () =>
val maxAppAttempts = client.getMaxRegAttempts(sparkConf, yarnConf)
val isLastAttempt = appAttemptId.getAttemptId() >= maxAppAttempts
if (!finished) {
// The default state of ApplicationMaster is failed if it is invoked by shut down hook.
// This behavior is different compared to 1.x version.
// If user application is exited ahead of time by calling System.exit(N), here mark
// this application as failed with EXIT_EARLY. For a good shutdown, user shouldn't call
// System.exit(0) to terminate the application.
finish(finalStatus,
ApplicationMaster.EXIT_EARLY,
"Shutdown hook called before final status was reported.")
}
if (!unregistered) {
// we only want to unregister if we don't want the RM to retry
if (finalStatus == FinalApplicationStatus.SUCCEEDED || isLastAttempt) {
unregister(finalStatus, finalMsg)
cleanupStagingDir(new Path(System.getenv("SPARK_YARN_STAGING_DIR")))
}
}
}
if (isClusterMode) {
runDriver()
} else {
runExecutorLauncher()
}
} catch {
case e: Exception =>
// catch everything else if not specifically handled
logError("Uncaught exception: ", e)
finish(FinalApplicationStatus.FAILED,
ApplicationMaster.EXIT_UNCAUGHT_EXCEPTION,
"Uncaught exception: " + StringUtils.stringifyException(e))
} finally {
try {
metricsSystem.foreach { ms =>
ms.report()
ms.stop()
}
} catch {
case e: Exception =>
logWarning("Exception during stopping of the metric system: ", e)
}
}
exitCode
}
def runUnmanaged(
clientRpcEnv: RpcEnv,
appAttemptId: ApplicationAttemptId,
stagingDir: Path,
cachedResourcesConf: SparkConf): Unit = {
try {
new CallerContext(
"APPMASTER", sparkConf.get(APP_CALLER_CONTEXT),
Option(appAttemptId.getApplicationId.toString), None).setCurrentContext()
val driverRef = clientRpcEnv.setupEndpointRef(
RpcAddress(sparkConf.get(DRIVER_HOST_ADDRESS),
sparkConf.get(DRIVER_PORT)),
YarnSchedulerBackend.ENDPOINT_NAME)
// The client-mode AM doesn't listen for incoming connections, so report an invalid port.
registerAM(Utils.localHostName, -1, sparkConf,
sparkConf.getOption("spark.driver.appUIAddress"), appAttemptId)
addAmIpFilter(Some(driverRef), ProxyUriUtils.getPath(appAttemptId.getApplicationId))
createAllocator(driverRef, sparkConf, clientRpcEnv, appAttemptId, cachedResourcesConf)
reporterThread.join()
} catch {
case e: Exception =>
// catch everything else if not specifically handled
logError("Uncaught exception: ", e)
finish(FinalApplicationStatus.FAILED,
ApplicationMaster.EXIT_UNCAUGHT_EXCEPTION,
"Uncaught exception: " + StringUtils.stringifyException(e))
if (!unregistered) {
unregister(finalStatus, finalMsg)
cleanupStagingDir(stagingDir)
}
} finally {
try {
metricsSystem.foreach { ms =>
ms.report()
ms.stop()
}
} catch {
case e: Exception =>
logWarning("Exception during stopping of the metric system: ", e)
}
}
}
def stopUnmanaged(stagingDir: Path): Unit = {
if (!finished) {
finish(FinalApplicationStatus.SUCCEEDED, ApplicationMaster.EXIT_SUCCESS)
}
if (!unregistered) {
unregister(finalStatus, finalMsg)
cleanupStagingDir(stagingDir)
}
}
/**
* Set the default final application status for client mode to UNDEFINED to handle
* if YARN HA restarts the application so that it properly retries. In cluster mode the
* default is FAILED, so the application is only reported as SUCCEEDED once the user
* class completes normally and finish() is called with that status.
*/
final def getDefaultFinalStatus(): FinalApplicationStatus = {
if (isClusterMode) {
FinalApplicationStatus.FAILED
} else {
FinalApplicationStatus.UNDEFINED
}
}
/**
* unregister is used to completely unregister the application from the ResourceManager.
* This means the ResourceManager will not retry the application attempt on your behalf if
* a failure occurred.
*/
final def unregister(status: FinalApplicationStatus, diagnostics: String = null): Unit = {
synchronized {
if (registered && !unregistered) {
logInfo(s"Unregistering ApplicationMaster with $status" +
Option(diagnostics).map(msg => s" (diag message: $msg)").getOrElse(""))
unregistered = true
client.unregister(status, Option(diagnostics).getOrElse(""))
}
}
}
final def finish(status: FinalApplicationStatus, code: Int, msg: String = null): Unit = {
synchronized {
if (!finished) {
val inShutdown = ShutdownHookManager.inShutdown()
if (registered || !isClusterMode) {
exitCode = code
finalStatus = status
} else {
finalStatus = FinalApplicationStatus.FAILED
exitCode = ApplicationMaster.EXIT_SC_NOT_INITED
}
logInfo(s"Final app status: $finalStatus, exitCode: $exitCode" +
Option(msg).map(msg => s", (reason: $msg)").getOrElse(""))
finalMsg = ComStrUtils.abbreviate(msg, sparkConf.get(AM_FINAL_MSG_LIMIT).toInt)
finished = true
if (!inShutdown && Thread.currentThread() != reporterThread && reporterThread != null) {
logDebug("shutting down reporter thread")
reporterThread.interrupt()
}
if (!inShutdown && Thread.currentThread() != userClassThread && userClassThread != null) {
logDebug("shutting down user thread")
userClassThread.interrupt()
}
}
}
}
private def sparkContextInitialized(sc: SparkContext) = {
sparkContextPromise.synchronized {
// Notify runDriver function that SparkContext is available
sparkContextPromise.success(sc)
// Pause the user class thread so that initialization in the runDriver function can complete.
sparkContextPromise.wait()
}
}
private def resumeDriver(): Unit = {
// Once initialization in runDriver has happened, the user class thread has to be resumed.
sparkContextPromise.synchronized {
sparkContextPromise.notify()
}
}
private def registerAM(
host: String,
port: Int,
_sparkConf: SparkConf,
uiAddress: Option[String],
appAttempt: ApplicationAttemptId): Unit = {
val appId = appAttempt.getApplicationId().toString()
val attemptId = appAttempt.getAttemptId().toString()
val historyAddress = ApplicationMaster
.getHistoryServerAddress(_sparkConf, yarnConf, appId, attemptId)
client.register(host, port, yarnConf, _sparkConf, uiAddress, historyAddress)
registered = true
}
private def createAllocator(
driverRef: RpcEndpointRef,
_sparkConf: SparkConf,
rpcEnv: RpcEnv,
appAttemptId: ApplicationAttemptId,
distCacheConf: SparkConf): Unit = {
// In client mode, the AM may be restarting after delegation tokens have reached their TTL. So
// always contact the driver to get the current set of valid tokens, so that local resources can
// be initialized below.
if (!isClusterMode) {
val tokens = driverRef.askSync[Array[Byte]](RetrieveDelegationTokens)
if (tokens != null) {
SparkHadoopUtil.get.addDelegationTokens(tokens, _sparkConf)
}
}
val appId = appAttemptId.getApplicationId().toString()
val driverUrl = RpcEndpointAddress(driverRef.address.host, driverRef.address.port,
CoarseGrainedSchedulerBackend.ENDPOINT_NAME).toString
val localResources = prepareLocalResources(distCacheConf)
// Before we initialize the allocator, let's log the information about how executors will
// be run up front, to avoid printing this out for every single executor being launched.
// Use placeholders for information that changes such as executor IDs.
logInfo {
val executorMemory = _sparkConf.get(EXECUTOR_MEMORY).toInt
val executorCores = _sparkConf.get(EXECUTOR_CORES)
val dummyRunner = new ExecutorRunnable(None, yarnConf, _sparkConf, driverUrl, "<executorId>",
"<hostname>", executorMemory, executorCores, appId, securityMgr, localResources,
ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID)
dummyRunner.launchContextDebugInfo()
}
allocator = client.createAllocator(
yarnConf,
_sparkConf,
appAttemptId,
driverUrl,
driverRef,
securityMgr,
localResources)
// Initialize the AM endpoint *after* the allocator has been initialized. This ensures
// that when the driver sends an initial executor request (e.g. after an AM restart),
// the allocator is ready to service requests.
rpcEnv.setupEndpoint("YarnAM", new AMEndpoint(rpcEnv, driverRef))
allocator.allocateResources()
val ms = MetricsSystem.createMetricsSystem(MetricsSystemInstances.APPLICATION_MASTER,
sparkConf, securityMgr)
val prefix = _sparkConf.get(YARN_METRICS_NAMESPACE).getOrElse(appId)
ms.registerSource(new ApplicationMasterSource(prefix, allocator))
// do not register static sources in this case as per SPARK-25277
ms.start(false)
metricsSystem = Some(ms)
reporterThread = launchReporterThread()
}
private def runDriver(): Unit = {
addAmIpFilter(None, System.getenv(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV))
userClassThread = startUserApplication()
// This a bit hacky, but we need to wait until the spark.driver.port property has
// been set by the Thread executing the user class.
logInfo("Waiting for spark context initialization...")
val totalWaitTime = sparkConf.get(AM_MAX_WAIT_TIME)
try {
val sc = ThreadUtils.awaitResult(sparkContextPromise.future,
Duration(totalWaitTime, TimeUnit.MILLISECONDS))
if (sc != null) {
val rpcEnv = sc.env.rpcEnv
val userConf = sc.getConf
val host = userConf.get(DRIVER_HOST_ADDRESS)
val port = userConf.get(DRIVER_PORT)
registerAM(host, port, userConf, sc.ui.map(_.webUrl), appAttemptId)
val driverRef = rpcEnv.setupEndpointRef(
RpcAddress(host, port),
YarnSchedulerBackend.ENDPOINT_NAME)
createAllocator(driverRef, userConf, rpcEnv, appAttemptId, distCacheConf)
} else {
// Sanity check; should never happen in normal operation, since sc should only be null
// if the user app did not create a SparkContext.
throw new IllegalStateException("User did not initialize spark context!")
}
resumeDriver()
userClassThread.join()
} catch {
case e: SparkException if e.getCause().isInstanceOf[TimeoutException] =>
logError(
s"SparkContext did not initialize after waiting for $totalWaitTime ms. " +
"Please check earlier log output for errors. Failing the application.")
finish(FinalApplicationStatus.FAILED,
ApplicationMaster.EXIT_SC_NOT_INITED,
"Timed out waiting for SparkContext.")
} finally {
resumeDriver()
}
}
private def runExecutorLauncher(): Unit = {
val hostname = Utils.localHostName
val amCores = sparkConf.get(AM_CORES)
val rpcEnv = RpcEnv.create("sparkYarnAM", hostname, hostname, -1, sparkConf, securityMgr,
amCores, true)
// The client-mode AM doesn't listen for incoming connections, so report an invalid port.
registerAM(hostname, -1, sparkConf, sparkConf.get(DRIVER_APP_UI_ADDRESS), appAttemptId)
// The driver should be up and listening, so unlike cluster mode, just try to connect to it
// with no waiting or retrying.
val (driverHost, driverPort) = Utils.parseHostPort(args.userArgs(0))
val driverRef = rpcEnv.setupEndpointRef(
RpcAddress(driverHost, driverPort),
YarnSchedulerBackend.ENDPOINT_NAME)
addAmIpFilter(Some(driverRef),
System.getenv(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV))
createAllocator(driverRef, sparkConf, rpcEnv, appAttemptId, distCacheConf)
// In client mode the actor will stop the reporter thread.
reporterThread.join()
}
private def allocationThreadImpl(): Unit = {
// The number of failures in a row until the allocation thread gives up.
val reporterMaxFailures = sparkConf.get(MAX_REPORTER_THREAD_FAILURES)
var failureCount = 0
while (!finished) {
try {
if (allocator.getNumExecutorsFailed >= maxNumExecutorFailures) {
finish(FinalApplicationStatus.FAILED,
ApplicationMaster.EXIT_MAX_EXECUTOR_FAILURES,
s"Max number of executor failures ($maxNumExecutorFailures) reached")
} else if (allocator.isAllNodeBlacklisted) {
finish(FinalApplicationStatus.FAILED,
ApplicationMaster.EXIT_MAX_EXECUTOR_FAILURES,
"Due to executor failures all available nodes are blacklisted")
} else {
logDebug("Sending progress")
allocator.allocateResources()
}
failureCount = 0
} catch {
case i: InterruptedException => // do nothing
case e: ApplicationAttemptNotFoundException =>
failureCount += 1
logError("Exception from Reporter thread.", e)
finish(FinalApplicationStatus.FAILED, ApplicationMaster.EXIT_REPORTER_FAILURE,
e.getMessage)
case e: Throwable =>
failureCount += 1
if (!NonFatal(e)) {
finish(FinalApplicationStatus.FAILED,
ApplicationMaster.EXIT_REPORTER_FAILURE,
"Fatal exception: " + StringUtils.stringifyException(e))
} else if (failureCount >= reporterMaxFailures) {
finish(FinalApplicationStatus.FAILED,
ApplicationMaster.EXIT_REPORTER_FAILURE, "Exception was thrown " +
s"$failureCount time(s) from Reporter thread.")
} else {
logWarning(s"Reporter thread fails $failureCount time(s) in a row.", e)
}
}
try {
val numPendingAllocate = allocator.getNumContainersPendingAllocate
var sleepStartNs = 0L
var sleepInterval = 200L // ms
allocatorLock.synchronized {
sleepInterval =
if (numPendingAllocate > 0 || allocator.getNumPendingLossReasonRequests > 0) {
val currentAllocationInterval =
math.min(heartbeatInterval, nextAllocationInterval)
nextAllocationInterval = currentAllocationInterval * 2 // avoid overflow
currentAllocationInterval
} else {
nextAllocationInterval = initialAllocationInterval
heartbeatInterval
}
sleepStartNs = System.nanoTime()
allocatorLock.wait(sleepInterval)
}
val sleepDuration = System.nanoTime() - sleepStartNs
if (sleepDuration < TimeUnit.MILLISECONDS.toNanos(sleepInterval)) {
// log when sleep is interrupted
logDebug(s"Number of pending allocations is $numPendingAllocate. " +
s"Slept for $sleepDuration/$sleepInterval ms.")
// if sleep was less than the minimum interval, sleep for the rest of it
val toSleep = math.max(0, initialAllocationInterval - sleepDuration)
if (toSleep > 0) {
logDebug(s"Going back to sleep for $toSleep ms")
// use Thread.sleep instead of allocatorLock.wait. there is no need to be woken up
// by the methods that signal allocatorLock because this is just finishing the min
// sleep interval, which should happen even if this is signalled again.
Thread.sleep(toSleep)
}
} else {
logDebug(s"Number of pending allocations is $numPendingAllocate. " +
s"Slept for $sleepDuration/$sleepInterval.")
}
} catch {
case e: InterruptedException =>
}
}
}
private def launchReporterThread(): Thread = {
val t = new Thread {
override def run(): Unit = {
try {
allocationThreadImpl()
} finally {
allocator.stop()
}
}
}
t.setDaemon(true)
t.setName("Reporter")
t.start()
logInfo(s"Started progress reporter thread with (heartbeat : $heartbeatInterval, " +
s"initial allocation : $initialAllocationInterval) intervals")
t
}
private def distCacheConf(): SparkConf = {
val distCacheConf = new SparkConf(false)
if (args.distCacheConf != null) {
Utils.getPropertiesFromFile(args.distCacheConf).foreach { case (k, v) =>
distCacheConf.set(k, v)
}
}
distCacheConf
}
/**
* Clean up the staging directory.
*/
private def cleanupStagingDir(stagingDirPath: Path): Unit = {
try {
val preserveFiles = sparkConf.get(PRESERVE_STAGING_FILES)
if (!preserveFiles) {
logInfo("Deleting staging directory " + stagingDirPath)
val fs = stagingDirPath.getFileSystem(yarnConf)
fs.delete(stagingDirPath, true)
}
} catch {
case ioe: IOException =>
logError("Failed to cleanup staging dir " + stagingDirPath, ioe)
}
}
/** Add the Yarn IP filter that is required for properly securing the UI. */
private def addAmIpFilter(driver: Option[RpcEndpointRef], proxyBase: String) = {
val amFilter = "org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter"
val params = client.getAmIpFilterParams(yarnConf, proxyBase)
driver match {
case Some(d) =>
d.send(AddWebUIFilter(amFilter, params.toMap, proxyBase))
case None =>
System.setProperty(UI_FILTERS.key, amFilter)
params.foreach { case (k, v) => System.setProperty(s"spark.$amFilter.param.$k", v) }
}
}
/**
* Start the user class, which contains the spark driver, in a separate Thread.
* If the main routine exits cleanly or exits with System.exit(N) for any N
* we assume it was successful, for all other cases we assume failure.
*
* Returns the user thread that was started.
*/
private def startUserApplication(): Thread = {
logInfo("Starting the user application in a separate Thread")
var userArgs = args.userArgs
if (args.primaryPyFile != null && args.primaryPyFile.endsWith(".py")) {
// When running pyspark, the app is run using PythonRunner. The second argument is the list
// of files to add to PYTHONPATH, which Client.scala already handles, so it's empty.
userArgs = Seq(args.primaryPyFile, "") ++ userArgs
}
if (args.primaryRFile != null &&
(args.primaryRFile.endsWith(".R") || args.primaryRFile.endsWith(".r"))) {
// TODO(davies): add R dependencies here
}
val mainMethod = userClassLoader.loadClass(args.userClass)
.getMethod("main", classOf[Array[String]])
val userThread = new Thread {
override def run(): Unit = {
try {
if (!Modifier.isStatic(mainMethod.getModifiers)) {
logError(s"Could not find static main method in object ${args.userClass}")
finish(FinalApplicationStatus.FAILED, ApplicationMaster.EXIT_EXCEPTION_USER_CLASS)
} else {
mainMethod.invoke(null, userArgs.toArray)
finish(FinalApplicationStatus.SUCCEEDED, ApplicationMaster.EXIT_SUCCESS)
logDebug("Done running user class")
}
} catch {
case e: InvocationTargetException =>
e.getCause match {
case _: InterruptedException =>
// Reporter thread can interrupt to stop user class
case SparkUserAppException(exitCode) =>
val msg = s"User application exited with status $exitCode"
logError(msg)
finish(FinalApplicationStatus.FAILED, exitCode, msg)
case cause: Throwable =>
logError("User class threw exception: " + cause, cause)
finish(FinalApplicationStatus.FAILED,
ApplicationMaster.EXIT_EXCEPTION_USER_CLASS,
"User class threw exception: " + StringUtils.stringifyException(cause))
}
sparkContextPromise.tryFailure(e.getCause())
} finally {
// Notify the thread waiting for the SparkContext, in case the application did not
// instantiate one. This will do nothing when the user code instantiates a SparkContext
// (with the correct master), or when the user code throws an exception (due to the
// tryFailure above).
sparkContextPromise.trySuccess(null)
}
}
}
userThread.setContextClassLoader(userClassLoader)
userThread.setName("Driver")
userThread.start()
userThread
}
private def resetAllocatorInterval(): Unit = allocatorLock.synchronized {
nextAllocationInterval = initialAllocationInterval
allocatorLock.notifyAll()
}
/**
* An [[RpcEndpoint]] that communicates with the driver's scheduler backend.
*/
private class AMEndpoint(override val rpcEnv: RpcEnv, driver: RpcEndpointRef)
extends RpcEndpoint with Logging {
override def onStart(): Unit = {
driver.send(RegisterClusterManager(self))
}
override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
case r: RequestExecutors =>
Option(allocator) match {
case Some(a) =>
if (a.requestTotalExecutorsWithPreferredLocalities(
r.resourceProfileToTotalExecs,
r.numLocalityAwareTasksPerResourceProfileId,
r.hostToLocalTaskCount,
r.nodeBlacklist)) {
resetAllocatorInterval()
}
context.reply(true)
case None =>
logWarning("Container allocator is not ready to request executors yet.")
context.reply(false)
}
case KillExecutors(executorIds) =>
logInfo(s"Driver requested to kill executor(s) ${executorIds.mkString(", ")}.")
Option(allocator) match {
case Some(a) => executorIds.foreach(a.killExecutor)
case None => logWarning("Container allocator is not ready to kill executors yet.")
}
context.reply(true)
case GetExecutorLossReason(eid) =>
Option(allocator) match {
case Some(a) =>
a.enqueueGetLossReasonRequest(eid, context)
resetAllocatorInterval()
case None =>
logWarning("Container allocator is not ready to find executor loss reasons yet.")
}
case UpdateDelegationTokens(tokens) =>
SparkHadoopUtil.get.addDelegationTokens(tokens, sparkConf)
}
override def onDisconnected(remoteAddress: RpcAddress): Unit = {
// In cluster mode or unmanaged am case, do not rely on the disassociated event to exit
// This avoids potentially reporting incorrect exit codes if the driver fails
if (!(isClusterMode || sparkConf.get(YARN_UNMANAGED_AM))) {
logInfo(s"Driver terminated or disconnected! Shutting down. $remoteAddress")
finish(FinalApplicationStatus.SUCCEEDED, ApplicationMaster.EXIT_SUCCESS)
}
}
}
}
object ApplicationMaster extends Logging {
// exit codes for different causes, no reason behind the values
private val EXIT_SUCCESS = 0
private val EXIT_UNCAUGHT_EXCEPTION = 10
private val EXIT_MAX_EXECUTOR_FAILURES = 11
private val EXIT_REPORTER_FAILURE = 12
private val EXIT_SC_NOT_INITED = 13
private val EXIT_SECURITY = 14
private val EXIT_EXCEPTION_USER_CLASS = 15
private val EXIT_EARLY = 16
private var master: ApplicationMaster = _
def main(args: Array[String]): Unit = {
SignalUtils.registerLogger(log)
val amArgs = new ApplicationMasterArguments(args)
val sparkConf = new SparkConf()
if (amArgs.propertiesFile != null) {
Utils.getPropertiesFromFile(amArgs.propertiesFile).foreach { case (k, v) =>
sparkConf.set(k, v)
}
}
// Set system properties for each config entry. This covers two use cases:
// - The default configuration stored by the SparkHadoopUtil class
// - The user application creating a new SparkConf in cluster mode
//
// Both cases create a new SparkConf object which reads these configs from system properties.
sparkConf.getAll.foreach { case (k, v) =>
sys.props(k) = v
}
val yarnConf = new YarnConfiguration(SparkHadoopUtil.newConfiguration(sparkConf))
master = new ApplicationMaster(amArgs, sparkConf, yarnConf)
val ugi = sparkConf.get(PRINCIPAL) match {
// We only need to log in with the keytab in cluster mode. In client mode, the driver
// handles the user keytab.
case Some(principal) if master.isClusterMode =>
val originalCreds = UserGroupInformation.getCurrentUser().getCredentials()
SparkHadoopUtil.get.loginUserFromKeytab(principal, sparkConf.get(KEYTAB).orNull)
val newUGI = UserGroupInformation.getCurrentUser()
if (master.appAttemptId == null || master.appAttemptId.getAttemptId > 1) {
// Re-obtain delegation tokens if this is not a first attempt, as they might be outdated
// as of now. Add the fresh tokens on top of the original user's credentials (overwrite).
// Set the context class loader so that the token manager has access to jars
// distributed by the user.
Utils.withContextClassLoader(master.userClassLoader) {
val credentialManager = new HadoopDelegationTokenManager(sparkConf, yarnConf, null)
credentialManager.obtainDelegationTokens(originalCreds)
}
}
// Transfer the original user's tokens to the new user, since it may contain needed tokens
// (such as those used to connect to YARN).
newUGI.addCredentials(originalCreds)
newUGI
case _ =>
SparkHadoopUtil.get.createSparkUser()
}
ugi.doAs(new PrivilegedExceptionAction[Unit]() {
override def run(): Unit = System.exit(master.run())
})
}
private[spark] def sparkContextInitialized(sc: SparkContext): Unit = {
master.sparkContextInitialized(sc)
}
private[spark] def getAttemptId(): ApplicationAttemptId = {
master.appAttemptId
}
private[spark] def getHistoryServerAddress(
sparkConf: SparkConf,
yarnConf: YarnConfiguration,
appId: String,
attemptId: String): String = {
sparkConf.get(HISTORY_SERVER_ADDRESS)
.map { text => SparkHadoopUtil.get.substituteHadoopVariables(text, yarnConf) }
.map { address => s"${address}${HistoryServer.UI_PATH_PREFIX}/${appId}/${attemptId}" }
.getOrElse("")
}
}
/**
* This object does not provide any special functionality. It exists so that it's easy to tell
* apart the client-mode AM from the cluster-mode AM when using tools such as ps or jps.
*/
object ExecutorLauncher {
def main(args: Array[String]): Unit = {
ApplicationMaster.main(args)
}
}
| zuotingbing/spark | resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala | Scala | apache-2.0 | 37,140 |
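The reporter thread in the ApplicationMaster above ramps its poll interval up exponentially while container requests are pending and falls back to the steady heartbeat once nothing is pending. The standalone, editor-added sketch below (names and numbers are illustrative, not Spark's defaults) reproduces just that backoff arithmetic.

object AllocationBackoffSketch {
  def main(args: Array[String]): Unit = {
    val heartbeatInterval = 3000L          // ms, analogue of the RM heartbeat interval
    val initialAllocationInterval = 200L   // ms, analogue of the initial heartbeat interval
    var nextAllocationInterval = initialAllocationInterval

    def nextSleep(pendingAllocations: Int): Long =
      if (pendingAllocations > 0) {
        val current = math.min(heartbeatInterval, nextAllocationInterval)
        nextAllocationInterval = current * 2
        current
      } else {
        nextAllocationInterval = initialAllocationInterval
        heartbeatInterval
      }

    // While executors are still being requested: 200, 400, 800, 1600, 3000, 3000
    println((1 to 6).map(_ => nextSleep(pendingAllocations = 5)).mkString(", "))
    // Once nothing is pending, fall back to the steady heartbeat and reset the ramp.
    println(nextSleep(pendingAllocations = 0))
  }
}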
package HandlerSocket.Protocol
import java.io.ByteArrayOutputStream
import akka.util.ByteString
import java.util
import org.slf4j.LoggerFactory
class TokenEnumerator(input:ByteString) {
private var v:ByteString = input
def filter(bytes:ByteString):String = {
val out = new ByteArrayOutputStream
var convert = false
bytes.foreach(ch =>{
if (ch == 0x01) convert = true
else if (convert) {
convert = false
out.write(ch^0x40)
} else out.write(ch)
})
new String(out.toByteArray, ResultDecoder.encoding)
}
def readToken(): String = {
val tokenBytes = v.takeWhile(ch => ch != '\t')
v = v.drop(tokenBytes.size + 1) // skip the '\t' and slide forward
filter(tokenBytes)
}
def left = v
}
object ResultDecoder {
val encoding = "utf-8"
val log = LoggerFactory.getLogger("HandlerSocket")
def assembly(input:ByteString):HsResult = {
val source = new TokenEnumerator(input)
val errorCode = source.readToken().toInt
val columnNumber = source.readToken().toInt
if (source.left.size<=1) HsResult(errorCode, columnNumber, Array[String]())
else {
val colList = new util.ArrayList[String]()
// Fix: changed from >1 to >=1 because a single-character result ("0") was being dropped
while (source.left.size>=1) colList.add(source.readToken())
val columns:Array[String] = new Array[String](colList.size())
colList.toArray[String](columns)
HsResult(errorCode, columnNumber,columns)
}
}
}
| Mevolutrix/MevoFramework | Utils/src/main/scala/HandlerSocket/Protocol/ResultDecoder.scala | Scala | apache-2.0 | 1,493 |
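The filter method above undoes HandlerSocket's byte escaping, where a 0x01 prefix marks the next byte as the original value XOR 0x40. The hypothetical encoder below sketches the sending side of the same scheme so the round trip is explicit; it is not part of the MevoFramework code.

import akka.util.ByteString

object TokenEscapeSketch {
  def escape(raw: ByteString): ByteString = {
    val builder = ByteString.newBuilder
    raw.foreach { ch =>
      if (ch >= 0x00 && ch <= 0x0f) {        // control bytes are escaped on the wire
        builder.putByte(0x01)
        builder.putByte((ch ^ 0x40).toByte)
      } else builder.putByte(ch)
    }
    builder.result()
  }

  def main(args: Array[String]): Unit = {
    val escaped = escape(ByteString(Array[Byte](0x00, 0x41, 0x0a)))   // NUL and '\n' escaped, 'A' untouched
    assert(escaped == ByteString(Array[Byte](0x01, 0x40, 0x41, 0x01, 0x4a)))
  }
}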
package com.atomist.param
import com.fasterxml.jackson.core.JsonParser
import com.fasterxml.jackson.databind.{DeserializationContext, JsonDeserializer, JsonNode}
trait ParameterValue {
def getName: String
def getValue: AnyRef
}
object ParameterValueDeserializer extends JsonDeserializer[ParameterValue] {
override def deserialize(jsonParser: JsonParser, deserializationContext: DeserializationContext): ParameterValue = {
val node: JsonNode = jsonParser.getCodec.readTree(jsonParser)
val name = node.get("name").asText
if (name == null) throw new IllegalArgumentException("Expected 'name' parameter of TemplateParameterValue")
val value = node.get("value").asText
if (value == null) throw new IllegalArgumentException("Expected 'value' parameter of TemplateParameterValue")
SimpleParameterValue(name, value)
}
}
| atomist/rug | src/main/scala/com/atomist/param/ParameterValue.scala | Scala | gpl-3.0 | 853 |
package net.xylophones.planetoid.game.logic
import net.xylophones.planetoid.game.maths.Vector2D
import net.xylophones.planetoid.game.model.GameEvent.GameEvent
import net.xylophones.planetoid.game.model._
import org.junit.runner.RunWith
import org.scalatest.mock.MockitoSugar
import org.scalatest.{Matchers, FunSuite}
import org.scalatest.junit.JUnitRunner
import net.xylophones.planetoid.game.logic.ModelTestObjectMother._
@RunWith(classOf[JUnitRunner])
class ExplosionHandlingGameUpdaterTest extends FunSuite with Matchers with MockitoSugar {
val underTest = new ExplosionHandlingGameUpdater
test("explosions created for both players, round countdown created and explosion event created") {
// given
val events = Set(GameEvent.PlayerLoseLife, GameEvent.Player1LoseLife, GameEvent.Player2LoseLife)
val player1 = createDummyPlayerAtPosition(Vector2D(10, 10))
val player2 = createDummyPlayerAtPosition(Vector2D(100, 100))
val players = Players(player1, player2)
val model = GameModel(createDummyPlanet(), players)
val gameResults: GameModelUpdateResult = new GameModelUpdateResult(model, events)
// when
val results = underTest.update(gameResults, new GamePhysics, null)
// then
results.model.explosions.size shouldBe 2
val explosion1 = Explosion(player1.rocket.position, player1.rocket.radius)
val explosion2 = Explosion(player2.rocket.position, player2.rocket.radius)
results.model.explosions should contain (explosion1)
results.model.explosions should contain (explosion2)
results.model.roundEndTimer.isDefined shouldBe true
results.events should contain(GameEvent.Explosion)
}
test("explosions NOT created when no lives lost") {
// given
val events: Set[GameEvent] = Set.empty
val model = GameModel(createDummyPlanet(), createDummyPlayers())
val gameResults: GameModelUpdateResult = new GameModelUpdateResult(model, events)
// when
val results = underTest.update(gameResults, new GamePhysics, null)
// then
results.model.explosions.size shouldBe 0
}
}
| wjsrobertson/planetoid | game/src/test/scala/net/xylophones/planetoid/game/logic/ExplosionHandlingGameUpdaterTest.scala | Scala | apache-2.0 | 2,076 |
package mesosphere.marathon
package raml
trait ConstraintConversion {
implicit val constraintRamlReader: Reads[Constraint, Protos.Constraint] = Reads { raml =>
val operator = raml.operator match {
case ConstraintOperator.Unique => Protos.Constraint.Operator.UNIQUE
case ConstraintOperator.Cluster => Protos.Constraint.Operator.CLUSTER
case ConstraintOperator.GroupBy => Protos.Constraint.Operator.GROUP_BY
case ConstraintOperator.Like => Protos.Constraint.Operator.LIKE
case ConstraintOperator.Unlike => Protos.Constraint.Operator.UNLIKE
case ConstraintOperator.MaxPer => Protos.Constraint.Operator.MAX_PER
case ConstraintOperator.Is => Protos.Constraint.Operator.IS
}
val builder = Protos.Constraint.newBuilder().setField(raml.fieldName).setOperator(operator)
raml.value.foreach(builder.setValue)
builder.build()
}
implicit val appConstraintRamlReader: Reads[Seq[String], Protos.Constraint] = Reads { raw =>
// this is not a substitute for validation, but does ensure that we're not translating invalid operators
def validOperator(op: String): Boolean = ConstraintConversion.ValidOperators.contains(op)
val result: Protos.Constraint = (raw.lift(0), raw.lift(1), raw.lift(2)) match {
case (Some(field), Some(op), None) if validOperator(op) =>
Protos.Constraint.newBuilder()
.setField(field)
.setOperator(Protos.Constraint.Operator.valueOf(op))
.build()
case (Some(field), Some(op), Some(value)) if validOperator(op) =>
Protos.Constraint.newBuilder()
.setField(field)
.setOperator(Protos.Constraint.Operator.valueOf(op))
.setValue(value)
.build()
case _ => throw SerializationFailedException(s"illegal constraint specification ${raw.mkString(",")}")
}
result
}
implicit val constraintRamlWriter: Writes[Protos.Constraint, Constraint] = Writes { c =>
val operator = c.getOperator match {
case Protos.Constraint.Operator.UNIQUE => ConstraintOperator.Unique
case Protos.Constraint.Operator.CLUSTER => ConstraintOperator.Cluster
case Protos.Constraint.Operator.GROUP_BY => ConstraintOperator.GroupBy
case Protos.Constraint.Operator.LIKE => ConstraintOperator.Like
case Protos.Constraint.Operator.UNLIKE => ConstraintOperator.Unlike
case Protos.Constraint.Operator.MAX_PER => ConstraintOperator.MaxPer
case Protos.Constraint.Operator.IS => ConstraintOperator.Is
}
Constraint(c.getField, operator, if (c.hasValue) Some(c.getValue) else None)
}
implicit val constraintToSeqStringWrites: Writes[Protos.Constraint, Seq[String]] = Writes { constraint =>
val builder = Seq.newBuilder[String]
builder += constraint.getField
builder += constraint.getOperator.name
if (constraint.hasValue) builder += constraint.getValue
builder.result()
}
}
object ConstraintConversion extends ConstraintConversion {
val ValidOperators: Set[String] = Protos.Constraint.Operator.values().map(_.toString)(collection.breakOut)
}
| guenter/marathon | src/main/scala/mesosphere/marathon/raml/ConstraintConversion.scala | Scala | apache-2.0 | 3,078 |
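For reference, the two constraint shapes these conversions translate between can be written down directly. The sketch below reuses only the builder calls already visible above; the object name is hypothetical and it assumes the generated Protos class lives at mesosphere.marathon.Protos, as in Marathon.

import mesosphere.marathon.Protos

object ConstraintShapesSketch {
  // Raw app-definition form: field, operator, optional value.
  val rawUnique: Seq[String] = Seq("hostname", "UNIQUE")
  val rawMaxPer: Seq[String] = Seq("hostname", "MAX_PER", "2")

  // Equivalent protobuf form of the MAX_PER example, built like appConstraintRamlReader does.
  val protoMaxPer: Protos.Constraint =
    Protos.Constraint.newBuilder()
      .setField("hostname")
      .setOperator(Protos.Constraint.Operator.MAX_PER)
      .setValue("2")
      .build()
}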
package common.api
import scala.collection.SortedMap
object PermissionLevel extends Enumeration {
type PermissionLevel = Value
val EVERYONE: PermissionLevel = Value(0)
val SUBSCRIBERS: PermissionLevel = Value(1)
val REGULARS: PermissionLevel = Value(2)
val MODERATORS: PermissionLevel = Value(3)
val OWNER: PermissionLevel = Value(4)
val map: SortedMap[String, String] = SortedMap(values.map(p =>
p.id.toString -> s"global.permissionLevels.${p.toString.toLowerCase}"
).toSeq: _*)
}
| Cobbleopolis/MonsterTruckBot | modules/common/app/common/api/PermissionLevel.scala | Scala | mit | 525 |
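A hypothetical usage sketch: Enumeration values are ordered by their ids, so permission checks can rely on the built-in ordering, and the map above yields the i18n message key for a level.

import common.api.PermissionLevel

object PermissionLevelSketch {
  def main(args: Array[String]): Unit = {
    val required = PermissionLevel.MODERATORS
    val user = PermissionLevel.OWNER
    assert(user >= required)                             // OWNER (4) outranks MODERATORS (3)
    println(PermissionLevel.map(required.id.toString))   // "global.permissionLevels.moderators"
  }
}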
import sbt._
import Keys._
import com.typesafe.sbt._
import pgp.PgpKeys._
object PublishSettings {
type Sett = Def.Setting[_]
lazy val all = Seq[Sett](
pom
, publish
, publishMavenStyle := true
, publishArtifact in Test := false
, pomIncludeRepository := { _ => false }
, licenses := Seq("BSD-3-Clause" -> url("http://www.opensource.org/licenses/BSD-3-Clause"))
, homepage := Some(url("https://github.com/NICTA/rng"))
, useGpg := true
, credentials := Seq(Credentials(Path.userHome / ".sbt" / "scoobi.credentials"))
)
lazy val pom: Sett =
pomExtra := (
<scm>
<url>git@github.com:NICTA/rng.git</url>
<connection>scm:git@github.com:NICTA/rng.git</connection>
</scm>
<developers>
<developer>
<id>tonymorris</id>
<name>Tony Morris</name>
<url>http://tmorris.net/</url>
</developer>
<developer>
<id>markhibberd</id>
<name>Mark Hibberd</name>
<url>http://mth.io/</url>
</developer>
</developers>
)
lazy val publish: Sett =
publishTo <<= version.apply(v => {
val nexus = "https://oss.sonatype.org/"
if (v.trim.endsWith("SNAPSHOT"))
Some("snapshots" at nexus + "content/repositories/snapshots")
else
Some("releases" at nexus + "service/local/staging/deploy/maven2")
})
}
| NICTA/rng | project/PublishSettings.scala | Scala | bsd-3-clause | 1,382 |
package mesosphere.marathon.test
import java.time._
import scala.concurrent.duration.FiniteDuration
class SettableClock(private[this] var clock: Clock = Clock.fixed(Instant.now, ZoneOffset.UTC)) extends Clock {
override def getZone: ZoneId = clock.getZone
override def instant(): Instant = clock.instant()
override def withZone(zoneId: ZoneId): Clock = new SettableClock(clock.withZone(zoneId))
def plus(duration: FiniteDuration): this.type = {
clock = Clock.offset(clock, Duration.ofMillis(duration.toMillis))
this
}
def plus(duration: Duration): this.type = {
clock = Clock.offset(clock, duration)
this
}
def at(instant: Instant): this.type = {
clock = Clock.fixed(instant, clock.getZone)
this
}
}
| timcharper/marathon | src/test/scala/mesosphere/marathon/test/SettableClock.scala | Scala | apache-2.0 | 751 |
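A hypothetical usage sketch of the SettableClock above: the clock stays fixed at construction time and only moves when a test advances it through one of the two plus overloads.

import mesosphere.marathon.test.SettableClock
import scala.concurrent.duration._

object SettableClockSketch {
  def main(args: Array[String]): Unit = {
    val clock = new SettableClock()                 // fixed now; never ticks on its own
    val start = clock.instant()
    clock.plus(5.seconds)                           // FiniteDuration overload
    clock.plus(java.time.Duration.ofMinutes(1))     // java.time.Duration overload
    assert(clock.instant() == start.plusSeconds(65))
  }
}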
package com.lonelyplanet.openplanet.client.apis
import com.lonelyplanet.openplanet.client.{FilterParameter, IncludeParameter, OpenPlanetClient, Shop}
import spray.json.JsValue
import scala.collection.immutable.Seq
trait OpShop extends Shop {
val client: OpenPlanetClient
override def product(id: String, include: Seq[IncludeParameter] = Seq.empty): JsValue = {
client.getSingle(s"/products/$id", include)
}
override def products(limit: Int, offset: Int, filter: Seq[FilterParameter], include: Seq[IncludeParameter]): JsValue = {
client.getCollection(s"/products", limit, offset, filter, include)
}
}
| lonelyplanet/open-planet-scala-client | src/main/scala/com/lonelyplanet/openplanet/client/apis/OpShop.scala | Scala | mit | 623 |
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalastyle.scalariform
import org.scalastyle.file.CheckerTest
import org.scalatest.junit.AssertionsForJUnit
import org.junit.Assert.assertEquals
import org.junit.Assert.assertTrue
import org.scalastyle.Checker
import org.scalastyle.StyleError
import java.util.Set
import org.junit.Before
import org.junit.Test
// scalastyle:off magic.number multiple.string.literals
class CommentFilterTest extends AssertionsForJUnit with CheckerTest {
val key = "class.name"
val classUnderTest = classOf[ClassNamesChecker]
@Test def testOnOff(): Unit = {
val source = """
package foobar
class foobar {
// scalastyle:off
class barbar { }
// scalastyle:on
// scalastyle:off class.name
class bazbaz {}
// scalastyle:on class.name
// scalastyle:off object.name
val s = " // scalastyle:off "
class g { }
}
""";
assertErrors(List(columnError(4, 6, List("^[A-Z][A-Za-z]*$")), columnError(14, 8, List("^[A-Z][A-Za-z]*$"))), source)
}
@Test def testOnOffIgnore(): Unit = {
val source = """
package foobar
class foobar {
// scalastyle:on class.name
class barbar1 { } // scalastyle:ignore class.name
//
// scalastyle:on
class barbar2 { } // scalastyle:ignore
// scalastyle:off
// scalastyle:on
class barbar3 { } // scalastyle:ignore class.name
// scalastyle:off
// scalastyle:on
class barbar4 { } // scalastyle:ignore magic.number
// scalastyle:off
}
""";
assertErrors(List(columnError(4, 6, List("^[A-Z][A-Za-z]*$")), columnError(18, 8, List("^[A-Z][A-Za-z]*$"))), source)
}
}
| kahosato/scalastyle | src/test/scala/org/scalastyle/scalariform/CommentFilterTest.scala | Scala | apache-2.0 | 2,276 |
package org.bizzle.plugin.ghosthost
import
java.{ io, net },
io.IOException,
net.URLClassLoader
import
scala.io.Source
import
sbt.{ File, IO, Logger, richFile, stringToProcess, TestsFailedException, UpdateReport }
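/** Runs QUnit test suites headlessly through PhantomJS from a temporary working directory. */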
object TestRunner {
def apply(config: GhostHostConfig, debug: DebugSettings, specs: Seq[ModuleSpec])
(implicit logger: Logger, update: UpdateReport): Unit = {
logger.info(s"Running tests headlessly with PhantomJS v${findVersionStr()}...")
IO.withTemporaryDirectory {
tempDir =>
val (htmlFile, qunitBootstrap) = moveFiles(tempDir, config, specs)
if (debug.enabled)
debug.action(htmlFile.getAbsolutePath)
else
runPhantomJS(qunitBootstrap.getAbsolutePath, htmlFile.getAbsolutePath)
}
}
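// Stages the generated test HTML page, the QUnit/RequireJS bootstrap resources, and each module's managed files into the temp directory.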
private def moveFiles(tempDir: File, config: GhostHostConfig, specs: Seq[ModuleSpec])
(implicit update: UpdateReport): (File, File) = {
val writeResourceToPath = (classLoader: ClassLoader) => (resourcePath: String, filePath: String) => {
val rsrc = classLoader.getResourceAsStream(resourcePath)
val src = Source.fromInputStream(rsrc)
val lines = src.getLines().toList
src.close()
val newFile = tempDir / filePath
newFile.getParentFile.mkdirs()
newFile.createNewFile()
IO.writeLines(newFile, lines)
newFile
}
val writeJSToPath = (path: String) => writeResourceToPath(this.getClass.getClassLoader)(s"javascript/$path", path)
val htmlFile = tempDir / "test.html"
htmlFile.createNewFile()
IO.write(htmlFile, config.toHtmlStr)
val qunitBootstrap = writeJSToPath("run-qunit.js")
Seq("require.js", "qunit.js", "qunit.css") foreach writeJSToPath
val urls = specs map (_.findURL()) toArray
val loader = URLClassLoader.newInstance(urls, this.getClass.getClassLoader)
val writeManagedToPath: ((String, String)) => File = {
case (resourcePath, filePath) => writeResourceToPath(loader)(resourcePath, s"managed/$filePath")
}
specs map (_.getPathMappings) foreach (_ foreach writeManagedToPath)
IO.copyDirectory(config.baseDir, tempDir)
(htmlFile, qunitBootstrap)
}
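// Runs PhantomJS against the QUnit bootstrap script and parses its summary output into pass/fail counts, failing the build on any failure.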
private def runPhantomJS(phantomPath: String, htmlPath: String)(implicit logger: Logger): Unit = {
val result = s"phantomjs $phantomPath $htmlPath".lines_!
val TestRegex =
"""
|(?s)(.*?)
|Tests completed in \d+ milliseconds\.
|(\d+) assertions of (\d+) passed, (\d+) failed\.
""".trim.stripMargin.r
result.mkString("\n") match {
case TestRegex(extra, assertions, successes, failures) =>
val status = s"$assertions Attempted, $successes Passed, $failures Failed"
if (failures == "0")
logger.info("All tests passed: " + status)
else {
logger.error(s"$failures test(s) failed: $status\\n$extra")
throw new TestsFailedException
}
case _ =>
logger.warn(s"Unexpected output from QUnit runner:\\n${result map (x => s"> $x") mkString "\\n"}")
}
}
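// Returns the installed PhantomJS version string, failing the build if the binary cannot be invoked.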
private def findVersionStr()(implicit logger: Logger): String =
try "phantomjs --version".!!.init
catch {
case ex: IOException =>
logger.error("PhantomJS must be installed and on your $PATH in order to run QUnit tests!")
throw new TestsFailedException
}
}
| TheBizzle/Ghost-Host | src/main/scala/org/bizzle/plugin/ghosthost/TestRunner.scala | Scala | bsd-3-clause | 3,385 |
package com.github.tweets.common
import com.fasterxml.jackson.annotation.{JsonProperty, JsonIgnoreProperties}
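// Minimal Jackson bindings for a tweet payload: only the text, creation date, and author are mapped; unknown fields are ignored.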
@JsonIgnoreProperties(ignoreUnknown = true)
private[tweets] case class TweetInfo(text: String,
@JsonProperty("created_at") createdDate: String,
user: TwitterUser)
@JsonIgnoreProperties(ignoreUnknown = true)
private[tweets] case class TwitterUser(name: String, @JsonProperty("screen_name") alias: String)
| andrei-l/tweets-for-github-projects-gatherer | src/main/scala/com/github/tweets/common/TweetInfo.scala | Scala | mit | 490 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.admin.AdminUtils
import kafka.api.{ApiVersion, ElectLeadersRequestOps, KAFKA_0_11_0_IV0, KAFKA_2_3_IV0}
import kafka.common.OffsetAndMetadata
import kafka.controller.ReplicaAssignment
import kafka.coordinator.group._
import kafka.coordinator.transaction.{InitProducerIdResult, TransactionCoordinator}
import kafka.log.AppendOrigin
import kafka.message.ZStdCompressionCodec
import kafka.network.RequestChannel
import kafka.server.QuotaFactory.{QuotaManagers, UnboundedQuota}
import kafka.server.metadata.ConfigRepository
import kafka.utils.Implicits._
import kafka.utils.{CoreUtils, Logging}
import org.apache.kafka.clients.admin.AlterConfigOp.OpType
import org.apache.kafka.clients.admin.{AlterConfigOp, ConfigEntry}
import org.apache.kafka.common.acl.AclOperation._
import org.apache.kafka.common.acl.AclOperation
import org.apache.kafka.common.config.ConfigResource
import org.apache.kafka.common.errors._
import org.apache.kafka.common.internals.Topic.{GROUP_METADATA_TOPIC_NAME, TRANSACTION_STATE_TOPIC_NAME, isInternal}
import org.apache.kafka.common.internals.{FatalExitError, Topic}
import org.apache.kafka.common.message.AlterConfigsResponseData.AlterConfigsResourceResponse
import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.{ReassignablePartitionResponse, ReassignableTopicResponse}
import org.apache.kafka.common.message.CreatePartitionsResponseData.CreatePartitionsTopicResult
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic
import org.apache.kafka.common.message.CreateTopicsResponseData.{CreatableTopicResult, CreatableTopicResultCollection}
import org.apache.kafka.common.message.DeleteGroupsResponseData.{DeletableGroupResult, DeletableGroupResultCollection}
import org.apache.kafka.common.message.DeleteRecordsResponseData.{DeleteRecordsPartitionResult, DeleteRecordsTopicResult}
import org.apache.kafka.common.message.DeleteTopicsResponseData.{DeletableTopicResult, DeletableTopicResultCollection}
import org.apache.kafka.common.message.ElectLeadersResponseData.{PartitionResult, ReplicaElectionResult}
import org.apache.kafka.common.message.LeaveGroupResponseData.MemberResponse
import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition
import org.apache.kafka.common.message.ListOffsetsResponseData.{ListOffsetsPartitionResponse, ListOffsetsTopicResponse}
import org.apache.kafka.common.message.MetadataResponseData.{MetadataResponsePartition, MetadataResponseTopic}
import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopic
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.{EpochEndOffset, OffsetForLeaderTopicResult, OffsetForLeaderTopicResultCollection}
import org.apache.kafka.common.message._
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.network.{ListenerName, Send}
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.record._
import org.apache.kafka.common.replica.ClientMetadata
import org.apache.kafka.common.replica.ClientMetadata.DefaultClientMetadata
import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType
import org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData
import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse
import org.apache.kafka.common.requests._
import org.apache.kafka.common.resource.Resource.CLUSTER_NAME
import org.apache.kafka.common.resource.ResourceType._
import org.apache.kafka.common.resource.{Resource, ResourceType}
import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol}
import org.apache.kafka.common.security.token.delegation.{DelegationToken, TokenInformation}
import org.apache.kafka.common.utils.{ProducerIdAndEpoch, Time}
import org.apache.kafka.common.{Node, TopicPartition, Uuid}
import org.apache.kafka.server.authorizer._
import java.lang.{Long => JLong}
import java.nio.ByteBuffer
import java.util
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicInteger
import java.util.{Collections, Optional}
import scala.annotation.nowarn
import scala.collection.{Map, Seq, Set, immutable, mutable}
import scala.jdk.CollectionConverters._
import scala.util.{Failure, Success, Try}
/**
* Logic to handle the various Kafka requests
*/
class KafkaApis(val requestChannel: RequestChannel,
val metadataSupport: MetadataSupport,
val replicaManager: ReplicaManager,
val groupCoordinator: GroupCoordinator,
val txnCoordinator: TransactionCoordinator,
val autoTopicCreationManager: AutoTopicCreationManager,
val brokerId: Int,
val config: KafkaConfig,
val configRepository: ConfigRepository,
val metadataCache: MetadataCache,
val metrics: Metrics,
val authorizer: Option[Authorizer],
val quotas: QuotaManagers,
val fetchManager: FetchManager,
brokerTopicStats: BrokerTopicStats,
val clusterId: String,
time: Time,
val tokenManager: DelegationTokenManager,
val apiVersionManager: ApiVersionManager) extends ApiRequestHandler with Logging {
type FetchResponseStats = Map[TopicPartition, RecordConversionStats]
this.logIdent = "[KafkaApi-%d] ".format(brokerId)
val configHelper = new ConfigHelper(metadataCache, config, configRepository)
val authHelper = new AuthHelper(authorizer)
val requestHelper = new RequestHandlerHelper(requestChannel, quotas, time)
val aclApis = new AclApis(authHelper, authorizer, requestHelper, "broker", config)
def close(): Unit = {
aclApis.close()
info("Shutdown complete.")
}
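// Forwarding to the controller requires a configured forwarding manager and a principal serde on the request context.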
private def isForwardingEnabled(request: RequestChannel.Request): Boolean = {
metadataSupport.forwardingManager.isDefined && request.context.principalSerde.isPresent
}
private def maybeForwardToController(
request: RequestChannel.Request,
handler: RequestChannel.Request => Unit
): Unit = {
def responseCallback(responseOpt: Option[AbstractResponse]): Unit = {
responseOpt match {
case Some(response) => requestHelper.sendForwardedResponse(request, response)
case None =>
info(s"The client connection will be closed due to controller responded " +
s"unsupported version exception during $request forwarding. " +
s"This could happen when the controller changed after the connection was established.")
requestChannel.closeConnection(request, Collections.emptyMap())
}
}
metadataSupport.maybeForward(request, handler, responseCallback)
}
private def forwardToControllerOrFail(
request: RequestChannel.Request
): Unit = {
def errorHandler(request: RequestChannel.Request): Unit = {
throw new IllegalStateException(s"Unable to forward $request to the controller")
}
maybeForwardToController(request, errorHandler)
}
/**
* Top-level method that handles all requests and multiplexes to the right api
*/
override def handle(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
try {
trace(s"Handling request:${request.requestDesc(true)} from connection ${request.context.connectionId};" +
s"securityProtocol:${request.context.securityProtocol},principal:${request.context.principal}")
if (!apiVersionManager.isApiEnabled(request.header.apiKey)) {
// The socket server will reject APIs which are not exposed in this scope and close the connection
// before handing them to the request handler, so this path should not be exercised in practice
throw new IllegalStateException(s"API ${request.header.apiKey} is not enabled")
}
request.header.apiKey match {
case ApiKeys.PRODUCE => handleProduceRequest(request, requestLocal)
case ApiKeys.FETCH => handleFetchRequest(request)
case ApiKeys.LIST_OFFSETS => handleListOffsetRequest(request)
case ApiKeys.METADATA => handleTopicMetadataRequest(request)
case ApiKeys.LEADER_AND_ISR => handleLeaderAndIsrRequest(request)
case ApiKeys.STOP_REPLICA => handleStopReplicaRequest(request)
case ApiKeys.UPDATE_METADATA => handleUpdateMetadataRequest(request, requestLocal)
case ApiKeys.CONTROLLED_SHUTDOWN => handleControlledShutdownRequest(request)
case ApiKeys.OFFSET_COMMIT => handleOffsetCommitRequest(request, requestLocal)
case ApiKeys.OFFSET_FETCH => handleOffsetFetchRequest(request)
case ApiKeys.FIND_COORDINATOR => handleFindCoordinatorRequest(request)
case ApiKeys.JOIN_GROUP => handleJoinGroupRequest(request, requestLocal)
case ApiKeys.HEARTBEAT => handleHeartbeatRequest(request)
case ApiKeys.LEAVE_GROUP => handleLeaveGroupRequest(request)
case ApiKeys.SYNC_GROUP => handleSyncGroupRequest(request, requestLocal)
case ApiKeys.DESCRIBE_GROUPS => handleDescribeGroupRequest(request)
case ApiKeys.LIST_GROUPS => handleListGroupsRequest(request)
case ApiKeys.SASL_HANDSHAKE => handleSaslHandshakeRequest(request)
case ApiKeys.API_VERSIONS => handleApiVersionsRequest(request)
case ApiKeys.CREATE_TOPICS => maybeForwardToController(request, handleCreateTopicsRequest)
case ApiKeys.DELETE_TOPICS => maybeForwardToController(request, handleDeleteTopicsRequest)
case ApiKeys.DELETE_RECORDS => handleDeleteRecordsRequest(request)
case ApiKeys.INIT_PRODUCER_ID => handleInitProducerIdRequest(request, requestLocal)
case ApiKeys.OFFSET_FOR_LEADER_EPOCH => handleOffsetForLeaderEpochRequest(request)
case ApiKeys.ADD_PARTITIONS_TO_TXN => handleAddPartitionToTxnRequest(request, requestLocal)
case ApiKeys.ADD_OFFSETS_TO_TXN => handleAddOffsetsToTxnRequest(request, requestLocal)
case ApiKeys.END_TXN => handleEndTxnRequest(request, requestLocal)
case ApiKeys.WRITE_TXN_MARKERS => handleWriteTxnMarkersRequest(request, requestLocal)
case ApiKeys.TXN_OFFSET_COMMIT => handleTxnOffsetCommitRequest(request, requestLocal)
case ApiKeys.DESCRIBE_ACLS => handleDescribeAcls(request)
case ApiKeys.CREATE_ACLS => maybeForwardToController(request, handleCreateAcls)
case ApiKeys.DELETE_ACLS => maybeForwardToController(request, handleDeleteAcls)
case ApiKeys.ALTER_CONFIGS => maybeForwardToController(request, handleAlterConfigsRequest)
case ApiKeys.DESCRIBE_CONFIGS => handleDescribeConfigsRequest(request)
case ApiKeys.ALTER_REPLICA_LOG_DIRS => handleAlterReplicaLogDirsRequest(request)
case ApiKeys.DESCRIBE_LOG_DIRS => handleDescribeLogDirsRequest(request)
case ApiKeys.SASL_AUTHENTICATE => handleSaslAuthenticateRequest(request)
case ApiKeys.CREATE_PARTITIONS => maybeForwardToController(request, handleCreatePartitionsRequest)
case ApiKeys.CREATE_DELEGATION_TOKEN => maybeForwardToController(request, handleCreateTokenRequest)
case ApiKeys.RENEW_DELEGATION_TOKEN => maybeForwardToController(request, handleRenewTokenRequest)
case ApiKeys.EXPIRE_DELEGATION_TOKEN => maybeForwardToController(request, handleExpireTokenRequest)
case ApiKeys.DESCRIBE_DELEGATION_TOKEN => handleDescribeTokensRequest(request)
case ApiKeys.DELETE_GROUPS => handleDeleteGroupsRequest(request, requestLocal)
case ApiKeys.ELECT_LEADERS => handleElectReplicaLeader(request)
case ApiKeys.INCREMENTAL_ALTER_CONFIGS => maybeForwardToController(request, handleIncrementalAlterConfigsRequest)
case ApiKeys.ALTER_PARTITION_REASSIGNMENTS => maybeForwardToController(request, handleAlterPartitionReassignmentsRequest)
case ApiKeys.LIST_PARTITION_REASSIGNMENTS => maybeForwardToController(request, handleListPartitionReassignmentsRequest)
case ApiKeys.OFFSET_DELETE => handleOffsetDeleteRequest(request, requestLocal)
case ApiKeys.DESCRIBE_CLIENT_QUOTAS => handleDescribeClientQuotasRequest(request)
case ApiKeys.ALTER_CLIENT_QUOTAS => maybeForwardToController(request, handleAlterClientQuotasRequest)
case ApiKeys.DESCRIBE_USER_SCRAM_CREDENTIALS => handleDescribeUserScramCredentialsRequest(request)
case ApiKeys.ALTER_USER_SCRAM_CREDENTIALS => maybeForwardToController(request, handleAlterUserScramCredentialsRequest)
case ApiKeys.ALTER_ISR => handleAlterIsrRequest(request)
case ApiKeys.UPDATE_FEATURES => maybeForwardToController(request, handleUpdateFeatures)
case ApiKeys.ENVELOPE => handleEnvelope(request, requestLocal)
case ApiKeys.DESCRIBE_CLUSTER => handleDescribeCluster(request)
case ApiKeys.DESCRIBE_PRODUCERS => handleDescribeProducersRequest(request)
case ApiKeys.DESCRIBE_TRANSACTIONS => handleDescribeTransactionsRequest(request)
case ApiKeys.LIST_TRANSACTIONS => handleListTransactionsRequest(request)
case ApiKeys.ALLOCATE_PRODUCER_IDS => handleAllocateProducerIdsRequest(request)
case ApiKeys.DESCRIBE_QUORUM => forwardToControllerOrFail(request)
case _ => throw new IllegalStateException(s"No handler for request api key ${request.header.apiKey}")
}
} catch {
case e: FatalExitError => throw e
case e: Throwable =>
error(s"Unexpected error handling request ${request.requestDesc(true)} " +
s"with context ${request.context}", e)
requestHelper.handleError(request, e)
} finally {
// try to complete delayed action. In order to avoid conflicting locking, the actions to complete delayed requests
// are kept in a queue. We add the logic to check the ReplicaManager queue at the end of KafkaApis.handle() and the
// expiration thread for certain delayed operations (e.g. DelayedJoin)
replicaManager.tryCompleteActions()
// The local completion time may be set while processing the request. Only record it if it's unset.
if (request.apiLocalCompleteTimeNanos < 0)
request.apiLocalCompleteTimeNanos = time.nanoseconds
}
}
def handleLeaderAndIsrRequest(request: RequestChannel.Request): Unit = {
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldNeverReceive(request))
// ensureTopicExists is only for client facing requests
// We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they
// stop serving data to clients for the topic being deleted
val correlationId = request.header.correlationId
val leaderAndIsrRequest = request.body[LeaderAndIsrRequest]
authHelper.authorizeClusterOperation(request, CLUSTER_ACTION)
if (isBrokerEpochStale(zkSupport, leaderAndIsrRequest.brokerEpoch)) {
// When the broker restarts very quickly, it is possible for this broker to receive a request intended
// for its previous generation, so the broker should skip the stale request.
info("Received LeaderAndIsr request with broker epoch " +
s"${leaderAndIsrRequest.brokerEpoch} smaller than the current broker epoch ${zkSupport.controller.brokerEpoch}")
requestHelper.sendResponseExemptThrottle(request, leaderAndIsrRequest.getErrorResponse(0, Errors.STALE_BROKER_EPOCH.exception))
} else {
val response = replicaManager.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest,
RequestHandlerHelper.onLeadershipChange(groupCoordinator, txnCoordinator, _, _))
requestHelper.sendResponseExemptThrottle(request, response)
}
}
def handleStopReplicaRequest(request: RequestChannel.Request): Unit = {
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldNeverReceive(request))
// ensureTopicExists is only for client facing requests
// We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they
// stop serving data to clients for the topic being deleted
val stopReplicaRequest = request.body[StopReplicaRequest]
authHelper.authorizeClusterOperation(request, CLUSTER_ACTION)
if (isBrokerEpochStale(zkSupport, stopReplicaRequest.brokerEpoch)) {
// When the broker restarts very quickly, it is possible for this broker to receive a request intended
// for its previous generation, so the broker should skip the stale request.
info("Received StopReplica request with broker epoch " +
s"${stopReplicaRequest.brokerEpoch} smaller than the current broker epoch ${zkSupport.controller.brokerEpoch}")
requestHelper.sendResponseExemptThrottle(request, new StopReplicaResponse(
new StopReplicaResponseData().setErrorCode(Errors.STALE_BROKER_EPOCH.code)))
} else {
val partitionStates = stopReplicaRequest.partitionStates().asScala
val (result, error) = replicaManager.stopReplicas(
request.context.correlationId,
stopReplicaRequest.controllerId,
stopReplicaRequest.controllerEpoch,
partitionStates)
// Clear the coordinator caches in case we were the leader. In the case of a reassignment, we
// cannot rely on the LeaderAndIsr API for this since it is only sent to active replicas.
result.forKeyValue { (topicPartition, error) =>
if (error == Errors.NONE) {
val partitionState = partitionStates(topicPartition)
if (topicPartition.topic == GROUP_METADATA_TOPIC_NAME
&& partitionState.deletePartition) {
val leaderEpoch = if (partitionState.leaderEpoch >= 0)
Some(partitionState.leaderEpoch)
else
None
groupCoordinator.onResignation(topicPartition.partition, leaderEpoch)
} else if (topicPartition.topic == TRANSACTION_STATE_TOPIC_NAME
&& partitionState.deletePartition) {
val leaderEpoch = if (partitionState.leaderEpoch >= 0)
Some(partitionState.leaderEpoch)
else
None
txnCoordinator.onResignation(topicPartition.partition, coordinatorEpoch = leaderEpoch)
}
}
}
def toStopReplicaPartition(tp: TopicPartition, error: Errors) =
new StopReplicaResponseData.StopReplicaPartitionError()
.setTopicName(tp.topic)
.setPartitionIndex(tp.partition)
.setErrorCode(error.code)
requestHelper.sendResponseExemptThrottle(request, new StopReplicaResponse(new StopReplicaResponseData()
.setErrorCode(error.code)
.setPartitionErrors(result.map {
case (tp, error) => toStopReplicaPartition(tp, error)
}.toBuffer.asJava)))
}
CoreUtils.swallow(replicaManager.replicaFetcherManager.shutdownIdleFetcherThreads(), this)
}
def handleUpdateMetadataRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldNeverReceive(request))
val correlationId = request.header.correlationId
val updateMetadataRequest = request.body[UpdateMetadataRequest]
authHelper.authorizeClusterOperation(request, CLUSTER_ACTION)
if (isBrokerEpochStale(zkSupport, updateMetadataRequest.brokerEpoch)) {
// When the broker restarts very quickly, it is possible for this broker to receive a request intended
// for its previous generation, so the broker should skip the stale request.
info("Received update metadata request with broker epoch " +
s"${updateMetadataRequest.brokerEpoch} smaller than the current broker epoch ${zkSupport.controller.brokerEpoch}")
requestHelper.sendResponseExemptThrottle(request,
new UpdateMetadataResponse(new UpdateMetadataResponseData().setErrorCode(Errors.STALE_BROKER_EPOCH.code)))
} else {
val deletedPartitions = replicaManager.maybeUpdateMetadataCache(correlationId, updateMetadataRequest)
if (deletedPartitions.nonEmpty)
groupCoordinator.handleDeletedPartitions(deletedPartitions, requestLocal)
if (zkSupport.adminManager.hasDelayedTopicOperations) {
updateMetadataRequest.partitionStates.forEach { partitionState =>
zkSupport.adminManager.tryCompleteDelayedTopicOperations(partitionState.topicName)
}
}
quotas.clientQuotaCallback.foreach { callback =>
if (callback.updateClusterMetadata(metadataCache.getClusterMetadata(clusterId, request.context.listenerName))) {
quotas.fetch.updateQuotaMetricConfigs()
quotas.produce.updateQuotaMetricConfigs()
quotas.request.updateQuotaMetricConfigs()
quotas.controllerMutation.updateQuotaMetricConfigs()
}
}
if (replicaManager.hasDelayedElectionOperations) {
updateMetadataRequest.partitionStates.forEach { partitionState =>
val tp = new TopicPartition(partitionState.topicName, partitionState.partitionIndex)
replicaManager.tryCompleteElection(TopicPartitionOperationKey(tp))
}
}
requestHelper.sendResponseExemptThrottle(request, new UpdateMetadataResponse(
new UpdateMetadataResponseData().setErrorCode(Errors.NONE.code)))
}
}
def handleControlledShutdownRequest(request: RequestChannel.Request): Unit = {
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldNeverReceive(request))
// ensureTopicExists is only for client facing requests
// We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they
// stop serving data to clients for the topic being deleted
val controlledShutdownRequest = request.body[ControlledShutdownRequest]
authHelper.authorizeClusterOperation(request, CLUSTER_ACTION)
def controlledShutdownCallback(controlledShutdownResult: Try[Set[TopicPartition]]): Unit = {
val response = controlledShutdownResult match {
case Success(partitionsRemaining) =>
ControlledShutdownResponse.prepareResponse(Errors.NONE, partitionsRemaining.asJava)
case Failure(throwable) =>
controlledShutdownRequest.getErrorResponse(throwable)
}
requestHelper.sendResponseExemptThrottle(request, response)
}
zkSupport.controller.controlledShutdown(controlledShutdownRequest.data.brokerId, controlledShutdownRequest.data.brokerEpoch, controlledShutdownCallback)
}
/**
* Handle an offset commit request
*/
def handleOffsetCommitRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
val header = request.header
val offsetCommitRequest = request.body[OffsetCommitRequest]
val unauthorizedTopicErrors = mutable.Map[TopicPartition, Errors]()
val nonExistingTopicErrors = mutable.Map[TopicPartition, Errors]()
// the callback for sending an offset commit response
def sendResponseCallback(commitStatus: Map[TopicPartition, Errors]): Unit = {
val combinedCommitStatus = commitStatus ++ unauthorizedTopicErrors ++ nonExistingTopicErrors
if (isDebugEnabled)
combinedCommitStatus.forKeyValue { (topicPartition, error) =>
if (error != Errors.NONE) {
debug(s"Offset commit request with correlation id ${header.correlationId} from client ${header.clientId} " +
s"on partition $topicPartition failed due to ${error.exceptionName}")
}
}
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new OffsetCommitResponse(requestThrottleMs, combinedCommitStatus.asJava))
}
// reject the request if not authorized to the group
if (!authHelper.authorize(request.context, READ, GROUP, offsetCommitRequest.data.groupId)) {
val error = Errors.GROUP_AUTHORIZATION_FAILED
val responseTopicList = OffsetCommitRequest.getErrorResponseTopics(
offsetCommitRequest.data.topics,
error)
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => new OffsetCommitResponse(
new OffsetCommitResponseData()
.setTopics(responseTopicList)
.setThrottleTimeMs(requestThrottleMs)
))
} else if (offsetCommitRequest.data.groupInstanceId != null && config.interBrokerProtocolVersion < KAFKA_2_3_IV0) {
// Only enable static membership when IBP >= 2.3, because it is not safe for the broker to use the static member logic
// until we are sure that all brokers support it. If a static group is loaded by an older coordinator, it will discard
// the group.instance.id field, so static members could accidentally become "dynamic", which leads to wrong states.
val errorMap = new mutable.HashMap[TopicPartition, Errors]
for (topicData <- offsetCommitRequest.data.topics.asScala) {
for (partitionData <- topicData.partitions.asScala) {
val topicPartition = new TopicPartition(topicData.name, partitionData.partitionIndex)
errorMap += topicPartition -> Errors.UNSUPPORTED_VERSION
}
}
sendResponseCallback(errorMap.toMap)
} else {
val authorizedTopicRequestInfoBldr = immutable.Map.newBuilder[TopicPartition, OffsetCommitRequestData.OffsetCommitRequestPartition]
val topics = offsetCommitRequest.data.topics.asScala
val authorizedTopics = authHelper.filterByAuthorized(request.context, READ, TOPIC, topics)(_.name)
for (topicData <- topics) {
for (partitionData <- topicData.partitions.asScala) {
val topicPartition = new TopicPartition(topicData.name, partitionData.partitionIndex)
if (!authorizedTopics.contains(topicData.name))
unauthorizedTopicErrors += (topicPartition -> Errors.TOPIC_AUTHORIZATION_FAILED)
else if (!metadataCache.contains(topicPartition))
nonExistingTopicErrors += (topicPartition -> Errors.UNKNOWN_TOPIC_OR_PARTITION)
else
authorizedTopicRequestInfoBldr += (topicPartition -> partitionData)
}
}
val authorizedTopicRequestInfo = authorizedTopicRequestInfoBldr.result()
if (authorizedTopicRequestInfo.isEmpty)
sendResponseCallback(Map.empty)
else if (header.apiVersion == 0) {
// for version 0 always store offsets to ZK
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.unsupported("Version 0 offset commit requests"))
val responseInfo = authorizedTopicRequestInfo.map {
case (topicPartition, partitionData) =>
try {
if (partitionData.committedMetadata() != null
&& partitionData.committedMetadata().length > config.offsetMetadataMaxSize)
(topicPartition, Errors.OFFSET_METADATA_TOO_LARGE)
else {
zkSupport.zkClient.setOrCreateConsumerOffset(
offsetCommitRequest.data.groupId,
topicPartition,
partitionData.committedOffset)
(topicPartition, Errors.NONE)
}
} catch {
case e: Throwable => (topicPartition, Errors.forException(e))
}
}
sendResponseCallback(responseInfo)
} else {
// for version 1 and beyond store offsets in offset manager
// "default" expiration timestamp is now + retention (and retention may be overridden if v2)
// expire timestamp is computed differently for v1 and v2.
// - If v1 and no explicit commit timestamp is provided we treat it the same as v5.
// - If v1 and explicit retention time is provided we calculate expiration timestamp based on that
// - If v2/v3/v4 (no explicit commit timestamp) we treat it the same as v5.
// - For v5 and beyond there is no per partition expiration timestamp, so this field is no longer in effect
val currentTimestamp = time.milliseconds
val partitionData = authorizedTopicRequestInfo.map { case (k, partitionData) =>
val metadata = if (partitionData.committedMetadata == null)
OffsetAndMetadata.NoMetadata
else
partitionData.committedMetadata
val leaderEpochOpt = if (partitionData.committedLeaderEpoch == RecordBatch.NO_PARTITION_LEADER_EPOCH)
Optional.empty[Integer]
else
Optional.of[Integer](partitionData.committedLeaderEpoch)
k -> new OffsetAndMetadata(
offset = partitionData.committedOffset,
leaderEpoch = leaderEpochOpt,
metadata = metadata,
commitTimestamp = partitionData.commitTimestamp match {
case OffsetCommitRequest.DEFAULT_TIMESTAMP => currentTimestamp
case customTimestamp => customTimestamp
},
expireTimestamp = offsetCommitRequest.data.retentionTimeMs match {
case OffsetCommitRequest.DEFAULT_RETENTION_TIME => None
case retentionTime => Some(currentTimestamp + retentionTime)
}
)
}
// call coordinator to handle commit offset
groupCoordinator.handleCommitOffsets(
offsetCommitRequest.data.groupId,
offsetCommitRequest.data.memberId,
Option(offsetCommitRequest.data.groupInstanceId),
offsetCommitRequest.data.generationId,
partitionData,
sendResponseCallback,
requestLocal)
}
}
}
/**
* Handle a produce request
*/
def handleProduceRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
val produceRequest = request.body[ProduceRequest]
val requestSize = request.sizeInBytes
if (RequestUtils.hasTransactionalRecords(produceRequest)) {
val isAuthorizedTransactional = produceRequest.transactionalId != null &&
authHelper.authorize(request.context, WRITE, TRANSACTIONAL_ID, produceRequest.transactionalId)
if (!isAuthorizedTransactional) {
requestHelper.sendErrorResponseMaybeThrottle(request, Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED.exception)
return
}
}
val unauthorizedTopicResponses = mutable.Map[TopicPartition, PartitionResponse]()
val nonExistingTopicResponses = mutable.Map[TopicPartition, PartitionResponse]()
val invalidRequestResponses = mutable.Map[TopicPartition, PartitionResponse]()
val authorizedRequestInfo = mutable.Map[TopicPartition, MemoryRecords]()
// cache the result to avoid redundant authorization calls
val authorizedTopics = authHelper.filterByAuthorized(request.context, WRITE, TOPIC,
produceRequest.data().topicData().asScala)(_.name())
produceRequest.data.topicData.forEach(topic => topic.partitionData.forEach { partition =>
val topicPartition = new TopicPartition(topic.name, partition.index)
// This caller assumes the type is MemoryRecords, which holds for the current serialization.
// We cast the type to avoid a larger change to the code base.
// https://issues.apache.org/jira/browse/KAFKA-10698
val memoryRecords = partition.records.asInstanceOf[MemoryRecords]
if (!authorizedTopics.contains(topicPartition.topic))
unauthorizedTopicResponses += topicPartition -> new PartitionResponse(Errors.TOPIC_AUTHORIZATION_FAILED)
else if (!metadataCache.contains(topicPartition))
nonExistingTopicResponses += topicPartition -> new PartitionResponse(Errors.UNKNOWN_TOPIC_OR_PARTITION)
else
try {
ProduceRequest.validateRecords(request.header.apiVersion, memoryRecords)
authorizedRequestInfo += (topicPartition -> memoryRecords)
} catch {
case e: ApiException =>
invalidRequestResponses += topicPartition -> new PartitionResponse(Errors.forException(e))
}
})
// the callback for sending a produce response
// The construction of ProduceResponse is able to accept auto-generated protocol data so
// KafkaApis#handleProduceRequest should apply auto-generated protocol to avoid extra conversion.
// https://issues.apache.org/jira/browse/KAFKA-10730
@nowarn("cat=deprecation")
def sendResponseCallback(responseStatus: Map[TopicPartition, PartitionResponse]): Unit = {
val mergedResponseStatus = responseStatus ++ unauthorizedTopicResponses ++ nonExistingTopicResponses ++ invalidRequestResponses
var errorInResponse = false
mergedResponseStatus.forKeyValue { (topicPartition, status) =>
if (status.error != Errors.NONE) {
errorInResponse = true
debug("Produce request with correlation id %d from client %s on partition %s failed due to %s".format(
request.header.correlationId,
request.header.clientId,
topicPartition,
status.error.exceptionName))
}
}
// Record both bandwidth and request quota-specific values and throttle by muting the channel if any of the quotas
// have been violated. If both quotas have been violated, use the max throttle time between the two quotas. Note
// that the request quota is not enforced if acks == 0.
val timeMs = time.milliseconds()
val bandwidthThrottleTimeMs = quotas.produce.maybeRecordAndGetThrottleTimeMs(request, requestSize, timeMs)
val requestThrottleTimeMs =
if (produceRequest.acks == 0) 0
else quotas.request.maybeRecordAndGetThrottleTimeMs(request, timeMs)
val maxThrottleTimeMs = Math.max(bandwidthThrottleTimeMs, requestThrottleTimeMs)
if (maxThrottleTimeMs > 0) {
request.apiThrottleTimeMs = maxThrottleTimeMs
if (bandwidthThrottleTimeMs > requestThrottleTimeMs) {
requestHelper.throttle(quotas.produce, request, bandwidthThrottleTimeMs)
} else {
requestHelper.throttle(quotas.request, request, requestThrottleTimeMs)
}
}
// Send the response immediately. In case of throttling, the channel has already been muted.
if (produceRequest.acks == 0) {
// no operation needed if producer request.required.acks = 0; however, if there is any error in handling
// the request, since no response is expected by the producer, the server will close the connection so that
// the producer client will know that some error has happened and will refresh its metadata
if (errorInResponse) {
val exceptionsSummary = mergedResponseStatus.map { case (topicPartition, status) =>
topicPartition -> status.error.exceptionName
}.mkString(", ")
info(
s"Closing connection due to error during produce request with correlation id ${request.header.correlationId} " +
s"from client id ${request.header.clientId} with ack=0\\n" +
s"Topic and partition to exceptions: $exceptionsSummary"
)
requestChannel.closeConnection(request, new ProduceResponse(mergedResponseStatus.asJava).errorCounts)
} else {
// Note that although request throttling is exempt for acks == 0, the channel may be throttled due to
// bandwidth quota violation.
requestHelper.sendNoOpResponseExemptThrottle(request)
}
} else {
requestChannel.sendResponse(request, new ProduceResponse(mergedResponseStatus.asJava, maxThrottleTimeMs), None)
}
}
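// Records per-partition record-conversion statistics produced while appending this request.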
def processingStatsCallback(processingStats: FetchResponseStats): Unit = {
processingStats.forKeyValue { (tp, info) =>
updateRecordConversionStats(request, tp, info)
}
}
if (authorizedRequestInfo.isEmpty)
sendResponseCallback(Map.empty)
else {
val internalTopicsAllowed = request.header.clientId == AdminUtils.AdminClientId
// call the replica manager to append messages to the replicas
replicaManager.appendRecords(
timeout = produceRequest.timeout.toLong,
requiredAcks = produceRequest.acks,
internalTopicsAllowed = internalTopicsAllowed,
origin = AppendOrigin.Client,
entriesPerPartition = authorizedRequestInfo,
requestLocal = requestLocal,
responseCallback = sendResponseCallback,
recordConversionStatsCallback = processingStatsCallback)
// if the request is put into the purgatory, it will have a held reference and hence cannot be garbage collected;
// hence we clear its data here in order to let GC reclaim its memory since it is already appended to log
produceRequest.clearPartitionRecords()
}
}
/**
* Handle a fetch request
*/
def handleFetchRequest(request: RequestChannel.Request): Unit = {
val versionId = request.header.apiVersion
val clientId = request.header.clientId
val fetchRequest = request.body[FetchRequest]
val (topicIds, topicNames) =
if (fetchRequest.version() >= 13)
metadataCache.topicIdInfo()
else
(Collections.emptyMap[String, Uuid](), Collections.emptyMap[Uuid, String]())
// If fetchData or forgottenTopics contain an unknown topic ID, return a top level error.
var fetchData: util.Map[TopicPartition, FetchRequest.PartitionData] = null
var forgottenTopics: util.List[TopicPartition] = null
try {
fetchData = fetchRequest.fetchData(topicNames)
forgottenTopics = fetchRequest.forgottenTopics(topicNames)
} catch {
case e: UnknownTopicIdException => throw e
}
val fetchContext = fetchManager.newContext(
fetchRequest.version,
fetchRequest.metadata,
fetchRequest.isFromFollower,
fetchData,
forgottenTopics,
topicIds)
val clientMetadata: Option[ClientMetadata] = if (versionId >= 11) {
// Fetch API version 11 added preferred replica logic
Some(new DefaultClientMetadata(
fetchRequest.rackId,
clientId,
request.context.clientAddress,
request.context.principal,
request.context.listenerName.value))
} else {
None
}
val erroneous = mutable.ArrayBuffer[(TopicPartition, FetchResponseData.PartitionData)]()
val interesting = mutable.ArrayBuffer[(TopicPartition, FetchRequest.PartitionData)]()
val sessionTopicIds = mutable.Map[String, Uuid]()
if (fetchRequest.isFromFollower) {
// The follower must have ClusterAction on ClusterResource in order to fetch partition data.
if (authHelper.authorize(request.context, CLUSTER_ACTION, CLUSTER, CLUSTER_NAME)) {
fetchContext.foreachPartition { (topicPartition, topicId, data) =>
sessionTopicIds.put(topicPartition.topic(), topicId)
if (!metadataCache.contains(topicPartition))
erroneous += topicPartition -> FetchResponse.partitionResponse(topicPartition.partition, Errors.UNKNOWN_TOPIC_OR_PARTITION)
else
interesting += (topicPartition -> data)
}
} else {
fetchContext.foreachPartition { (part, topicId, _) =>
sessionTopicIds.put(part.topic(), topicId)
erroneous += part -> FetchResponse.partitionResponse(part.partition, Errors.TOPIC_AUTHORIZATION_FAILED)
}
}
} else {
// Regular Kafka consumers need READ permission on each partition they are fetching.
val partitionDatas = new mutable.ArrayBuffer[(TopicPartition, FetchRequest.PartitionData)]
fetchContext.foreachPartition { (topicPartition, topicId, partitionData) =>
partitionDatas += topicPartition -> partitionData
sessionTopicIds.put(topicPartition.topic(), topicId)
}
val authorizedTopics = authHelper.filterByAuthorized(request.context, READ, TOPIC, partitionDatas)(_._1.topic)
partitionDatas.foreach { case (topicPartition, data) =>
if (!authorizedTopics.contains(topicPartition.topic))
erroneous += topicPartition -> FetchResponse.partitionResponse(topicPartition.partition, Errors.TOPIC_AUTHORIZATION_FAILED)
else if (!metadataCache.contains(topicPartition))
erroneous += topicPartition -> FetchResponse.partitionResponse(topicPartition.partition, Errors.UNKNOWN_TOPIC_OR_PARTITION)
else
interesting += (topicPartition -> data)
}
}
def maybeDownConvertStorageError(error: Errors): Errors = {
// If consumer sends FetchRequest V5 or earlier, the client library is not guaranteed to recognize the error code
// for KafkaStorageException. In this case the client library will translate KafkaStorageException to
// UnknownServerException which is not retriable. We can ensure that consumer will update metadata and retry
// by converting the KafkaStorageException to NotLeaderOrFollowerException in the response if FetchRequest version <= 5
if (error == Errors.KAFKA_STORAGE_ERROR && versionId <= 5) {
Errors.NOT_LEADER_OR_FOLLOWER
} else {
error
}
}
def maybeConvertFetchedData(tp: TopicPartition,
partitionData: FetchResponseData.PartitionData): FetchResponseData.PartitionData = {
val logConfig = replicaManager.getLogConfig(tp)
if (logConfig.exists(_.compressionType == ZStdCompressionCodec.name) && versionId < 10) {
trace(s"Fetching messages is disabled for ZStandard compressed partition $tp. Sending unsupported version response to $clientId.")
FetchResponse.partitionResponse(tp.partition, Errors.UNSUPPORTED_COMPRESSION_TYPE)
} else {
// Down-conversion of fetched records is needed when the on-disk magic value is greater than what is
// supported by the fetch request version.
// If the inter-broker protocol version is `3.0` or higher, the log config message format version is
// always `3.0` (i.e. magic value is `v2`). As a result, we always go through the down-conversion
// path if the fetch version is 3 or lower (in rare cases the down-conversion may not be needed, but
// it's not worth optimizing for them).
// If the inter-broker protocol version is lower than `3.0`, we rely on the log config message format
// version as a proxy for the on-disk magic value to maintain the long-standing behavior originally
// introduced in Kafka 0.10.0. An important implication is that it's unsafe to downgrade the message
// format version after a single message has been produced (the broker would return the message(s)
// without down-conversion irrespective of the fetch version).
val unconvertedRecords = FetchResponse.recordsOrFail(partitionData)
val downConvertMagic =
logConfig.map(_.recordVersion.value).flatMap { magic =>
if (magic > RecordBatch.MAGIC_VALUE_V0 && versionId <= 1)
Some(RecordBatch.MAGIC_VALUE_V0)
else if (magic > RecordBatch.MAGIC_VALUE_V1 && versionId <= 3)
Some(RecordBatch.MAGIC_VALUE_V1)
else
None
}
downConvertMagic match {
case Some(magic) =>
// For fetch requests from clients, check if down-conversion is disabled for the particular partition
if (!fetchRequest.isFromFollower && !logConfig.forall(_.messageDownConversionEnable)) {
trace(s"Conversion to message format ${downConvertMagic.get} is disabled for partition $tp. Sending unsupported version response to $clientId.")
FetchResponse.partitionResponse(tp.partition, Errors.UNSUPPORTED_VERSION)
} else {
try {
trace(s"Down converting records from partition $tp to message format version $magic for fetch request from $clientId")
// Because down-conversion is extremely memory intensive, we want to try and delay the down-conversion as much
// as possible. With KIP-283, we have the ability to lazily down-convert in a chunked manner. The lazy, chunked
// down-conversion always guarantees that at least one batch of messages is down-converted and sent out to the
// client.
new FetchResponseData.PartitionData()
.setPartitionIndex(tp.partition)
.setErrorCode(maybeDownConvertStorageError(Errors.forCode(partitionData.errorCode)).code)
.setHighWatermark(partitionData.highWatermark)
.setLastStableOffset(partitionData.lastStableOffset)
.setLogStartOffset(partitionData.logStartOffset)
.setAbortedTransactions(partitionData.abortedTransactions)
.setRecords(new LazyDownConversionRecords(tp, unconvertedRecords, magic, fetchContext.getFetchOffset(tp).get, time))
.setPreferredReadReplica(partitionData.preferredReadReplica())
} catch {
case e: UnsupportedCompressionTypeException =>
trace("Received unsupported compression type error during down-conversion", e)
FetchResponse.partitionResponse(tp.partition, Errors.UNSUPPORTED_COMPRESSION_TYPE)
}
}
case None =>
new FetchResponseData.PartitionData()
.setPartitionIndex(tp.partition)
.setErrorCode(maybeDownConvertStorageError(Errors.forCode(partitionData.errorCode)).code)
.setHighWatermark(partitionData.highWatermark)
.setLastStableOffset(partitionData.lastStableOffset)
.setLogStartOffset(partitionData.logStartOffset)
.setAbortedTransactions(partitionData.abortedTransactions)
.setRecords(unconvertedRecords)
.setPreferredReadReplica(partitionData.preferredReadReplica)
.setDivergingEpoch(partitionData.divergingEpoch)
}
}
}
// the callback for processing a fetch response, invoked before throttling
def processResponseCallback(responsePartitionData: Seq[(TopicPartition, FetchPartitionData)]): Unit = {
val partitions = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
val reassigningPartitions = mutable.Set[TopicPartition]()
responsePartitionData.foreach { case (tp, data) =>
val abortedTransactions = data.abortedTransactions.map(_.asJava).orNull
val lastStableOffset = data.lastStableOffset.getOrElse(FetchResponse.INVALID_LAST_STABLE_OFFSET)
if (data.isReassignmentFetch) reassigningPartitions.add(tp)
val partitionData = new FetchResponseData.PartitionData()
.setPartitionIndex(tp.partition)
.setErrorCode(maybeDownConvertStorageError(data.error).code)
.setHighWatermark(data.highWatermark)
.setLastStableOffset(lastStableOffset)
.setLogStartOffset(data.logStartOffset)
.setAbortedTransactions(abortedTransactions)
.setRecords(data.records)
.setPreferredReadReplica(data.preferredReadReplica.getOrElse(FetchResponse.INVALID_PREFERRED_REPLICA_ID))
data.divergingEpoch.foreach(partitionData.setDivergingEpoch)
partitions.put(tp, partitionData)
}
erroneous.foreach { case (tp, data) => partitions.put(tp, data) }
var unconvertedFetchResponse: FetchResponse = null
def createResponse(throttleTimeMs: Int): FetchResponse = {
// Down-convert messages for each partition if required
val convertedData = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
unconvertedFetchResponse.data().responses().forEach { topicResponse =>
topicResponse.partitions().forEach { unconvertedPartitionData =>
val tp = new TopicPartition(topicResponse.topic(), unconvertedPartitionData.partitionIndex())
val error = Errors.forCode(unconvertedPartitionData.errorCode)
if (error != Errors.NONE)
debug(s"Fetch request with correlation id ${request.header.correlationId} from client $clientId " +
s"on partition $tp failed due to ${error.exceptionName}")
convertedData.put(tp, maybeConvertFetchedData(tp, unconvertedPartitionData))
}
}
// Prepare fetch response from converted data
val response =
FetchResponse.of(unconvertedFetchResponse.error, throttleTimeMs, unconvertedFetchResponse.sessionId, convertedData, sessionTopicIds.asJava)
// record the bytes out metrics only when the response is being sent
response.data().responses().forEach { topicResponse =>
topicResponse.partitions().forEach { data =>
val tp = new TopicPartition(topicResponse.topic(), data.partitionIndex())
brokerTopicStats.updateBytesOut(tp.topic, fetchRequest.isFromFollower, reassigningPartitions.contains(tp), FetchResponse.recordsSize(data))
}
}
response
}
def updateConversionStats(send: Send): Unit = {
send match {
case send: MultiRecordsSend if send.recordConversionStats != null =>
send.recordConversionStats.asScala.toMap.foreach {
case (tp, stats) => updateRecordConversionStats(request, tp, stats)
}
case _ =>
}
}
if (fetchRequest.isFromFollower) {
// We've already evaluated against the quota and are good to go. Just need to record it now.
unconvertedFetchResponse = fetchContext.updateAndGenerateResponseData(partitions)
val responseSize = KafkaApis.sizeOfThrottledPartitions(versionId, unconvertedFetchResponse, quotas.leader, sessionTopicIds.asJava)
quotas.leader.record(responseSize)
val responsePartitionsSize = unconvertedFetchResponse.data().responses().stream().mapToInt(_.partitions().size()).sum()
trace(s"Sending Fetch response with partitions.size=$responsePartitionsSize, " +
s"metadata=${unconvertedFetchResponse.sessionId}")
requestHelper.sendResponseExemptThrottle(request, createResponse(0), Some(updateConversionStats))
} else {
// Fetch size used to determine throttle time is calculated before any down conversions.
// This may be slightly different from the actual response size. But since down conversions
// result in data being loaded into memory, we should do this only when we are not going to throttle.
//
// Record both bandwidth and request quota-specific values and throttle by muting the channel if any of the
// quotas have been violated. If both quotas have been violated, use the max throttle time between the two
// quotas. When throttled, we unrecord the recorded bandwidth quota value
val responseSize = fetchContext.getResponseSize(partitions, versionId)
val timeMs = time.milliseconds()
val requestThrottleTimeMs = quotas.request.maybeRecordAndGetThrottleTimeMs(request, timeMs)
val bandwidthThrottleTimeMs = quotas.fetch.maybeRecordAndGetThrottleTimeMs(request, responseSize, timeMs)
val maxThrottleTimeMs = math.max(bandwidthThrottleTimeMs, requestThrottleTimeMs)
if (maxThrottleTimeMs > 0) {
request.apiThrottleTimeMs = maxThrottleTimeMs
// Even if we need to throttle for request quota violation, we should "unrecord" the already recorded value
// from the fetch quota because we are going to return an empty response.
quotas.fetch.unrecordQuotaSensor(request, responseSize, timeMs)
if (bandwidthThrottleTimeMs > requestThrottleTimeMs) {
requestHelper.throttle(quotas.fetch, request, bandwidthThrottleTimeMs)
} else {
requestHelper.throttle(quotas.request, request, requestThrottleTimeMs)
}
// If throttling is required, return an empty response.
unconvertedFetchResponse = fetchContext.getThrottledResponse(maxThrottleTimeMs)
} else {
// Get the actual response. This will update the fetch context.
unconvertedFetchResponse = fetchContext.updateAndGenerateResponseData(partitions)
val responsePartitionsSize = unconvertedFetchResponse.data().responses().stream().mapToInt(_.partitions().size()).sum()
trace(s"Sending Fetch response with partitions.size=$responsePartitionsSize, " +
s"metadata=${unconvertedFetchResponse.sessionId}")
}
// Send the response immediately.
requestChannel.sendResponse(request, createResponse(maxThrottleTimeMs), Some(updateConversionStats))
}
}
// For fetches from consumers, cap fetchMaxBytes to the maximum number of bytes that could be fetched without being
// throttled, given that no bytes were recorded in the recent quota window; trying to fetch more bytes would result in
// guaranteed throttling, potentially blocking consumer progress.
val maxQuotaWindowBytes = if (fetchRequest.isFromFollower)
Int.MaxValue
else
quotas.fetch.getMaxValueInQuotaWindow(request.session, clientId).toInt
val fetchMaxBytes = Math.min(Math.min(fetchRequest.maxBytes, config.fetchMaxBytes), maxQuotaWindowBytes)
val fetchMinBytes = Math.min(fetchRequest.minBytes, fetchMaxBytes)
if (interesting.isEmpty)
processResponseCallback(Seq.empty)
else {
// call the replica manager to fetch messages from the local replica
replicaManager.fetchMessages(
fetchRequest.maxWait.toLong,
fetchRequest.replicaId,
fetchMinBytes,
fetchMaxBytes,
versionId <= 2,
interesting,
sessionTopicIds.asJava,
replicationQuota(fetchRequest),
processResponseCallback,
fetchRequest.isolationLevel,
clientMetadata)
}
}
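// Follower fetches are subject to the leader replication quota; consumer fetches are not throttled here.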
def replicationQuota(fetchRequest: FetchRequest): ReplicaQuota =
if (fetchRequest.isFromFollower) quotas.leader else UnboundedQuota
def handleListOffsetRequest(request: RequestChannel.Request): Unit = {
val version = request.header.apiVersion
val topics = if (version == 0)
handleListOffsetRequestV0(request)
else
handleListOffsetRequestV1AndAbove(request)
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => new ListOffsetsResponse(new ListOffsetsResponseData()
.setThrottleTimeMs(requestThrottleMs)
.setTopics(topics.asJava)))
}
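// Version 0 of ListOffsets uses the legacy offset lookup, which can return multiple old-style offsets per partition.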
private def handleListOffsetRequestV0(request : RequestChannel.Request) : List[ListOffsetsTopicResponse] = {
val correlationId = request.header.correlationId
val clientId = request.header.clientId
val offsetRequest = request.body[ListOffsetsRequest]
val (authorizedRequestInfo, unauthorizedRequestInfo) = authHelper.partitionSeqByAuthorized(request.context,
DESCRIBE, TOPIC, offsetRequest.topics.asScala.toSeq)(_.name)
val unauthorizedResponseStatus = unauthorizedRequestInfo.map(topic =>
new ListOffsetsTopicResponse()
.setName(topic.name)
.setPartitions(topic.partitions.asScala.map(partition =>
new ListOffsetsPartitionResponse()
.setPartitionIndex(partition.partitionIndex)
.setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code)).asJava)
)
val responseTopics = authorizedRequestInfo.map { topic =>
val responsePartitions = topic.partitions.asScala.map { partition =>
val topicPartition = new TopicPartition(topic.name, partition.partitionIndex)
try {
val offsets = replicaManager.legacyFetchOffsetsForTimestamp(
topicPartition = topicPartition,
timestamp = partition.timestamp,
maxNumOffsets = partition.maxNumOffsets,
isFromConsumer = offsetRequest.replicaId == ListOffsetsRequest.CONSUMER_REPLICA_ID,
fetchOnlyFromLeader = offsetRequest.replicaId != ListOffsetsRequest.DEBUGGING_REPLICA_ID)
new ListOffsetsPartitionResponse()
.setPartitionIndex(partition.partitionIndex)
.setErrorCode(Errors.NONE.code)
.setOldStyleOffsets(offsets.map(JLong.valueOf).asJava)
} catch {
// NOTE: UnknownTopicOrPartitionException and NotLeaderOrFollowerException are special cases since these error messages
// are typically transient and there is no value in logging the entire stack trace for the same
case e @ (_ : UnknownTopicOrPartitionException |
_ : NotLeaderOrFollowerException |
_ : KafkaStorageException) =>
debug("Offset request with correlation id %d from client %s on partition %s failed due to %s".format(
correlationId, clientId, topicPartition, e.getMessage))
new ListOffsetsPartitionResponse()
.setPartitionIndex(partition.partitionIndex)
.setErrorCode(Errors.forException(e).code)
case e: Throwable =>
error("Error while responding to offset request", e)
new ListOffsetsPartitionResponse()
.setPartitionIndex(partition.partitionIndex)
.setErrorCode(Errors.forException(e).code)
}
}
new ListOffsetsTopicResponse().setName(topic.name).setPartitions(responsePartitions.asJava)
}
(responseTopics ++ unauthorizedResponseStatus).toList
}
private def handleListOffsetRequestV1AndAbove(request : RequestChannel.Request): List[ListOffsetsTopicResponse] = {
val correlationId = request.header.correlationId
val clientId = request.header.clientId
val offsetRequest = request.body[ListOffsetsRequest]
val version = request.header.apiVersion
def buildErrorResponse(e: Errors, partition: ListOffsetsPartition): ListOffsetsPartitionResponse = {
new ListOffsetsPartitionResponse()
.setPartitionIndex(partition.partitionIndex)
.setErrorCode(e.code)
.setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP)
.setOffset(ListOffsetsResponse.UNKNOWN_OFFSET)
}
val (authorizedRequestInfo, unauthorizedRequestInfo) = authHelper.partitionSeqByAuthorized(request.context,
DESCRIBE, TOPIC, offsetRequest.topics.asScala.toSeq)(_.name)
val unauthorizedResponseStatus = unauthorizedRequestInfo.map(topic =>
new ListOffsetsTopicResponse()
.setName(topic.name)
.setPartitions(topic.partitions.asScala.map(partition =>
buildErrorResponse(Errors.TOPIC_AUTHORIZATION_FAILED, partition)).asJava)
)
val responseTopics = authorizedRequestInfo.map { topic =>
val responsePartitions = topic.partitions.asScala.map { partition =>
val topicPartition = new TopicPartition(topic.name, partition.partitionIndex)
if (offsetRequest.duplicatePartitions.contains(topicPartition)) {
debug(s"OffsetRequest with correlation id $correlationId from client $clientId on partition $topicPartition " +
s"failed because the partition is duplicated in the request.")
buildErrorResponse(Errors.INVALID_REQUEST, partition)
} else {
try {
val fetchOnlyFromLeader = offsetRequest.replicaId != ListOffsetsRequest.DEBUGGING_REPLICA_ID
val isClientRequest = offsetRequest.replicaId == ListOffsetsRequest.CONSUMER_REPLICA_ID
val isolationLevelOpt = if (isClientRequest)
Some(offsetRequest.isolationLevel)
else
None
val foundOpt = replicaManager.fetchOffsetForTimestamp(topicPartition,
partition.timestamp,
isolationLevelOpt,
if (partition.currentLeaderEpoch == ListOffsetsResponse.UNKNOWN_EPOCH) Optional.empty() else Optional.of(partition.currentLeaderEpoch),
fetchOnlyFromLeader)
val response = foundOpt match {
case Some(found) =>
val partitionResponse = new ListOffsetsPartitionResponse()
.setPartitionIndex(partition.partitionIndex)
.setErrorCode(Errors.NONE.code)
.setTimestamp(found.timestamp)
.setOffset(found.offset)
if (found.leaderEpoch.isPresent && version >= 4)
partitionResponse.setLeaderEpoch(found.leaderEpoch.get)
partitionResponse
case None =>
buildErrorResponse(Errors.NONE, partition)
}
response
} catch {
            // NOTE: These exceptions are special cases since these errors are typically transient, or the client
            // would already have received a clear exception, so there is no value in logging the entire stack trace for them.
case e @ (_ : UnknownTopicOrPartitionException |
_ : NotLeaderOrFollowerException |
_ : UnknownLeaderEpochException |
_ : FencedLeaderEpochException |
_ : KafkaStorageException |
_ : UnsupportedForMessageFormatException) =>
debug(s"Offset request with correlation id $correlationId from client $clientId on " +
s"partition $topicPartition failed due to ${e.getMessage}")
buildErrorResponse(Errors.forException(e), partition)
// Only V5 and newer ListOffset calls should get OFFSET_NOT_AVAILABLE
case e: OffsetNotAvailableException =>
if (request.header.apiVersion >= 5) {
buildErrorResponse(Errors.forException(e), partition)
} else {
buildErrorResponse(Errors.LEADER_NOT_AVAILABLE, partition)
}
case e: Throwable =>
error("Error while responding to offset request", e)
buildErrorResponse(Errors.forException(e), partition)
}
}
}
new ListOffsetsTopicResponse().setName(topic.name).setPartitions(responsePartitions.asJava)
}
(responseTopics ++ unauthorizedResponseStatus).toList
}
private def metadataResponseTopic(error: Errors,
topic: String,
isInternal: Boolean,
partitionData: util.List[MetadataResponsePartition]): MetadataResponseTopic = {
new MetadataResponseTopic()
.setErrorCode(error.code)
.setName(topic)
.setIsInternal(isInternal)
.setPartitions(partitionData)
}
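  /**
   * Look up metadata for the requested topics from the metadata cache. Topics that do not exist
   * are either auto-created (when allowed) or reported back as UNKNOWN_TOPIC_OR_PARTITION, or as
   * INVALID_TOPIC_EXCEPTION if the topic name is not valid.
   */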
private def getTopicMetadata(
request: RequestChannel.Request,
fetchAllTopics: Boolean,
allowAutoTopicCreation: Boolean,
topics: Set[String],
listenerName: ListenerName,
errorUnavailableEndpoints: Boolean,
errorUnavailableListeners: Boolean
): Seq[MetadataResponseTopic] = {
val topicResponses = metadataCache.getTopicMetadata(topics, listenerName,
errorUnavailableEndpoints, errorUnavailableListeners)
if (topics.isEmpty || topicResponses.size == topics.size || fetchAllTopics) {
topicResponses
} else {
val nonExistingTopics = topics.diff(topicResponses.map(_.name).toSet)
val nonExistingTopicResponses = if (allowAutoTopicCreation) {
val controllerMutationQuota = quotas.controllerMutation.newPermissiveQuotaFor(request)
autoTopicCreationManager.createTopics(nonExistingTopics, controllerMutationQuota, Some(request.context))
} else {
nonExistingTopics.map { topic =>
val error = try {
Topic.validate(topic)
Errors.UNKNOWN_TOPIC_OR_PARTITION
} catch {
case _: InvalidTopicException =>
Errors.INVALID_TOPIC_EXCEPTION
}
metadataResponseTopic(
error,
topic,
Topic.isInternal(topic),
util.Collections.emptyList()
)
}
}
topicResponses ++ nonExistingTopicResponses
}
}
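  /**
   * Handle a Metadata request: validate the requested topic names, apply Describe (and, when
   * auto-creation applies, Create) authorization, and return topic metadata together with the
   * alive brokers for the request's listener.
   */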
def handleTopicMetadataRequest(request: RequestChannel.Request): Unit = {
val metadataRequest = request.body[MetadataRequest]
val requestVersion = request.header.apiVersion
// Topic IDs are not supported for versions 10 and 11. Topic names can not be null in these versions.
if (!metadataRequest.isAllTopics) {
metadataRequest.data.topics.forEach{ topic =>
if (topic.name == null) {
throw new InvalidRequestException(s"Topic name can not be null for version ${metadataRequest.version}")
} else if (topic.topicId != Uuid.ZERO_UUID) {
throw new InvalidRequestException(s"Topic IDs are not supported in requests for version ${metadataRequest.version}")
}
}
}
val topics = if (metadataRequest.isAllTopics)
metadataCache.getAllTopics()
else
metadataRequest.topics.asScala.toSet
val authorizedForDescribeTopics = authHelper.filterByAuthorized(request.context, DESCRIBE, TOPIC,
topics, logIfDenied = !metadataRequest.isAllTopics)(identity)
var (authorizedTopics, unauthorizedForDescribeTopics) = topics.partition(authorizedForDescribeTopics.contains)
var unauthorizedForCreateTopics = Set[String]()
if (authorizedTopics.nonEmpty) {
val nonExistingTopics = authorizedTopics.filterNot(metadataCache.contains(_))
if (metadataRequest.allowAutoTopicCreation && config.autoCreateTopicsEnable && nonExistingTopics.nonEmpty) {
if (!authHelper.authorize(request.context, CREATE, CLUSTER, CLUSTER_NAME, logIfDenied = false)) {
val authorizedForCreateTopics = authHelper.filterByAuthorized(request.context, CREATE, TOPIC,
nonExistingTopics)(identity)
unauthorizedForCreateTopics = nonExistingTopics.diff(authorizedForCreateTopics)
authorizedTopics = authorizedTopics.diff(unauthorizedForCreateTopics)
}
}
}
val unauthorizedForCreateTopicMetadata = unauthorizedForCreateTopics.map(topic =>
metadataResponseTopic(Errors.TOPIC_AUTHORIZATION_FAILED, topic, isInternal(topic), util.Collections.emptyList()))
    // Do not disclose the existence of topics unauthorized for Describe; we have not even checked whether they exist.
val unauthorizedForDescribeTopicMetadata =
// In case of all topics, don't include topics unauthorized for Describe
if ((requestVersion == 0 && (metadataRequest.topics == null || metadataRequest.topics.isEmpty)) || metadataRequest.isAllTopics)
Set.empty[MetadataResponseTopic]
else
unauthorizedForDescribeTopics.map(topic =>
metadataResponseTopic(Errors.TOPIC_AUTHORIZATION_FAILED, topic, false, util.Collections.emptyList()))
// In version 0, we returned an error when brokers with replicas were unavailable,
// while in higher versions we simply don't include the broker in the returned broker list
val errorUnavailableEndpoints = requestVersion == 0
// In versions 5 and below, we returned LEADER_NOT_AVAILABLE if a matching listener was not found on the leader.
// From version 6 onwards, we return LISTENER_NOT_FOUND to enable diagnosis of configuration errors.
val errorUnavailableListeners = requestVersion >= 6
val allowAutoCreation = config.autoCreateTopicsEnable && metadataRequest.allowAutoTopicCreation && !metadataRequest.isAllTopics
val topicMetadata = getTopicMetadata(request, metadataRequest.isAllTopics, allowAutoCreation, authorizedTopics,
request.context.listenerName, errorUnavailableEndpoints, errorUnavailableListeners)
var clusterAuthorizedOperations = Int.MinValue // Default value in the schema
if (requestVersion >= 8) {
// get cluster authorized operations
if (requestVersion <= 10) {
if (metadataRequest.data.includeClusterAuthorizedOperations) {
if (authHelper.authorize(request.context, DESCRIBE, CLUSTER, CLUSTER_NAME))
clusterAuthorizedOperations = authHelper.authorizedOperations(request, Resource.CLUSTER)
else
clusterAuthorizedOperations = 0
}
}
// get topic authorized operations
if (metadataRequest.data.includeTopicAuthorizedOperations) {
def setTopicAuthorizedOperations(topicMetadata: Seq[MetadataResponseTopic]): Unit = {
topicMetadata.foreach { topicData =>
topicData.setTopicAuthorizedOperations(authHelper.authorizedOperations(request, new Resource(ResourceType.TOPIC, topicData.name)))
}
}
setTopicAuthorizedOperations(topicMetadata)
}
}
val completeTopicMetadata = topicMetadata ++ unauthorizedForCreateTopicMetadata ++ unauthorizedForDescribeTopicMetadata
val brokers = metadataCache.getAliveBrokerNodes(request.context.listenerName)
trace("Sending topic metadata %s and brokers %s for correlation id %d to client %s".format(completeTopicMetadata.mkString(","),
brokers.mkString(","), request.header.correlationId, request.header.clientId))
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
MetadataResponse.prepareResponse(
requestVersion,
requestThrottleMs,
brokers.toList.asJava,
clusterId,
metadataSupport.controllerId.getOrElse(MetadataResponse.NO_CONTROLLER_ID),
completeTopicMetadata.asJava,
clusterAuthorizedOperations
))
}
/**
* Handle an offset fetch request
*/
def handleOffsetFetchRequest(request: RequestChannel.Request): Unit = {
val version = request.header.apiVersion
if (version == 0) {
// reading offsets from ZK
handleOffsetFetchRequestV0(request)
} else if (version >= 1 && version <= 7) {
// reading offsets from Kafka
handleOffsetFetchRequestBetweenV1AndV7(request)
} else {
      // batching offset reads for multiple groups is supported starting with version 8
handleOffsetFetchRequestV8AndAbove(request)
}
}
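  /**
   * Handle version 0 OffsetFetch requests, which read committed offsets from ZooKeeper.
   */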
private def handleOffsetFetchRequestV0(request: RequestChannel.Request): Unit = {
val header = request.header
val offsetFetchRequest = request.body[OffsetFetchRequest]
def createResponse(requestThrottleMs: Int): AbstractResponse = {
val offsetFetchResponse =
        // reject the request if the client is not authorized to describe the group
if (!authHelper.authorize(request.context, DESCRIBE, GROUP, offsetFetchRequest.groupId))
offsetFetchRequest.getErrorResponse(requestThrottleMs, Errors.GROUP_AUTHORIZATION_FAILED)
else {
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.unsupported("Version 0 offset fetch requests"))
val (authorizedPartitions, unauthorizedPartitions) = partitionByAuthorized(
offsetFetchRequest.partitions.asScala, request.context)
// version 0 reads offsets from ZK
val authorizedPartitionData = authorizedPartitions.map { topicPartition =>
try {
if (!metadataCache.contains(topicPartition))
(topicPartition, OffsetFetchResponse.UNKNOWN_PARTITION)
else {
val payloadOpt = zkSupport.zkClient.getConsumerOffset(offsetFetchRequest.groupId, topicPartition)
payloadOpt match {
case Some(payload) =>
(topicPartition, new OffsetFetchResponse.PartitionData(payload.toLong,
Optional.empty(), OffsetFetchResponse.NO_METADATA, Errors.NONE))
case None =>
(topicPartition, OffsetFetchResponse.UNKNOWN_PARTITION)
}
}
} catch {
case e: Throwable =>
(topicPartition, new OffsetFetchResponse.PartitionData(OffsetFetchResponse.INVALID_OFFSET,
Optional.empty(), OffsetFetchResponse.NO_METADATA, Errors.forException(e)))
}
}.toMap
val unauthorizedPartitionData = unauthorizedPartitions.map(_ -> OffsetFetchResponse.UNAUTHORIZED_PARTITION).toMap
new OffsetFetchResponse(requestThrottleMs, Errors.NONE, (authorizedPartitionData ++ unauthorizedPartitionData).asJava)
}
trace(s"Sending offset fetch response $offsetFetchResponse for correlation id ${header.correlationId} to client ${header.clientId}.")
offsetFetchResponse
}
requestHelper.sendResponseMaybeThrottle(request, createResponse)
}
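  /**
   * Handle OffsetFetch versions 1 to 7, which read committed offsets for a single group from the
   * group coordinator.
   */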
private def handleOffsetFetchRequestBetweenV1AndV7(request: RequestChannel.Request): Unit = {
val header = request.header
val offsetFetchRequest = request.body[OffsetFetchRequest]
val groupId = offsetFetchRequest.groupId()
val (error, partitionData) = fetchOffsets(groupId, offsetFetchRequest.isAllPartitions,
offsetFetchRequest.requireStable, offsetFetchRequest.partitions, request.context)
def createResponse(requestThrottleMs: Int): AbstractResponse = {
val offsetFetchResponse =
if (error != Errors.NONE) {
offsetFetchRequest.getErrorResponse(requestThrottleMs, error)
} else {
new OffsetFetchResponse(requestThrottleMs, Errors.NONE, partitionData.asJava)
}
trace(s"Sending offset fetch response $offsetFetchResponse for correlation id ${header.correlationId} to client ${header.clientId}.")
offsetFetchResponse
}
requestHelper.sendResponseMaybeThrottle(request, createResponse)
}
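  /**
   * Handle OffsetFetch versions 8 and above, which can read committed offsets for multiple groups
   * in a single request.
   */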
private def handleOffsetFetchRequestV8AndAbove(request: RequestChannel.Request): Unit = {
val header = request.header
val offsetFetchRequest = request.body[OffsetFetchRequest]
val groupIds = offsetFetchRequest.groupIds().asScala
val groupToErrorMap = mutable.Map.empty[String, Errors]
val groupToPartitionData = mutable.Map.empty[String, util.Map[TopicPartition, PartitionData]]
val groupToTopicPartitions = offsetFetchRequest.groupIdsToPartitions()
groupIds.foreach(g => {
val (error, partitionData) = fetchOffsets(g,
offsetFetchRequest.isAllPartitionsForGroup(g),
offsetFetchRequest.requireStable(),
groupToTopicPartitions.get(g), request.context)
groupToErrorMap += (g -> error)
groupToPartitionData += (g -> partitionData.asJava)
})
def createResponse(requestThrottleMs: Int): AbstractResponse = {
val offsetFetchResponse = new OffsetFetchResponse(requestThrottleMs,
groupToErrorMap.asJava, groupToPartitionData.asJava)
trace(s"Sending offset fetch response $offsetFetchResponse for correlation id ${header.correlationId} to client ${header.clientId}.")
offsetFetchResponse
}
requestHelper.sendResponseMaybeThrottle(request, createResponse)
}
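  /**
   * Fetch committed offsets for a group, either for all partitions or for the given list, hiding
   * partitions whose topics the caller is not authorized to describe.
   */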
private def fetchOffsets(groupId: String, isAllPartitions: Boolean, requireStable: Boolean,
partitions: util.List[TopicPartition], context: RequestContext): (Errors, Map[TopicPartition, OffsetFetchResponse.PartitionData]) = {
if (!authHelper.authorize(context, DESCRIBE, GROUP, groupId)) {
(Errors.GROUP_AUTHORIZATION_FAILED, Map.empty)
} else {
if (isAllPartitions) {
val (error, allPartitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable)
if (error != Errors.NONE) {
(error, allPartitionData)
} else {
// clients are not allowed to see offsets for topics that are not authorized for Describe
val (authorizedPartitionData, _) = authHelper.partitionMapByAuthorized(context,
DESCRIBE, TOPIC, allPartitionData)(_.topic)
(Errors.NONE, authorizedPartitionData)
}
} else {
val (authorizedPartitions, unauthorizedPartitions) = partitionByAuthorized(
partitions.asScala, context)
val (error, authorizedPartitionData) = groupCoordinator.handleFetchOffsets(groupId,
requireStable, Some(authorizedPartitions))
if (error != Errors.NONE) {
(error, authorizedPartitionData)
} else {
val unauthorizedPartitionData = unauthorizedPartitions.map(_ -> OffsetFetchResponse.UNAUTHORIZED_PARTITION).toMap
(Errors.NONE, authorizedPartitionData ++ unauthorizedPartitionData)
}
}
}
}
private def partitionByAuthorized(seq: Seq[TopicPartition], context: RequestContext):
(Seq[TopicPartition], Seq[TopicPartition]) =
authHelper.partitionSeqByAuthorized(context, DESCRIBE, TOPIC, seq)(_.topic)
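  /**
   * Handle a FindCoordinator request, dispatching to the single-key handler for versions below 4
   * and to the batched handler for version 4 and above.
   */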
def handleFindCoordinatorRequest(request: RequestChannel.Request): Unit = {
val version = request.header.apiVersion
if (version < 4) {
handleFindCoordinatorRequestLessThanV4(request)
} else {
handleFindCoordinatorRequestV4AndAbove(request)
}
}
private def handleFindCoordinatorRequestV4AndAbove(request: RequestChannel.Request): Unit = {
val findCoordinatorRequest = request.body[FindCoordinatorRequest]
val coordinators = findCoordinatorRequest.data.coordinatorKeys.asScala.map { key =>
val (error, node) = getCoordinator(request, findCoordinatorRequest.data.keyType, key)
new FindCoordinatorResponseData.Coordinator()
.setKey(key)
.setErrorCode(error.code)
.setHost(node.host)
.setNodeId(node.id)
.setPort(node.port)
}
def createResponse(requestThrottleMs: Int): AbstractResponse = {
val response = new FindCoordinatorResponse(
new FindCoordinatorResponseData()
.setCoordinators(coordinators.asJava)
.setThrottleTimeMs(requestThrottleMs))
trace("Sending FindCoordinator response %s for correlation id %d to client %s."
.format(response, request.header.correlationId, request.header.clientId))
response
}
requestHelper.sendResponseMaybeThrottle(request, createResponse)
}
private def handleFindCoordinatorRequestLessThanV4(request: RequestChannel.Request): Unit = {
val findCoordinatorRequest = request.body[FindCoordinatorRequest]
val (error, node) = getCoordinator(request, findCoordinatorRequest.data.keyType, findCoordinatorRequest.data.key)
def createResponse(requestThrottleMs: Int): AbstractResponse = {
val responseBody = new FindCoordinatorResponse(
new FindCoordinatorResponseData()
.setErrorCode(error.code)
.setErrorMessage(error.message())
.setNodeId(node.id)
.setHost(node.host)
.setPort(node.port)
.setThrottleTimeMs(requestThrottleMs))
trace("Sending FindCoordinator response %s for correlation id %d to client %s."
.format(responseBody, request.header.correlationId, request.header.clientId))
responseBody
}
if (error == Errors.NONE) {
requestHelper.sendResponseMaybeThrottle(request, createResponse)
} else {
requestHelper.sendErrorResponseMaybeThrottle(request, error.exception)
}
}
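  /**
   * Resolve the coordinator node for a group or transactional id: check Describe authorization,
   * locate the corresponding partition of the internal topic, and return its leader's endpoint,
   * triggering auto-creation of the internal topic if it does not exist yet.
   */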
private def getCoordinator(request: RequestChannel.Request, keyType: Byte, key: String): (Errors, Node) = {
if (keyType == CoordinatorType.GROUP.id &&
!authHelper.authorize(request.context, DESCRIBE, GROUP, key))
(Errors.GROUP_AUTHORIZATION_FAILED, Node.noNode)
else if (keyType == CoordinatorType.TRANSACTION.id &&
!authHelper.authorize(request.context, DESCRIBE, TRANSACTIONAL_ID, key))
(Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED, Node.noNode)
else {
val (partition, internalTopicName) = CoordinatorType.forId(keyType) match {
case CoordinatorType.GROUP =>
(groupCoordinator.partitionFor(key), GROUP_METADATA_TOPIC_NAME)
case CoordinatorType.TRANSACTION =>
(txnCoordinator.partitionFor(key), TRANSACTION_STATE_TOPIC_NAME)
}
val topicMetadata = metadataCache.getTopicMetadata(Set(internalTopicName), request.context.listenerName)
if (topicMetadata.headOption.isEmpty) {
val controllerMutationQuota = quotas.controllerMutation.newPermissiveQuotaFor(request)
autoTopicCreationManager.createTopics(Seq(internalTopicName).toSet, controllerMutationQuota, None)
(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode)
} else {
if (topicMetadata.head.errorCode != Errors.NONE.code) {
(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode)
} else {
val coordinatorEndpoint = topicMetadata.head.partitions.asScala
.find(_.partitionIndex == partition)
.filter(_.leaderId != MetadataResponse.NO_LEADER_ID)
.flatMap(metadata => metadataCache.
getAliveBrokerNode(metadata.leaderId, request.context.listenerName))
coordinatorEndpoint match {
case Some(endpoint) =>
(Errors.NONE, endpoint)
case _ =>
(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode)
}
}
}
}
}
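  /**
   * Handle a DescribeGroups request by asking the group coordinator for a summary of each
   * authorized group, including its members and, for version 3 and above, its authorized
   * operations.
   */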
def handleDescribeGroupRequest(request: RequestChannel.Request): Unit = {
def sendResponseCallback(describeGroupsResponseData: DescribeGroupsResponseData): Unit = {
def createResponse(requestThrottleMs: Int): AbstractResponse = {
describeGroupsResponseData.setThrottleTimeMs(requestThrottleMs)
new DescribeGroupsResponse(describeGroupsResponseData)
}
requestHelper.sendResponseMaybeThrottle(request, createResponse)
}
val describeRequest = request.body[DescribeGroupsRequest]
val describeGroupsResponseData = new DescribeGroupsResponseData()
describeRequest.data.groups.forEach { groupId =>
if (!authHelper.authorize(request.context, DESCRIBE, GROUP, groupId)) {
describeGroupsResponseData.groups.add(DescribeGroupsResponse.forError(groupId, Errors.GROUP_AUTHORIZATION_FAILED))
} else {
val (error, summary) = groupCoordinator.handleDescribeGroup(groupId)
val members = summary.members.map { member =>
new DescribeGroupsResponseData.DescribedGroupMember()
.setMemberId(member.memberId)
.setGroupInstanceId(member.groupInstanceId.orNull)
.setClientId(member.clientId)
.setClientHost(member.clientHost)
.setMemberAssignment(member.assignment)
.setMemberMetadata(member.metadata)
}
val describedGroup = new DescribeGroupsResponseData.DescribedGroup()
.setErrorCode(error.code)
.setGroupId(groupId)
.setGroupState(summary.state)
.setProtocolType(summary.protocolType)
.setProtocolData(summary.protocol)
.setMembers(members.asJava)
if (request.header.apiVersion >= 3) {
if (error == Errors.NONE && describeRequest.data.includeAuthorizedOperations) {
describedGroup.setAuthorizedOperations(authHelper.authorizedOperations(request, new Resource(ResourceType.GROUP, groupId)))
}
}
describeGroupsResponseData.groups.add(describedGroup)
}
}
sendResponseCallback(describeGroupsResponseData)
}
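  /**
   * Handle a ListGroups request, optionally filtered by group state. Without Describe access on
   * the cluster, the result only contains groups the caller is authorized to describe.
   */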
def handleListGroupsRequest(request: RequestChannel.Request): Unit = {
val listGroupsRequest = request.body[ListGroupsRequest]
val states = if (listGroupsRequest.data.statesFilter == null)
// Handle a null array the same as empty
immutable.Set[String]()
else
listGroupsRequest.data.statesFilter.asScala.toSet
def createResponse(throttleMs: Int, groups: List[GroupOverview], error: Errors): AbstractResponse = {
new ListGroupsResponse(new ListGroupsResponseData()
.setErrorCode(error.code)
.setGroups(groups.map { group =>
val listedGroup = new ListGroupsResponseData.ListedGroup()
.setGroupId(group.groupId)
.setProtocolType(group.protocolType)
.setGroupState(group.state.toString)
listedGroup
}.asJava)
.setThrottleTimeMs(throttleMs)
)
}
val (error, groups) = groupCoordinator.handleListGroups(states)
if (authHelper.authorize(request.context, DESCRIBE, CLUSTER, CLUSTER_NAME))
      // With Describe Cluster access, all groups are returned. We keep this alternative for backward compatibility.
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
createResponse(requestThrottleMs, groups, error))
else {
val filteredGroups = groups.filter(group => authHelper.authorize(request.context, DESCRIBE, GROUP, group.groupId))
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
createResponse(requestThrottleMs, filteredGroups, error))
}
}
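  /**
   * Handle a JoinGroup request: validate static membership support and group Read authorization,
   * then let the group coordinator drive the join protocol.
   */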
def handleJoinGroupRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
val joinGroupRequest = request.body[JoinGroupRequest]
// the callback for sending a join-group response
def sendResponseCallback(joinResult: JoinGroupResult): Unit = {
def createResponse(requestThrottleMs: Int): AbstractResponse = {
val protocolName = if (request.context.apiVersion() >= 7)
joinResult.protocolName.orNull
else
joinResult.protocolName.getOrElse(GroupCoordinator.NoProtocol)
val responseBody = new JoinGroupResponse(
new JoinGroupResponseData()
.setThrottleTimeMs(requestThrottleMs)
.setErrorCode(joinResult.error.code)
.setGenerationId(joinResult.generationId)
.setProtocolType(joinResult.protocolType.orNull)
.setProtocolName(protocolName)
.setLeader(joinResult.leaderId)
.setMemberId(joinResult.memberId)
.setMembers(joinResult.members.asJava)
)
trace("Sending join group response %s for correlation id %d to client %s."
.format(responseBody, request.header.correlationId, request.header.clientId))
responseBody
}
requestHelper.sendResponseMaybeThrottle(request, createResponse)
}
if (joinGroupRequest.data.groupInstanceId != null && config.interBrokerProtocolVersion < KAFKA_2_3_IV0) {
      // Only enable static membership when IBP >= 2.3, because it is not safe for the broker to use the static member logic
      // until we are sure that all brokers support it. If a static group is loaded by an older coordinator, it will discard
      // the group.instance.id field, so static members could accidentally become "dynamic", which leads to incorrect state.
sendResponseCallback(JoinGroupResult(JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.UNSUPPORTED_VERSION))
} else if (!authHelper.authorize(request.context, READ, GROUP, joinGroupRequest.data.groupId)) {
sendResponseCallback(JoinGroupResult(JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.GROUP_AUTHORIZATION_FAILED))
} else {
val groupInstanceId = Option(joinGroupRequest.data.groupInstanceId)
      // Only return the MEMBER_ID_REQUIRED error if the JoinGroup request version is >= 4
      // and no group instance id is provided.
val requireKnownMemberId = joinGroupRequest.version >= 4 && groupInstanceId.isEmpty
// let the coordinator handle join-group
val protocols = joinGroupRequest.data.protocols.valuesList.asScala.map(protocol =>
(protocol.name, protocol.metadata)).toList
groupCoordinator.handleJoinGroup(
joinGroupRequest.data.groupId,
joinGroupRequest.data.memberId,
groupInstanceId,
requireKnownMemberId,
request.header.clientId,
request.context.clientAddress.toString,
joinGroupRequest.data.rebalanceTimeoutMs,
joinGroupRequest.data.sessionTimeoutMs,
joinGroupRequest.data.protocolType,
protocols,
sendResponseCallback,
requestLocal)
}
}
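  /**
   * Handle a SyncGroup request: require the protocol type and name when they are mandatory, check
   * group Read authorization, and pass the leader-provided assignments to the group coordinator.
   */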
def handleSyncGroupRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
val syncGroupRequest = request.body[SyncGroupRequest]
def sendResponseCallback(syncGroupResult: SyncGroupResult): Unit = {
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new SyncGroupResponse(
new SyncGroupResponseData()
.setErrorCode(syncGroupResult.error.code)
.setProtocolType(syncGroupResult.protocolType.orNull)
.setProtocolName(syncGroupResult.protocolName.orNull)
.setAssignment(syncGroupResult.memberAssignment)
.setThrottleTimeMs(requestThrottleMs)
))
}
if (syncGroupRequest.data.groupInstanceId != null && config.interBrokerProtocolVersion < KAFKA_2_3_IV0) {
      // Only enable static membership when IBP >= 2.3, because it is not safe for the broker to use the static member logic
      // until we are sure that all brokers support it. If a static group is loaded by an older coordinator, it will discard
      // the group.instance.id field, so static members could accidentally become "dynamic", which leads to incorrect state.
sendResponseCallback(SyncGroupResult(Errors.UNSUPPORTED_VERSION))
} else if (!syncGroupRequest.areMandatoryProtocolTypeAndNamePresent()) {
// Starting from version 5, ProtocolType and ProtocolName fields are mandatory.
sendResponseCallback(SyncGroupResult(Errors.INCONSISTENT_GROUP_PROTOCOL))
} else if (!authHelper.authorize(request.context, READ, GROUP, syncGroupRequest.data.groupId)) {
sendResponseCallback(SyncGroupResult(Errors.GROUP_AUTHORIZATION_FAILED))
} else {
val assignmentMap = immutable.Map.newBuilder[String, Array[Byte]]
syncGroupRequest.data.assignments.forEach { assignment =>
assignmentMap += (assignment.memberId -> assignment.assignment)
}
groupCoordinator.handleSyncGroup(
syncGroupRequest.data.groupId,
syncGroupRequest.data.generationId,
syncGroupRequest.data.memberId,
Option(syncGroupRequest.data.protocolType),
Option(syncGroupRequest.data.protocolName),
Option(syncGroupRequest.data.groupInstanceId),
assignmentMap.result(),
sendResponseCallback,
requestLocal
)
}
}
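  /**
   * Handle a DeleteGroups request: delete the groups the caller may delete via the group
   * coordinator and report GROUP_AUTHORIZATION_FAILED for the rest.
   */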
def handleDeleteGroupsRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
val deleteGroupsRequest = request.body[DeleteGroupsRequest]
val groups = deleteGroupsRequest.data.groupsNames.asScala.distinct
val (authorizedGroups, unauthorizedGroups) = authHelper.partitionSeqByAuthorized(request.context, DELETE, GROUP,
groups)(identity)
val groupDeletionResult = groupCoordinator.handleDeleteGroups(authorizedGroups.toSet, requestLocal) ++
unauthorizedGroups.map(_ -> Errors.GROUP_AUTHORIZATION_FAILED)
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => {
val deletionCollections = new DeletableGroupResultCollection()
groupDeletionResult.forKeyValue { (groupId, error) =>
deletionCollections.add(new DeletableGroupResult()
.setGroupId(groupId)
.setErrorCode(error.code)
)
}
new DeleteGroupsResponse(new DeleteGroupsResponseData()
.setResults(deletionCollections)
.setThrottleTimeMs(requestThrottleMs)
)
})
}
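  /**
   * Handle a Heartbeat request by forwarding it to the group coordinator after checking static
   * membership support and group Read authorization.
   */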
def handleHeartbeatRequest(request: RequestChannel.Request): Unit = {
val heartbeatRequest = request.body[HeartbeatRequest]
// the callback for sending a heartbeat response
def sendResponseCallback(error: Errors): Unit = {
def createResponse(requestThrottleMs: Int): AbstractResponse = {
val response = new HeartbeatResponse(
new HeartbeatResponseData()
.setThrottleTimeMs(requestThrottleMs)
.setErrorCode(error.code))
trace("Sending heartbeat response %s for correlation id %d to client %s."
.format(response, request.header.correlationId, request.header.clientId))
response
}
requestHelper.sendResponseMaybeThrottle(request, createResponse)
}
if (heartbeatRequest.data.groupInstanceId != null && config.interBrokerProtocolVersion < KAFKA_2_3_IV0) {
      // Only enable static membership when IBP >= 2.3, because it is not safe for the broker to use the static member logic
      // until we are sure that all brokers support it. If a static group is loaded by an older coordinator, it will discard
      // the group.instance.id field, so static members could accidentally become "dynamic", which leads to incorrect state.
sendResponseCallback(Errors.UNSUPPORTED_VERSION)
} else if (!authHelper.authorize(request.context, READ, GROUP, heartbeatRequest.data.groupId)) {
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new HeartbeatResponse(
new HeartbeatResponseData()
.setThrottleTimeMs(requestThrottleMs)
.setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code)))
} else {
      // let the coordinator handle the heartbeat
groupCoordinator.handleHeartbeat(
heartbeatRequest.data.groupId,
heartbeatRequest.data.memberId,
Option(heartbeatRequest.data.groupInstanceId),
heartbeatRequest.data.generationId,
sendResponseCallback)
}
}
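  /**
   * Handle a LeaveGroup request by forwarding the leaving members to the group coordinator.
   */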
def handleLeaveGroupRequest(request: RequestChannel.Request): Unit = {
val leaveGroupRequest = request.body[LeaveGroupRequest]
val members = leaveGroupRequest.members.asScala.toList
if (!authHelper.authorize(request.context, READ, GROUP, leaveGroupRequest.data.groupId)) {
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => {
new LeaveGroupResponse(new LeaveGroupResponseData()
.setThrottleTimeMs(requestThrottleMs)
.setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code)
)
})
} else {
      def sendResponseCallback(leaveGroupResult: LeaveGroupResult): Unit = {
val memberResponses = leaveGroupResult.memberResponses.map(
leaveGroupResult =>
new MemberResponse()
.setErrorCode(leaveGroupResult.error.code)
.setMemberId(leaveGroupResult.memberId)
.setGroupInstanceId(leaveGroupResult.groupInstanceId.orNull)
)
def createResponse(requestThrottleMs: Int): AbstractResponse = {
new LeaveGroupResponse(
memberResponses.asJava,
leaveGroupResult.topLevelError,
requestThrottleMs,
leaveGroupRequest.version)
}
requestHelper.sendResponseMaybeThrottle(request, createResponse)
}
groupCoordinator.handleLeaveGroup(
leaveGroupRequest.data.groupId,
members,
sendResponseCallback)
}
}
def handleSaslHandshakeRequest(request: RequestChannel.Request): Unit = {
val responseData = new SaslHandshakeResponseData().setErrorCode(Errors.ILLEGAL_SASL_STATE.code)
requestHelper.sendResponseMaybeThrottle(request, _ => new SaslHandshakeResponse(responseData))
}
def handleSaslAuthenticateRequest(request: RequestChannel.Request): Unit = {
val responseData = new SaslAuthenticateResponseData()
.setErrorCode(Errors.ILLEGAL_SASL_STATE.code)
.setErrorMessage("SaslAuthenticate request received after successful authentication")
requestHelper.sendResponseMaybeThrottle(request, _ => new SaslAuthenticateResponse(responseData))
}
def handleApiVersionsRequest(request: RequestChannel.Request): Unit = {
    // Note that the broker returns its full list of supported ApiKeys and versions regardless of the current
    // authentication state (e.g. before SASL authentication on a SASL listener; note that no Kafka protocol
    // requests can take place on an SSL listener before the SSL handshake is finished).
    // If this is considered to leak information about the broker version, a workaround is to use SSL with
    // client authentication, which is performed at an earlier stage of the connection where the
    // ApiVersionsRequest is not available.
def createResponseCallback(requestThrottleMs: Int): ApiVersionsResponse = {
val apiVersionRequest = request.body[ApiVersionsRequest]
if (apiVersionRequest.hasUnsupportedRequestVersion) {
apiVersionRequest.getErrorResponse(requestThrottleMs, Errors.UNSUPPORTED_VERSION.exception)
} else if (!apiVersionRequest.isValid) {
apiVersionRequest.getErrorResponse(requestThrottleMs, Errors.INVALID_REQUEST.exception)
} else {
apiVersionManager.apiVersionResponse(requestThrottleMs)
}
}
requestHelper.sendResponseMaybeThrottle(request, createResponseCallback)
}
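  /**
   * Handle a CreateTopics request on the ZK-backed controller path: reject it if this broker is
   * not the active controller, apply Create authorization (cluster-wide or per topic), flag
   * duplicate topic entries, and delegate the actual creation to the admin manager.
   */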
def handleCreateTopicsRequest(request: RequestChannel.Request): Unit = {
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request))
val controllerMutationQuota = quotas.controllerMutation.newQuotaFor(request, strictSinceVersion = 6)
def sendResponseCallback(results: CreatableTopicResultCollection): Unit = {
def createResponse(requestThrottleMs: Int): AbstractResponse = {
val responseData = new CreateTopicsResponseData()
.setThrottleTimeMs(requestThrottleMs)
.setTopics(results)
val responseBody = new CreateTopicsResponse(responseData)
trace(s"Sending create topics response $responseData for correlation id " +
s"${request.header.correlationId} to client ${request.header.clientId}.")
responseBody
}
requestHelper.sendResponseMaybeThrottleWithControllerQuota(controllerMutationQuota, request, createResponse)
}
val createTopicsRequest = request.body[CreateTopicsRequest]
val results = new CreatableTopicResultCollection(createTopicsRequest.data.topics.size)
if (!zkSupport.controller.isActive) {
createTopicsRequest.data.topics.forEach { topic =>
results.add(new CreatableTopicResult().setName(topic.name)
.setErrorCode(Errors.NOT_CONTROLLER.code))
}
sendResponseCallback(results)
} else {
createTopicsRequest.data.topics.forEach { topic =>
results.add(new CreatableTopicResult().setName(topic.name))
}
val hasClusterAuthorization = authHelper.authorize(request.context, CREATE, CLUSTER, CLUSTER_NAME,
logIfDenied = false)
val topics = createTopicsRequest.data.topics.asScala.map(_.name)
val authorizedTopics =
if (hasClusterAuthorization) topics.toSet
else authHelper.filterByAuthorized(request.context, CREATE, TOPIC, topics)(identity)
val authorizedForDescribeConfigs = authHelper.filterByAuthorized(request.context, DESCRIBE_CONFIGS, TOPIC,
topics, logIfDenied = false)(identity).map(name => name -> results.find(name)).toMap
results.forEach { topic =>
if (results.findAll(topic.name).size > 1) {
topic.setErrorCode(Errors.INVALID_REQUEST.code)
topic.setErrorMessage("Found multiple entries for this topic.")
} else if (!authorizedTopics.contains(topic.name)) {
topic.setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code)
topic.setErrorMessage("Authorization failed.")
}
if (!authorizedForDescribeConfigs.contains(topic.name)) {
topic.setTopicConfigErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code)
}
}
val toCreate = mutable.Map[String, CreatableTopic]()
createTopicsRequest.data.topics.forEach { topic =>
if (results.find(topic.name).errorCode == Errors.NONE.code) {
toCreate += topic.name -> topic
}
}
def handleCreateTopicsResults(errors: Map[String, ApiError]): Unit = {
errors.foreach { case (topicName, error) =>
val result = results.find(topicName)
result.setErrorCode(error.error.code)
.setErrorMessage(error.message)
// Reset any configs in the response if Create failed
if (error != ApiError.NONE) {
result.setConfigs(List.empty.asJava)
.setNumPartitions(-1)
.setReplicationFactor(-1)
.setTopicConfigErrorCode(Errors.NONE.code)
}
}
sendResponseCallback(results)
}
zkSupport.adminManager.createTopics(
createTopicsRequest.data.timeoutMs,
createTopicsRequest.data.validateOnly,
toCreate,
authorizedForDescribeConfigs,
controllerMutationQuota,
handleCreateTopicsResults)
}
}
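  /**
   * Handle a CreatePartitions request on the ZK-backed controller path: report duplicates,
   * unauthorized topics and topics queued for deletion as errors, and delegate the partition
   * expansion of the remaining topics to the admin manager.
   */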
def handleCreatePartitionsRequest(request: RequestChannel.Request): Unit = {
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request))
val createPartitionsRequest = request.body[CreatePartitionsRequest]
val controllerMutationQuota = quotas.controllerMutation.newQuotaFor(request, strictSinceVersion = 3)
def sendResponseCallback(results: Map[String, ApiError]): Unit = {
def createResponse(requestThrottleMs: Int): AbstractResponse = {
val createPartitionsResults = results.map {
case (topic, error) => new CreatePartitionsTopicResult()
.setName(topic)
.setErrorCode(error.error.code)
.setErrorMessage(error.message)
}.toSeq
val responseBody = new CreatePartitionsResponse(new CreatePartitionsResponseData()
.setThrottleTimeMs(requestThrottleMs)
.setResults(createPartitionsResults.asJava))
trace(s"Sending create partitions response $responseBody for correlation id ${request.header.correlationId} to " +
s"client ${request.header.clientId}.")
responseBody
}
requestHelper.sendResponseMaybeThrottleWithControllerQuota(controllerMutationQuota, request, createResponse)
}
if (!zkSupport.controller.isActive) {
val result = createPartitionsRequest.data.topics.asScala.map { topic =>
(topic.name, new ApiError(Errors.NOT_CONTROLLER, null))
}.toMap
sendResponseCallback(result)
} else {
// Special handling to add duplicate topics to the response
val topics = createPartitionsRequest.data.topics.asScala.toSeq
val dupes = topics.groupBy(_.name)
.filter { _._2.size > 1 }
.keySet
val notDuped = topics.filterNot(topic => dupes.contains(topic.name))
val (authorized, unauthorized) = authHelper.partitionSeqByAuthorized(request.context, ALTER, TOPIC,
notDuped)(_.name)
val (queuedForDeletion, valid) = authorized.partition { topic =>
zkSupport.controller.topicDeletionManager.isTopicQueuedUpForDeletion(topic.name)
}
val errors = dupes.map(_ -> new ApiError(Errors.INVALID_REQUEST, "Duplicate topic in request.")) ++
        unauthorized.map(_.name -> new ApiError(Errors.TOPIC_AUTHORIZATION_FAILED, "Topic authorization failed.")) ++
queuedForDeletion.map(_.name -> new ApiError(Errors.INVALID_TOPIC_EXCEPTION, "The topic is queued for deletion."))
zkSupport.adminManager.createPartitions(
createPartitionsRequest.data.timeoutMs,
valid,
createPartitionsRequest.data.validateOnly,
controllerMutationQuota,
result => sendResponseCallback(result ++ errors))
}
}
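  /**
   * Handle a DeleteTopics request on the ZK-backed controller path: resolve topic ids to names,
   * apply Describe and Delete authorization, honor the delete.topic.enable config, and delegate
   * the deletion to the admin manager.
   */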
def handleDeleteTopicsRequest(request: RequestChannel.Request): Unit = {
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request))
val controllerMutationQuota = quotas.controllerMutation.newQuotaFor(request, strictSinceVersion = 5)
def sendResponseCallback(results: DeletableTopicResultCollection): Unit = {
def createResponse(requestThrottleMs: Int): AbstractResponse = {
val responseData = new DeleteTopicsResponseData()
.setThrottleTimeMs(requestThrottleMs)
.setResponses(results)
val responseBody = new DeleteTopicsResponse(responseData)
trace(s"Sending delete topics response $responseBody for correlation id ${request.header.correlationId} to client ${request.header.clientId}.")
responseBody
}
requestHelper.sendResponseMaybeThrottleWithControllerQuota(controllerMutationQuota, request, createResponse)
}
val deleteTopicRequest = request.body[DeleteTopicsRequest]
val results = new DeletableTopicResultCollection(deleteTopicRequest.numberOfTopics())
val toDelete = mutable.Set[String]()
if (!zkSupport.controller.isActive) {
deleteTopicRequest.topics().forEach { topic =>
results.add(new DeletableTopicResult()
.setName(topic.name())
.setTopicId(topic.topicId())
.setErrorCode(Errors.NOT_CONTROLLER.code))
}
sendResponseCallback(results)
} else if (!config.deleteTopicEnable) {
val error = if (request.context.apiVersion < 3) Errors.INVALID_REQUEST else Errors.TOPIC_DELETION_DISABLED
deleteTopicRequest.topics().forEach { topic =>
results.add(new DeletableTopicResult()
.setName(topic.name())
.setTopicId(topic.topicId())
.setErrorCode(error.code))
}
sendResponseCallback(results)
} else {
val topicIdsFromRequest = deleteTopicRequest.topicIds().asScala.filter(topicId => topicId != Uuid.ZERO_UUID).toSet
deleteTopicRequest.topics().forEach { topic =>
if (topic.name() != null && topic.topicId() != Uuid.ZERO_UUID)
throw new InvalidRequestException("Topic name and topic ID can not both be specified.")
val name = if (topic.topicId() == Uuid.ZERO_UUID) topic.name()
else zkSupport.controller.controllerContext.topicName(topic.topicId).orNull
results.add(new DeletableTopicResult()
.setName(name)
.setTopicId(topic.topicId()))
}
val authorizedDescribeTopics = authHelper.filterByAuthorized(request.context, DESCRIBE, TOPIC,
results.asScala.filter(result => result.name() != null))(_.name)
val authorizedDeleteTopics = authHelper.filterByAuthorized(request.context, DELETE, TOPIC,
results.asScala.filter(result => result.name() != null))(_.name)
results.forEach { topic =>
val unresolvedTopicId = topic.topicId() != Uuid.ZERO_UUID && topic.name() == null
if (unresolvedTopicId) {
topic.setErrorCode(Errors.UNKNOWN_TOPIC_ID.code)
} else if (topicIdsFromRequest.contains(topic.topicId) && !authorizedDescribeTopics.contains(topic.name)) {
// Because the client does not have Describe permission, the name should
// not be returned in the response. Note, however, that we do not consider
// the topicId itself to be sensitive, so there is no reason to obscure
// this case with `UNKNOWN_TOPIC_ID`.
topic.setName(null)
topic.setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code)
} else if (!authorizedDeleteTopics.contains(topic.name)) {
topic.setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code)
} else if (!metadataCache.contains(topic.name)) {
topic.setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code)
} else {
toDelete += topic.name
}
}
      // If there are no authorized topics, return immediately
if (toDelete.isEmpty)
sendResponseCallback(results)
else {
def handleDeleteTopicsResults(errors: Map[String, Errors]): Unit = {
errors.foreach {
case (topicName, error) =>
results.find(topicName)
.setErrorCode(error.code)
}
sendResponseCallback(results)
}
zkSupport.adminManager.deleteTopics(
deleteTopicRequest.data.timeoutMs,
toDelete,
controllerMutationQuota,
handleDeleteTopicsResults
)
}
}
}
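  /**
   * Handle a DeleteRecords request: check Delete authorization and topic existence per partition,
   * then ask the replica manager to delete records up to the requested offsets.
   */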
def handleDeleteRecordsRequest(request: RequestChannel.Request): Unit = {
val deleteRecordsRequest = request.body[DeleteRecordsRequest]
val unauthorizedTopicResponses = mutable.Map[TopicPartition, DeleteRecordsPartitionResult]()
val nonExistingTopicResponses = mutable.Map[TopicPartition, DeleteRecordsPartitionResult]()
val authorizedForDeleteTopicOffsets = mutable.Map[TopicPartition, Long]()
val topics = deleteRecordsRequest.data.topics.asScala
val authorizedTopics = authHelper.filterByAuthorized(request.context, DELETE, TOPIC, topics)(_.name)
val deleteTopicPartitions = topics.flatMap { deleteTopic =>
deleteTopic.partitions.asScala.map { deletePartition =>
new TopicPartition(deleteTopic.name, deletePartition.partitionIndex) -> deletePartition.offset
}
}
for ((topicPartition, offset) <- deleteTopicPartitions) {
if (!authorizedTopics.contains(topicPartition.topic))
unauthorizedTopicResponses += topicPartition -> new DeleteRecordsPartitionResult()
.setLowWatermark(DeleteRecordsResponse.INVALID_LOW_WATERMARK)
.setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code)
else if (!metadataCache.contains(topicPartition))
nonExistingTopicResponses += topicPartition -> new DeleteRecordsPartitionResult()
.setLowWatermark(DeleteRecordsResponse.INVALID_LOW_WATERMARK)
.setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code)
else
authorizedForDeleteTopicOffsets += (topicPartition -> offset)
}
// the callback for sending a DeleteRecordsResponse
def sendResponseCallback(authorizedTopicResponses: Map[TopicPartition, DeleteRecordsPartitionResult]): Unit = {
val mergedResponseStatus = authorizedTopicResponses ++ unauthorizedTopicResponses ++ nonExistingTopicResponses
mergedResponseStatus.forKeyValue { (topicPartition, status) =>
if (status.errorCode != Errors.NONE.code) {
debug("DeleteRecordsRequest with correlation id %d from client %s on partition %s failed due to %s".format(
request.header.correlationId,
request.header.clientId,
topicPartition,
Errors.forCode(status.errorCode).exceptionName))
}
}
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new DeleteRecordsResponse(new DeleteRecordsResponseData()
.setThrottleTimeMs(requestThrottleMs)
          .setTopics(new DeleteRecordsResponseData.DeleteRecordsTopicResultCollection(mergedResponseStatus.groupBy(_._1.topic).map { case (topic, partitionMap) =>
            new DeleteRecordsTopicResult()
              .setName(topic)
              .setPartitions(new DeleteRecordsResponseData.DeleteRecordsPartitionResultCollection(partitionMap.map { case (topicPartition, partitionResult) =>
                new DeleteRecordsPartitionResult()
                  .setPartitionIndex(topicPartition.partition)
                  .setLowWatermark(partitionResult.lowWatermark)
                  .setErrorCode(partitionResult.errorCode)
              }.toList.asJava.iterator()))
          }.toList.asJava.iterator()))))
}
if (authorizedForDeleteTopicOffsets.isEmpty)
sendResponseCallback(Map.empty)
else {
      // call the replica manager to delete records from the replicas
replicaManager.deleteRecords(
deleteRecordsRequest.data.timeoutMs.toLong,
authorizedForDeleteTopicOffsets,
sendResponseCallback)
}
}
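  /**
   * Handle an InitProducerId request: authorize either the transactional id or idempotent writes,
   * validate the supplied producer id and epoch pair, and delegate the id allocation to the
   * transaction coordinator.
   */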
def handleInitProducerIdRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
val initProducerIdRequest = request.body[InitProducerIdRequest]
val transactionalId = initProducerIdRequest.data.transactionalId
if (transactionalId != null) {
if (!authHelper.authorize(request.context, WRITE, TRANSACTIONAL_ID, transactionalId)) {
requestHelper.sendErrorResponseMaybeThrottle(request, Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED.exception)
return
}
} else if (!authHelper.authorize(request.context, IDEMPOTENT_WRITE, CLUSTER, CLUSTER_NAME, true, false)
&& !authHelper.authorizeByResourceType(request.context, AclOperation.WRITE, ResourceType.TOPIC)) {
requestHelper.sendErrorResponseMaybeThrottle(request, Errors.CLUSTER_AUTHORIZATION_FAILED.exception)
return
}
def sendResponseCallback(result: InitProducerIdResult): Unit = {
def createResponse(requestThrottleMs: Int): AbstractResponse = {
val finalError =
if (initProducerIdRequest.version < 4 && result.error == Errors.PRODUCER_FENCED) {
            // Older clients do not understand the new PRODUCER_FENCED error code,
            // so we return INVALID_PRODUCER_EPOCH to keep the client handling logic the same.
Errors.INVALID_PRODUCER_EPOCH
} else {
result.error
}
val responseData = new InitProducerIdResponseData()
.setProducerId(result.producerId)
.setProducerEpoch(result.producerEpoch)
.setThrottleTimeMs(requestThrottleMs)
.setErrorCode(finalError.code)
val responseBody = new InitProducerIdResponse(responseData)
trace(s"Completed $transactionalId's InitProducerIdRequest with result $result from client ${request.header.clientId}.")
responseBody
}
requestHelper.sendResponseMaybeThrottle(request, createResponse)
}
val producerIdAndEpoch = (initProducerIdRequest.data.producerId, initProducerIdRequest.data.producerEpoch) match {
case (RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH) => Right(None)
case (RecordBatch.NO_PRODUCER_ID, _) | (_, RecordBatch.NO_PRODUCER_EPOCH) => Left(Errors.INVALID_REQUEST)
case (_, _) => Right(Some(new ProducerIdAndEpoch(initProducerIdRequest.data.producerId, initProducerIdRequest.data.producerEpoch)))
}
producerIdAndEpoch match {
case Right(producerIdAndEpoch) => txnCoordinator.handleInitProducerId(transactionalId, initProducerIdRequest.data.transactionTimeoutMs,
producerIdAndEpoch, sendResponseCallback, requestLocal)
case Left(error) => requestHelper.sendErrorResponseMaybeThrottle(request, error.exception)
}
}
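  /**
   * Handle an EndTxn request by asking the transaction coordinator to commit or abort the
   * transaction, mapping PRODUCER_FENCED back to INVALID_PRODUCER_EPOCH for request versions
   * below 2.
   */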
def handleEndTxnRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
ensureInterBrokerVersion(KAFKA_0_11_0_IV0)
val endTxnRequest = request.body[EndTxnRequest]
val transactionalId = endTxnRequest.data.transactionalId
if (authHelper.authorize(request.context, WRITE, TRANSACTIONAL_ID, transactionalId)) {
def sendResponseCallback(error: Errors): Unit = {
def createResponse(requestThrottleMs: Int): AbstractResponse = {
val finalError =
if (endTxnRequest.version < 2 && error == Errors.PRODUCER_FENCED) {
            // Older clients do not understand the new PRODUCER_FENCED error code,
            // so we return INVALID_PRODUCER_EPOCH to keep the client handling logic the same.
Errors.INVALID_PRODUCER_EPOCH
} else {
error
}
val responseBody = new EndTxnResponse(new EndTxnResponseData()
.setErrorCode(finalError.code)
.setThrottleTimeMs(requestThrottleMs))
trace(s"Completed ${endTxnRequest.data.transactionalId}'s EndTxnRequest " +
s"with committed: ${endTxnRequest.data.committed}, " +
s"errors: $error from client ${request.header.clientId}.")
responseBody
}
requestHelper.sendResponseMaybeThrottle(request, createResponse)
}
txnCoordinator.handleEndTransaction(endTxnRequest.data.transactionalId,
endTxnRequest.data.producerId,
endTxnRequest.data.producerEpoch,
endTxnRequest.result(),
sendResponseCallback,
requestLocal)
} else
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new EndTxnResponse(new EndTxnResponseData()
.setErrorCode(Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED.code)
.setThrottleTimeMs(requestThrottleMs))
)
}
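  /**
   * Handle a WriteTxnMarkers request from the transaction coordinator: append a control record to
   * each affected partition and, for the consumer offsets topic, notify the group coordinator so
   * that committed transactional offsets are materialized. The response is sent once every append
   * has completed.
   */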
def handleWriteTxnMarkersRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
ensureInterBrokerVersion(KAFKA_0_11_0_IV0)
authHelper.authorizeClusterOperation(request, CLUSTER_ACTION)
val writeTxnMarkersRequest = request.body[WriteTxnMarkersRequest]
val errors = new ConcurrentHashMap[java.lang.Long, util.Map[TopicPartition, Errors]]()
val markers = writeTxnMarkersRequest.markers
val numAppends = new AtomicInteger(markers.size)
if (numAppends.get == 0) {
requestHelper.sendResponseExemptThrottle(request, new WriteTxnMarkersResponse(errors))
return
}
def updateErrors(producerId: Long, currentErrors: ConcurrentHashMap[TopicPartition, Errors]): Unit = {
val previousErrors = errors.putIfAbsent(producerId, currentErrors)
if (previousErrors != null)
previousErrors.putAll(currentErrors)
}
/**
     * This is the callback invoked when a log append of transaction markers succeeds. This can be called multiple
* times when handling a single WriteTxnMarkersRequest because there is one append per TransactionMarker in the
* request, so there could be multiple appends of markers to the log. The final response will be sent only
* after all appends have returned.
*/
def maybeSendResponseCallback(producerId: Long, result: TransactionResult)(responseStatus: Map[TopicPartition, PartitionResponse]): Unit = {
trace(s"End transaction marker append for producer id $producerId completed with status: $responseStatus")
val currentErrors = new ConcurrentHashMap[TopicPartition, Errors](responseStatus.map { case (k, v) => k -> v.error }.asJava)
updateErrors(producerId, currentErrors)
val successfulOffsetsPartitions = responseStatus.filter { case (topicPartition, partitionResponse) =>
topicPartition.topic == GROUP_METADATA_TOPIC_NAME && partitionResponse.error == Errors.NONE
}.keys
if (successfulOffsetsPartitions.nonEmpty) {
// as soon as the end transaction marker has been written for a transactional offset commit,
        // call the group coordinator to materialize the offsets into the cache
try {
groupCoordinator.scheduleHandleTxnCompletion(producerId, successfulOffsetsPartitions, result)
} catch {
case e: Exception =>
error(s"Received an exception while trying to update the offsets cache on transaction marker append", e)
val updatedErrors = new ConcurrentHashMap[TopicPartition, Errors]()
successfulOffsetsPartitions.foreach(updatedErrors.put(_, Errors.UNKNOWN_SERVER_ERROR))
updateErrors(producerId, updatedErrors)
}
}
if (numAppends.decrementAndGet() == 0)
requestHelper.sendResponseExemptThrottle(request, new WriteTxnMarkersResponse(errors))
}
// TODO: The current append API makes doing separate writes per producerId a little easier, but it would
// be nice to have only one append to the log. This requires pushing the building of the control records
// into Log so that we only append those having a valid producer epoch, and exposing a new appendControlRecord
    // API in ReplicaManager. For now, we've taken the simpler approach.
var skippedMarkers = 0
for (marker <- markers.asScala) {
val producerId = marker.producerId
val partitionsWithCompatibleMessageFormat = new mutable.ArrayBuffer[TopicPartition]
val currentErrors = new ConcurrentHashMap[TopicPartition, Errors]()
marker.partitions.forEach { partition =>
replicaManager.getMagic(partition) match {
case Some(magic) =>
if (magic < RecordBatch.MAGIC_VALUE_V2)
currentErrors.put(partition, Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT)
else
partitionsWithCompatibleMessageFormat += partition
case None =>
currentErrors.put(partition, Errors.UNKNOWN_TOPIC_OR_PARTITION)
}
}
if (!currentErrors.isEmpty)
updateErrors(producerId, currentErrors)
if (partitionsWithCompatibleMessageFormat.isEmpty) {
numAppends.decrementAndGet()
skippedMarkers += 1
} else {
val controlRecords = partitionsWithCompatibleMessageFormat.map { partition =>
val controlRecordType = marker.transactionResult match {
case TransactionResult.COMMIT => ControlRecordType.COMMIT
case TransactionResult.ABORT => ControlRecordType.ABORT
}
val endTxnMarker = new EndTransactionMarker(controlRecordType, marker.coordinatorEpoch)
partition -> MemoryRecords.withEndTransactionMarker(producerId, marker.producerEpoch, endTxnMarker)
}.toMap
replicaManager.appendRecords(
timeout = config.requestTimeoutMs.toLong,
requiredAcks = -1,
internalTopicsAllowed = true,
origin = AppendOrigin.Coordinator,
entriesPerPartition = controlRecords,
requestLocal = requestLocal,
responseCallback = maybeSendResponseCallback(producerId, marker.transactionResult))
}
}
// No log appends were written as all partitions had incorrect log format
// so we need to send the error response
if (skippedMarkers == markers.size)
requestHelper.sendResponseExemptThrottle(request, new WriteTxnMarkersResponse(errors))
}
def ensureInterBrokerVersion(version: ApiVersion): Unit = {
if (config.interBrokerProtocolVersion < version)
throw new UnsupportedVersionException(s"inter.broker.protocol.version: ${config.interBrokerProtocolVersion.version} is less than the required version: ${version.version}")
}
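  /**
   * Handle an AddPartitionsToTxn request: verify Write access to the transactional id and to each
   * partition's topic, then register the authorized partitions with the transaction coordinator.
   */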
def handleAddPartitionToTxnRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
ensureInterBrokerVersion(KAFKA_0_11_0_IV0)
val addPartitionsToTxnRequest = request.body[AddPartitionsToTxnRequest]
val transactionalId = addPartitionsToTxnRequest.data.transactionalId
val partitionsToAdd = addPartitionsToTxnRequest.partitions.asScala
if (!authHelper.authorize(request.context, WRITE, TRANSACTIONAL_ID, transactionalId))
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
addPartitionsToTxnRequest.getErrorResponse(requestThrottleMs, Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED.exception))
else {
val unauthorizedTopicErrors = mutable.Map[TopicPartition, Errors]()
val nonExistingTopicErrors = mutable.Map[TopicPartition, Errors]()
val authorizedPartitions = mutable.Set[TopicPartition]()
val authorizedTopics = authHelper.filterByAuthorized(request.context, WRITE, TOPIC,
partitionsToAdd.filterNot(tp => Topic.isInternal(tp.topic)))(_.topic)
for (topicPartition <- partitionsToAdd) {
if (!authorizedTopics.contains(topicPartition.topic))
unauthorizedTopicErrors += topicPartition -> Errors.TOPIC_AUTHORIZATION_FAILED
else if (!metadataCache.contains(topicPartition))
nonExistingTopicErrors += topicPartition -> Errors.UNKNOWN_TOPIC_OR_PARTITION
else
authorizedPartitions.add(topicPartition)
}
if (unauthorizedTopicErrors.nonEmpty || nonExistingTopicErrors.nonEmpty) {
// Any failed partition check causes the entire request to fail. We send the appropriate error codes for the
        // partitions which failed, and an 'OPERATION_NOT_ATTEMPTED' error code for the partitions which passed
        // the authorization check, to indicate that they were not added to the transaction.
val partitionErrors = unauthorizedTopicErrors ++ nonExistingTopicErrors ++
authorizedPartitions.map(_ -> Errors.OPERATION_NOT_ATTEMPTED)
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new AddPartitionsToTxnResponse(requestThrottleMs, partitionErrors.asJava))
} else {
def sendResponseCallback(error: Errors): Unit = {
def createResponse(requestThrottleMs: Int): AbstractResponse = {
val finalError =
if (addPartitionsToTxnRequest.version < 2 && error == Errors.PRODUCER_FENCED) {
                // Older clients do not understand the new PRODUCER_FENCED error code,
                // so we return the old INVALID_PRODUCER_EPOCH to keep the client handling logic the same.
Errors.INVALID_PRODUCER_EPOCH
} else {
error
}
val responseBody: AddPartitionsToTxnResponse = new AddPartitionsToTxnResponse(requestThrottleMs,
partitionsToAdd.map{tp => (tp, finalError)}.toMap.asJava)
trace(s"Completed $transactionalId's AddPartitionsToTxnRequest with partitions $partitionsToAdd: errors: $error from client ${request.header.clientId}")
responseBody
}
requestHelper.sendResponseMaybeThrottle(request, createResponse)
}
txnCoordinator.handleAddPartitionsToTransaction(transactionalId,
addPartitionsToTxnRequest.data.producerId,
addPartitionsToTxnRequest.data.producerEpoch,
authorizedPartitions,
sendResponseCallback,
requestLocal)
}
}
}
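  /**
   * Handle an AddOffsetsToTxn request: after checking transactional id and group authorization,
   * register the group's offsets topic partition with the transaction coordinator.
   */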
def handleAddOffsetsToTxnRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
ensureInterBrokerVersion(KAFKA_0_11_0_IV0)
val addOffsetsToTxnRequest = request.body[AddOffsetsToTxnRequest]
val transactionalId = addOffsetsToTxnRequest.data.transactionalId
val groupId = addOffsetsToTxnRequest.data.groupId
val offsetTopicPartition = new TopicPartition(GROUP_METADATA_TOPIC_NAME, groupCoordinator.partitionFor(groupId))
if (!authHelper.authorize(request.context, WRITE, TRANSACTIONAL_ID, transactionalId))
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new AddOffsetsToTxnResponse(new AddOffsetsToTxnResponseData()
.setErrorCode(Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED.code)
.setThrottleTimeMs(requestThrottleMs)))
else if (!authHelper.authorize(request.context, READ, GROUP, groupId))
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new AddOffsetsToTxnResponse(new AddOffsetsToTxnResponseData()
.setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code)
.setThrottleTimeMs(requestThrottleMs))
)
else {
def sendResponseCallback(error: Errors): Unit = {
def createResponse(requestThrottleMs: Int): AbstractResponse = {
val finalError =
if (addOffsetsToTxnRequest.version < 2 && error == Errors.PRODUCER_FENCED) {
              // Older clients do not understand the new PRODUCER_FENCED error code,
              // so we return the old INVALID_PRODUCER_EPOCH to keep the client-side handling unchanged.
Errors.INVALID_PRODUCER_EPOCH
} else {
error
}
val responseBody: AddOffsetsToTxnResponse = new AddOffsetsToTxnResponse(
new AddOffsetsToTxnResponseData()
.setErrorCode(finalError.code)
.setThrottleTimeMs(requestThrottleMs))
trace(s"Completed $transactionalId's AddOffsetsToTxnRequest for group $groupId on partition " +
s"$offsetTopicPartition: errors: $error from client ${request.header.clientId}")
responseBody
}
requestHelper.sendResponseMaybeThrottle(request, createResponse)
}
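      // Register the group's offsets topic partition with the transaction so that the commit/abort
      // markers written when the transaction completes also cover the offsets committed via TxnOffsetCommit.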
txnCoordinator.handleAddPartitionsToTransaction(transactionalId,
addOffsetsToTxnRequest.data.producerId,
addOffsetsToTxnRequest.data.producerEpoch,
Set(offsetTopicPartition),
sendResponseCallback,
requestLocal)
}
}
def handleTxnOffsetCommitRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
ensureInterBrokerVersion(KAFKA_0_11_0_IV0)
val header = request.header
val txnOffsetCommitRequest = request.body[TxnOffsetCommitRequest]
// authorize for the transactionalId and the consumer group. Note that we skip producerId authorization
// since it is implied by transactionalId authorization
if (!authHelper.authorize(request.context, WRITE, TRANSACTIONAL_ID, txnOffsetCommitRequest.data.transactionalId))
requestHelper.sendErrorResponseMaybeThrottle(request, Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED.exception)
else if (!authHelper.authorize(request.context, READ, GROUP, txnOffsetCommitRequest.data.groupId))
requestHelper.sendErrorResponseMaybeThrottle(request, Errors.GROUP_AUTHORIZATION_FAILED.exception)
else {
val unauthorizedTopicErrors = mutable.Map[TopicPartition, Errors]()
val nonExistingTopicErrors = mutable.Map[TopicPartition, Errors]()
val authorizedTopicCommittedOffsets = mutable.Map[TopicPartition, TxnOffsetCommitRequest.CommittedOffset]()
val committedOffsets = txnOffsetCommitRequest.offsets.asScala
val authorizedTopics = authHelper.filterByAuthorized(request.context, READ, TOPIC, committedOffsets)(_._1.topic)
      for ((topicPartition, committedOffset) <- committedOffsets) {
if (!authorizedTopics.contains(topicPartition.topic))
unauthorizedTopicErrors += topicPartition -> Errors.TOPIC_AUTHORIZATION_FAILED
else if (!metadataCache.contains(topicPartition))
nonExistingTopicErrors += topicPartition -> Errors.UNKNOWN_TOPIC_OR_PARTITION
else
          authorizedTopicCommittedOffsets += (topicPartition -> committedOffset)
}
// the callback for sending an offset commit response
def sendResponseCallback(authorizedTopicErrors: Map[TopicPartition, Errors]): Unit = {
val combinedCommitStatus = mutable.Map() ++= authorizedTopicErrors ++= unauthorizedTopicErrors ++= nonExistingTopicErrors
if (isDebugEnabled)
combinedCommitStatus.forKeyValue { (topicPartition, error) =>
if (error != Errors.NONE) {
debug(s"TxnOffsetCommit with correlation id ${header.correlationId} from client ${header.clientId} " +
s"on partition $topicPartition failed due to ${error.exceptionName}")
}
}
        // We need to replace COORDINATOR_LOAD_IN_PROGRESS with COORDINATOR_NOT_AVAILABLE
        // for older producer clients (0.11 up to, but not including, 2.0), which could potentially
        // crash on the unexpected loading error. This bug was fixed by KAFKA-7296. Clients using
        // txn commit protocol >= 2 (client versions 2.3 and onwards) are guaranteed to have
        // the fix that checks for the loading error.
if (txnOffsetCommitRequest.version < 2) {
combinedCommitStatus ++= combinedCommitStatus.collect {
case (tp, error) if error == Errors.COORDINATOR_LOAD_IN_PROGRESS => tp -> Errors.COORDINATOR_NOT_AVAILABLE
}
}
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new TxnOffsetCommitResponse(requestThrottleMs, combinedCommitStatus.asJava))
}
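      // If no partitions passed the authorization and existence checks, skip the group coordinator
      // and respond immediately with the per-partition errors collected above.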
if (authorizedTopicCommittedOffsets.isEmpty)
sendResponseCallback(Map.empty)
else {
val offsetMetadata = convertTxnOffsets(authorizedTopicCommittedOffsets.toMap)
groupCoordinator.handleTxnCommitOffsets(
txnOffsetCommitRequest.data.groupId,
txnOffsetCommitRequest.data.producerId,
txnOffsetCommitRequest.data.producerEpoch,
txnOffsetCommitRequest.data.memberId,
Option(txnOffsetCommitRequest.data.groupInstanceId),
txnOffsetCommitRequest.data.generationId,
offsetMetadata,
sendResponseCallback,
requestLocal)
}
}
}
private def convertTxnOffsets(offsetsMap: immutable.Map[TopicPartition, TxnOffsetCommitRequest.CommittedOffset]): immutable.Map[TopicPartition, OffsetAndMetadata] = {
val currentTimestamp = time.milliseconds
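    // Convert each committed offset into the group coordinator's OffsetAndMetadata, stamping the
    // current time as the commit timestamp; the expiry timestamp is left unset so that the broker's
    // offsets retention policy applies.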
offsetsMap.map { case (topicPartition, partitionData) =>
val metadata = if (partitionData.metadata == null) OffsetAndMetadata.NoMetadata else partitionData.metadata
topicPartition -> new OffsetAndMetadata(
offset = partitionData.offset,
leaderEpoch = partitionData.leaderEpoch,
metadata = metadata,
commitTimestamp = currentTimestamp,
expireTimestamp = None)
}
}
def handleDescribeAcls(request: RequestChannel.Request): Unit = {
aclApis.handleDescribeAcls(request)
}
def handleCreateAcls(request: RequestChannel.Request): Unit = {
metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request))
aclApis.handleCreateAcls(request)
}
def handleDeleteAcls(request: RequestChannel.Request): Unit = {
metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request))
aclApis.handleDeleteAcls(request)
}
def handleOffsetForLeaderEpochRequest(request: RequestChannel.Request): Unit = {
val offsetForLeaderEpoch = request.body[OffsetsForLeaderEpochRequest]
val topics = offsetForLeaderEpoch.data.topics.asScala.toSeq
// The OffsetsForLeaderEpoch API was initially only used for inter-broker communication and required
// cluster permission. With KIP-320, the consumer now also uses this API to check for log truncation
// following a leader change, so we also allow topic describe permission.
val (authorizedTopics, unauthorizedTopics) =
if (authHelper.authorize(request.context, CLUSTER_ACTION, CLUSTER, CLUSTER_NAME, logIfDenied = false))
(topics, Seq.empty[OffsetForLeaderTopic])
else authHelper.partitionSeqByAuthorized(request.context, DESCRIBE, TOPIC, topics)(_.topic)
val endOffsetsForAuthorizedPartitions = replicaManager.lastOffsetForLeaderEpoch(authorizedTopics)
val endOffsetsForUnauthorizedPartitions = unauthorizedTopics.map { offsetForLeaderTopic =>
val partitions = offsetForLeaderTopic.partitions.asScala.map { offsetForLeaderPartition =>
new EpochEndOffset()
.setPartition(offsetForLeaderPartition.partition)
.setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code)
}
new OffsetForLeaderTopicResult()
.setTopic(offsetForLeaderTopic.topic)
.setPartitions(partitions.toList.asJava)
}
val endOffsetsForAllTopics = new OffsetForLeaderTopicResultCollection(
(endOffsetsForAuthorizedPartitions ++ endOffsetsForUnauthorizedPartitions).asJava.iterator
)
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new OffsetsForLeaderEpochResponse(new OffsetForLeaderEpochResponseData()
.setThrottleTimeMs(requestThrottleMs)
.setTopics(endOffsetsForAllTopics)))
}
def handleAlterConfigsRequest(request: RequestChannel.Request): Unit = {
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request))
val alterConfigsRequest = request.body[AlterConfigsRequest]
val (authorizedResources, unauthorizedResources) = alterConfigsRequest.configs.asScala.toMap.partition { case (resource, _) =>
resource.`type` match {
case ConfigResource.Type.BROKER_LOGGER =>
throw new InvalidRequestException(s"AlterConfigs is deprecated and does not support the resource type ${ConfigResource.Type.BROKER_LOGGER}")
case ConfigResource.Type.BROKER =>
authHelper.authorize(request.context, ALTER_CONFIGS, CLUSTER, CLUSTER_NAME)
case ConfigResource.Type.TOPIC =>
authHelper.authorize(request.context, ALTER_CONFIGS, TOPIC, resource.name)
case rt => throw new InvalidRequestException(s"Unexpected resource type $rt")
}
}
val authorizedResult = zkSupport.adminManager.alterConfigs(authorizedResources, alterConfigsRequest.validateOnly)
val unauthorizedResult = unauthorizedResources.keys.map { resource =>
resource -> configsAuthorizationApiError(resource)
}
def responseCallback(requestThrottleMs: Int): AlterConfigsResponse = {
val data = new AlterConfigsResponseData()
.setThrottleTimeMs(requestThrottleMs)
(authorizedResult ++ unauthorizedResult).foreach{ case (resource, error) =>
data.responses().add(new AlterConfigsResourceResponse()
.setErrorCode(error.error.code)
.setErrorMessage(error.message)
.setResourceName(resource.name)
.setResourceType(resource.`type`.id))
}
new AlterConfigsResponse(data)
}
requestHelper.sendResponseMaybeThrottle(request, responseCallback)
}
def handleAlterPartitionReassignmentsRequest(request: RequestChannel.Request): Unit = {
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request))
authHelper.authorizeClusterOperation(request, ALTER)
val alterPartitionReassignmentsRequest = request.body[AlterPartitionReassignmentsRequest]
def sendResponseCallback(result: Either[Map[TopicPartition, ApiError], ApiError]): Unit = {
val responseData = result match {
case Right(topLevelError) =>
new AlterPartitionReassignmentsResponseData().setErrorMessage(topLevelError.message).setErrorCode(topLevelError.error.code)
case Left(assignments) =>
val topicResponses = assignments.groupBy(_._1.topic).map {
case (topic, reassignmentsByTp) =>
val partitionResponses = reassignmentsByTp.map {
case (topicPartition, error) =>
new ReassignablePartitionResponse().setPartitionIndex(topicPartition.partition)
.setErrorCode(error.error.code).setErrorMessage(error.message)
}
new ReassignableTopicResponse().setName(topic).setPartitions(partitionResponses.toList.asJava)
}
new AlterPartitionReassignmentsResponseData().setResponses(topicResponses.toList.asJava)
}
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new AlterPartitionReassignmentsResponse(responseData.setThrottleTimeMs(requestThrottleMs))
)
}
val reassignments = alterPartitionReassignmentsRequest.data.topics.asScala.flatMap {
reassignableTopic => reassignableTopic.partitions.asScala.map {
reassignablePartition =>
val tp = new TopicPartition(reassignableTopic.name, reassignablePartition.partitionIndex)
if (reassignablePartition.replicas == null)
tp -> None // revert call
else
tp -> Some(reassignablePartition.replicas.asScala.map(_.toInt))
}
}.toMap
zkSupport.controller.alterPartitionReassignments(reassignments, sendResponseCallback)
}
def handleListPartitionReassignmentsRequest(request: RequestChannel.Request): Unit = {
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request))
authHelper.authorizeClusterOperation(request, DESCRIBE)
val listPartitionReassignmentsRequest = request.body[ListPartitionReassignmentsRequest]
def sendResponseCallback(result: Either[Map[TopicPartition, ReplicaAssignment], ApiError]): Unit = {
val responseData = result match {
case Right(error) => new ListPartitionReassignmentsResponseData().setErrorMessage(error.message).setErrorCode(error.error.code)
case Left(assignments) =>
val topicReassignments = assignments.groupBy(_._1.topic).map {
case (topic, reassignmentsByTp) =>
val partitionReassignments = reassignmentsByTp.map {
case (topicPartition, assignment) =>
new ListPartitionReassignmentsResponseData.OngoingPartitionReassignment()
.setPartitionIndex(topicPartition.partition)
.setAddingReplicas(assignment.addingReplicas.toList.asJava.asInstanceOf[java.util.List[java.lang.Integer]])
.setRemovingReplicas(assignment.removingReplicas.toList.asJava.asInstanceOf[java.util.List[java.lang.Integer]])
.setReplicas(assignment.replicas.toList.asJava.asInstanceOf[java.util.List[java.lang.Integer]])
}.toList
new ListPartitionReassignmentsResponseData.OngoingTopicReassignment().setName(topic)
.setPartitions(partitionReassignments.asJava)
}.toList
new ListPartitionReassignmentsResponseData().setTopics(topicReassignments.asJava)
}
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new ListPartitionReassignmentsResponse(responseData.setThrottleTimeMs(requestThrottleMs))
)
}
val partitionsOpt = listPartitionReassignmentsRequest.data.topics match {
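      // A type pattern does not match null, so a null `topics` field falls through to the wildcard
      // case and yields None, meaning "list all ongoing reassignments".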
case topics: Any =>
Some(topics.iterator().asScala.flatMap { topic =>
topic.partitionIndexes.iterator().asScala
.map { tp => new TopicPartition(topic.name(), tp) }
}.toSet)
case _ => None
}
zkSupport.controller.listPartitionReassignments(partitionsOpt, sendResponseCallback)
}
private def configsAuthorizationApiError(resource: ConfigResource): ApiError = {
val error = resource.`type` match {
case ConfigResource.Type.BROKER | ConfigResource.Type.BROKER_LOGGER => Errors.CLUSTER_AUTHORIZATION_FAILED
case ConfigResource.Type.TOPIC => Errors.TOPIC_AUTHORIZATION_FAILED
case rt => throw new InvalidRequestException(s"Unexpected resource type $rt for resource ${resource.name}")
}
new ApiError(error, null)
}
def handleIncrementalAlterConfigsRequest(request: RequestChannel.Request): Unit = {
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request))
val alterConfigsRequest = request.body[IncrementalAlterConfigsRequest]
val configs = alterConfigsRequest.data.resources.iterator.asScala.map { alterConfigResource =>
val configResource = new ConfigResource(ConfigResource.Type.forId(alterConfigResource.resourceType),
alterConfigResource.resourceName)
configResource -> alterConfigResource.configs.iterator.asScala.map {
alterConfig => new AlterConfigOp(new ConfigEntry(alterConfig.name, alterConfig.value),
OpType.forId(alterConfig.configOperation))
}.toBuffer
}.toMap
val (authorizedResources, unauthorizedResources) = configs.partition { case (resource, _) =>
resource.`type` match {
case ConfigResource.Type.BROKER | ConfigResource.Type.BROKER_LOGGER =>
authHelper.authorize(request.context, ALTER_CONFIGS, CLUSTER, CLUSTER_NAME)
case ConfigResource.Type.TOPIC =>
authHelper.authorize(request.context, ALTER_CONFIGS, TOPIC, resource.name)
case rt => throw new InvalidRequestException(s"Unexpected resource type $rt")
}
}
val authorizedResult = zkSupport.adminManager.incrementalAlterConfigs(authorizedResources, alterConfigsRequest.data.validateOnly)
val unauthorizedResult = unauthorizedResources.keys.map { resource =>
resource -> configsAuthorizationApiError(resource)
}
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => new IncrementalAlterConfigsResponse(
requestThrottleMs, (authorizedResult ++ unauthorizedResult).asJava))
}
def handleDescribeConfigsRequest(request: RequestChannel.Request): Unit = {
val describeConfigsRequest = request.body[DescribeConfigsRequest]
val (authorizedResources, unauthorizedResources) = describeConfigsRequest.data.resources.asScala.partition { resource =>
ConfigResource.Type.forId(resource.resourceType) match {
case ConfigResource.Type.BROKER | ConfigResource.Type.BROKER_LOGGER =>
authHelper.authorize(request.context, DESCRIBE_CONFIGS, CLUSTER, CLUSTER_NAME)
case ConfigResource.Type.TOPIC =>
authHelper.authorize(request.context, DESCRIBE_CONFIGS, TOPIC, resource.resourceName)
case rt => throw new InvalidRequestException(s"Unexpected resource type $rt for resource ${resource.resourceName}")
}
}
val authorizedConfigs = configHelper.describeConfigs(authorizedResources.toList, describeConfigsRequest.data.includeSynonyms, describeConfigsRequest.data.includeDocumentation)
val unauthorizedConfigs = unauthorizedResources.map { resource =>
val error = ConfigResource.Type.forId(resource.resourceType) match {
case ConfigResource.Type.BROKER | ConfigResource.Type.BROKER_LOGGER => Errors.CLUSTER_AUTHORIZATION_FAILED
case ConfigResource.Type.TOPIC => Errors.TOPIC_AUTHORIZATION_FAILED
case rt => throw new InvalidRequestException(s"Unexpected resource type $rt for resource ${resource.resourceName}")
}
new DescribeConfigsResponseData.DescribeConfigsResult().setErrorCode(error.code)
.setErrorMessage(error.message)
.setConfigs(Collections.emptyList[DescribeConfigsResponseData.DescribeConfigsResourceResult])
.setResourceName(resource.resourceName)
.setResourceType(resource.resourceType)
}
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new DescribeConfigsResponse(new DescribeConfigsResponseData().setThrottleTimeMs(requestThrottleMs)
.setResults((authorizedConfigs ++ unauthorizedConfigs).asJava)))
}
def handleAlterReplicaLogDirsRequest(request: RequestChannel.Request): Unit = {
val alterReplicaDirsRequest = request.body[AlterReplicaLogDirsRequest]
if (authHelper.authorize(request.context, ALTER, CLUSTER, CLUSTER_NAME)) {
val result = replicaManager.alterReplicaLogDirs(alterReplicaDirsRequest.partitionDirs.asScala)
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new AlterReplicaLogDirsResponse(new AlterReplicaLogDirsResponseData()
.setResults(result.groupBy(_._1.topic).map {
case (topic, errors) => new AlterReplicaLogDirsResponseData.AlterReplicaLogDirTopicResult()
.setTopicName(topic)
.setPartitions(errors.map {
case (tp, error) => new AlterReplicaLogDirsResponseData.AlterReplicaLogDirPartitionResult()
.setPartitionIndex(tp.partition)
.setErrorCode(error.code)
}.toList.asJava)
}.toList.asJava)
.setThrottleTimeMs(requestThrottleMs)))
} else {
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
alterReplicaDirsRequest.getErrorResponse(requestThrottleMs, Errors.CLUSTER_AUTHORIZATION_FAILED.exception))
}
}
def handleDescribeLogDirsRequest(request: RequestChannel.Request): Unit = {
val describeLogDirsDirRequest = request.body[DescribeLogDirsRequest]
val logDirInfos = {
if (authHelper.authorize(request.context, DESCRIBE, CLUSTER, CLUSTER_NAME)) {
val partitions =
if (describeLogDirsDirRequest.isAllTopicPartitions)
replicaManager.logManager.allLogs.map(_.topicPartition).toSet
else
describeLogDirsDirRequest.data.topics.asScala.flatMap(
logDirTopic => logDirTopic.partitions.asScala.map(partitionIndex =>
new TopicPartition(logDirTopic.topic, partitionIndex))).toSet
replicaManager.describeLogDirs(partitions)
} else {
List.empty[DescribeLogDirsResponseData.DescribeLogDirsResult]
}
}
requestHelper.sendResponseMaybeThrottle(request, throttleTimeMs => new DescribeLogDirsResponse(new DescribeLogDirsResponseData()
.setThrottleTimeMs(throttleTimeMs)
.setResults(logDirInfos.asJava)))
}
def handleCreateTokenRequest(request: RequestChannel.Request): Unit = {
metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request))
val createTokenRequest = request.body[CreateDelegationTokenRequest]
// the callback for sending a create token response
def sendResponseCallback(createResult: CreateTokenResult): Unit = {
trace(s"Sending create token response for correlation id ${request.header.correlationId} " +
s"to client ${request.header.clientId}.")
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
CreateDelegationTokenResponse.prepareResponse(requestThrottleMs, createResult.error, request.context.principal, createResult.issueTimestamp,
createResult.expiryTimestamp, createResult.maxTimestamp, createResult.tokenId, ByteBuffer.wrap(createResult.hmac)))
}
if (!allowTokenRequests(request))
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
CreateDelegationTokenResponse.prepareResponse(requestThrottleMs, Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED, request.context.principal))
else {
val renewerList = createTokenRequest.data.renewers.asScala.toList.map(entry =>
new KafkaPrincipal(entry.principalType, entry.principalName))
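      // Only USER principals may be designated as token renewers; any other principal type is
      // rejected with INVALID_PRINCIPAL_TYPE.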
if (renewerList.exists(principal => principal.getPrincipalType != KafkaPrincipal.USER_TYPE)) {
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
CreateDelegationTokenResponse.prepareResponse(requestThrottleMs, Errors.INVALID_PRINCIPAL_TYPE, request.context.principal))
}
else {
tokenManager.createToken(
request.context.principal,
renewerList,
createTokenRequest.data.maxLifetimeMs,
sendResponseCallback
)
}
}
}
def handleRenewTokenRequest(request: RequestChannel.Request): Unit = {
metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request))
val renewTokenRequest = request.body[RenewDelegationTokenRequest]
// the callback for sending a renew token response
def sendResponseCallback(error: Errors, expiryTimestamp: Long): Unit = {
trace("Sending renew token response for correlation id %d to client %s."
.format(request.header.correlationId, request.header.clientId))
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new RenewDelegationTokenResponse(
new RenewDelegationTokenResponseData()
.setThrottleTimeMs(requestThrottleMs)
.setErrorCode(error.code)
.setExpiryTimestampMs(expiryTimestamp)))
}
if (!allowTokenRequests(request))
sendResponseCallback(Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED, DelegationTokenManager.ErrorTimestamp)
else {
tokenManager.renewToken(
request.context.principal,
ByteBuffer.wrap(renewTokenRequest.data.hmac),
renewTokenRequest.data.renewPeriodMs,
sendResponseCallback
)
}
}
def handleExpireTokenRequest(request: RequestChannel.Request): Unit = {
metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request))
val expireTokenRequest = request.body[ExpireDelegationTokenRequest]
    // the callback for sending an expire token response
def sendResponseCallback(error: Errors, expiryTimestamp: Long): Unit = {
trace("Sending expire token response for correlation id %d to client %s."
.format(request.header.correlationId, request.header.clientId))
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new ExpireDelegationTokenResponse(
new ExpireDelegationTokenResponseData()
.setThrottleTimeMs(requestThrottleMs)
.setErrorCode(error.code)
.setExpiryTimestampMs(expiryTimestamp)))
}
if (!allowTokenRequests(request))
sendResponseCallback(Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED, DelegationTokenManager.ErrorTimestamp)
else {
tokenManager.expireToken(
request.context.principal,
expireTokenRequest.hmac(),
expireTokenRequest.expiryTimePeriod(),
sendResponseCallback
)
}
}
def handleDescribeTokensRequest(request: RequestChannel.Request): Unit = {
val describeTokenRequest = request.body[DescribeDelegationTokenRequest]
// the callback for sending a describe token response
def sendResponseCallback(error: Errors, tokenDetails: List[DelegationToken]): Unit = {
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new DescribeDelegationTokenResponse(requestThrottleMs, error, tokenDetails.asJava))
trace("Sending describe token response for correlation id %d to client %s."
.format(request.header.correlationId, request.header.clientId))
}
if (!allowTokenRequests(request))
sendResponseCallback(Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED, List.empty)
else if (!config.tokenAuthEnabled)
sendResponseCallback(Errors.DELEGATION_TOKEN_AUTH_DISABLED, List.empty)
else {
val requestPrincipal = request.context.principal
if (describeTokenRequest.ownersListEmpty()) {
sendResponseCallback(Errors.NONE, List())
}
else {
val owners = if (describeTokenRequest.data.owners == null)
None
else
Some(describeTokenRequest.data.owners.asScala.map(p => new KafkaPrincipal(p.principalType(), p.principalName)).toList)
def authorizeToken(tokenId: String) = authHelper.authorize(request.context, DESCRIBE, DELEGATION_TOKEN, tokenId)
def eligible(token: TokenInformation) = DelegationTokenManager.filterToken(requestPrincipal, owners, token, authorizeToken)
val tokens = tokenManager.getTokens(eligible)
sendResponseCallback(Errors.NONE, tokens)
}
}
}
def allowTokenRequests(request: RequestChannel.Request): Boolean = {
val protocol = request.context.securityProtocol
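    // Delegation token requests are rejected for clients that authenticated with a token, for
    // PLAINTEXT connections, and for 1-way SSL connections (anonymous principal).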
if (request.context.principal.tokenAuthenticated ||
protocol == SecurityProtocol.PLAINTEXT ||
// disallow requests from 1-way SSL
(protocol == SecurityProtocol.SSL && request.context.principal == KafkaPrincipal.ANONYMOUS))
false
else
true
}
def handleElectReplicaLeader(request: RequestChannel.Request): Unit = {
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.notYetSupported(request))
val electionRequest = request.body[ElectLeadersRequest]
def sendResponseCallback(
error: ApiError
)(
results: Map[TopicPartition, ApiError]
): Unit = {
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => {
val adjustedResults = if (electionRequest.data.topicPartitions == null) {
          /* When performing elections across all of the partitions we should only return
           * partitions for which there was an election or which resulted in an error. In other
           * words, partitions that didn't need election because they already have the correct
           * leader are not returned to the client.
           */
results.filter { case (_, error) =>
error.error != Errors.ELECTION_NOT_NEEDED
}
} else results
val electionResults = new util.ArrayList[ReplicaElectionResult]()
adjustedResults
.groupBy { case (tp, _) => tp.topic }
.forKeyValue { (topic, ps) =>
val electionResult = new ReplicaElectionResult()
electionResult.setTopic(topic)
ps.forKeyValue { (topicPartition, error) =>
val partitionResult = new PartitionResult()
partitionResult.setPartitionId(topicPartition.partition)
partitionResult.setErrorCode(error.error.code)
partitionResult.setErrorMessage(error.message)
electionResult.partitionResult.add(partitionResult)
}
electionResults.add(electionResult)
}
new ElectLeadersResponse(
requestThrottleMs,
error.error.code,
electionResults,
electionRequest.version
)
})
}
if (!authHelper.authorize(request.context, ALTER, CLUSTER, CLUSTER_NAME)) {
val error = new ApiError(Errors.CLUSTER_AUTHORIZATION_FAILED, null)
val partitionErrors: Map[TopicPartition, ApiError] =
electionRequest.topicPartitions.iterator.map(partition => partition -> error).toMap
sendResponseCallback(error)(partitionErrors)
} else {
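      // A null topicPartitions field means the client requested leader election for every partition
      // in the cluster.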
val partitions = if (electionRequest.data.topicPartitions == null) {
metadataCache.getAllTopics().flatMap(metadataCache.getTopicPartitions)
} else {
electionRequest.topicPartitions
}
replicaManager.electLeaders(
zkSupport.controller,
partitions,
electionRequest.electionType,
sendResponseCallback(ApiError.NONE),
electionRequest.data.timeoutMs
)
}
}
def handleOffsetDeleteRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
val offsetDeleteRequest = request.body[OffsetDeleteRequest]
val groupId = offsetDeleteRequest.data.groupId
if (authHelper.authorize(request.context, DELETE, GROUP, groupId)) {
val topics = offsetDeleteRequest.data.topics.asScala
val authorizedTopics = authHelper.filterByAuthorized(request.context, READ, TOPIC, topics)(_.name)
val topicPartitionErrors = mutable.Map[TopicPartition, Errors]()
val topicPartitions = mutable.ArrayBuffer[TopicPartition]()
for (topic <- topics) {
for (partition <- topic.partitions.asScala) {
val tp = new TopicPartition(topic.name, partition.partitionIndex)
if (!authorizedTopics.contains(topic.name))
topicPartitionErrors(tp) = Errors.TOPIC_AUTHORIZATION_FAILED
else if (!metadataCache.contains(tp))
topicPartitionErrors(tp) = Errors.UNKNOWN_TOPIC_OR_PARTITION
else
topicPartitions += tp
}
}
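      // Only partitions that passed both the authorization and existence checks are forwarded to the
      // group coordinator; the others already carry their error codes.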
val (groupError, authorizedTopicPartitionsErrors) = groupCoordinator.handleDeleteOffsets(
groupId, topicPartitions, requestLocal)
topicPartitionErrors ++= authorizedTopicPartitionsErrors
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => {
if (groupError != Errors.NONE)
offsetDeleteRequest.getErrorResponse(requestThrottleMs, groupError)
else {
val topics = new OffsetDeleteResponseData.OffsetDeleteResponseTopicCollection
topicPartitionErrors.groupBy(_._1.topic).forKeyValue { (topic, topicPartitions) =>
val partitions = new OffsetDeleteResponseData.OffsetDeleteResponsePartitionCollection
topicPartitions.forKeyValue { (topicPartition, error) =>
partitions.add(
new OffsetDeleteResponseData.OffsetDeleteResponsePartition()
.setPartitionIndex(topicPartition.partition)
.setErrorCode(error.code)
)
}
topics.add(new OffsetDeleteResponseData.OffsetDeleteResponseTopic()
.setName(topic)
.setPartitions(partitions))
}
new OffsetDeleteResponse(new OffsetDeleteResponseData()
.setTopics(topics)
.setThrottleTimeMs(requestThrottleMs))
}
})
} else {
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
offsetDeleteRequest.getErrorResponse(requestThrottleMs, Errors.GROUP_AUTHORIZATION_FAILED))
}
}
def handleDescribeClientQuotasRequest(request: RequestChannel.Request): Unit = {
val describeClientQuotasRequest = request.body[DescribeClientQuotasRequest]
if (!authHelper.authorize(request.context, DESCRIBE_CONFIGS, CLUSTER, CLUSTER_NAME)) {
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
describeClientQuotasRequest.getErrorResponse(requestThrottleMs, Errors.CLUSTER_AUTHORIZATION_FAILED.exception))
} else {
metadataSupport match {
case ZkSupport(adminManager, controller, zkClient, forwardingManager, metadataCache) =>
val result = adminManager.describeClientQuotas(describeClientQuotasRequest.filter)
val entriesData = result.iterator.map { case (quotaEntity, quotaValues) =>
val entityData = quotaEntity.entries.asScala.iterator.map { case (entityType, entityName) =>
new DescribeClientQuotasResponseData.EntityData()
.setEntityType(entityType)
.setEntityName(entityName)
}.toBuffer
val valueData = quotaValues.iterator.map { case (key, value) =>
new DescribeClientQuotasResponseData.ValueData()
.setKey(key)
.setValue(value)
}.toBuffer
new DescribeClientQuotasResponseData.EntryData()
.setEntity(entityData.asJava)
.setValues(valueData.asJava)
}.toBuffer
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new DescribeClientQuotasResponse(new DescribeClientQuotasResponseData()
.setThrottleTimeMs(requestThrottleMs)
.setEntries(entriesData.asJava)))
case RaftSupport(_, metadataCache) =>
val result = metadataCache.describeClientQuotas(describeClientQuotasRequest.data())
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => {
result.setThrottleTimeMs(requestThrottleMs)
new DescribeClientQuotasResponse(result)
})
}
}
}
def handleAlterClientQuotasRequest(request: RequestChannel.Request): Unit = {
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request))
val alterClientQuotasRequest = request.body[AlterClientQuotasRequest]
if (authHelper.authorize(request.context, ALTER_CONFIGS, CLUSTER, CLUSTER_NAME)) {
val result = zkSupport.adminManager.alterClientQuotas(alterClientQuotasRequest.entries.asScala,
alterClientQuotasRequest.validateOnly)
val entriesData = result.iterator.map { case (quotaEntity, apiError) =>
val entityData = quotaEntity.entries.asScala.iterator.map { case (key, value) =>
new AlterClientQuotasResponseData.EntityData()
.setEntityType(key)
.setEntityName(value)
}.toBuffer
new AlterClientQuotasResponseData.EntryData()
.setErrorCode(apiError.error.code)
.setErrorMessage(apiError.message)
.setEntity(entityData.asJava)
}.toBuffer
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new AlterClientQuotasResponse(new AlterClientQuotasResponseData()
.setThrottleTimeMs(requestThrottleMs)
.setEntries(entriesData.asJava)))
} else {
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
alterClientQuotasRequest.getErrorResponse(requestThrottleMs, Errors.CLUSTER_AUTHORIZATION_FAILED.exception))
}
}
def handleDescribeUserScramCredentialsRequest(request: RequestChannel.Request): Unit = {
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.notYetSupported(request))
val describeUserScramCredentialsRequest = request.body[DescribeUserScramCredentialsRequest]
if (authHelper.authorize(request.context, DESCRIBE, CLUSTER, CLUSTER_NAME)) {
val result = zkSupport.adminManager.describeUserScramCredentials(
Option(describeUserScramCredentialsRequest.data.users).map(_.asScala.map(_.name).toList))
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new DescribeUserScramCredentialsResponse(result.setThrottleTimeMs(requestThrottleMs)))
} else {
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
describeUserScramCredentialsRequest.getErrorResponse(requestThrottleMs, Errors.CLUSTER_AUTHORIZATION_FAILED.exception))
}
}
def handleAlterUserScramCredentialsRequest(request: RequestChannel.Request): Unit = {
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request))
val alterUserScramCredentialsRequest = request.body[AlterUserScramCredentialsRequest]
if (!zkSupport.controller.isActive) {
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
alterUserScramCredentialsRequest.getErrorResponse(requestThrottleMs, Errors.NOT_CONTROLLER.exception))
} else if (authHelper.authorize(request.context, ALTER, CLUSTER, CLUSTER_NAME)) {
val result = zkSupport.adminManager.alterUserScramCredentials(
alterUserScramCredentialsRequest.data.upsertions().asScala, alterUserScramCredentialsRequest.data.deletions().asScala)
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new AlterUserScramCredentialsResponse(result.setThrottleTimeMs(requestThrottleMs)))
} else {
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
alterUserScramCredentialsRequest.getErrorResponse(requestThrottleMs, Errors.CLUSTER_AUTHORIZATION_FAILED.exception))
}
}
def handleAlterIsrRequest(request: RequestChannel.Request): Unit = {
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldNeverReceive(request))
val alterIsrRequest = request.body[AlterIsrRequest]
authHelper.authorizeClusterOperation(request, CLUSTER_ACTION)
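    // AlterIsr is an inter-broker API: it requires CLUSTER_ACTION and can only be served by the
    // active controller.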
if (!zkSupport.controller.isActive)
requestHelper.sendResponseExemptThrottle(request, alterIsrRequest.getErrorResponse(
AbstractResponse.DEFAULT_THROTTLE_TIME, Errors.NOT_CONTROLLER.exception))
else
zkSupport.controller.alterIsrs(alterIsrRequest.data, alterIsrResp =>
requestHelper.sendResponseExemptThrottle(request, new AlterIsrResponse(alterIsrResp))
)
}
def handleUpdateFeatures(request: RequestChannel.Request): Unit = {
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldAlwaysForward(request))
val updateFeaturesRequest = request.body[UpdateFeaturesRequest]
def sendResponseCallback(errors: Either[ApiError, Map[String, ApiError]]): Unit = {
def createResponse(throttleTimeMs: Int): UpdateFeaturesResponse = {
errors match {
case Left(topLevelError) =>
UpdateFeaturesResponse.createWithErrors(
topLevelError,
Collections.emptyMap(),
throttleTimeMs)
case Right(featureUpdateErrors) =>
UpdateFeaturesResponse.createWithErrors(
ApiError.NONE,
featureUpdateErrors.asJava,
throttleTimeMs)
}
}
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => createResponse(requestThrottleMs))
}
if (!authHelper.authorize(request.context, ALTER, CLUSTER, CLUSTER_NAME)) {
sendResponseCallback(Left(new ApiError(Errors.CLUSTER_AUTHORIZATION_FAILED)))
} else if (!zkSupport.controller.isActive) {
sendResponseCallback(Left(new ApiError(Errors.NOT_CONTROLLER)))
} else if (!config.isFeatureVersioningSupported) {
sendResponseCallback(Left(new ApiError(Errors.INVALID_REQUEST, "Feature versioning system is disabled.")))
} else {
zkSupport.controller.updateFeatures(updateFeaturesRequest, sendResponseCallback)
}
}
def handleDescribeCluster(request: RequestChannel.Request): Unit = {
val describeClusterRequest = request.body[DescribeClusterRequest]
var clusterAuthorizedOperations = Int.MinValue // Default value in the schema
// get cluster authorized operations
if (describeClusterRequest.data.includeClusterAuthorizedOperations) {
if (authHelper.authorize(request.context, DESCRIBE, CLUSTER, CLUSTER_NAME))
clusterAuthorizedOperations = authHelper.authorizedOperations(request, Resource.CLUSTER)
else
clusterAuthorizedOperations = 0
}
val brokers = metadataCache.getAliveBrokerNodes(request.context.listenerName)
val controllerId = metadataSupport.controllerId.getOrElse(MetadataResponse.NO_CONTROLLER_ID)
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => {
val data = new DescribeClusterResponseData()
.setThrottleTimeMs(requestThrottleMs)
.setClusterId(clusterId)
.setControllerId(controllerId)
        .setClusterAuthorizedOperations(clusterAuthorizedOperations)
brokers.foreach { broker =>
data.brokers.add(new DescribeClusterResponseData.DescribeClusterBroker()
.setBrokerId(broker.id)
.setHost(broker.host)
.setPort(broker.port)
.setRack(broker.rack))
}
new DescribeClusterResponse(data)
})
}
def handleEnvelope(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = {
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldNeverReceive(request))
// If forwarding is not yet enabled or this request has been received on an invalid endpoint,
// then we treat the request as unparsable and close the connection.
if (!isForwardingEnabled(request)) {
info(s"Closing connection ${request.context.connectionId} because it sent an `Envelope` " +
"request even though forwarding has not been enabled")
requestChannel.closeConnection(request, Collections.emptyMap())
return
} else if (!request.context.fromPrivilegedListener) {
info(s"Closing connection ${request.context.connectionId} from listener ${request.context.listenerName} " +
s"because it sent an `Envelope` request, which is only accepted on the inter-broker listener " +
s"${config.interBrokerListenerName}.")
requestChannel.closeConnection(request, Collections.emptyMap())
return
} else if (!authHelper.authorize(request.context, CLUSTER_ACTION, CLUSTER, CLUSTER_NAME)) {
requestHelper.sendErrorResponseMaybeThrottle(request, new ClusterAuthorizationException(
s"Principal ${request.context.principal} does not have required CLUSTER_ACTION for envelope"))
return
} else if (!zkSupport.controller.isActive) {
requestHelper.sendErrorResponseMaybeThrottle(request, new NotControllerException(
s"Broker $brokerId is not the active controller"))
return
}
EnvelopeUtils.handleEnvelopeRequest(request, requestChannel.metrics, handle(_, requestLocal))
}
def handleDescribeProducersRequest(request: RequestChannel.Request): Unit = {
val describeProducersRequest = request.body[DescribeProducersRequest]
def partitionError(
topicPartition: TopicPartition,
apiError: ApiError
): DescribeProducersResponseData.PartitionResponse = {
new DescribeProducersResponseData.PartitionResponse()
.setPartitionIndex(topicPartition.partition)
.setErrorCode(apiError.error.code)
.setErrorMessage(apiError.message)
}
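    // Build the response topic by topic, validating the topic name and authorization once per topic
    // and reusing the resulting error for every requested partition.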
val response = new DescribeProducersResponseData()
describeProducersRequest.data.topics.forEach { topicRequest =>
val topicResponse = new DescribeProducersResponseData.TopicResponse()
.setName(topicRequest.name)
val invalidTopicError = checkValidTopic(topicRequest.name)
val topicError = invalidTopicError.orElse {
if (!authHelper.authorize(request.context, READ, TOPIC, topicRequest.name)) {
Some(new ApiError(Errors.TOPIC_AUTHORIZATION_FAILED))
} else if (!metadataCache.contains(topicRequest.name))
Some(new ApiError(Errors.UNKNOWN_TOPIC_OR_PARTITION))
else {
None
}
}
topicRequest.partitionIndexes.forEach { partitionId =>
val topicPartition = new TopicPartition(topicRequest.name, partitionId)
val partitionResponse = topicError match {
case Some(error) => partitionError(topicPartition, error)
case None => replicaManager.activeProducerState(topicPartition)
}
topicResponse.partitions.add(partitionResponse)
}
response.topics.add(topicResponse)
}
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new DescribeProducersResponse(response.setThrottleTimeMs(requestThrottleMs)))
}
private def checkValidTopic(topic: String): Option[ApiError] = {
try {
Topic.validate(topic)
None
} catch {
case e: Throwable => Some(ApiError.fromThrowable(e))
}
}
def handleDescribeTransactionsRequest(request: RequestChannel.Request): Unit = {
val describeTransactionsRequest = request.body[DescribeTransactionsRequest]
val response = new DescribeTransactionsResponseData()
describeTransactionsRequest.data.transactionalIds.forEach { transactionalId =>
val transactionState = if (!authHelper.authorize(request.context, DESCRIBE, TRANSACTIONAL_ID, transactionalId)) {
new DescribeTransactionsResponseData.TransactionState()
.setTransactionalId(transactionalId)
.setErrorCode(Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED.code)
} else {
txnCoordinator.handleDescribeTransactions(transactionalId)
}
// Include only partitions which the principal is authorized to describe
val topicIter = transactionState.topics.iterator()
while (topicIter.hasNext) {
val topic = topicIter.next().topic
if (!authHelper.authorize(request.context, DESCRIBE, TOPIC, topic)) {
topicIter.remove()
}
}
response.transactionStates.add(transactionState)
}
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new DescribeTransactionsResponse(response.setThrottleTimeMs(requestThrottleMs)))
}
def handleListTransactionsRequest(request: RequestChannel.Request): Unit = {
val listTransactionsRequest = request.body[ListTransactionsRequest]
val filteredProducerIds = listTransactionsRequest.data.producerIdFilters.asScala.map(Long.unbox).toSet
val filteredStates = listTransactionsRequest.data.stateFilters.asScala.toSet
val response = txnCoordinator.handleListTransactions(filteredProducerIds, filteredStates)
// The response should contain only transactionalIds that the principal
// has `Describe` permission to access.
val transactionStateIter = response.transactionStates.iterator()
while (transactionStateIter.hasNext) {
val transactionState = transactionStateIter.next()
if (!authHelper.authorize(request.context, DESCRIBE, TRANSACTIONAL_ID, transactionState.transactionalId)) {
transactionStateIter.remove()
}
}
requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs =>
new ListTransactionsResponse(response.setThrottleTimeMs(requestThrottleMs)))
}
def handleAllocateProducerIdsRequest(request: RequestChannel.Request): Unit = {
val zkSupport = metadataSupport.requireZkOrThrow(KafkaApis.shouldNeverReceive(request))
authHelper.authorizeClusterOperation(request, CLUSTER_ACTION)
val allocateProducerIdsRequest = request.body[AllocateProducerIdsRequest]
if (!zkSupport.controller.isActive)
requestHelper.sendResponseMaybeThrottle(request, throttleTimeMs =>
allocateProducerIdsRequest.getErrorResponse(throttleTimeMs, Errors.NOT_CONTROLLER.exception))
else
zkSupport.controller.allocateProducerIds(allocateProducerIdsRequest.data, producerIdsResponse =>
requestHelper.sendResponseMaybeThrottle(request, throttleTimeMs =>
new AllocateProducerIdsResponse(producerIdsResponse.setThrottleTimeMs(throttleTimeMs)))
)
}
private def updateRecordConversionStats(request: RequestChannel.Request,
tp: TopicPartition,
conversionStats: RecordConversionStats): Unit = {
val conversionCount = conversionStats.numRecordsConverted
if (conversionCount > 0) {
request.header.apiKey match {
case ApiKeys.PRODUCE =>
brokerTopicStats.topicStats(tp.topic).produceMessageConversionsRate.mark(conversionCount)
brokerTopicStats.allTopicsStats.produceMessageConversionsRate.mark(conversionCount)
case ApiKeys.FETCH =>
brokerTopicStats.topicStats(tp.topic).fetchMessageConversionsRate.mark(conversionCount)
brokerTopicStats.allTopicsStats.fetchMessageConversionsRate.mark(conversionCount)
case _ =>
throw new IllegalStateException("Message conversion info is recorded only for Produce/Fetch requests")
}
request.messageConversionsTimeNanos = conversionStats.conversionTimeNanos
}
request.temporaryMemoryBytes = conversionStats.temporaryMemoryBytes
}
private def isBrokerEpochStale(zkSupport: ZkSupport, brokerEpochInRequest: Long): Boolean = {
// Broker epoch in LeaderAndIsr/UpdateMetadata/StopReplica request is unknown
// if the controller hasn't been upgraded to use KIP-380
if (brokerEpochInRequest == AbstractControlRequest.UNKNOWN_BROKER_EPOCH) false
else {
// brokerEpochInRequest > controller.brokerEpoch is possible in rare scenarios where the controller gets notified
// about the new broker epoch and sends a control request with this epoch before the broker learns about it
brokerEpochInRequest < zkSupport.controller.brokerEpoch
}
}
}
object KafkaApis {
  // Traffic from both in-sync and out-of-sync replicas is accounted for in the replication quota to ensure
  // total replication traffic doesn't exceed the quota.
// TODO: remove resolvedResponseData method when sizeOf can take a data object.
private[server] def sizeOfThrottledPartitions(versionId: Short,
unconvertedResponse: FetchResponse,
quota: ReplicationQuotaManager,
topicIds: util.Map[String, Uuid]): Int = {
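    // Flatten the fetch response into per-partition data and sum the serialized size of only those
    // partitions that are currently subject to the replication quota.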
val responseData = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
unconvertedResponse.data.responses().forEach(topicResponse =>
topicResponse.partitions().forEach(partition =>
responseData.put(new TopicPartition(topicResponse.topic(), partition.partitionIndex()), partition)))
FetchResponse.sizeOf(versionId, responseData.entrySet
.iterator.asScala.filter(element => quota.isThrottled(element.getKey)).asJava, topicIds)
}
// visible for testing
private[server] def shouldNeverReceive(request: RequestChannel.Request): Exception = {
new UnsupportedVersionException(s"Should never receive when using a Raft-based metadata quorum: ${request.header.apiKey()}")
}
// visible for testing
private[server] def shouldAlwaysForward(request: RequestChannel.Request): Exception = {
new UnsupportedVersionException(s"Should always be forwarded to the Active Controller when using a Raft-based metadata quorum: ${request.header.apiKey}")
}
private def unsupported(text: String): Exception = {
new UnsupportedVersionException(s"Unsupported when using a Raft-based metadata quorum: $text")
}
private def notYetSupported(request: RequestChannel.Request): Exception = {
notYetSupported(request.header.apiKey().toString)
}
private def notYetSupported(text: String): Exception = {
new UnsupportedVersionException(s"Not yet supported when using a Raft-based metadata quorum: $text")
}
}
| lindong28/kafka | core/src/main/scala/kafka/server/KafkaApis.scala | Scala | apache-2.0 | 178,949 |
package foo.bar.baz // the package nesting level is material to this bug
class DivergenceTest {
trait ColumnBase[T]
trait ShapeLevel
trait Flat extends ShapeLevel
trait Lower extends Flat
class Shape2[Level <: ShapeLevel, -M, U]
implicit final def columnBaseShape[Level >: Flat <: ShapeLevel, T, C <: ColumnBase[_]]
(implicit ev: C <:< ColumnBase[T]
): Shape2[Level, C, T] = ???
implicit final def intShape[Level <: ShapeLevel, T]: Shape2[Level, Int, Int] = ???
implicit final def tuple2Shape[Level <: ShapeLevel, M1,M2, U1,U2]
(implicit u1: Shape2[_ <: Level, M1, U1],
u2: Shape2[_ <: Level, M2, U2]
): Shape2[Level, (M1,M2), (U1,U2)] = ???
def foo {
class Coffees extends ColumnBase[Int]
def map1[F, T](f: F)(implicit shape: Shape2[_ <: Flat, F, T]) = ???
map1(((1, null: Coffees), 1))
map1(((null: Coffees, 1), 1)) // fails with implicit divergence error in 2.11.0-M6, works under 2.10.3
}
}
| loskutov/intellij-scala | testdata/scalacTests/pos/t7983.scala | Scala | apache-2.0 | 1,123 |
package com.sksamuel.elastic4s.requests.searches.aggs.pipeline
import com.sksamuel.elastic4s.handlers.script
import com.sksamuel.elastic4s.handlers.script.ScriptBuilderFn
import com.sksamuel.elastic4s.json.{XContentBuilder, XContentFactory}
object BucketSelectorPipelineBuilder {
def apply(agg: BucketSelectorPipelineAgg): XContentBuilder = {
val builder = XContentFactory.jsonBuilder()
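    // Emit the bucket_selector aggregation: a buckets_path object mapping names to paths, followed
    // by the selection script rendered via ScriptBuilderFn.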
builder.startObject("bucket_selector")
builder.startObject("buckets_path")
agg.bucketsPathMap.foreach(p => builder.field(p._1, p._2))
builder.endObject()
builder.rawField("script", script.ScriptBuilderFn(agg.script))
builder.endObject()
}
}
| sksamuel/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/searches/aggs/pipeline/BucketSelectorPipelineBuilder.scala | Scala | apache-2.0 | 662 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import org.apache.spark._
class FakeSchedulerBackend extends SchedulerBackend {
def start() {}
def stop() {}
def reviveOffers() {}
def defaultParallelism(): Int = 1
}
class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with Logging {
test("Scheduler does not always schedule tasks on the same workers") {
sc = new SparkContext("local", "TaskSchedulerImplSuite")
val taskScheduler = new TaskSchedulerImpl(sc)
taskScheduler.initialize(new FakeSchedulerBackend)
// Need to initialize a DAGScheduler for the taskScheduler to use for callbacks.
new DAGScheduler(sc, taskScheduler) {
override def taskStarted(task: Task[_], taskInfo: TaskInfo) {}
override def executorAdded(execId: String, host: String) {}
}
val numFreeCores = 1
val workerOffers = Seq(new WorkerOffer("executor0", "host0", numFreeCores),
new WorkerOffer("executor1", "host1", numFreeCores))
// Repeatedly try to schedule a 1-task job, and make sure that it doesn't always
// get scheduled on the same executor. While there is a chance this test will fail
// because the task randomly gets placed on the first executor all 1000 times, the
// probability of that happening is 2^-1000 (so sufficiently small to be considered
// negligible).
val numTrials = 1000
val selectedExecutorIds = 1.to(numTrials).map { _ =>
val taskSet = FakeTask.createTaskSet(1)
taskScheduler.submitTasks(taskSet)
val taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
assert(1 === taskDescriptions.length)
taskDescriptions(0).executorId
}
val count = selectedExecutorIds.count(_ == workerOffers(0).executorId)
assert(count > 0)
assert(count < numTrials)
}
test("Scheduler correctly accounts for multiple CPUs per task") {
sc = new SparkContext("local", "TaskSchedulerImplSuite")
val taskCpus = 2
sc.conf.set("spark.task.cpus", taskCpus.toString)
val taskScheduler = new TaskSchedulerImpl(sc)
taskScheduler.initialize(new FakeSchedulerBackend)
// Need to initialize a DAGScheduler for the taskScheduler to use for callbacks.
new DAGScheduler(sc, taskScheduler) {
override def taskStarted(task: Task[_], taskInfo: TaskInfo) {}
override def executorAdded(execId: String, host: String) {}
}
// Give zero core offers. Should not generate any tasks
val zeroCoreWorkerOffers = Seq(new WorkerOffer("executor0", "host0", 0),
new WorkerOffer("executor1", "host1", 0))
val taskSet = FakeTask.createTaskSet(1)
taskScheduler.submitTasks(taskSet)
var taskDescriptions = taskScheduler.resourceOffers(zeroCoreWorkerOffers).flatten
assert(0 === taskDescriptions.length)
// No tasks should run as we only have 1 core free.
val numFreeCores = 1
val singleCoreWorkerOffers = Seq(new WorkerOffer("executor0", "host0", numFreeCores),
new WorkerOffer("executor1", "host1", numFreeCores))
taskScheduler.submitTasks(taskSet)
taskDescriptions = taskScheduler.resourceOffers(singleCoreWorkerOffers).flatten
assert(0 === taskDescriptions.length)
// Now change the offers to have 2 cores in one executor and verify if it
// is chosen.
val multiCoreWorkerOffers = Seq(new WorkerOffer("executor0", "host0", taskCpus),
new WorkerOffer("executor1", "host1", numFreeCores))
taskScheduler.submitTasks(taskSet)
taskDescriptions = taskScheduler.resourceOffers(multiCoreWorkerOffers).flatten
assert(1 === taskDescriptions.length)
assert("executor0" === taskDescriptions(0).executorId)
}
test("Scheduler does not crash when tasks are not serializable") {
sc = new SparkContext("local", "TaskSchedulerImplSuite")
val taskCpus = 2
sc.conf.set("spark.task.cpus", taskCpus.toString)
val taskScheduler = new TaskSchedulerImpl(sc)
taskScheduler.initialize(new FakeSchedulerBackend)
// Need to initialize a DAGScheduler for the taskScheduler to use for callbacks.
val dagScheduler = new DAGScheduler(sc, taskScheduler) {
override def taskStarted(task: Task[_], taskInfo: TaskInfo) {}
override def executorAdded(execId: String, host: String) {}
}
val numFreeCores = 1
taskScheduler.setDAGScheduler(dagScheduler)
val taskSet = new TaskSet(
Array(new NotSerializableFakeTask(1, 0), new NotSerializableFakeTask(0, 1)), 0, 0, 0, null)
val multiCoreWorkerOffers = Seq(new WorkerOffer("executor0", "host0", taskCpus),
new WorkerOffer("executor1", "host1", numFreeCores))
taskScheduler.submitTasks(taskSet)
var taskDescriptions = taskScheduler.resourceOffers(multiCoreWorkerOffers).flatten
assert(0 === taskDescriptions.length)
// Now check that we can still submit tasks
// Even if one of the tasks has not-serializable tasks, the other task set should
// still be processed without error
taskScheduler.submitTasks(taskSet)
taskScheduler.submitTasks(FakeTask.createTaskSet(1))
taskDescriptions = taskScheduler.resourceOffers(multiCoreWorkerOffers).flatten
assert(taskDescriptions.map(_.executorId) === Seq("executor0"))
}
test("refuse to schedule concurrent attempts for the same stage (SPARK-8103)") {
sc = new SparkContext("local", "TaskSchedulerImplSuite")
val taskScheduler = new TaskSchedulerImpl(sc)
taskScheduler.initialize(new FakeSchedulerBackend)
// Need to initialize a DAGScheduler for the taskScheduler to use for callbacks.
val dagScheduler = new DAGScheduler(sc, taskScheduler) {
override def taskStarted(task: Task[_], taskInfo: TaskInfo) {}
override def executorAdded(execId: String, host: String) {}
}
taskScheduler.setDAGScheduler(dagScheduler)
val attempt1 = FakeTask.createTaskSet(1, 0)
val attempt2 = FakeTask.createTaskSet(1, 1)
taskScheduler.submitTasks(attempt1)
intercept[IllegalStateException] { taskScheduler.submitTasks(attempt2) }
// OK to submit multiple if previous attempts are all zombie
taskScheduler.taskSetManagerForAttempt(attempt1.stageId, attempt1.stageAttemptId)
.get.isZombie = true
taskScheduler.submitTasks(attempt2)
val attempt3 = FakeTask.createTaskSet(1, 2)
intercept[IllegalStateException] { taskScheduler.submitTasks(attempt3) }
taskScheduler.taskSetManagerForAttempt(attempt2.stageId, attempt2.stageAttemptId)
.get.isZombie = true
taskScheduler.submitTasks(attempt3)
}
test("don't schedule more tasks after a taskset is zombie") {
sc = new SparkContext("local", "TaskSchedulerImplSuite")
val taskScheduler = new TaskSchedulerImpl(sc)
taskScheduler.initialize(new FakeSchedulerBackend)
// Need to initialize a DAGScheduler for the taskScheduler to use for callbacks.
new DAGScheduler(sc, taskScheduler) {
override def taskStarted(task: Task[_], taskInfo: TaskInfo) {}
override def executorAdded(execId: String, host: String) {}
}
val numFreeCores = 1
val workerOffers = Seq(new WorkerOffer("executor0", "host0", numFreeCores))
val attempt1 = FakeTask.createTaskSet(10)
// submit attempt 1, offer some resources, some tasks get scheduled
taskScheduler.submitTasks(attempt1)
val taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
assert(1 === taskDescriptions.length)
// now mark attempt 1 as a zombie
taskScheduler.taskSetManagerForAttempt(attempt1.stageId, attempt1.stageAttemptId)
.get.isZombie = true
// don't schedule anything on another resource offer
val taskDescriptions2 = taskScheduler.resourceOffers(workerOffers).flatten
assert(0 === taskDescriptions2.length)
// if we schedule another attempt for the same stage, it should get scheduled
val attempt2 = FakeTask.createTaskSet(10, 1)
// submit attempt 2, offer some resources, some tasks get scheduled
taskScheduler.submitTasks(attempt2)
val taskDescriptions3 = taskScheduler.resourceOffers(workerOffers).flatten
assert(1 === taskDescriptions3.length)
val mgr = taskScheduler.taskIdToTaskSetManager.get(taskDescriptions3(0).taskId).get
assert(mgr.taskSet.stageAttemptId === 1)
}
test("if a zombie attempt finishes, continue scheduling tasks for non-zombie attempts") {
sc = new SparkContext("local", "TaskSchedulerImplSuite")
val taskScheduler = new TaskSchedulerImpl(sc)
taskScheduler.initialize(new FakeSchedulerBackend)
// Need to initialize a DAGScheduler for the taskScheduler to use for callbacks.
new DAGScheduler(sc, taskScheduler) {
override def taskStarted(task: Task[_], taskInfo: TaskInfo) {}
override def executorAdded(execId: String, host: String) {}
}
val numFreeCores = 10
val workerOffers = Seq(new WorkerOffer("executor0", "host0", numFreeCores))
val attempt1 = FakeTask.createTaskSet(10)
// submit attempt 1, offer some resources, some tasks get scheduled
taskScheduler.submitTasks(attempt1)
val taskDescriptions = taskScheduler.resourceOffers(workerOffers).flatten
assert(10 === taskDescriptions.length)
// now mark attempt 1 as a zombie
val mgr1 = taskScheduler.taskSetManagerForAttempt(attempt1.stageId, attempt1.stageAttemptId).get
mgr1.isZombie = true
// don't schedule anything on another resource offer
val taskDescriptions2 = taskScheduler.resourceOffers(workerOffers).flatten
assert(0 === taskDescriptions2.length)
// submit attempt 2
val attempt2 = FakeTask.createTaskSet(10, 1)
taskScheduler.submitTasks(attempt2)
// attempt 1 finished (this can happen even if it was marked zombie earlier -- all tasks were
// already submitted, and then they finish)
taskScheduler.taskSetFinished(mgr1)
// now with another resource offer, we should still schedule all the tasks in attempt2
val taskDescriptions3 = taskScheduler.resourceOffers(workerOffers).flatten
assert(10 === taskDescriptions3.length)
taskDescriptions3.foreach { task =>
val mgr = taskScheduler.taskIdToTaskSetManager.get(task.taskId).get
assert(mgr.taskSet.stageAttemptId === 1)
}
}
}
|
ArvinDevel/onlineAggregationOnSparkV2
|
core/src/test/scala/org/apache/spark/scheduler/TaskSchedulerImplSuite.scala
|
Scala
|
apache-2.0
| 10,958
|
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.directed
import cc.factorie._
import cc.factorie.variable.DoubleVar
// TODO Consider creating PositiveDouble, and then Gamma extends
object Gamma extends DirectedFamily3[DoubleVar,DoubleVar,DoubleVar] {
self =>
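// Note: logpr below is parameterized as (value, mean, variance) and computes a Gaussian-style
// log-density; it does not match the Gamma(alpha, beta) density implemented by pr.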
def logpr(value:Double, mean:Double, variance:Double): Double = {
val diff = value - mean
- diff * diff / (2 * variance) - 0.5 * math.log(2.0 * math.Pi * variance)
}
def pr(x:Double, alpha:Double, beta:Double): Double = {
require(x > 0)
math.pow(beta, alpha) / maths.gamma(alpha) * math.pow(x, alpha - 1) * math.exp(- beta * x)
}
def sampledValue(alpha:Double, beta:Double)(implicit random: scala.util.Random): Double = maths.nextGamma(alpha, beta)(random)
case class Factor(override val _1:DoubleVar, override val _2:DoubleVar, override val _3:DoubleVar) extends super.Factor(_1, _2, _3) {
def pr(child:Double, mean:Double, variance:Double): Double = self.pr(child, mean, variance)
def sampledValue(mean:Double, variance:Double)(implicit random: scala.util.Random): Double = self.sampledValue(mean, variance)
}
def newFactor(_1:DoubleVar, _2:DoubleVar, _3:DoubleVar) = Factor(_1, _2, _3)
}
// TODO Finish this.
//class GammaGamma(alphaGamma:Gamma, betaGamma:Gamma, value:Double = 0) extends Gamma(alphaGamma, betaGamma, value)
|
strubell/factorie
|
src/main/scala/cc/factorie/directed/Gamma.scala
|
Scala
|
apache-2.0
| 2,056
|
/*
* Copyright 2016 Nikolay Donets
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.nikdon.telepooz.model.methods
import com.github.nikdon.telepooz.model.{Message, ReplyMarkup, Response}
/**
* Use this method to send point on the map. On success, the sent Message is returned.
*
* @param chat_id Unique identifier for the target chat or username of the target channel
* (in the format @channelusername)
* @param latitude Latitude of location
* @param longitude Longitude of location
* @param disable_notification Sends the message silently. iOS users will not receive a notification, Android users
* will receive a notification with no sound.
* @param reply_to_message_id If the message is a reply, ID of the original message
* @param reply_markup Additional interface options. A JSON-serialized object for an inline keyboard, custom
* reply keyboard, instructions to hide reply keyboard or to force a reply from the user.
*/
case class SendLocation(
chat_id: String,
latitude: Double,
longitude: Double,
disable_notification: Option[Boolean] = None,
reply_to_message_id: Option[Long] = None,
reply_markup: Option[ReplyMarkup] = None
) extends Method[Response[Message]]
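// A minimal construction sketch (the chat id and coordinates below are made-up example values):
//   val req = SendLocation(chat_id = "@channelusername", latitude = 59.3293, longitude = 18.0686)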
|
nikdon/telepooz
|
src/main/scala/com/github/nikdon/telepooz/model/methods/SendLocation.scala
|
Scala
|
apache-2.0
| 1,883
|
/*
* Open Korean Text - Scala library to process Korean text
*
* Copyright 2014 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openkoreantext.processor.util
import org.openkoreantext.processor.TestBase
import org.openkoreantext.processor.tokenizer.KoreanTokenizer.KoreanToken
import org.openkoreantext.processor.util.KoreanPos._
import org.openkoreantext.processor.util.KoreanSubstantive._
class KoreanSubstantiveTest extends TestBase {
test("isJosaAttachable") {
//애플은
assert(isJosaAttachable('플', '은'))
assert(isJosaAttachable('플', '이'))
assert(isJosaAttachable('플', '을'))
assert(isJosaAttachable('플', '과'))
assert(isJosaAttachable('플', '아'))
//애플가
assert(!isJosaAttachable('플', '는'))
assert(!isJosaAttachable('플', '가'))
assert(!isJosaAttachable('플', '를'))
assert(!isJosaAttachable('플', '와'))
assert(!isJosaAttachable('플', '야'))
assert(!isJosaAttachable('플', '여'))
assert(!isJosaAttachable('플', '라'))
//에프은
assert(!isJosaAttachable('프', '은'))
assert(!isJosaAttachable('프', '이'))
assert(!isJosaAttachable('프', '을'))
assert(!isJosaAttachable('프', '과'))
assert(!isJosaAttachable('프', '아'))
//에프가
assert(isJosaAttachable('프', '는'))
assert(isJosaAttachable('프', '가'))
assert(isJosaAttachable('프', '를'))
assert(isJosaAttachable('프', '와'))
assert(isJosaAttachable('프', '야'))
assert(isJosaAttachable('프', '여'))
assert(isJosaAttachable('프', '라'))
}
test("isName should return false if input length less than 3") {
assert(!isName("김"))
assert(!isName("관진"))
}
test("isName should correctly identify 3-char person names") {
assert(isName("유호현"))
assert(isName("김혜진"))
assert(!isName("개루루"))
assert(isName("이상헌"))
assert(isName("박수형"))
assert(isName("이은별"))
assert(isName("최종은"))
assert(isName("박근혜"))
assert(isName("손석희"))
assert(isName("강철중"))
assert(!isName("사측의"))
assert(!isName("사다리"))
assert(!isName("철지난"))
assert(!isName("수용액"))
assert(!isName("눈맞춰"))
}
test ("isName should correctly identify 4-char person names") {
assert(isName("독고영재"))
assert(isName("제갈경준"))
assert(!isName("유호현진"))
}
test("isKoreanNumber should return true if the text is a Korean number") {
assert(isKoreanNumber("천이백만이십오"))
assert(isKoreanNumber("이십"))
assert(isKoreanNumber("오"))
assert(isKoreanNumber("삼"))
}
test("isKoreanNumber should return false if the text is not a Korean number") {
assert(!isKoreanNumber("영삼"))
assert(!isKoreanNumber("이정"))
assert(!isKoreanNumber("조삼모사"))
}
test("isKoreanNameVariation should correctly identify removed null consonanats") {
assert(isKoreanNameVariation("호혀니"))
assert(isKoreanNameVariation("혜지니"))
assert(isKoreanNameVariation("빠수니"))
assert(isKoreanNameVariation("은벼리"))
assert(isKoreanNameVariation("귀여미"))
assert(isKoreanNameVariation("루하니"))
assert(isKoreanNameVariation("이오니"))
assert(!isKoreanNameVariation("이"))
assert(!isKoreanNameVariation("장미"))
assert(!isKoreanNameVariation("별이"))
assert(!isKoreanNameVariation("꼬치"))
assert(!isKoreanNameVariation("꽃이"))
assert(!isKoreanNameVariation("팔티"))
assert(!isKoreanNameVariation("감미"))
assert(!isKoreanNameVariation("고미"))
assert(!isKoreanNameVariation("가라찌"))
assert(!isKoreanNameVariation("귀요미"))
assert(!isKoreanNameVariation("사람이"))
assert(!isKoreanNameVariation("사람이니"))
assert(!isKoreanNameVariation("유하기"))
}
test("collapseNouns should collapse single-length nouns correctly") {
assert(
collapseNouns(Seq(KoreanToken("마", Noun, 0, 1), KoreanToken("코", Noun, 1, 1), KoreanToken("토", Noun, 2, 1)))
=== Seq(KoreanToken("마코토", Noun, 0, 3, unknown = true))
)
assert(
collapseNouns(Seq(KoreanToken("마", Noun, 0, 1), KoreanToken("코", Noun, 1, 1),
KoreanToken("토", Noun, 2, 1), KoreanToken("를", Josa, 3, 1)))
=== Seq(KoreanToken("마코토", Noun, 0, 3, unknown = true), KoreanToken("를", Josa, 3, 1))
)
assert(
collapseNouns(Seq(KoreanToken("개", Modifier, 0, 1), KoreanToken("마", Noun, 1, 1),
KoreanToken("코", Noun, 2, 1), KoreanToken("토", Noun, 3, 1)))
=== Seq(KoreanToken("개", Modifier, 0, 1), KoreanToken("마코토", Noun, 1, 3, unknown = true))
)
assert(
collapseNouns(Seq(KoreanToken("마", Noun, 0, 1), KoreanToken("코", Noun, 1, 1),
KoreanToken("토", Noun, 2, 1), KoreanToken("사람", Noun, 3, 2)))
=== Seq(KoreanToken("마코토", Noun, 0, 3, unknown = true), KoreanToken("사람", Noun, 3, 2))
)
assert(
collapseNouns(Seq(KoreanToken("마", Noun, 0, 1), KoreanToken("코", Noun, 1, 1),
KoreanToken("사람", Noun, 2, 2), KoreanToken("토", Noun, 4, 1)))
=== Seq(KoreanToken("마코", Noun, 0, 2, unknown = true), KoreanToken("사람", Noun, 2, 2), KoreanToken("토", Noun, 4, 1))
)
}
}
|
open-korean-text/open-korean-text
|
src/test/scala/org/openkoreantext/processor/util/KoreanSubstantiveTest.scala
|
Scala
|
apache-2.0
| 5,928
|
/*
* Copyright 2012 Eike Kettner
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.eknet.publet.web.asset.impl
import org.eknet.publet.vfs._
import fs.FilesystemPartition
import org.eknet.publet.web.asset._
import org.eknet.publet.vfs.Path._
import org.eknet.publet.vfs.util.MapContainer
import java.io.File
import org.eknet.publet.web.asset.AssetResource
import scala.Some
import com.google.common.eventbus.EventBus
/**
* @author Eike Kettner eike.kettner@gmail.com
* @since 30.09.12 14:25
*/
class AssetContainer(tempDir: File, bus: EventBus) extends MountManager with RootContainer {
mount("compressed".p, new FilesystemPartition(tempDir, bus, true))
def mount(g: Group) {
g.resources.foreach(mount)
}
def mount(r: AssetResource) {
val path = internalPath(r)
mountTo(path, r)
val kinds = Kind.values.map(_.asInstanceOf[Kind.KindVal].ext)
if (!kinds.contains(r.name.ext)) {
val p = defaultFolder(r).p / r.name
mountTo(p, r)
}
}
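// Adds the resource to the MapContainer already mounted at `path`, or creates a new MapContainer,
// mounts it at the parent path, and adds the resource to it if nothing is mounted there yet.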
private def mountTo(path: Path, r: AssetResource) {
resolveMount(path) match {
case Some(part) => {
part._2.asInstanceOf[MapContainer].addResource(r)
}
case None => {
val c = new MapContainer
mount(path.parent, c)
c.addResource(r)
}
}
}
private def internalPath(r: AssetResource) =
"groups".p / r.group / defaultFolder(r) / r.name
private def internalTemp(name: String) = AssetManager.compressedPath.p.segments.last.p / name
def pathFor(r: AssetResource) =
AssetManager.assetPath.p / internalPath(r)
def pathForCompressed(name: String) = AssetManager.compressedPath.p / name
def lookupTempFile(name: String) = lookup(internalTemp(name))
def createTempFile(name: String) = createResource(internalTemp(name))
private def defaultFolder(r: AssetResource) = {
r.target match {
case Some(path) => path
case None => r.name.targetType match {
case ContentType.javascript => "js"
case ContentType.css => "css"
case m if (m.mime._1 == "image") => "img"
case _ => "other"
}
}
}
}
|
eikek/publet
|
web/src/main/scala/org/eknet/publet/web/asset/impl/AssetContainer.scala
|
Scala
|
apache-2.0
| 2,637
|
package com.socrata.curator
import java.util.concurrent.{Executor, Executors}
import com.rojoma.simplearm.v2.{Managed, Resource, managed}
import com.socrata.http.client.{HttpClient, HttpClientHttpClient}
object DiscoveryBrokerFromConfig {
private def defaultExecutor = {
// Never timeout shutting down an executor.
implicit val timeout = Resource.executorShutdownNoTimeout
managed(Executors.newCachedThreadPool())
}
/** Builds a DiscoveryBroker from configuration.
*
* @param config The discovery broker configuration.
* @param http The http client to use when making HTTP requests.
*/
def apply(config: DiscoveryBrokerConfig, http: HttpClient): Managed[DiscoveryBroker] = {
for {
curator <- CuratorFromConfig(config.curator)
discovery <- DiscoveryFromConfig(classOf[Void], curator, config.discovery)
} yield new DiscoveryBroker(discovery, http)
}
/** Builds a DiscoveryBroker from configuration.
*
* @param config The discovery broker configuration.
* @param userAgent The user agent to use when making HTTP requests.
* @param executor The executor to use for HTTP requests,
* defaults to a cached thread pool.
*/
def apply(config: DiscoveryBrokerConfig,
userAgent: String,
executor: Managed[Executor] = defaultExecutor) : Managed[DiscoveryBroker] = {
for {
executor <- executor
curator <- CuratorFromConfig(config.curator)
discovery <- DiscoveryFromConfig(classOf[Void], curator, config.discovery)
http <- managed(new HttpClientHttpClient(executor,
HttpClientHttpClient.
defaultOptions.
withUserAgent(userAgent)))
} yield new DiscoveryBroker(discovery, http)
}
}
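// Usage sketch (hypothetical; `brokerConfig` is assumed to be a DiscoveryBrokerConfig loaded
// elsewhere). The curator client, service discovery and http client are released when the
// managed block completes:
//   for (broker <- DiscoveryBrokerFromConfig(brokerConfig, "my-service/1.0")) {
//     // use broker here
//   }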
|
socrata-platform/socrata-curator-utils
|
core/src/main/scala/com.socrata.curator/DiscoveryBrokerFromConfig.scala
|
Scala
|
apache-2.0
| 1,846
|
/*
* Copyright 2012 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.comcast.money.akka
import com.comcast.money.api.{ SpanHandler, SpanInfo }
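/** A test SpanHandler that collects every handled span on an in-memory stack so tests can
* inspect or clear the recorded spans.
*/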
class CollectingSpanHandler() extends SpanHandler {
var spanInfoStack = Seq.empty[SpanInfo]
def push(span: SpanInfo): Unit = spanInfoStack = span +: spanInfoStack
def pop: Option[SpanInfo] = spanInfoStack.headOption
def clear(): Unit = spanInfoStack = Seq.empty[SpanInfo]
override def handle(span: SpanInfo): Unit = push(span)
}
|
Comcast/money
|
money-akka/src/test/scala/com/comcast/money/akka/CollectingSpanHandler.scala
|
Scala
|
apache-2.0
| 1,060
|
/*
* This is free and unencumbered software released into the public domain.
*
* Anyone is free to copy, modify, publish, use, compile, sell, or
* distribute this software, either in source code form or as a compiled
* binary, for any purpose, commercial or non-commercial, and by any
* means.
*
* In jurisdictions that recognize copyright laws, the author or authors
* of this software dedicate any and all copyright interest in the
* software to the public domain. We make this dedication for the benefit
* of the public at large and to the detriment of our heirs and
* successors. We intend this dedication to be an overt act of
* relinquishment in perpetuity of all present and future rights to this
* software under copyright law.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* For more information, please refer to <http://unlicense.org/>
*/
package net.adamcin.snagjar
import java.io.File
import org.apache.maven.artifact.repository.{ArtifactRepository, ArtifactRepositoryPolicy, DefaultRepositoryRequest, RepositoryRequest}
import org.apache.maven.plugins.annotations.{Component, Parameter}
import org.apache.maven.repository.RepositorySystem
import org.apache.maven.artifact.repository.layout.ArtifactRepositoryLayout
import org.apache.maven.settings.{Repository, RepositoryPolicy, Settings}
import scala.Option
import org.apache.maven.project.artifact.ProjectArtifactMetadata
import org.apache.maven.artifact.metadata.ArtifactMetadata
import org.apache.maven.artifact.Artifact
import org.apache.maven.artifact.resolver.ArtifactResolutionRequest
import org.apache.maven.execution.MavenSession
import scala.util.Try
object AccessToRepositories {
final val PROP_GENERATE_POMS = "generatePoms"
}
/**
* Trait defining common mojo parameters and methods useful for accessing maven repositories
* @since 0.8.0
* @author Mark Adamcin
*/
trait AccessToRepositories {
// -----------------------------------------------
// Injected Maven Components
// -----------------------------------------------
@Parameter(defaultValue = "${settings}", readonly = true)
var settings: Settings = null
@Parameter(defaultValue = "${session}", readonly = true)
var session: MavenSession = null
@Component
var repositorySystem: RepositorySystem = null
@Component(role = classOf[ArtifactRepositoryLayout])
var repositoryLayouts: java.util.Map[String, ArtifactRepositoryLayout] = null
// -----------------------------------------------
// Maven Parameters
// -----------------------------------------------
/**
* Specify the repository layout to use for local and remote repositories
*/
@Parameter(property = "repositoryLayout")
val repositoryLayout: String = null
/**
* Specify true to install generated "jar" pom when a parent pom is unresolvable,
* which omits dependencies and the parent pom reference
* @since 1.2.0
*/
@Parameter(property = AccessToRepositories.PROP_GENERATE_POMS)
val generatePoms: Boolean = false
/**
* Specify the local repository path
* Refer to maven-install-plugin:install-file
*/
@Parameter(property = "localRepositoryPath")
val localRepositoryPath: File = null
lazy val localRepository: ArtifactRepository =
Option(localRepositoryPath) match {
case Some(path) => repositorySystem.createLocalRepository(path)
case None => repositorySystem.createDefaultLocalRepository()
}
lazy val reposFromSettings: List[Repository] = {
import collection.JavaConverters._
val activeProfiles = settings.getActiveProfiles.asScala.toSet
settings.getProfiles.asScala.filter(p => activeProfiles.contains(p.getId)).foldLeft(List.empty[Repository]) { (acc, p) =>
acc ++ p.getRepositories.asScala.toList
}
}
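// Builds a RepositoryRequest that resolves against the configured local repository and the remote
// repositories declared by the active profiles in settings, plus the default remote repository.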
lazy val repositoryRequest: RepositoryRequest = {
import scala.collection.JavaConverters._
val request = DefaultRepositoryRequest.getRepositoryRequest(session, null)
request.setLocalRepository(localRepository)
val defaultSnapshotPolicy = new ArtifactRepositoryPolicy(false, ArtifactRepositoryPolicy.UPDATE_POLICY_ALWAYS, ArtifactRepositoryPolicy.CHECKSUM_POLICY_IGNORE)
val defaultReleasePolicy = new ArtifactRepositoryPolicy(true, ArtifactRepositoryPolicy.UPDATE_POLICY_DAILY, ArtifactRepositoryPolicy.CHECKSUM_POLICY_WARN)
def getPolicies(repo: Repository): (ArtifactRepositoryPolicy, ArtifactRepositoryPolicy) = {
val snapshots = Option(repo.getSnapshots).map { policy =>
new ArtifactRepositoryPolicy(policy.isEnabled, policy.getUpdatePolicy, policy.getChecksumPolicy)
}.getOrElse(defaultSnapshotPolicy)
val releases = Option(repo.getReleases).map { policy =>
new ArtifactRepositoryPolicy(policy.isEnabled, policy.getUpdatePolicy, policy.getChecksumPolicy)
}.getOrElse(defaultReleasePolicy)
(snapshots, releases)
}
def convertSettingsRepo(settingsRepo: Repository): Try[ArtifactRepository] = {
val (snapshots, releases) = getPolicies(settingsRepo)
Try(repositorySystem.createArtifactRepository(
settingsRepo.getId,
settingsRepo.getUrl,
repositoryLayouts.get(Option(settingsRepo.getLayout).getOrElse("default")), snapshots, releases))
}
val settingsRepos = reposFromSettings.foldLeft(List(Try(repositorySystem.createDefaultRemoteRepository()))) { (acc, settingsRepo) =>
convertSettingsRepo(settingsRepo) :: acc
}
request.setRemoteRepositories(settingsRepos.map(_.get).asJava)
request
}
// -----------------------------------------------
// Members
// -----------------------------------------------
val snapshotPolicy =
new ArtifactRepositoryPolicy(true,
ArtifactRepositoryPolicy.UPDATE_POLICY_ALWAYS,
ArtifactRepositoryPolicy.CHECKSUM_POLICY_IGNORE)
val releasePolicy =
new ArtifactRepositoryPolicy(true,
ArtifactRepositoryPolicy.UPDATE_POLICY_ALWAYS,
ArtifactRepositoryPolicy.CHECKSUM_POLICY_IGNORE)
// lazy evaluation occurs after dependency injection :)
lazy val layout: ArtifactRepositoryLayout = repositoryLayouts.get(Option(repositoryLayout).getOrElse("default"))
def snaggableToArtifact(s: Snaggable): (Artifact, ProjectArtifactMetadata) = {
val a = repositorySystem.createArtifact(s.gav.groupId, s.gav.artifactId, s.gav.version, "jar")
a.setFile(s.jar)
(a, new ProjectArtifactMetadata(a, s.pom))
}
def isResolvable(s: Snaggable): Boolean = {
s.gav.parent.forall { parentGav =>
val parentArtifact = repositorySystem.createProjectArtifact(
parentGav.groupId, parentGav.artifactId, parentGav.version)
val request = new ArtifactResolutionRequest(repositoryRequest)
request.setArtifact(parentArtifact)
repositorySystem.resolve(request).isSuccess
}
}
}
|
adamcin/snagjar-maven-plugin
|
src/main/scala/net/adamcin/snagjar/AccessToRepositories.scala
|
Scala
|
unlicense
| 7,204
|
object B {
inline def getInline: Int =
sys.error("This is an expected failure when running C")
}
|
dotty-staging/dotty
|
sbt-test/source-dependencies/inline/changes/B3.scala
|
Scala
|
apache-2.0
| 103
|
package org.nkvoll.javabin.json
import spray.json._
import org.nkvoll.javabin.service.internal.ElasticsearchService.TemplatesUpdated
trait ElasticsearchProtocol extends DefaultJsonProtocol {
implicit val templatesUpdatedProtocol: RootJsonFormat[TemplatesUpdated] = jsonFormat1(TemplatesUpdated)
}
|
nkvoll/javabin-rest-on-akka
|
src/main/scala/org/nkvoll/javabin/json/ElasticsearchProtocol.scala
|
Scala
|
mit
| 301
|
package dotty.tools
package dotc
package ast
import core._
import util.Positions._, Types._, Contexts._, Constants._, Names._, NameOps._, Flags._
import SymDenotations._, Symbols._, StdNames._, Annotations._, Trees._
import Decorators._
import language.higherKinds
import collection.mutable.ListBuffer
import config.Printers._
import typer.Mode
object desugar {
/** Are we using the new unboxed pair scheme? */
private final val unboxedPairs = false
import untpd._
/** Info of a variable in a pattern: The named tree and its type */
private type VarInfo = (NameTree, Tree)
// ----- DerivedTypeTrees -----------------------------------
class SetterParamTree extends DerivedTypeTree {
def derivedType(sym: Symbol)(implicit ctx: Context) = sym.info.resultType
}
class TypeRefTree extends DerivedTypeTree {
def derivedType(sym: Symbol)(implicit ctx: Context) = sym.typeRef
}
class DerivedFromParamTree extends DerivedTypeTree {
/** Make sure that for all enclosing module classes their companion classes
* are completed. Reason: We need the constructor of such companion classes to
* be completed so that OriginalSymbol attachments are pushed to DerivedTypeTrees
* in apply/unapply methods.
*/
override def ensureCompletions(implicit ctx: Context) =
if (!(ctx.owner is Package))
if (ctx.owner is ModuleClass) ctx.owner.linkedClass.ensureCompleted()
else ensureCompletions(ctx.outer)
/** Return info of original symbol, where all references to siblings of the
* original symbol (i.e. sibling and original symbol have the same owner)
* are rewired to same-named parameters or accessors in the scope enclosing
* the current scope. The current scope is the scope owned by the defined symbol
* itself, that's why we have to look one scope further out. If the resulting
* type is an alias type, dealias it. This is necessary because the
* accessor of a type parameter is a private type alias that cannot be accessed
* from subclasses.
*/
def derivedType(sym: Symbol)(implicit ctx: Context) = {
val relocate = new TypeMap {
val originalOwner = sym.owner
def apply(tp: Type) = tp match {
case tp: NamedType if tp.symbol.owner eq originalOwner =>
val defctx = ctx.outersIterator.dropWhile(_.scope eq ctx.scope).next
var local = defctx.denotNamed(tp.name).suchThat(_ is ParamOrAccessor).symbol
if (local.exists) (defctx.owner.thisType select local).dealias
else throw new Error(s"no matching symbol for ${sym.showLocated} in ${defctx.owner} / ${defctx.effectiveScope}")
case _ =>
mapOver(tp)
}
}
relocate(sym.info)
}
}
/** A type definition copied from `tdef` with a rhs typetree derived from it */
def derivedTypeParam(tdef: TypeDef) =
cpy.TypeDef(tdef)(
rhs = new DerivedFromParamTree() withPos tdef.rhs.pos watching tdef)
/** A value definition copied from `vdef` with a tpt typetree derived from it */
def derivedTermParam(vdef: ValDef) =
cpy.ValDef(vdef)(
tpt = new DerivedFromParamTree() withPos vdef.tpt.pos watching vdef)
// ----- Desugar methods -------------------------------------------------
/** var x: Int = expr
* ==>
* def x: Int = expr
* def x_=($1: <TypeTree()>): Unit = ()
*/
def valDef(vdef: ValDef)(implicit ctx: Context): Tree = {
val ValDef(name, tpt, rhs) = vdef
val mods = vdef.mods
def setterNeeded =
(mods is Mutable) && ctx.owner.isClass && (!(mods is PrivateLocal) || (ctx.owner is Trait))
if (setterNeeded) {
// todo: copy of vdef as getter needed?
// val getter = ValDef(mods, name, tpt, rhs) withPos vdef.pos ?
// right now vdef maps via expandedTree to a thicket which concerns itself.
// I don't see a problem with that but if there is one we can avoid it by making a copy here.
val setterParam = makeSyntheticParameter(tpt = (new SetterParamTree).watching(vdef))
val setterRhs = if (vdef.rhs.isEmpty) EmptyTree else unitLiteral
val setter = cpy.DefDef(vdef)(
name = name.setterName,
tparams = Nil,
vparamss = (setterParam :: Nil) :: Nil,
tpt = TypeTree(defn.UnitType),
rhs = setterRhs
).withMods((mods | Accessor) &~ CaseAccessor) // rhs gets filled in later, when field is generated and getter has parameters
Thicket(vdef, setter)
}
else vdef
}
/** Expand context bounds to evidence params. E.g.,
*
* def f[T >: L <: H : B](params)
* ==>
* def f[T >: L <: H](params)(implicit evidence$0: B[T])
*
* Expand default arguments to default getters. E.g.,
*
* def f[T: B](x: Int = 1)(y: String = x + "m") = ...
* ==>
* def f[T](x: Int)(y: String)(implicit evidence$0: B[T]) = ...
* def f$default$1[T] = 1
* def f$default$2[T](x: Int) = x + "m"
*/
def defDef(meth: DefDef, isPrimaryConstructor: Boolean = false)(implicit ctx: Context): Tree = {
val DefDef(name, tparams, vparamss, tpt, rhs) = meth
val mods = meth.mods
val epbuf = new ListBuffer[ValDef]
val tparams1 = tparams mapConserve {
case tparam @ TypeDef(_, ContextBounds(tbounds, cxbounds)) =>
for (cxbound <- cxbounds) {
val paramFlags: FlagSet = if (isPrimaryConstructor) PrivateLocalParamAccessor else Param
val epname = (nme.EVIDENCE_PARAM_PREFIX.toString + epbuf.length).toTermName
epbuf += ValDef(epname, cxbound, EmptyTree).withFlags(paramFlags | Implicit)
}
cpy.TypeDef(tparam)(rhs = tbounds)
case tparam =>
tparam
}
val meth1 = epbuf.toList match {
case Nil =>
meth
case evidenceParams =>
val vparamss1 = vparamss.reverse match {
case (vparams @ (vparam :: _)) :: rvparamss if vparam.mods is Implicit =>
((vparams ++ evidenceParams) :: rvparamss).reverse
case _ =>
vparamss :+ evidenceParams
}
cpy.DefDef(meth)(tparams = tparams1, vparamss = vparamss1)
}
/** The longest prefix of parameter lists in vparamss whose total length does not exceed `n` */
def takeUpTo(vparamss: List[List[ValDef]], n: Int): List[List[ValDef]] = vparamss match {
case vparams :: vparamss1 =>
val len = vparams.length
if (n >= len) vparams :: takeUpTo(vparamss1, n - len) else Nil
case _ =>
Nil
}
def normalizedVparamss = meth1.vparamss map (_ map (vparam =>
cpy.ValDef(vparam)(rhs = EmptyTree)))
def dropContextBound(tparam: TypeDef) = tparam.rhs match {
case ContextBounds(tbounds, _) => cpy.TypeDef(tparam)(rhs = tbounds)
case _ => tparam
}
def defaultGetters(vparamss: List[List[ValDef]], n: Int): List[DefDef] = vparamss match {
case (vparam :: vparams) :: vparamss1 =>
def defaultGetter: DefDef =
DefDef(
name = meth.name.defaultGetterName(n),
tparams = meth.tparams.map(tparam => dropContextBound(toDefParam(tparam))),
vparamss = takeUpTo(normalizedVparamss, n),
tpt = TypeTree(),
rhs = vparam.rhs
).withMods(vparam.mods & AccessFlags)
val rest = defaultGetters(vparams :: vparamss1, n + 1)
if (vparam.rhs.isEmpty) rest else defaultGetter :: rest
case Nil :: vparamss1 =>
defaultGetters(vparamss1, n)
case nil =>
Nil
}
val defGetters = defaultGetters(vparamss, 0)
if (defGetters.isEmpty) meth1
else {
val meth2 = cpy.DefDef(meth1)(vparamss = normalizedVparamss)
.withMods(meth1.mods | DefaultParameterized)
Thicket(meth2 :: defGetters)
}
}
/** Fill in empty type bounds with Nothing/Any. Expand private local type parameters as follows:
*
* class C[v T]
* ==>
* class C { type v C$T; type v T = C$T }
*/
def typeDef(tdef: TypeDef)(implicit ctx: Context): Tree = {
if (tdef.mods is PrivateLocalParam) {
val tparam = cpy.TypeDef(tdef)(name = tdef.name.expandedName(ctx.owner))
.withMods(tdef.mods &~ PrivateLocal | ExpandedName)
val alias = cpy.TypeDef(tdef)(rhs = refOfDef(tparam), tparams = Nil)
.withFlags(PrivateLocalParamAccessor | Synthetic | tdef.mods.flags & VarianceFlags)
Thicket(tparam, alias)
}
else tdef
}
@sharable private val synthetic = Modifiers(Synthetic)
private def toDefParam(tparam: TypeDef): TypeDef =
tparam.withFlags(Param)
private def toDefParam(vparam: ValDef): ValDef =
vparam.withFlags(Param | vparam.rawMods.flags & Implicit)
/** The expansion of a class definition. See inline comments for what is involved */
def classDef(cdef: TypeDef)(implicit ctx: Context): Tree = {
val TypeDef(name, impl @ Template(constr0, parents, self, _)) = cdef
val mods = cdef.mods
val (constr1, defaultGetters) = defDef(constr0, isPrimaryConstructor = true) match {
case meth: DefDef => (meth, Nil)
case Thicket((meth: DefDef) :: defaults) => (meth, defaults)
}
// The original type and value parameters in the constructor already have the flags
// needed to be type members (i.e. param, and possibly also private and local unless
// prefixed by type or val). `tparams` and `vparamss` are the type parameters that
// go in `constr`, the constructor after desugaring.
val constrTparams = constr1.tparams map toDefParam
val constrVparamss =
if (constr1.vparamss.isEmpty) { // ensure parameter list is non-empty
if (mods is Case)
ctx.error("case class needs to have at least one parameter list", cdef.pos)
ListOfNil
}
else constr1.vparamss.nestedMap(toDefParam)
val constr = cpy.DefDef(constr1)(tparams = constrTparams, vparamss = constrVparamss)
// Add constructor type parameters to auxiliary constructors
val normalizedBody = impl.body map {
case ddef: DefDef if ddef.name.isConstructorName =>
cpy.DefDef(ddef)(tparams = constrTparams)
case stat =>
stat
}
val derivedTparams = constrTparams map derivedTypeParam
val derivedVparamss = constrVparamss nestedMap derivedTermParam
val arity = constrVparamss.head.length
var classTycon: Tree = EmptyTree
// a reference to the class type, with all parameters given.
val classTypeRef/*: Tree*/ = {
// -language:keepUnions difference: classTypeRef needs type annotation, otherwise
// infers Ident | AppliedTypeTree, which
// renders the :\ in companions below untypable.
classTycon = (new TypeRefTree) withPos cdef.pos.startPos // watching is set at end of method
val tparams = impl.constr.tparams
if (tparams.isEmpty) classTycon else AppliedTypeTree(classTycon, tparams map refOfDef)
}
// new C[Ts](paramss)
lazy val creatorExpr = New(classTypeRef, constrVparamss nestedMap refOfDef)
// Methods to add to a case class C[..](p1: T1, ..., pN: Tn)(moreParams)
// def isDefined = true
// def productArity = N
// def _1 = this.p1
// ...
// def _N = this.pN
// def copy(p1: T1 = p1: @uncheckedVariance, ...,
// pN: TN = pN: @uncheckedVariance)(moreParams) =
// new C[...](p1, ..., pN)(moreParams)
//
// Note: copy default parameters need @uncheckedVariance; see
// neg/t1843-variances.scala for a test case. The test would give
// two errors without @uncheckedVariance, one of them spurious.
val caseClassMeths =
if (mods is Case) {
def syntheticProperty(name: TermName, rhs: Tree) =
DefDef(name, Nil, Nil, TypeTree(), rhs).withMods(synthetic)
val isDefinedMeth = syntheticProperty(nme.isDefined, Literal(Constant(true)))
val caseParams = constrVparamss.head.toArray
val productElemMeths = for (i <- 0 until arity) yield
syntheticProperty(nme.selectorName(i), Select(This(EmptyTypeName), caseParams(i).name))
def isRepeated(tree: Tree): Boolean = tree match {
case PostfixOp(_, nme.raw.STAR) => true
case ByNameTypeTree(tree1) => isRepeated(tree1)
case _ => false
}
val hasRepeatedParam = constrVparamss.exists(_.exists {
case ValDef(_, tpt, _) => isRepeated(tpt)
case _ => false
})
val copyMeths =
if (mods.is(Abstract) || hasRepeatedParam) Nil // cannot have default arguments for repeated parameters, hence copy method is not issued
else {
def copyDefault(vparam: ValDef) =
makeAnnotated(defn.UncheckedVarianceAnnot, refOfDef(vparam))
val copyFirstParams = derivedVparamss.head.map(vparam =>
cpy.ValDef(vparam)(rhs = copyDefault(vparam)))
val copyRestParamss = derivedVparamss.tail.nestedMap(vparam =>
cpy.ValDef(vparam)(rhs = EmptyTree))
DefDef(nme.copy, derivedTparams, copyFirstParams :: copyRestParamss, TypeTree(), creatorExpr)
.withMods(synthetic) :: Nil
}
copyMeths ::: isDefinedMeth :: productElemMeths.toList
}
else Nil
def anyRef = ref(defn.AnyRefAlias.typeRef)
def productConstr(n: Int) = {
val tycon = ref(defn.ProductNClass(n).typeRef)
val targs = constrVparamss.head map (_.tpt)
if (targs.isEmpty) tycon else AppliedTypeTree(tycon, targs)
}
// Case classes get a ProductN parent
var parents1 = parents
if ((mods is Case) && arity <= Definitions.MaxTupleArity)
parents1 = parents1 :+ productConstr(arity)
// The thicket which is the desugared version of the companion object
// synthetic object C extends parentTpt { defs }
def companionDefs(parentTpt: Tree, defs: List[Tree]) =
moduleDef(
ModuleDef(
name.toTermName, Template(emptyConstructor, parentTpt :: Nil, EmptyValDef, defs))
.withMods(synthetic))
.withPos(cdef.pos).toList
// The companion object definitions, if a companion is needed, Nil otherwise.
// companion definitions include:
// 1. If class is a case class case class C[Ts](p1: T1, ..., pN: TN)(moreParams):
// def apply[Ts](p1: T1, ..., pN: TN)(moreParams) = new C[Ts](p1, ..., pN)(moreParams) (unless C is abstract)
// def unapply[Ts]($1: C[Ts]) = $1
// 2. The default getters of the constructor
// The parent of the companion object of a non-parameterized case class
// (T11, ..., T1N) => ... => (TM1, ..., TMN) => C
// For all other classes, the parent is AnyRef.
val companions =
if (mods is Case) {
val parent =
if (constrTparams.nonEmpty ||
constrVparamss.length > 1 ||
mods.is(Abstract) ||
constr.mods.is(Private)) anyRef
// todo: also use anyRef if constructor has a dependent method type (or rule that out)!
else (constrVparamss :\ classTypeRef) ((vparams, restpe) => Function(vparams map (_.tpt), restpe))
val applyMeths =
if (mods is Abstract) Nil
else
DefDef(nme.apply, derivedTparams, derivedVparamss, TypeTree(), creatorExpr)
.withMods(synthetic | (constr1.mods.flags & DefaultParameterized)) :: Nil
val unapplyMeth = {
val unapplyParam = makeSyntheticParameter(tpt = classTypeRef)
val unapplyRHS = if (arity == 0) Literal(Constant(true)) else Ident(unapplyParam.name)
DefDef(nme.unapply, derivedTparams, (unapplyParam :: Nil) :: Nil, TypeTree(), unapplyRHS)
.withMods(synthetic)
}
companionDefs(parent, applyMeths ::: unapplyMeth :: defaultGetters)
}
else if (defaultGetters.nonEmpty)
companionDefs(anyRef, defaultGetters)
else Nil
// For an implicit class C[Ts](p11: T11, ..., p1N: T1N) ... (pM1: TM1, ..., pMN: TMN), the method
// synthetic implicit C[Ts](p11: T11, ..., p1N: T1N) ... (pM1: TM1, ..., pMN: TMN): C[Ts] =
// new C[Ts](p11, ..., p1N) ... (pM1, ..., pMN)
val implicitWrappers =
if (mods is Implicit) {
if (ctx.owner is Package)
ctx.error("implicit classes may not be toplevel", cdef.pos)
if (mods is Case)
ctx.error("implicit classes may not case classes", cdef.pos)
// implicit wrapper is typechecked in same scope as constructor, so
// we can reuse the constructor parameters; no derived params are needed.
DefDef(name.toTermName, constrTparams, constrVparamss, classTypeRef, creatorExpr)
.withFlags(Synthetic | Implicit) :: Nil
}
else Nil
val self1 = {
val selfType = if (self.tpt.isEmpty) classTypeRef else self.tpt
if (self.isEmpty) self
else cpy.ValDef(self)(tpt = selfType).withMods(self.mods | SelfName)
}
val cdef1 = {
val originalTparams = constr1.tparams.toIterator
val originalVparams = constr1.vparamss.toIterator.flatten
val tparamAccessors = derivedTparams.map(_.withMods(originalTparams.next.mods))
val caseAccessor = if (mods is Case) CaseAccessor else EmptyFlags
val vparamAccessors = derivedVparamss.flatten.map(_.withMods(originalVparams.next.mods | caseAccessor))
cpy.TypeDef(cdef)(
rhs = cpy.Template(impl)(constr, parents1, self1,
tparamAccessors ::: vparamAccessors ::: normalizedBody ::: caseClassMeths),
tparams = Nil)
}
// install the watch on classTycon
classTycon match {
case tycon: DerivedTypeTree => tycon.watching(cdef1)
case _ =>
}
flatTree(cdef1 :: companions ::: implicitWrappers)
}
val AccessOrSynthetic = AccessFlags | Synthetic
/** Expand
*
* object name extends parents { self => body }
*
* to:
* <module> val name: name$ = New(name$)
* <module> final class name$ extends parents { self: name.type => body }
*/
def moduleDef(mdef: ModuleDef)(implicit ctx: Context): Tree = {
val ModuleDef(name, tmpl) = mdef
val mods = mdef.mods
if (mods is Package)
PackageDef(Ident(name), cpy.ModuleDef(mdef)(nme.PACKAGE, tmpl).withMods(mods &~ Package) :: Nil)
else {
val clsName = name.moduleClassName
val clsRef = Ident(clsName)
val modul = ValDef(name, clsRef, New(clsRef, Nil))
.withMods(mods | ModuleCreationFlags)
.withPos(mdef.pos)
val ValDef(selfName, selfTpt, _) = tmpl.self
val selfMods = tmpl.self.mods
if (!selfTpt.isEmpty) ctx.error("object definition may not have a self type", tmpl.self.pos)
val clsSelf = ValDef(selfName, SingletonTypeTree(Ident(name)), tmpl.self.rhs)
.withMods(selfMods)
.withPos(tmpl.self.pos orElse tmpl.pos.startPos)
val clsTmpl = cpy.Template(tmpl)(self = clsSelf, body = tmpl.body)
val cls = TypeDef(clsName, clsTmpl)
.withMods(mods.toTypeFlags & AccessOrSynthetic | ModuleClassCreationFlags)
Thicket(modul, classDef(cls))
}
}
/** val p1, ..., pN: T = E
* ==>
* makePatDef[[val p1: T1 = E]]; ...; makePatDef[[val pN: TN = E]]
*/
def patDef(pdef: PatDef)(implicit ctx: Context): Tree = {
val PatDef(mods, pats, tpt, rhs) = pdef
val pats1 = if (tpt.isEmpty) pats else pats map (Typed(_, tpt))
flatTree(pats1 map (makePatDef(mods, _, rhs)))
}
/** If `pat` is a variable pattern,
*
* val/var p = e
*
* Otherwise, in case there is exactly one variable x_1 in pattern
* val/var p = e ==> val/var x_1 = (e: @unchecked) match (case p => (x_1))
*
* in case there are zero or more than one variables in pattern
* val/var p = e ==> private synthetic val t$ = (e: @unchecked) match (case p => (x_1, ..., x_N))
* val/var x_1 = t$._1
* ...
* val/var x_N = t$._N
* If the original pattern variable carries a type annotation, so does the corresponding
* ValDef.
*/
def makePatDef(mods: Modifiers, pat: Tree, rhs: Tree)(implicit ctx: Context): Tree = pat match {
case VarPattern(named, tpt) =>
derivedValDef(named, tpt, rhs, mods)
case _ =>
val rhsUnchecked = makeAnnotated(defn.UncheckedAnnot, rhs)
val vars = getVariables(pat)
val isMatchingTuple: Tree => Boolean = {
case Tuple(es) => es.length == vars.length
case _ => false
}
val ids = for ((named, _) <- vars) yield Ident(named.name)
val caseDef = CaseDef(pat, EmptyTree, makeTuple(ids))
val matchExpr =
if (forallResults(rhs, isMatchingTuple)) rhs
else Match(rhsUnchecked, caseDef :: Nil)
vars match {
case Nil =>
matchExpr
case (named, tpt) :: Nil =>
derivedValDef(named, tpt, matchExpr, mods)
case _ =>
val tmpName = ctx.freshName().toTermName
val patFlags = PrivateLocal | Synthetic | (mods.flags & Lazy)
val firstDef = ValDef(tmpName, TypeTree(), matchExpr).withFlags(patFlags)
def selector(n: Int) = Select(Ident(tmpName), nme.selectorName(n))
val restDefs =
for (((named, tpt), n) <- vars.zipWithIndex)
yield derivedValDef(named, tpt, selector(n), mods)
flatTree(firstDef :: restDefs)
}
}
def defTree(tree: Tree)(implicit ctx: Context): Tree = tree match {
case tree: ValDef => valDef(tree)
case tree: TypeDef => if (tree.isClassDef) classDef(tree) else typeDef(tree)
case tree: DefDef => defDef(tree)
case tree: ModuleDef => moduleDef(tree)
case tree: PatDef => patDef(tree)
}
/** { stats; <empty > }
* ==>
* { stats; () }
*/
def block(tree: Block)(implicit ctx: Context): Block = tree.expr match {
case EmptyTree =>
cpy.Block(tree)(tree.stats,
unitLiteral withPos (if (tree.stats.isEmpty) tree.pos else tree.pos.endPos))
case _ =>
tree
}
/** EmptyTree in lower bound ==> Nothing
* EmptyTree in upper bounds ==> Any
*/
def typeBoundsTree(tree: TypeBoundsTree)(implicit ctx: Context): TypeBoundsTree = {
val TypeBoundsTree(lo, hi) = tree
val lo1 = if (lo.isEmpty) untpd.TypeTree(defn.NothingType) else lo
val hi1 = if (hi.isEmpty) untpd.TypeTree(defn.AnyType) else hi
cpy.TypeBoundsTree(tree)(lo1, hi1)
}
/** Make closure corresponding to function.
* params => body
* ==>
* def $anonfun(params) = body
* Closure($anonfun)
*/
def makeClosure(params: List[ValDef], body: Tree, tpt: Tree = TypeTree()) =
Block(
DefDef(nme.ANON_FUN, Nil, params :: Nil, tpt, body).withMods(synthetic),
Closure(Nil, Ident(nme.ANON_FUN), EmptyTree))
/** Expand partial function
* { cases }
* ==>
* x$0 => x$0 match { cases }
*/
def makeCaseLambda(cases: List[CaseDef])(implicit ctx: Context) = {
val param = makeSyntheticParameter()
Function(param :: Nil, Match(Ident(param.name), cases))
}
/** Add annotation with class `cls` to tree:
* tree @cls
*/
def makeAnnotated(cls: Symbol, tree: Tree)(implicit ctx: Context) =
Annotated(TypedSplice(tpd.New(cls.typeRef, Nil)), tree)
private def derivedValDef(named: NameTree, tpt: Tree, rhs: Tree, mods: Modifiers) =
ValDef(named.name.asTermName, tpt, rhs).withMods(mods).withPos(named.pos)
/** Main desugaring method */
def apply(tree: Tree)(implicit ctx: Context): Tree = {
/** { label def lname(): Unit = rhs; call }
*/
def labelDefAndCall(lname: TermName, rhs: Tree, call: Tree) = {
val ldef = DefDef(lname, Nil, ListOfNil, TypeTree(defn.UnitType), rhs).withFlags(Label)
Block(ldef, call)
}
/** Translate infix operation expression left op right
*/
def makeBinop(left: Tree, op: Name, right: Tree): Tree = {
def assignToNamedArg(arg: Tree) = arg match {
case Assign(Ident(name), rhs) => cpy.NamedArg(arg)(name, rhs)
case _ => arg
}
if (isLeftAssoc(op)) {
val args: List[Tree] = right match {
case Parens(arg) => assignToNamedArg(arg) :: Nil
case Tuple(args) => args mapConserve assignToNamedArg
case _ => right :: Nil
}
Apply(Select(left, op), args)
} else {
val x = ctx.freshName().toTermName
Block(
ValDef(x, TypeTree(), left).withMods(synthetic),
Apply(Select(right, op), Ident(x)))
}
}
/** Create tree for for-comprehension `<for (enums) do body>` or
* `<for (enums) yield body>` where mapName and flatMapName are chosen
* corresponding to whether this is a for-do or a for-yield.
* The creation performs the following rewrite rules:
*
* 1.
*
* for (P <- G) E ==> G.foreach (P => E)
*
* Here and in the following (P => E) is interpreted as the function (P => E)
* if P is a variable pattern and as the partial function { case P => E } otherwise.
*
* 2.
*
* for (P <- G) yield E ==> G.map (P => E)
*
* 3.
*
* for (P_1 <- G_1; P_2 <- G_2; ...) ...
* ==>
* G_1.flatMap (P_1 => for (P_2 <- G_2; ...) ...)
*
* 4.
*
* for (P <- G; E; ...) ...
* =>
* for (P <- G.filter (P => E); ...) ...
*
* 5. For any N:
*
* for (P_1 <- G; P_2 = E_2; val P_N = E_N; ...)
* ==>
* for (TupleN(P_1, P_2, ... P_N) <-
* for (x_1 @ P_1 <- G) yield {
* val x_2 @ P_2 = E_2
* ...
* val x_N @ P_N = E_N
* TupleN(x_1, ..., x_N)
* } ...)
*
* If any of the P_i are variable patterns, the corresponding `x_i @ P_i` is not generated
* and the variable constituting P_i is used instead of x_i
*
* @param mapName The name to be used for maps (either map or foreach)
* @param flatMapName The name to be used for flatMaps (either flatMap or foreach)
* @param enums The enumerators in the for expression
* @param body The body of the for expression
*/
def makeFor(mapName: TermName, flatMapName: TermName, enums: List[Tree], body: Tree): Tree = ctx.traceIndented(i"make for ${ForYield(enums, body)}", show = true) {
/** Make a function value pat => body.
* If pat is a var pattern id: T then this gives (id: T) => body
* Otherwise this gives { case pat => body }
*/
def makeLambda(pat: Tree, body: Tree): Tree = pat match {
case VarPattern(named, tpt) =>
Function(derivedValDef(named, tpt, EmptyTree, Modifiers(Param)) :: Nil, body)
case _ =>
makeCaseLambda(CaseDef(pat, EmptyTree, body) :: Nil)
}
/** If `pat` is not an Identifier, a Typed(Ident, _), or a Bind, wrap
* it in a Bind with a fresh name. Return the transformed pattern, and the identifier
* that refers to the bound variable for the pattern.
*/
def makeIdPat(pat: Tree): (Tree, Ident) = pat match {
case Bind(name, _) => (pat, Ident(name))
case id: Ident if isVarPattern(id) && id.name != nme.WILDCARD => (id, id)
case Typed(id: Ident, _) if isVarPattern(id) && id.name != nme.WILDCARD => (pat, id)
case _ =>
val name = ctx.freshName().toTermName
(Bind(name, pat), Ident(name))
}
/** Make a pattern filter:
* rhs.withFilter { case pat => true case _ => false }
*
* On handling irrefutable patterns:
* The idea is to wait until the pattern matcher sees a call
*
* xs withFilter { cases }
*
* where cases can be proven to be irrefutable, i.e. cases would be
* equivalent to { case _ => true }
*
* In that case, compile to
*
* xs withFilter alwaysTrue
*
* where `alwaysTrue` is a predefined function value:
*
* val alwaysTrue: Any => Boolean = _ => true
*
* In the libraries operations can take advantage of alwaysTrue to shortcircuit the
* withFilter call.
*
* def withFilter(f: Elem => Boolean) =
* if (f eq alwaysTrue) this // or rather identity filter monadic applied to this
* else real withFilter
*/
def makePatFilter(rhs: Tree, pat: Tree): Tree = {
val cases = List(
CaseDef(pat, EmptyTree, Literal(Constant(true))),
CaseDef(Ident(nme.WILDCARD), EmptyTree, Literal(Constant(false))))
Apply(Select(rhs, nme.withFilter), Match(EmptyTree, cases))
}
/** Is pattern `pat` irrefutable when matched against `rhs`?
* We only can do a simple syntactic check here; a more refined check
* is done later in the pattern matcher (see discussion in @makePatFilter).
*/
def isIrrefutable(pat: Tree, rhs: Tree): Boolean = {
def matchesTuple(pats: List[Tree], rhs: Tree): Boolean = rhs match {
case Tuple(trees) => (pats corresponds trees)(isIrrefutable)
case Parens(rhs1) => matchesTuple(pats, rhs1)
case Block(_, rhs1) => matchesTuple(pats, rhs1)
case If(_, thenp, elsep) => matchesTuple(pats, thenp) && matchesTuple(pats, elsep)
case Match(_, cases) => cases forall (matchesTuple(pats, _))
case CaseDef(_, _, rhs1) => matchesTuple(pats, rhs1)
case Throw(_) => true
case _ => false
}
pat match {
case Bind(_, pat1) => isIrrefutable(pat1, rhs)
case Parens(pat1) => isIrrefutable(pat1, rhs)
case Tuple(pats) => matchesTuple(pats, rhs)
case _ => isVarPattern(pat)
}
}
def isIrrefutableGenFrom(gen: GenFrom): Boolean =
gen.isInstanceOf[IrrefutableGenFrom] || isIrrefutable(gen.pat, gen.expr)
/** rhs.name with a pattern filter on rhs unless `pat` is irrefutable when
* matched against `rhs`.
*/
def rhsSelect(gen: GenFrom, name: TermName) = {
val rhs = if (isIrrefutableGenFrom(gen)) gen.expr else makePatFilter(gen.expr, gen.pat)
Select(rhs, name)
}
enums match {
case (gen: GenFrom) :: Nil =>
Apply(rhsSelect(gen, mapName), makeLambda(gen.pat, body))
case (gen: GenFrom) :: (rest @ (GenFrom(_, _) :: _)) =>
val cont = makeFor(mapName, flatMapName, rest, body)
Apply(rhsSelect(gen, flatMapName), makeLambda(gen.pat, cont))
case (enum @ GenFrom(pat, rhs)) :: (rest @ GenAlias(_, _) :: _) =>
val (valeqs, rest1) = rest.span(_.isInstanceOf[GenAlias])
val pats = valeqs map { case GenAlias(pat, _) => pat }
val rhss = valeqs map { case GenAlias(_, rhs) => rhs }
val (defpat0, id0) = makeIdPat(pat)
val (defpats, ids) = (pats map makeIdPat).unzip
val pdefs = (defpats, rhss).zipped map (makePatDef(Modifiers(), _, _))
val rhs1 = makeFor(nme.map, nme.flatMap, GenFrom(defpat0, rhs) :: Nil, Block(pdefs, makeTuple(id0 :: ids)))
val allpats = pat :: pats
val vfrom1 = new IrrefutableGenFrom(makeTuple(allpats), rhs1)
makeFor(mapName, flatMapName, vfrom1 :: rest1, body)
case (gen: GenFrom) :: test :: rest =>
val filtered = Apply(rhsSelect(gen, nme.withFilter), makeLambda(gen.pat, test))
val genFrom =
if (isIrrefutableGenFrom(gen)) new IrrefutableGenFrom(gen.pat, filtered)
else GenFrom(gen.pat, filtered)
makeFor(mapName, flatMapName, genFrom :: rest, body)
case _ =>
EmptyTree //may happen for erroneous input
}
}
// begin desugar
tree match {
case SymbolLit(str) =>
Apply(
Select(ref(defn.SymbolClass.companionModule.termRef), nme.apply),
Literal(Constant(str)) :: Nil)
case InterpolatedString(id, strs, elems) =>
Apply(Select(Apply(Ident(nme.StringContext), strs), id), elems)
case InfixOp(l, op, r) =>
if (ctx.mode is Mode.Type)
if (op == tpnme.raw.AMP) AndTypeTree(l, r) // l & r
else if (op == tpnme.raw.BAR) OrTypeTree(l, r) // l | r
else AppliedTypeTree(Ident(op), l :: r :: Nil) // op[l, r]
else if (ctx.mode is Mode.Pattern)
Apply(Ident(op), l :: r :: Nil) // op(l, r)
else // l.op(r), or val x = r; l.op(x), plus handle named args specially
makeBinop(l, op, r)
case PostfixOp(t, op) =>
if ((ctx.mode is Mode.Type) && op == nme.raw.STAR) {
val seqClass = if (ctx.compilationUnit.isJava) defn.ArrayClass else defn.SeqClass
Annotated(
New(ref(defn.RepeatedAnnot.typeRef), Nil :: Nil),
AppliedTypeTree(ref(seqClass.typeRef), t))
} else {
assert(ctx.mode.isExpr || ctx.reporter.hasErrors, ctx.mode)
Select(t, op)
}
case PrefixOp(op, t) =>
Select(t, nme.UNARY_PREFIX ++ op)
case Parens(t) =>
t
case Tuple(ts) =>
if (unboxedPairs) {
def PairTypeTree(l: Tree, r: Tree) =
AppliedTypeTree(ref(defn.PairClass.typeRef), l :: r :: Nil)
if (ctx.mode is Mode.Type) ts.reduceRight(PairTypeTree)
else if (ts.isEmpty) unitLiteral
else ts.reduceRight(Pair(_, _))
}
else {
val arity = ts.length
def tupleClass = defn.TupleClass(arity)
if (arity > Definitions.MaxTupleArity) {
ctx.error(s"tuple too long (max allowed: ${Definitions.MaxTupleArity})", tree.pos)
unitLiteral
}
else if (arity == 1) ts.head
else if (ctx.mode is Mode.Type) AppliedTypeTree(ref(tupleClass.typeRef), ts)
else if (arity == 0) unitLiteral
else Apply(ref(tupleClass.companionModule.valRef), ts)
}
case WhileDo(cond, body) =>
// { <label> def while$(): Unit = if (cond) { body; while$() } ; while$() }
val call = Apply(Ident(nme.WHILE_PREFIX), Nil)
val rhs = If(cond, Block(body, call), unitLiteral)
labelDefAndCall(nme.WHILE_PREFIX, rhs, call)
case DoWhile(body, cond) =>
// { label def doWhile$(): Unit = { body; if (cond) doWhile$() } ; doWhile$() }
val call = Apply(Ident(nme.DO_WHILE_PREFIX), Nil)
val rhs = Block(body, If(cond, call, unitLiteral))
labelDefAndCall(nme.DO_WHILE_PREFIX, rhs, call)
case ForDo(enums, body) =>
makeFor(nme.foreach, nme.foreach, enums, body) orElse tree
case ForYield(enums, body) =>
makeFor(nme.map, nme.flatMap, enums, body) orElse tree
case PatDef(mods, pats, tpt, rhs) =>
val pats1 = if (tpt.isEmpty) pats else pats map (Typed(_, tpt))
flatTree(pats1 map (makePatDef(mods, _, rhs)))
case ParsedTry(body, handler, finalizer) =>
handler match {
case Match(EmptyTree, cases) => Try(body, cases, finalizer)
case EmptyTree => Try(body, Nil, finalizer)
case _ =>
Try(body,
List(CaseDef(Ident(nme.DEFAULT_EXCEPTION_NAME), EmptyTree, Apply(handler, Ident(nme.DEFAULT_EXCEPTION_NAME)))),
finalizer)
}
}
}.withPos(tree.pos)
/** Create a class definition with the same info as the refined type given by `parent`
* and `refinements`.
*
* parent { refinements }
* ==>
* trait <refinement> extends core { this: self => refinements }
*
* Here, `core` is the (possibly parameterized) class part of `parent`.
* If `parent` is the same as `core`, self is empty. Otherwise `self` is `parent`.
*
* Example: Given
*
* class C
* type T1 extends C { type T <: A }
*
* the refined type
*
* T1 { type T <: B }
*
* is expanded to
*
* trait <refinement> extends C { this: T1 => type T <: A }
*
* The result of this method is used for validity checking and is thrown away afterwards.
* @param parent The type of `parent`
*/
def refinedTypeToClass(parent: tpd.Tree, refinements: List[Tree])(implicit ctx: Context): TypeDef = {
def stripToCore(tp: Type): List[Type] = tp match {
case tp: RefinedType if tp.argInfos.nonEmpty => tp :: Nil // parameterized class type
case tp: TypeRef if tp.symbol.isClass => tp :: Nil // monomorphic class type
case tp: TypeProxy => stripToCore(tp.underlying)
case AndType(tp1, tp2) => stripToCore(tp1) ::: stripToCore(tp2)
case _ => defn.AnyType :: Nil
}
val parentCores = stripToCore(parent.tpe)
val untpdParent = TypedSplice(parent)
val (classParents, self) =
if (parentCores.length == 1 && (parent.tpe eq parentCores.head)) (untpdParent :: Nil, EmptyValDef)
else (parentCores map TypeTree, ValDef(nme.WILDCARD, untpdParent, EmptyTree))
val impl = Template(emptyConstructor, classParents, self, refinements)
TypeDef(tpnme.REFINE_CLASS, impl).withFlags(Trait)
}
/** If tree is a variable pattern, return its name and type, otherwise return None.
*/
private object VarPattern {
def unapply(tree: Tree)(implicit ctx: Context): Option[VarInfo] = tree match {
case id: Ident => Some(id, TypeTree())
case Typed(id: Ident, tpt) => Some((id, tpt))
case _ => None
}
}
/** Returns list of all pattern variables, possibly with their types,
* without duplicates
*/
private def getVariables(tree: Tree)(implicit ctx: Context): List[VarInfo] = {
val buf = new ListBuffer[VarInfo]
def seenName(name: Name) = buf exists (_._1.name == name)
def add(named: NameTree, t: Tree): Unit =
if (!seenName(named.name)) buf += ((named, t))
def collect(tree: Tree): Unit = tree match {
case Bind(nme.WILDCARD, _) =>
collect(tree)
case tree @ Bind(_, Typed(tree1, tpt)) if !mayBeTypePat(tpt) =>
add(tree, tpt)
collect(tree1)
case tree @ Bind(_, tree1) =>
add(tree, TypeTree())
collect(tree1)
case Typed(id: Ident, t) if isVarPattern(id) && id.name != nme.WILDCARD && !isWildcardStarArg(tree) =>
add(id, t)
case id: Ident if isVarPattern(id) && id.name != nme.WILDCARD =>
add(id, TypeTree())
case Apply(_, args) =>
args foreach collect
case Pair(left, right) =>
collect(left)
collect(right)
case Typed(expr, _) =>
collect(expr)
case NamedArg(_, arg) =>
collect(arg)
case SeqLiteral(elems) =>
elems foreach collect
case Alternative(trees) =>
for (tree <- trees; (vble, _) <- getVariables(tree))
ctx.error("illegal variable in pattern alternative", vble.pos)
case Annotated(annot, arg) =>
collect(arg)
case InterpolatedString(_, _, elems) =>
elems foreach collect
case InfixOp(left, _, right) =>
collect(left)
collect(right)
case PrefixOp(_, od) =>
collect(od)
case Parens(tree) =>
collect(tree)
case Tuple(trees) =>
trees foreach collect
case _ =>
}
collect(tree)
buf.toList
}
private class IrrefutableGenFrom(pat: Tree, expr: Tree) extends GenFrom(pat, expr)
}
|
yusuke2255/dotty
|
src/dotty/tools/dotc/ast/Desugar.scala
|
Scala
|
bsd-3-clause
| 39,350
|
package com.rasterfoundry.datamodel
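/** Pagination parameters: a row offset and limit plus a mapping from sort field to sort order. */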
final case class PageRequest(
offset: Long,
limit: Long,
sort: Map[String, Order]
)
|
raster-foundry/raster-foundry
|
app-backend/datamodel/src/main/scala/com/rasterfoundry/datamodel/PageRequest.scala
|
Scala
|
apache-2.0
| 133
|
package provingground.interface
import provingground._
import induction._
import monix.execution.Scheduler.Implicits.global
import monix.eval._
import scala.collection.mutable.ArrayBuffer
import HoTT.{Name => _, _}
import monix.execution.CancelableFuture
import math.max
import trepplein._
import LeanInterface._
import ujson.Arr
import scala.util.Try
import scala.collection.mutable
case class RecFoldException(
indMod: TermIndMod,
argFmlyExps: Vector[Expr],
recFn: Term,
argsFmlyTerm: Vector[Term],
vec: Vector[Term],
fail: ApplnFailException
) extends IllegalArgumentException(
s"Failure to fold recursive Function for ${indMod.name}, recursion function $recFn with error $fail"
)
object LeanParser {
val parseWork: mutable.Set[Expr] = mutable.Set()
def load(s: String = "basic"): LeanParser = {
val name = s"$s.lean.export"
val path = os.resource / name
val in = new java.io.ByteArrayInputStream(os.read.bytes(path))
val mods = provingground.interface.LeanInterface.getModsFromStream(in)
new LeanParser(mods)
}
def proofLift: (Term, Term) => Task[Term] = {
case (w: Typ[u], tp: Typ[v]) =>
Task.eval { (w.Var) :-> tp } // should be in all cases
case (w: FuncLike[u, v], tp: FuncLike[a, b]) if w.dom == tp.dom =>
val x = w.dom.Var
proofLift(w(x), tp(x.asInstanceOf[a]))
.map((g: Term) => x :~> (g: Term))
case _ => throw new Exception("could not lift proof")
}
def isPropnFn(e: Expr): Boolean = e match {
case Pi(_, t) => isPropnFn(t)
case Sort(l) => l == Level.Zero
case _ => false
}
def getValue(
t: Term,
n: Int,
accum: Vector[Term]
): Task[(Term, Vector[Term])] =
(t, n) match {
case (x, 0) => Task.eval(x -> accum)
case (l: LambdaLike[u, v], m) if m > 0 =>
getValue(l.value, m - 1, accum :+ l.variable)
case (fn: FuncLike[u, v], m) if m > 0 =>
val x = fn.dom.Var
getValue(fn(x), m - 1, accum :+ x)
case _ => throw new Exception("getValue failed")
}
def introsFold(ind: TermIndMod, p: Vector[Term]): Vector[Term] =
ind.intros.map((rule) => foldFuncLean(rule, p))
def getRec(ind: TermIndMod, argsFmlyTerm: Vector[Term]): Task[Term] =
ind match {
case smp: SimpleIndMod =>
getRecSimple(smp, Task.pure(argsFmlyTerm))
case indInd: IndexedIndMod =>
getRecIndexed(indInd, Task.pure(argsFmlyTerm))
}
def getExstInduc(
ind: TermIndMod,
argsFmlyTerm: Vector[Term]
): Task[ExstInducDefn] =
ind match {
case smp: SimpleIndMod =>
getSimpleExstInduc(smp, Task.pure(argsFmlyTerm))
case indInd: IndexedIndMod =>
getIndexedExstInduc(indInd, Task.pure(argsFmlyTerm))
}
def getSimpleExstInduc(
ind: SimpleIndMod,
argsFmlyTerm: Task[Vector[Term]]
): Task[ExstInducDefn] =
argsFmlyTerm.map { argsFmly =>
val params = argsFmly.init
val typ = toTyp(foldFuncLean(ind.typF, params))
val intros = introsFold(ind, params)
val str0 = ExstInducStrucs.get(typ, introsFold(ind, params))
val str = params.foldRight(str0) {
case (x, s) => ExstInducStrucs.LambdaInduc(x, s)
}
val dfn = ExstInducDefn(typ, intros.toVector, str)
dfn
}
def getRecSimple(
ind: SimpleIndMod,
argsFmlyTerm: Task[Vector[Term]]
): Task[Term] = {
def getInd(p: Vector[Term]) =
ConstructorSeqTL
.getExst(toTyp(foldFuncLean(ind.typF, p)), introsFold(ind, p))
.value
val newParamsTask = argsFmlyTerm map (_.init)
newParamsTask.flatMap { (newParams) =>
val indNew =
getInd(newParams)
val fmlyOpt = argsFmlyTerm map (_.last)
fmlyOpt map {
case l: LambdaLike[u, v] =>
l.value match {
case tp: Typ[u] =>
                if (tp.dependsOn(l.variable))
                  indNew.inducE((l.variable: Term) :-> (tp: Typ[u]))
                else indNew.recE(tp)
}
case fn: FuncLike[u, v] =>
val x = fn.dom.Var
val y = fn(x)
y match {
case tp: Typ[u] =>
if (tp.dependsOn(x)) {
(indNew.inducE((x: Term) :-> (tp: Typ[u])))
} else (indNew.recE(tp))
}
// case pt: PiDefn[u, v] if ind.isPropn && pt.domain == indNew.typ =>
// indNew.inducE(lmbda(pt.variable: Term)(pt.value))
case tp: Typ[u] if (ind.isPropn) =>
// val x = tp.Var
// if (tp.dependsOn(x)) {
// (indNew.inducE((x: Term) :-> (tp: Typ[u])))
// } else
(indNew.recE(tp))
}
}
}
def getIndexedExstInduc(
ind: IndexedIndMod,
argsFmlyTerm: Task[Vector[Term]]
): Task[ExstInducDefn] =
argsFmlyTerm.map { argsFmly =>
val params = argsFmly.init
val typF = foldFuncLean(ind.typF, params)
val intros = introsFold(ind, params)
val str0 = ExstInducStrucs.getIndexed(typF, introsFold(ind, params))
val str = params.foldRight(str0) {
case (x, s) => ExstInducStrucs.LambdaInduc(x, s)
}
val dfn = ExstInducDefn(ind.typF, intros.toVector, str)
dfn
}
def getRecIndexed(
ind: IndexedIndMod,
argsFmlyTerm: Task[Vector[Term]]
): Task[Term] = {
def getInd(p: Vector[Term]) =
TypFamilyExst
.getIndexedConstructorSeq(foldFuncLean(ind.typF, p), introsFold(ind, p))
.value
val newParamsTask = argsFmlyTerm map (_.init)
newParamsTask.flatMap { (newParams) =>
val indNew =
getInd(newParams)
val fmlOptRaw = argsFmlyTerm map (_.last)
val fmlOpt =
if (ind.isPropn)
fmlOptRaw.flatMap((fib) => proofLift(indNew.W, fib))
else
fmlOptRaw
val recOptTask =
for {
fml <- fmlOpt
codOpt = indNew.family.constFinalCod(fml)
} yield codOpt.map((cod) => indNew.recE(cod))
val inducTask =
fmlOpt.map((fib) => indNew.inducE(fib))
for {
recOpt <- recOptTask
induc <- inducTask
} yield recOpt.getOrElse(induc)
}
}
object RecIterAp {
def unapply(exp: Expr): Option[(Name, Vector[Expr])] = exp match {
case Const(Name.Str(prefix, "rec"), _) => Some((prefix, Vector()))
case App(fn, x) =>
unapply(fn).map { case (name, vec) => (name, vec :+ x) }
case _ => None
}
}
case class ParseException(
expVars: Vector[(Expr, Vector[Term])],
error: Exception
) extends Exception(error.toString) {
val exps: Vector[Expr] = expVars.map(_._1)
val vars: Vector[Term] = expVars.last._2
def apl: Option[(Expr, Expr, ApplnParseException)] =
(exps.head, error) match {
case (App(f, x), er: ApplnParseException) => Some((f, x, er))
case _ => None
}
def recfl: Option[RecFoldException] = error match {
case e: RecFoldException => Some(e)
case _ => None
}
}
class ApplnParseException(
val fe: Expr,
val ae: Expr,
func: Term,
arg: Term,
val vars: Vector[Term]
) extends ApplnFailException(func, arg)
case class LambdaFormException(variable: Term, value: Term, error: Throwable)
extends Exception(error.getMessage)
sealed trait Log
case class Defined(name: Name, term: Term) extends Log
case class DefinedInduc(name: Name, indMod: TermIndMod) extends Log
case class ParseWork(expr: Expr) extends Log
case class Parsed(expr: Expr) extends Log
trait Logger extends (Log => Unit) { logger =>
def &&(that: Logger): Logger = (l: Log) => {
logger(l); that(l)
}
}
object Logger {
def apply(f: Log => Unit): Logger = (l: Log) => f(l)
val nop: Logger = Logger((_) => ())
def dispatch(send: String => Unit): Logger =
Logger({
case LeanParser.Defined(name, _) => send(s"defined $name")
case LeanParser.DefinedInduc(name, _) =>
send(s"defined inductive $name")
case LeanParser.ParseWork(expr) =>
send(s"started parsing $expr; current queue : ${parseWork.size}")
case LeanParser.Parsed(expr) =>
send(s"finished parsing $expr; current queue : ${parseWork.size}")
})
}
def splitVec[A](
sizes: Vector[Int],
vec: Vector[A]
): (Vector[Vector[A]], Vector[A]) =
sizes match {
case Vector() => (Vector(), vec)
case n +: ms =>
val (head, tail) = vec.splitAt(n)
val (prev, residue) = splitVec(ms, tail)
(head +: prev, residue)
}
@annotation.tailrec
def shiftedName(n: Int, lastName: String = "'"): String =
if (n == 0) lastName
else shiftedName(n - 1, prefixedNextName(lastName))
def getNextVarName(vecs: Vector[Term], n: Int): String = {
val cleanVecs = vecs.filterNot(isWitness)
val lastName =
cleanVecs.headOption
.collect { case sym: Symbolic => sym.name.toString }
.getOrElse(shiftedName(n))
val newName = prefixedNextName(lastName)
if (vecs.headOption.exists(isWitness))
pprint.log(s"$vecs\\n$cleanVecs\\n$lastName; $newName")
newName
}
import TermJson._
def jsDef(parser: LeanParser): Arr = {
val jsDefs = parser.defnMap.toVector.map {
case (name, term) =>
ujson.Obj(
"name" -> ujson.Str(name.toString),
"term" -> termToJson(term).get
)
}
ujson.Arr(jsDefs: _*)
}
def jsTermIndMod(parser: LeanParser): Arr = {
val jsIndMods = parser.termIndModMap.toVector map {
case (name, tim) =>
ujson.Obj(
"name" -> ujson.Str(name.toString),
"num-params" -> ujson.Num(tim.numParams),
"is-propn" -> (if (tim.isPropn) ujson.True else ujson.False),
"intros" -> ujson.Arr(tim.intros.map(termToJson(_).get): _*)
)
}
ujson.Arr(jsIndMods: _*)
}
def toJs(parser: LeanParser) =
ujson.Obj(
"defns" -> jsDef(parser),
"indmods" -> jsTermIndMod(parser)
)
def apply(filename: String): LeanParser =
new LeanParser(LeanInterface.getMods(filename))
def applyFuncOptFold(
ft: Task[Option[Term]],
v: Vector[Option[Term]]
): Task[Option[Term]] =
v match {
case Vector() => ft
case xo +: ys =>
applyFuncOptFold(
ft.map(
(fo) => fo.flatMap((f) => xo.flatMap((x) => applyFuncOpt(f, x)))
),
ys
)
}
def applyFuncFold(ft: Task[Term], v: Vector[Term]): Task[Term] =
v match {
case Vector() => ft
case x +: ys =>
applyFuncFold(
ft.map(
(f) => applyFuncLean(f, x)
),
ys
)
}
}
class LeanParser(
initMods: Seq[Modification],
defTaskMap: Map[Name, Task[Term]] = Map(),
indTaskMap: Map[Name, Task[TermIndMod]] = Map(),
log: LeanParser.Logger = LeanParser.Logger.nop
) {
val mods: ArrayBuffer[Modification] = ArrayBuffer(initMods: _*)
def addMods(m: Seq[Modification]): Unit = mods ++= m
import LeanParser._
val defnMap: mutable.Map[Name, Term] = mutable.Map()
val termIndModMap: mutable.Map[Name, TermIndMod] = mutable.Map()
def update(): Unit = {
import library._
defnMap ++= LeanMemo.defMap
termIndModMap ++= LeanMemo.indMap
}
val parseMemo: mutable.Map[(Expr, Vector[Term]), Term] = mutable.Map()
def getMemTermIndMod(name: Name, exp: Expr): Task[TermIndMod] =
getTermIndMod(name)
.orElse(indModFromMod(name))
.getOrElse(
Task.raiseError(UnParsedException(exp))
)
def getNamed(name: Name): Option[Task[Term]] =
defTaskMap
.get(name)
.orElse(
defnMap.get(name).map((t) => Task.pure(t))
)
def getTermIndMod(name: Name): Option[Task[TermIndMod]] =
indTaskMap
.get(name)
.orElse(
termIndModMap.get(name).map((t) => Task.pure(t))
)
def recApp(
name: Name,
args: Vector[Expr],
exp: Expr,
vars: Vector[Term]
): Task[Term] =
for {
indMod <- getMemTermIndMod(name, exp)
(argsFmly, xs) = args.splitAt(indMod.numParams + 1)
argsFmlyTerm <- parseVec(argsFmly, vars).executeWithOptions(
_.enableAutoCancelableRunLoops
)
recFnT = getRec(indMod, argsFmlyTerm)
vec <- parseVec(xs, vars).executeWithOptions(
_.enableAutoCancelableRunLoops
)
vecInter = indMod.interleaveData(vec)
recFn <- recFnT
resT = Task(foldFuncLean(recFn, vecInter)).onErrorRecoverWith {
case err: ApplnFailException =>
throw RecFoldException(
indMod,
args,
recFn,
argsFmlyTerm,
vecInter,
err
)
}
res <- resT
} yield res
def getTask(name: String): Task[Term] =
    parse(Const(Name(name.split("\\.").toIndexedSeq: _*), Vector()))
def getFut(name: String): CancelableFuture[Term] =
getTask(name).runToFuture
def get(name: String): Term = getTask(name).runSyncUnsafe()
def getTry(name: String): Try[Term] =
getTask(name).materialize.runSyncUnsafe()
def getError(name: String) : Option[ParseException] =
getTry(name).fold(
err => err match {
case pe : ParseException => Some(pe)
case _ => None
},
_ => None
)
def getIndTask(s: String): Task[TermIndMod] = {
    val name = Name(s.split("\\.").toIndexedSeq: _*)
val exp = Const(name, Vector())
getMemTermIndMod(name, exp)
}
def getInd(s: String) = getIndTask(s).runSyncUnsafe()
def findDef(s: String): Option[DefMod] = {
    val name = trepplein.Name(s.split("\\.").toIndexedSeq: _*)
mods.find(_.name == name).collect { case df: DefMod => df }
}
def findInd(s: String): Option[IndMod] = {
    val name = trepplein.Name(s.split("\\.").toIndexedSeq: _*)
mods.find(_.name == name).collect { case df: IndMod => df }
}
def allNames(init: String = "") =
mods
.filter(_.name.toString.startsWith(init))
.map(_.name.toString)
def topNames(init: String = "") =
    allNames(init).map(s => s.split("\\.").headOption).flatten.distinct
def parse(exp: Expr, vars: Vector[Term] = Vector()): Task[Term] = {
val memParsed = parseMemo.get(exp -> vars)
memParsed.map(Task.pure).getOrElse {
parseWork += exp
log(ParseWork(exp))
val resTask: Task[Term] = exp match {
case Const(name, _) =>
getNamed(name)
.orElse {
defFromMod(name)
}
.getOrElse(
Task.raiseError(UnParsedException(exp))
)
case Sort(Level.Zero) => Task.pure(Prop)
case Sort(_) => Task.pure(Type)
case Var(n) => Task.pure(vars(n))
case RecIterAp(name, args) =>
pprint.log(s"Seeking RecIterAp $name, $args, $vars")
pprint.log(s"${vars.headOption.map(isWitness)}")
recApp(name, args, exp, vars)
case App(f, a) =>
for {
func <- parse(f, vars)
.executeWithOptions(_.enableAutoCancelableRunLoops)
arg <- parse(a, vars)
.executeWithOptions(_.enableAutoCancelableRunLoops)
res = Try(applyFuncLean(func, arg))
.getOrElse(throw new ApplnParseException(f, a, func, arg, vars))
} yield res
case Lam(domain, body) =>
for {
domTerm <- parse(domain.ty, vars)
.executeWithOptions(_.enableAutoCancelableRunLoops)
domTyp <- Task.eval(toTyp(domTerm))
x = getNextVarName(vars, maxIndex(body)) :: domTyp
value <- parse(body, x +: vars)
.executeWithOptions(_.enableAutoCancelableRunLoops)
} yield
value match {
case FormalAppln(fn, arg) if arg == x && fn.indepOf(x) =>
pprint.log(fn)
fn
// case y if domain.prettyName.toString == "_" => y
case _ =>
// if (value.typ.dependsOn(x)) LambdaTerm(x, value)
// else LambdaFixed(x, value)
Try(lambda(x)(value)).fold(err => {
pprint.log(value)
pprint.log(x)
pprint.log(x.typ)
throw LambdaFormException(x, value, err)
}, res => res)
}
case Pi(domain, body) =>
for {
domTerm <- parse(domain.ty, vars)
.executeWithOptions(_.enableAutoCancelableRunLoops)
domTyp <- Task
.eval(toTyp(domTerm))
.executeWithOptions(_.enableAutoCancelableRunLoops)
x = getNextVarName(vars, maxIndex(body)) :: domTyp
value <- parse(body, x +: vars)
.executeWithOptions(_.enableAutoCancelableRunLoops)
cod <- Task.eval(toTyp(value))
} yield
if (LeanInterface.usesVar(body, 0)) piDefn(x)(cod)
else x.typ ->: cod
case Let(domain, value, body) =>
for {
domTerm <- parse(domain.ty, vars)
.executeWithOptions(_.enableAutoCancelableRunLoops)
domTyp <- Task.eval(toTyp(domTerm))
x = getNextVarName(vars, maxIndex(body)) :: domTyp
valueTerm <- parse(value, vars)
.executeWithOptions(_.enableAutoCancelableRunLoops)
bodyTerm <- parse(body, x +: vars)
.executeWithOptions(_.enableAutoCancelableRunLoops)
} yield bodyTerm.replace(x, valueTerm)
case e => Task.raiseError(UnParsedException(e))
}
for {
res <- resTask
_ = {
parseWork -= exp
log(Parsed(exp))
}
} yield res
// if (isPropFmly(res.typ)) "_" :: (res.typ) else res
}
}.onErrorRecoverWith {
case pe: ParseException =>
Task.raiseError(ParseException(pe.expVars :+ (exp -> vars), pe.error))
case error: Exception =>
Task.raiseError(ParseException(Vector(exp -> vars), error))
}
def parseVec(vec: Vector[Expr], vars: Vector[Term]): Task[Vector[Term]] =
vec match {
case Vector() => Task.pure(Vector())
case x +: ys =>
for {
head <- parse(x, vars).executeWithOptions(
_.enableAutoCancelableRunLoops
)
tail <- parseVec(ys, vars).executeWithOptions(
_.enableAutoCancelableRunLoops
)
} yield (head +: tail)
}
def parseOptVec(
vec: Vector[(Expr, Int)],
vars: Vector[Term],
indices: Set[Int]
): Task[Vector[Option[Term]]] =
vec match {
case Vector() => Task.pure(Vector())
case (x, m) +: ys =>
for {
tail <- parseOptVec(ys, vars, indices).executeWithOptions(
_.enableAutoCancelableRunLoops
)
headOpt <- if (indices.contains(m))
parse(x, vars)
.executeWithOptions(_.enableAutoCancelableRunLoops)
.map(Option(_))
else Task.pure(None)
} yield (headOpt +: tail)
}
def withDefn(name: Name, exp: Expr): Task[Unit] =
for {
term <- parse(exp, Vector())
.executeWithOptions(_.enableAutoCancelableRunLoops)
_ = {
pprint.log(s"Defined $name"); log(Defined(name, term))
defnMap += name -> term
}
} yield ()
def withAxiom(name: Name, ty: Expr): Task[Unit] =
for {
typ <- parse(ty, Vector())
.executeWithOptions(_.enableAutoCancelableRunLoops)
term = (name.toString) :: toTyp(typ)
_ = {
pprint.log(s"Defined $name"); log(Defined(name, term))
defnMap += name -> term
}
} yield ()
def withAxiomSeq(axs: Vector[(Name, Expr)]): Task[Unit] =
axs match {
case Vector() => Task(())
case (name, ty) +: ys =>
for {
_ <- withAxiom(name, ty)
_ <- withAxiomSeq(ys)
} yield ()
}
// Like withAxiomSeq but returns the axioms
def foldAxiomSeq(
accum: Vector[Term],
axs: Vector[(Name, Expr)]
): Task[Vector[Term]] = axs match {
case Vector() =>
Task(accum)
case (name, ty) +: ys =>
for {
typ <- parse(ty, Vector())
.executeWithOptions(_.enableAutoCancelableRunLoops)
// (typ, ltm1) = pr
term = (name.toString) :: toTyp(typ)
_ = {
pprint.log(s"Defined $name"); log(Defined(name, term))
defnMap += name -> term
}
res <- foldAxiomSeq(term +: accum, ys)
} yield res
}
def withMod(mod: Modification): Task[Unit] = mod match {
case ind: IndMod =>
val isPropn = isPropnFn(ind.ty)
val name = ind.name
for {
indTypTerm <- parse(ind.ty, Vector())
.executeWithOptions(_.enableAutoCancelableRunLoops)
// (indTypTerm, ltm1) = pr
indTyp = toTyp(indTypTerm)
typF = name.toString :: indTyp
_ = {
pprint.log(s"Defined $name"); log(Defined(name, typF))
defnMap += name -> typF
}
intros <- foldAxiomSeq(Vector(), ind.intros).map(_.reverse)
// (intros, withIntros) = introsPair
typValuePair <- getValue(typF, ind.numParams, Vector())
indMod = typValuePair match {
case (_: Typ[Term], params) =>
SimpleIndMod(ind.name, typF, intros, params.size, isPropn)
case (_, params) =>
IndexedIndMod(ind.name, typF, intros, params.size, isPropn)
}
_ = {
log(DefinedInduc(name, indMod)); termIndModMap += ind.name -> indMod
}
} yield ()
case ax: AxiomMod =>
withAxiom(ax.name, ax.ty)
case df: DefMod =>
withDefn(df.name, df.value)
case QuotMod =>
import quotient._
val axs = Vector(quot, quotLift, quotMk, quotInd).map { (ax) =>
(ax.name, ax.ty)
}
withAxiomSeq(axs)
}
val allIntros: ArrayBuffer[(Name, Expr)] = mods.collect {
case ind: IndMod => ind.intros
}.flatten
def findDefMod(name: Name): Option[DefMod] =
mods.collectFirst {
case dm: DefMod if dm.name == name => dm
}
def findIndMod(name: Name): Option[IndMod] =
mods collectFirst {
case dm: IndMod if dm.name == name => dm
}
def findIntro(name: Name): Option[Expr] =
allIntros.find(_._1 == name).map(_._2)
def findRecChildren(name: Name): Option[Vector[Expr]] =
name match {
case Name.Str(prefix, "rec") =>
findIndMod(prefix).map((ind) => ind.ty +: ind.intros.map(_._2))
case _ => None
}
def findChildren(name: Name): Option[Vector[Expr]] =
findDefMod(name)
.map((dm) => Vector(dm.ty, dm.value))
.orElse(findIndMod(name).map((ind) => Vector(ind.ty)))
.orElse(findIntro(name).map((exp: Expr) => Vector(exp)))
.orElse(findRecChildren(name))
def maxIndex(exp: Expr): Int = exp match {
case Sort(_) => 0
case Var(_) => 0
case App(f, x) => max(maxIndex(f), maxIndex(x))
case LocalConst(_, _) => 0
case Lam(domain, body) => max(maxIndex(domain.ty) + 1, maxIndex(body) + 1)
case Pi(domain, body) => max(maxIndex(domain.ty) + 1, maxIndex(body) + 1)
case Let(domain, value, body) =>
Vector(maxIndex(domain.ty), maxIndex(value), maxIndex(body) + 1).max
case Const(name @ Name.Str(_, "rec"), _) =>
findRecChildren(name)
.map((v) => v.map(maxIndex).max * 2 + 1)
.getOrElse(throw new Exception(s"could not find name $name"))
case Const(name, _) =>
findChildren(name)
.map((v) => v.map(maxIndex).max)
.getOrElse(throw new Exception(s"could not find name $name"))
}
def modNames(mod: Modification): Vector[Name] = mod match {
case ind: IndMod =>
ind.name +: (ind.intros.map(_._1))
case QuotMod =>
import quotient._
Vector(quot, quotLift, quotMk, quotInd).map(_.name)
case _ =>
Vector(mod.name)
}
def findMod(name: Name, mods: Seq[Modification]): Option[Modification] =
mods.find((mod) => modNames(mod).contains(name))
def defFromMod(name: Name): Option[Task[Term]] =
findMod(name, mods.toVector).map { (mod) =>
// pprint.log(s"Using ${mod.name}")
for {
_ <- withMod(mod)
} yield (defnMap(name))
}
def indModFromMod(name: Name): Option[Task[TermIndMod]] =
findMod(name, mods.toVector).map { (mod) =>
// pprint.log(s"Using ${mod.name}")
for {
_ <- withMod(mod)
} yield (termIndModMap(name))
}
// code generation
def defNames: ArrayBuffer[Name] = mods.collect { case df: DefMod => df.name }
def allIndNames: ArrayBuffer[Name] = mods.collect {
case ind: IndMod => ind.name
}
import translation.CodeGen
def codeGen: CodeGen =
CodeGen.objNames(defNames.map(_.toString).toVector, allIndNames.map(_.toString).toVector)
def defnCode: ArrayBuffer[(Name, meta.Term)] =
defNames.flatMap { (name) =>
for {
term <- defnMap.get(name)
code <- codeGen(term)
} yield (name, code)
}
def codeFromInd(ind: TermIndMod): meta.Term = {
val p = getVariables(ind.numParams)(ind.typF).toVector
val codeOpt =
ind match {
case mod: SimpleIndMod =>
val seq =
ConstructorSeqTL
.getExst(toTyp(foldFuncLean(mod.typF, p)), introsFold(mod, p))
.value
codeGen.consSeq(seq)
case mod: IndexedIndMod =>
val indSeq =
TypFamilyExst
.getIndexedConstructorSeq(
foldFuncLean(mod.typF, p),
introsFold(mod, p)
)
.value
codeGen.indexedConsSeqDom(indSeq)
}
val cp = p.map(codeGen(_).get)
cp.foldRight(codeOpt.get) {
case (x, y) =>
meta.Term.Apply(
meta.Term.Select(meta.Term.Name("Subst"), meta.Term.Name("Lambda")),
List(x, y)
)
// q"Subst.Lambda($x, $y)"
}
}
}
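A hedged usage sketch of the parser defined above, assuming a `basic.lean.export` resource is on the classpath (see `LeanParser.load`); the name "nat.add" is only an illustrative Lean identifier.
// Sketch only: load an export file from resources and parse one definition by name.
val parser = LeanParser.load("basic")
val parsed = parser.getTry("nat.add") // "nat.add" is a hypothetical name for illustration
parsed.foreach(term => println(term.typ))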
|
siddhartha-gadgil/ProvingGround
|
mantle/src/main/scala/provingground/interface/LeanParser.scala
|
Scala
|
mit
| 26,002
|
/**
* (c) Copyright 2012 WibiData, Inc.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kiji.schema.shell.input
import java.io.Closeable
abstract class InputSource extends Closeable {
/**
* Read a line of input from the underlying source and return it,
* or return None if no more input is available.
*/
def readLine(prompt: String): Option[String]
/** Close any underlying resources. */
def close(): Unit
}
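As a hedged illustration of the contract above, a console-backed implementation might look like the following sketch (it is not part of the original module).
import scala.io.StdIn

class ConsoleInputSource extends InputSource {
  /** Print the prompt and read one line from stdin; a null read signals end of input. */
  def readLine(prompt: String): Option[String] = {
    print(prompt)
    Option(StdIn.readLine())
  }
  /** Nothing to release for the console. */
  def close(): Unit = ()
}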
|
alexandre-normand/kiji-schema-shell
|
src/main/scala/org/kiji/schema/shell/input/InputSource.scala
|
Scala
|
apache-2.0
| 1,074
|
package com.norbitltd.spoiwo.natures.csv
import com.norbitltd.spoiwo.utils.FileUtils
import com.norbitltd.spoiwo.model._
import org.joda.time.LocalDate
object Model2CsvConversions {
implicit class CsvWorkbook(wb : Workbook) {
/**
* Converts the defined workbook into the sheet name -> csv content map for all of the sheets.
* @return A sheet name -> CSV content map for each of the sheets
*/
def convertAsCsv(properties : CsvProperties = CsvProperties.Default) : Map[String, String] =
convertWorkbookToCsv(wb, properties)
def saveAsCsv(fileName : String, properties : CsvProperties = CsvProperties.Default) {
val convertedCsvData = convertAsCsv(properties)
if( wb.sheets.size <= 1 ) {
convertedCsvData.values.foreach(csvContent => FileUtils.write(fileName, csvContent))
} else {
val fileNameCore = fileName.replace(".csv", "").replace(".CSV", "")
convertedCsvData.foreach { case( sheetName, csvContent) =>
val sheetFileName = fileNameCore + "_" + sheetName + ".csv"
FileUtils.write(sheetFileName, csvContent)
}
}
}
}
implicit class CsvSheet(s : Sheet) {
def convertAsCsv(properties : CsvProperties = CsvProperties.Default) : String =
convertSheetToCsv(s, properties)._2
def saveAsCsv(fileName : String, properties : CsvProperties = CsvProperties.Default) {
Workbook(s).saveAsCsv(fileName, properties)
}
}
private def convertWorkbookToCsv(wb : Workbook, properties : CsvProperties = CsvProperties.Default) : Map[String, String] = {
require(wb.sheets.size <= 1 || wb.sheets.forall(_.name.isDefined),
"When converting workbook with multiple sheets to CSV format it is required to specify the unique name for each of them!")
wb.sheets.map(s => convertSheetToCsv(s, properties)).toMap
}
private def convertSheetToCsv(s : Sheet, properties : CsvProperties = CsvProperties.Default) : (String, String) = {
s.name.getOrElse("") -> s.rows.map(r => convertRowToCsv(r, properties)).mkString("\n")
}
private def convertRowToCsv(r : Row, properties : CsvProperties = CsvProperties.Default) : String =
r.cells.map(c => convertCellToCsv(c, properties)).mkString(properties.separator)
private def convertCellToCsv(c : Cell, properties : CsvProperties) : String = c match {
case x : StringCell => x.value
case x : NumericCell => x.value.toString
case x : BooleanCell => if(x.value) properties.defaultBooleanTrueString else properties.defaultBooleanFalseString
case x : DateCell => LocalDate.fromDateFields(x.value).toString(properties.defaultDateFormat)
case x : CalendarCell => LocalDate.fromCalendarFields(x.value).toString(properties.defaultDateFormat)
case x : FormulaCell => throw new IllegalArgumentException("Use of formulas not allowed when converting to CSV format!")
}
}
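A hedged usage sketch of the implicit classes above; `report` stands in for any Sheet already built with the spoiwo model API.
import com.norbitltd.spoiwo.model.Sheet
import com.norbitltd.spoiwo.natures.csv.Model2CsvConversions._

// Sketch only: the CsvSheet implicit class above supplies saveAsCsv on any Sheet.
def exportReport(report: Sheet): Unit =
  report.saveAsCsv("report.csv")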
|
intracer/spoiwo
|
src/main/scala/com/norbitltd/spoiwo/natures/csv/Model2CsvConversions.scala
|
Scala
|
mit
| 2,880
|
package net.scalytica.symbiotic.components
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.TagMod
import japgolly.scalajs.react.vdom.prefix_<^._
object Modal {
case class Props(
id: String,
header: Option[String] = None,
ariaLabel: String = "",
body: TagMod,
footer: Option[TagMod] = None
)
val component = ReactComponentB[Props]("Modal").stateless.render_P { p =>
<.div(
^.id := p.id,
^.className := "modal fade",
^.tabIndex := "-1",
^.role := "dialog",
"aria-labelledby".reactAttr := p.ariaLabel
)(
<.div(^.className := "modal-dialog", ^.role := "document")(
<.div(^.className := "modal-content")(
p.header
.map(
h =>
<.div(^.className := "modal-header")(
<.button(
^.`type` := "button",
^.className := "close",
"data-dismiss".reactAttr := "modal",
"aria-label".reactAttr := "Close"
)(
<.span(
"aria-hidden".reactAttr := "true",
<.i(^.className := "fa fa-times")
)
),
<.h4(^.id := s"${p.id}-label", ^.className := "modal-title")(
h
)
)
)
.getOrElse(EmptyTag),
<.div(^.className := "modal-body")(p.body),
p.footer
.map(ft => <.div(^.className := "modal-footer")(ft))
.getOrElse(EmptyTag)
)
)
)
}.build
def apply(
id: String,
header: Option[String] = None,
ariaLabel: String = "",
body: TagMod,
footer: Option[TagMod] = None
) = component(Props(id, header, ariaLabel, body, footer))
}
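A hedged usage sketch of the component above, written with the same prefix_<^ vdom syntax; the id, labels and button markup are illustrative only.
// Sketch only: a confirmation dialog built with the Modal helper defined above.
val confirmModal = Modal(
  id = "confirm-delete",
  header = Some("Confirm deletion"),
  ariaLabel = "confirm-delete-label",
  body = <.p("This action cannot be undone."),
  footer = Some(<.button(^.className := "btn btn-danger", "Delete"))
)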
|
kpmeen/symbiotic
|
examples/symbiotic-client/src/main/scala/net/scalytica/symbiotic/components/Modal.scala
|
Scala
|
apache-2.0
| 1,859
|
package org.kanyec.ast
import org.objectweb.asm.MethodVisitor
import org.objectweb.asm.Opcodes._
import org.kanyec.SymbolTable
case class DivisionExpressionNode(expression: AstNode ,operand: AstNode ) extends AstNode{
def generate(mv: MethodVisitor, symbolTable: SymbolTable) {
expression.generate(mv, symbolTable)
operand.generate(mv, symbolTable)
mv.visitInsn(IDIV)
}
}
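A hedged sketch of how this node composes with a hypothetical literal node; `IntLiteralNode` is not part of the original package and is shown only to make the emitted bytecode concrete.
// Hypothetical constant node for illustration; the real project defines its own literal nodes.
case class IntLiteralNode(value: Int) extends AstNode {
  def generate(mv: MethodVisitor, symbolTable: SymbolTable) {
    mv.visitLdcInsn(Integer.valueOf(value)) // push the constant onto the operand stack
  }
}
// DivisionExpressionNode(IntLiteralNode(10), IntLiteralNode(2)) would emit:
//   LDC 10; LDC 2; IDIV  -- leaving 5 on the operand stack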
|
nicksam112/kanyec
|
src/main/scala/org/kanyec/ast/DivisionExpressionNode.scala
|
Scala
|
apache-2.0
| 390
|
package io.pivotal.gemfire.spark.connector.internal
import io.pivotal.gemfire.spark.connector.GemFireConnectionConf
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FunSuite, Matchers}
class DefaultGemFireConnectionManagerTest extends FunSuite with Matchers with MockitoSugar {
test("DefaultGemFireConnectionFactory get/closeConnection") {
// note: connConf 1-4 share the same set of locators
val connConf1 = new GemFireConnectionConf(Seq(("host1", 1234)))
val connConf2 = new GemFireConnectionConf(Seq(("host2", 5678)))
val connConf3 = new GemFireConnectionConf(Seq(("host1", 1234), ("host2", 5678)))
val connConf4 = new GemFireConnectionConf(Seq(("host2", 5678), ("host1", 1234)))
val connConf5 = new GemFireConnectionConf(Seq(("host5", 3333)))
val props: Map[String, String] = Map.empty
val mockConnFactory: DefaultGemFireConnectionFactory = mock[DefaultGemFireConnectionFactory]
val mockConn1 = mock[DefaultGemFireConnection]
val mockConn2 = mock[DefaultGemFireConnection]
when(mockConnFactory.newConnection(connConf3.locators, props)).thenReturn(mockConn1)
when(mockConnFactory.newConnection(connConf5.locators, props)).thenReturn(mockConn2)
assert(DefaultGemFireConnectionManager.getConnection(connConf3)(mockConnFactory) == mockConn1)
// note: following 3 lines do not trigger connFactory.newConnection(...)
assert(DefaultGemFireConnectionManager.getConnection(connConf1)(mockConnFactory) == mockConn1)
assert(DefaultGemFireConnectionManager.getConnection(connConf2)(mockConnFactory) == mockConn1)
assert(DefaultGemFireConnectionManager.getConnection(connConf4)(mockConnFactory) == mockConn1)
assert(DefaultGemFireConnectionManager.getConnection(connConf5)(mockConnFactory) == mockConn2)
// connFactory.newConnection(...) were invoked only twice
verify(mockConnFactory, times(1)).newConnection(connConf3.locators, props)
verify(mockConnFactory, times(1)).newConnection(connConf5.locators, props)
assert(DefaultGemFireConnectionManager.connections.size == 3)
DefaultGemFireConnectionManager.closeConnection(connConf1)
assert(DefaultGemFireConnectionManager.connections.size == 1)
DefaultGemFireConnectionManager.closeConnection(connConf5)
assert(DefaultGemFireConnectionManager.connections.isEmpty)
}
test("DefaultGemFireConnectionFactory newConnection(...) throws RuntimeException") {
val connConf1 = new GemFireConnectionConf(Seq(("host1", 1234)))
val props: Map[String, String] = Map.empty
val mockConnFactory: DefaultGemFireConnectionFactory = mock[DefaultGemFireConnectionFactory]
when(mockConnFactory.newConnection(connConf1.locators, props)).thenThrow(new RuntimeException())
intercept[RuntimeException] { DefaultGemFireConnectionManager.getConnection(connConf1)(mockConnFactory) }
verify(mockConnFactory, times(1)).newConnection(connConf1.locators, props)
}
test("DefaultGemFireConnectionFactory close() w/ non-exist connection") {
val props: Map[String, String] = Map.empty
val mockConnFactory: DefaultGemFireConnectionFactory = mock[DefaultGemFireConnectionFactory]
val connConf1 = new GemFireConnectionConf(Seq(("host1", 1234)))
val connConf2 = new GemFireConnectionConf(Seq(("host2", 5678)))
val mockConn1 = mock[DefaultGemFireConnection]
when(mockConnFactory.newConnection(connConf1.locators, props)).thenReturn(mockConn1)
assert(DefaultGemFireConnectionManager.getConnection(connConf1)(mockConnFactory) == mockConn1)
assert(DefaultGemFireConnectionManager.connections.size == 1)
// connection does not exists in the connection manager
DefaultGemFireConnectionManager.closeConnection(connConf2)
assert(DefaultGemFireConnectionManager.connections.size == 1)
}
}
|
ysung-pivotal/incubator-geode
|
gemfire-spark-connector/gemfire-spark-connector/src/test/scala/io/pivotal/gemfire/spark/connector/internal/DefaultGemFireConnectionManagerTest.scala
|
Scala
|
apache-2.0
| 3,818
|
package com.codacy.client.stash.client.auth
import scalaj.http.HttpRequest
class TokenAuthenticator(token: String) extends Authenticator {
override def withAuthentication(request: HttpRequest): HttpRequest =
request.header("Authorization", s"Bearer $token")
}
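A hedged usage sketch of the authenticator above; the URL and token are placeholders.
import scalaj.http.Http

// Sketch only: attach a bearer token to an outgoing request and execute it.
val request = Http("https://example.com/api/resource") // placeholder endpoint
val authenticated = new TokenAuthenticator("my-api-token").withAuthentication(request)
val response = authenticated.asString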
|
codacy/stash-scala-client
|
src/main/scala/com/codacy/client/stash/client/auth/TokenAuthenticator.scala
|
Scala
|
apache-2.0
| 268
|
package com.github.mdr.graphospasm.grapheditor.figure
import org.eclipse.draw2d.MarginBorder
import org.eclipse.draw2d.FreeformLayout
import org.eclipse.draw2d.FreeformLayer
class GraphDiagramFigure extends FreeformLayer {
setLayoutManager(new FreeformLayout)
setBorder(new MarginBorder(5))
}
|
mdr/graphospasm
|
com.github.mdr.graphospasm.grapheditor/src/main/scala/com/github/mdr/graphospasm/grapheditor/figure/GraphDiagramFigure.scala
|
Scala
|
mit
| 300
|
package com.github.yoskhdia.sqscala
import com.amazonaws.services.sqs.model.QueueDoesNotExistException
import org.specs2.concurrent.ExecutionEnv
import org.specs2.mutable.Specification
import org.specs2.specification.BeforeAfterAll
import org.specs2.specification.core.Env
import scala.util.Try
import scala.concurrent.duration._
class SqsClientOperations(specEnv: Env) extends Specification with BeforeAfterAll {
private[this] var client: SqsClient = _
override def beforeAll(): Unit = client = ConfiguredSqsClient("aws.sqs")
override def afterAll(): Unit = client.shutdown()
val unit = ()
"create queue by client.queue method" in { implicit ee: ExecutionEnv =>
val queueName = QueueName("test1")
Try(client.queue(queueName, createIfNotExists = true)) must beSuccessfulTry
client.deleteQueue(queueName) must be_==(unit).await
}
"create queue by client.createQueue method" in { implicit ee: ExecutionEnv =>
val queueName = QueueName("test2")
client.createQueue(queueName) must be_==(unit).awaitFor(2.seconds) // sometimes create queue is slow.
client.deleteQueue(queueName) must be_==(unit).await
}
"get queue is failed when queue is not created yet and createIfNotExists = false" in { implicit ee: ExecutionEnv =>
val queueName = QueueName("test3")
client.queue(queueName, createIfNotExists = false) must throwA[QueueDoesNotExistException]
client.queue(queueName) must throwA[QueueDoesNotExistException]
}
}
|
yoskhdia/sqscala
|
src/test/scala/com/github/yoskhdia/sqscala/SqsClientOperations.scala
|
Scala
|
mit
| 1,475
|
package cc.factorie.app.nlp
import org.scalatest._
import cc.factorie.app.nlp.pos.{PennPosTag, PennPosDomain}
import cc.factorie.app.nlp.parse._
/** Test serialization of Document to BSON.
@author John Sullivan, Andrew McCallum
*/
class TestDocumentStore extends FlatSpec with Matchers {
def fix = new {
val doc1 = new Document("If it's your job to eat a frog, it's best to do it first thing in the morning. And If it's your job to eat two frogs, it's best to eat the biggest one first.")
DocumentAnnotatorPipeline(segment.DeterministicNormalizingTokenizer, segment.DeterministicSentenceSegmenter).process(doc1)
for (token <- doc1.tokens) token.attr += new PennPosTag(token, token.positionInSentence % PennPosDomain.size)
for (sentence <- doc1.sentences) sentence.attr += new ParseTree(sentence, Range(0, sentence.length).toArray, Range(0, sentence.length).map(_ % ParseTreeLabelDomain.length).toArray)
doc1.annotators(classOf[PennPosTag]) = this.getClass
doc1.annotators(classOf[ParseTree]) = this.getClass
}
"DocumentCubbie" should "serialize and deserialize properly" in {
val f = fix
import f._
val cubbie = new StandardDocumentCubbie() := doc1
val doc2 = cubbie.document
assert(doc1.tokens.toSeq.map(_.string) == doc2.tokens.toSeq.map(_.string))
assert(doc1.tokens.toSeq.map(_.posTag.categoryValue) == doc2.tokens.toSeq.map(_.posTag.categoryValue))
}
/*
it should "preserve document annotation metadata" in {
val f = fix
import f._
val cubbie = new StandardDocumentCubbie() := doc1
val doc2 = cubbie.document
assert(doc1.annotators.keySet == doc2.annotators.keySet)
}
*/
}
|
hlin117/factorie
|
src/test/scala/cc/factorie/app/nlp/TestDocumentStore.scala
|
Scala
|
apache-2.0
| 1,676
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.net.{URLClassLoader, URL}
import java.util.Enumeration
import java.util.concurrent.ConcurrentHashMap
import scala.collection.JavaConversions._
/**
* URL class loader that exposes the `addURL` and `getURLs` methods in URLClassLoader.
 * URLClassLoader is a subclass of ClassLoader that loads classes and resources from a search
 * path of URLs pointing to JAR files and directories; in other words, it can be used to load
 * the classes inside a given jar into memory.
*/
private[spark] class MutableURLClassLoader(urls: Array[URL], parent: ClassLoader)
extends URLClassLoader(urls, parent) {
override def addURL(url: URL): Unit = {
super.addURL(url)
}
override def getURLs(): Array[URL] = {
super.getURLs()
}
}
/**
* A mutable class loader that gives preference to its own URLs over the parent class loader
 * when loading classes and resources.
*/
private[spark] class ChildFirstURLClassLoader(urls: Array[URL], parent: ClassLoader)
extends MutableURLClassLoader(urls, null) {
private val parentClassLoader = new ParentClassLoader(parent)
/**
* Used to implement fine-grained class loading locks similar to what is done by Java 7. This
   * prevents deadlock issues when using non-hierarchical class loaders.
*
* Note that due to some issues with implementing class loaders in
   * Scala, Java 7's `ClassLoader.registerAsParallelCapable` method is not called.
*/
private val locks = new ConcurrentHashMap[String, Object]()
override def loadClass(name: String, resolve: Boolean): Class[_] = {
var lock = locks.get(name)
if (lock == null) {
val newLock = new Object()
lock = locks.putIfAbsent(name, newLock)
if (lock == null) {
lock = newLock
}
}
lock.synchronized {
try {
super.loadClass(name, resolve)
} catch {
case e: ClassNotFoundException =>
parentClassLoader.loadClass(name, resolve)
}
}
}
override def getResource(name: String): URL = {
val url = super.findResource(name)
val res = if (url != null) url else parentClassLoader.getResource(name)
res
}
override def getResources(name: String): Enumeration[URL] = {
val urls = super.findResources(name)
val res =
if (urls != null && urls.hasMoreElements()) {
urls
} else {
parentClassLoader.getResources(name)
}
res
}
override def addURL(url: URL) {
super.addURL(url)
}
}
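A hedged sketch of the intended use; both loaders are `private[spark]`, so this only illustrates behaviour from inside the org.apache.spark namespace, and the jar path and class name are placeholders.
import java.net.URL

// Sketch only: child-first loading of a class from an extra jar.
val loader = new ChildFirstURLClassLoader(
  Array(new URL("file:///tmp/extra-udfs.jar")),   // placeholder jar
  Thread.currentThread().getContextClassLoader)
val clazz = loader.loadClass("com.example.MyUdf") // resolved from the jar before the parent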
|
tophua/spark1.52
|
core/src/main/scala/org/apache/spark/util/MutableURLClassLoader.scala
|
Scala
|
apache-2.0
| 3,722
|
package de.dominicscheurer.fsautils {
import Types._
import Conversions._
import RegularExpressions._
abstract class FSM {
def isDFA = this.isInstanceOf[DFA]
def isNFA = this.isInstanceOf[NFA]
def asDFA: Option[DFA] =
if (isDFA)
Some(this.asInstanceOf[DFA])
else
None
def asNFA: Option[NFA] =
if (isNFA)
Some(this.asInstanceOf[NFA])
else
None
def toXml: scala.xml.Elem
def toPrettyXml: String = {
val printer = new scala.xml.PrettyPrinter(80, 2)
printer.format(toXml)
}
def toStringUpToDelta(
indentBeginner: String,
indentSpace: String,
alphabetDesignator: String,
alphabet: Set[Letter],
statesDesignator: String,
states: Set[State],
initialStateDesignator: String,
initialState: State,
acceptingDesignator: String,
accepting: Set[State]): String = {
val indent = indentBeginner + indentSpace
val dindent = indent + indentSpace
var sb = new StringBuilder()
sb ++= indent ++= alphabetDesignator ++= " = {"
alphabet.foreach(s => sb ++= s.name ++= ",")
sb = sb.dropRight(1 - alphabet.isEmpty)
sb ++= "}\n"
sb ++= indent ++= statesDesignator ++= " = {"
states.foreach(s => sb ++= s.toString() ++= ",")
sb = sb.dropRight(1 - states.isEmpty)
sb ++= "}\n"
sb ++= indent ++= initialStateDesignator ++= " = " ++= initialState.toString ++= "\n"
sb ++= indent ++= acceptingDesignator ++= " = {"
accepting.foreach(s => sb ++= s.toString() ++= ",")
sb = sb.dropRight(1 - accepting.isEmpty)
sb ++= "}\n"
sb.toString
}
def accepts(word: String): Boolean
def accepts(word: Word): Boolean
def accepts(word: Word, fromState: State): Boolean
def extendAlphabet(newLetters: Set[Letter]): NFA
def adjustAlphabet(other: FSM): NFA =
if (other.isDFA)
extendAlphabet(other.asDFA.get.alphabet)
else
extendAlphabet(other.asNFA.get.alphabet)
def unary_! : FSM
def * : FSM
def ++(other: DFA): FSM
def ++(other: NFA): FSM
def ++(other: FSM): FSM =
if (other isDFA)
this ++ other.asDFA.get
else
this ++ other.asNFA.get
def &(other: DFA): FSM
def &(other: NFA): FSM
def &(other: FSM): FSM =
if (other isDFA)
this & other.asDFA.get
else
this & other.asNFA.get
def |(other: DFA): FSM
def |(other: NFA): FSM
def |(other: FSM): FSM =
if (other isDFA)
this | other.asDFA.get
else
this | other.asNFA.get
def \(other: DFA): FSM
def \(other: NFA): FSM
def \(other: FSM): FSM =
if (other isDFA)
this \ other.asDFA.get
else
this \ other.asNFA.get
def ==(other: DFA): Boolean
def ==(other: NFA): Boolean
def ==(other: FSM): Boolean =
if (other isDFA)
this == other.asDFA.get
else
this == other.asNFA.get
def isEmpty: Boolean
def toRegExp: RE
}
}
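A hedged sketch of the combinator algebra above; `evenAs` and `containsAb` stand for automata built elsewhere with the library's DFA/NFA constructors, whose signatures are not shown here.
import de.dominicscheurer.fsautils.FSM

// Sketch only: combine two hypothetical machines and query the results.
def demo(evenAs: FSM, containsAb: FSM): Unit = {
  val both      = evenAs & containsAb   // intersection
  val either    = evenAs | containsAb   // union
  val onlyFirst = evenAs \ containsAb   // difference
  println(both.accepts("abab"))
  println(either.isEmpty)
  println(onlyFirst.toRegExp)
}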
|
rindPHI/FSAUtils
|
src/de/dominicscheurer/fsautils/FSM.scala
|
Scala
|
mit
| 3,617
|
package nl.htm.importer
package swordfish
import nl.htm.importer._
import org.apache.poi.ss.usermodel.WorkbookFactory
import java.io.InputStream
import org.apache.poi.ss.usermodel.Cell
import org.apache.poi.ss.usermodel.Row
import org.apache.poi.ss.usermodel.Sheet
case class SwordfishExcelSettings(in: InputStream, countries: List[(String, String)])
object Swordfish2013ExcelImporter extends Importer[SwordfishExcelSettings] {
import Swordfish2013Importer._
implicit def cellToString(cell: Cell): String = if (cell == null) "" else cell.getStringCellValue()
implicit def cellToInt(cell: Cell): Int = cell.getNumericCellValue().toInt
def doImport(settings: SwordfishExcelSettings): EventData = {
val workbook = WorkbookFactory.create(settings.in)
val total = workbook.getSheet("Total")
val headerRow = total.getRow(0)
val header = Map((for (i <- 0 to headerRow.getLastCellNum() - 1) yield (headerRow.getCell(i).getStringCellValue(), i)): _*)
val tournaments = Swordfish2013Importer.tournamentNames.map { case (id, (name, mnemonic)) => Tournament(id, name, mnemonic, "swordfish-2013-" + (if (id == "rapier") "rapier" else "default")) }
println(total.getLastRowNum())
println(settings.countries)
println("Importing participants")
val participants = for (rowIndex <- 1 to total.getLastRowNum() if total.getRow(rowIndex).getCell(0) != null) yield {
val row = total.getRow(rowIndex)
val countryNameRaw = row.getCell(header("Country")) match {
case cell: Cell => cell.getStringCellValue()
case _ => ""
}
val countryName = Swordfish2013Importer.countryReplacements.getOrElse(countryNameRaw, countryNameRaw)
println(countryName)
val country = settings.countries.find { case (_, name) => countryName == name }.map(_._1).getOrElse("")
val (clubCode, clubName) = normalizeClub(row.getCell(header("Club")))
Participant(
List(SourceId("swordfish2013", row.getCell(header("ID")).getNumericCellValue().toInt.toString)),
normalizeName(row.getCell(header("Name"))),
shortenName(normalizeName(row.getCell(header("Name")))),
clubName,
clubCode,
country,
row.getCell(header("T-Shirt")))
}
val subscriptions = tournaments.flatMap {
case t @ Tournament(_, name, _, _) =>
println("Importing tournament " + name)
val sheet = workbook.getSheet(name)
if (sheet != null) {
val poolFighterNumbers = findPoolFighterNumbers(sheet)
val subscriptions = for (rowIndex <- 2 to sheet.getLastRowNum() if sheet.getRow(rowIndex).getCell(0) != null && sheet.getRow(rowIndex).getCell(1) != null && sheet.getRow(rowIndex).getCell(5) != null) yield {
val row = sheet.getRow(rowIndex)
val (primary, xp) = Swordfish2013Importer.parseSubscriptionString(row.getCell(5))
row.getCell(1).getNumericCellValue().toInt.toString -> Subscription(primary, row.getCell(0), xp, poolFighterNumbers.get(row.getCell(0).getNumericCellValue().toInt))
}
val myParticipants = subscriptions.flatMap {
case (id, sub) =>
participants.find(_.sourceIds.head.id == id) map { p =>
sub -> p
}
}
Some(t -> myParticipants.toList)
} else {
None
}
} :+ (tournaments.find(_.id == "wrestling").get -> findWrestlers(total, header, participants))
EventData(3, participants.toList, tournaments, subscriptions.toMap)
}
def getPoolNumberFromCell(cell: Cell): Option[Int] = cell match {
case c: Cell if c.getStringCellValue().startsWith("Pool ") =>
Some(c.getStringCellValue().dropWhile(!_.isDigit).toInt)
case _ => None
}
def findPoolColumns(row: Row, columnIndex: Int, acc: Map[Int, Int] = Map()): Map[Int, Int] = {
if (columnIndex > row.getLastCellNum()) {
acc
} else {
findPoolColumns(row, columnIndex + 1, getPoolNumberFromCell(row.getCell(columnIndex)).map(poolNr => acc + (poolNr -> columnIndex)).getOrElse(acc))
}
}
def findPoolFighterNumbers(sheet: Sheet): Map[Int, Int] = {
val poolColumns = findPoolColumns(sheet.getRow(0), 0).dropRight(if (sheet.getSheetName() == "Longsword - Ladies") 1 else 0)
poolColumns.flatMap {
case (poolNr, columnIndex) =>
val options = for (i <- 2 to sheet.getLastRowNum()) yield {
val row = sheet.getRow(i)
row.getCell(columnIndex) match {
case c: Cell => Some(c.getNumericCellValue().toInt -> poolNr)
case _ => None
}
}
options.flatten.toList
} toMap
}
def findWrestlers(sheet: Sheet, header: Map[String, Int], participants: Seq[Participant]): List[(Subscription, Participant)] = {
val columnIndex = header("Wrestling")
var fighterNr = 0
val subs = for (i <- 1 to sheet.getLastRowNum() if sheet.getRow(i).getCell(columnIndex) != null) yield {
val row = sheet.getRow(i)
val id = row.getCell(header("ID")).getNumericCellValue().toInt.toString
val (primary, xp) = Swordfish2013Importer.parseSubscriptionString(row.getCell(columnIndex))
val p = participants.find(_.sourceIds.head.id == id)
p.map { p =>
fighterNr += 1
Subscription(primary, fighterNr, xp) -> p
}
}
subs.flatten.toList
}
}
|
hema-tournament-manager/htm
|
htm-importer/src/main/scala/nl/htm/importer/swordfish/Swordfish2013ExcelImporter.scala
|
Scala
|
apache-2.0
| 5,370
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.io.{DataInputStream, DataOutputStream}
import java.net.Socket
import java.nio.ByteBuffer
import java.util.Properties
import kafka.api.IntegrationTestHarness
import kafka.network.SocketServer
import kafka.utils.NotNothing
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.protocol.ApiKeys
import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse, RequestHeader, ResponseHeader}
import scala.annotation.nowarn
import scala.collection.Seq
import scala.reflect.ClassTag
abstract class BaseRequestTest extends IntegrationTestHarness {
private var correlationId = 0
// If required, set number of brokers
override def brokerCount: Int = 3
// If required, override properties by mutating the passed Properties object
protected def brokerPropertyOverrides(properties: Properties): Unit = {}
override def modifyConfigs(props: Seq[Properties]): Unit = {
props.foreach { p =>
p.put(KafkaConfig.ControlledShutdownEnableProp, "false")
brokerPropertyOverrides(p)
}
}
def anySocketServer: SocketServer = {
servers.find { server =>
val state = server.brokerState.currentState
state != NotRunning.state && state != BrokerShuttingDown.state
}.map(_.socketServer).getOrElse(throw new IllegalStateException("No live broker is available"))
}
def controllerSocketServer: SocketServer = {
servers.find { server =>
server.kafkaController.isActive
}.map(_.socketServer).getOrElse(throw new IllegalStateException("No controller broker is available"))
}
def notControllerSocketServer: SocketServer = {
servers.find { server =>
!server.kafkaController.isActive
}.map(_.socketServer).getOrElse(throw new IllegalStateException("No non-controller broker is available"))
}
def brokerSocketServer(brokerId: Int): SocketServer = {
servers.find { server =>
server.config.brokerId == brokerId
}.map(_.socketServer).getOrElse(throw new IllegalStateException(s"Could not find broker with id $brokerId"))
}
def connect(socketServer: SocketServer = anySocketServer,
listenerName: ListenerName = listenerName): Socket = {
new Socket("localhost", socketServer.boundPort(listenerName))
}
private def sendRequest(socket: Socket, request: Array[Byte]): Unit = {
val outgoing = new DataOutputStream(socket.getOutputStream)
outgoing.writeInt(request.length)
outgoing.write(request)
outgoing.flush()
}
def receive[T <: AbstractResponse](socket: Socket, apiKey: ApiKeys, version: Short)
(implicit classTag: ClassTag[T], @nowarn("cat=unused") nn: NotNothing[T]): T = {
val incoming = new DataInputStream(socket.getInputStream)
val len = incoming.readInt()
val responseBytes = new Array[Byte](len)
incoming.readFully(responseBytes)
val responseBuffer = ByteBuffer.wrap(responseBytes)
ResponseHeader.parse(responseBuffer, apiKey.responseHeaderVersion(version))
val responseStruct = apiKey.parseResponse(version, responseBuffer)
AbstractResponse.parseResponse(apiKey, responseStruct, version) match {
case response: T => response
case response =>
throw new ClassCastException(s"Expected response with type ${classTag.runtimeClass}, but found ${response.getClass}")
}
}
def sendAndReceive[T <: AbstractResponse](request: AbstractRequest,
socket: Socket,
clientId: String = "client-id",
correlationId: Option[Int] = None)
(implicit classTag: ClassTag[T], nn: NotNothing[T]): T = {
send(request, socket, clientId, correlationId)
receive[T](socket, request.api, request.version)
}
def connectAndReceive[T <: AbstractResponse](request: AbstractRequest,
destination: SocketServer = anySocketServer,
listenerName: ListenerName = listenerName)
(implicit classTag: ClassTag[T], nn: NotNothing[T]): T = {
val socket = connect(destination, listenerName)
try sendAndReceive[T](request, socket)
finally socket.close()
}
/**
* Serializes and sends the request to the given api.
*/
def send(request: AbstractRequest,
socket: Socket,
clientId: String = "client-id",
correlationId: Option[Int] = None): Unit = {
val header = nextRequestHeader(request.api, request.version, clientId, correlationId)
sendWithHeader(request, header, socket)
}
def sendWithHeader(request: AbstractRequest, header: RequestHeader, socket: Socket): Unit = {
val serializedBytes = request.serialize(header).array
sendRequest(socket, serializedBytes)
}
def nextRequestHeader[T <: AbstractResponse](apiKey: ApiKeys,
apiVersion: Short,
clientId: String = "client-id",
correlationIdOpt: Option[Int] = None): RequestHeader = {
val correlationId = correlationIdOpt.getOrElse {
this.correlationId += 1
this.correlationId
}
new RequestHeader(apiKey, apiVersion, clientId, correlationId)
}
}
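A hedged sketch of a concrete subclass using the helpers above; the ApiVersions request builder follows the usual Kafka request-builder pattern, but its exact constructor varies between versions, so treat this as illustrative.
import org.apache.kafka.common.requests.{ApiVersionsRequest, ApiVersionsResponse}

class ExampleApiVersionsRequestTest extends BaseRequestTest {
  // Sketch only: send an ApiVersions request to any live broker and return the parsed response.
  def fetchApiVersions(): ApiVersionsResponse = {
    val request = new ApiVersionsRequest.Builder().build()
    connectAndReceive[ApiVersionsResponse](request, destination = anySocketServer)
  }
}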
|
sslavic/kafka
|
core/src/test/scala/unit/kafka/server/BaseRequestTest.scala
|
Scala
|
apache-2.0
| 6,241
|
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.index
import com.typesafe.scalalogging.slf4j.Logging
import com.vividsolutions.jts.geom.{Geometry, GeometryCollection, Point, Polygon}
import org.joda.time.format.DateTimeFormat
import org.joda.time.{DateTime, DateTimeZone, Interval}
import org.locationtech.geomesa.utils.text.WKTUtils
import org.opengis.feature.simple.SimpleFeatureType
import scala.annotation.tailrec
import scala.util.parsing.combinator.RegexParsers
// A secondary index consists of interleaved elements of a composite key stored in
// Accumulo's key (row, column family, and column qualifier)
//
// A spatio-temporal index utilizes the location and the time of an entry to construct
// the secondary index.
//
// An index schema consists of the mapping of the composite key (time,space) to the three
// elements of a Accumulo key. The mapping is specified using the printf-like format string.
// The format string consists of an entry for each of the row, column family, and column
// qualifier. The entry consists of formatting directives of the composite key. A directive
// has the following form:
//
// %[formatting options]#[formatting code]
//
// The following format codes are available
// s => the separator character
// r => a random partitioner - creates shards on [0, option], that is
// (option + 1) separate partitions
// gh => geohash formatter - options are the start character and number of characters
// d => date formatter - options are any properly formed date format string
// cstr => constant string formatter
//
// An entry consists of a sequence of formatting directives with and must start with the
// separator directive. For example, the following entry format:
//
// %~#s%999#r%0,4#gh%HHmm#d
//
// specifies that the separator character is a '~', then a random partition element between 000
// and 999, then the first four characters of the geohash, then the hours and minutes of the time
// of the entry. The resulting Accumulo key element might look like "342~tmw1~1455"
//
// A full schema consists of 3 entry formatting directives separated by '::'. The following is
// an example of a fully specified index schema:
//
// %~#s%999#r%0,4#gh%HHmm#d::%~#s%4,2#gh::%~#s%6,1#gh%yyyyMMdd#d
object IndexSchema extends RegexParsers with Logging {
val minDateTime = new DateTime(0, 1, 1, 0, 0, 0, DateTimeZone.forID("UTC"))
val maxDateTime = new DateTime(9999, 12, 31, 23, 59, 59, DateTimeZone.forID("UTC"))
val everywhen = new Interval(minDateTime, maxDateTime)
val everywhere = WKTUtils.read("POLYGON((-180 -90, 0 -90, 180 -90, 180 90, 0 90, -180 90, -180 -90))").asInstanceOf[Polygon]
def somewhen(interval: Interval): Option[Interval] =
interval match {
case null => None
case i if i == everywhen => None
case _ => Some(interval)
}
def innerSomewhere(geom: Geometry): Option[Geometry] =
geom match {
case null => None
case p if p == everywhere => None
case g: Geometry => Some(g)
case _ => None
}
// This function helps catch nulls and 'entire world' polygons.
def somewhere(geom: Geometry): Option[Geometry] =
geom match {
case null => None
case gc: GeometryCollection =>
val wholeWorld = (0 until gc.getNumGeometries).foldRight(false) {
case (i, seenEverywhere) => gc.getGeometryN(i).equals(everywhere) || seenEverywhere
}
if(wholeWorld) None else Some(gc)
case g: Geometry => innerSomewhere(g)
}
val DEFAULT_TIME = new DateTime(0, DateTimeZone.forID("UTC"))
val CODE_START = "%"
val CODE_END = "#"
val GEO_HASH_CODE = "gh"
val DATE_CODE = "d"
val CONSTANT_CODE = "cstr"
val INDEX_DATA_CODE = "i"
val RANDOM_CODE = "r"
val SEPARATOR_CODE = "s"
val ID_CODE = "id"
val PART_DELIMITER = "::"
def pattern[T](p: => Parser[T], code: String): Parser[T] = CODE_START ~> p <~ (CODE_END + code)
// A separator character, typically '%~#s' would indicate that elements are to be separated
// with a '~'
  def sep = pattern("\\W".r, SEPARATOR_CODE)
// A random partitioner. '%999#r' would write a random value between 000 and 998 inclusive
  def randPartitionPattern = pattern("\\d+".r, RANDOM_CODE)
def randEncoder: Parser[PartitionTextFormatter] = randPartitionPattern ^^ {
case d => PartitionTextFormatter(d.toInt)
}
def offset = "[0-9]+".r ^^ { _.toInt }
def bits = "[0-9]+".r ^^ { _.toInt }
// A geohash encoder. '%2,4#gh' indicates that two characters starting at character 4 should
// be extracted from the geohash and written to the field
def geohashPattern = pattern((offset <~ ",") ~ bits, GEO_HASH_CODE)
def geohashEncoder: Parser[GeoHashTextFormatter] = geohashPattern ^^ {
case o ~ b => GeoHashTextFormatter(o, b)
}
// A date encoder. '%YYYY#d' would pull out the year from the date and write it to the key
  def datePattern = pattern("\\w+".r, DATE_CODE)
def dateEncoder: Parser[DateTextFormatter] = datePattern ^^ {
case t => DateTextFormatter(t)
}
// A constant string encoder. '%fname#cstr' would yield fname
// We match any string other that does *not* contain % or # since we use those for delimiters
def constStringPattern = pattern("[^%#]+".r, CONSTANT_CODE)
def constantStringEncoder: Parser[ConstantTextFormatter] = constStringPattern ^^ {
case str => ConstantTextFormatter(str)
}
// An index or data flag encoder. '%#i' is the pattern
// We match the empty string
def indexOrDataPattern = pattern("".r, INDEX_DATA_CODE)
def indexOrDataEncoder: Parser[IndexOrDataTextFormatter] = indexOrDataPattern ^^ {
case _ => IndexOrDataTextFormatter()
}
// a key element consists of a separator and any number of random partitions, geohashes, and dates
def keypart: Parser[CompositeTextFormatter] =
(sep ~ rep(randEncoder | geohashEncoder | dateEncoder | constantStringEncoder | indexOrDataEncoder)) ^^ {
case sep ~ xs => CompositeTextFormatter(xs, sep)
}
// the column qualifier must end with an ID-encoder
def cqpart: Parser[CompositeTextFormatter] =
phrase(sep ~ rep(randEncoder | geohashEncoder | dateEncoder | constantStringEncoder) ~ idEncoder) ^^ {
case sep ~ xs ~ id => CompositeTextFormatter(xs :+ id, sep)
}
// An index key is three keyparts, one for row, colf, and colq
def formatter = keypart ~ PART_DELIMITER ~ keypart ~ PART_DELIMITER ~ cqpart ^^ {
case rowf ~ PART_DELIMITER ~ cff ~ PART_DELIMITER ~ cqf => (rowf, cff, cqf)
}
// builds the encoder from a string representation
def buildKeyEncoder(sft: SimpleFeatureType, s: String): STIndexEncoder = {
val (rowf, cff, cqf) = parse(formatter, s).get
STIndexEncoder(sft, rowf, cff, cqf)
}
// extracts an entire date encoder from a key part
@tailrec
def extractDateEncoder(seq: Seq[TextFormatter], offset: Int, sepLength: Int): Option[(String, Int)] =
seq match {
case DateTextFormatter(f)::xs => Some(f,offset)
case x::xs => extractDateEncoder(xs, offset + x.numBits + sepLength, sepLength)
case Nil => None
}
// builds the date decoder to deserialize the entire date from the parts of the index key
def dateDecoderParser = keypart ~ PART_DELIMITER ~ keypart ~ PART_DELIMITER ~ cqpart ^^ {
case rowf ~ PART_DELIMITER ~ cff ~ PART_DELIMITER ~ cqf => {
// extract the per-key-portion date encoders; each is optional
val rowVals: Option[(String,Int)] = extractDateEncoder(rowf.lf, 0, rowf.sep.length)
val cfVals: Option[(String,Int)] = extractDateEncoder(cff.lf, 0, cff.sep.length)
val cqVals: Option[(String,Int)] = extractDateEncoder(cqf.lf, 0, cqf.sep.length)
// build a non-None list of these date extractors
val netVals : Iterable[(AbstractExtractor,String)] =
rowVals.map(_ match { case (f,offset) => { (RowExtractor(offset, f.length), f)}}) ++
cfVals.map(_ match { case (f,offset) => { (ColumnFamilyExtractor(offset, f.length), f)}}) ++
cqVals.map(_ match { case (f,offset) => { (ColumnQualifierExtractor(offset, f.length), f)}})
// consolidate this into a single extractor-sequence and date format
val consolidatedVals: (Seq[AbstractExtractor],String) = netVals.
foldLeft((List[AbstractExtractor](),""))((t1,t2) => t1 match { case (extractors,fs) =>
t2 match { case (extractor,f) => (extractors ++ List(extractor), fs + f)
}})
      // issue: not all schemas contain a date portion;
// for those that do, you have already parsed it;
// for those that do not, you must return None
consolidatedVals match {
case (extractors,fs) if (!extractors.isEmpty) => Some(DateDecoder(extractors, fs))
case _ => None
}
}}
def buildDateDecoder(s: String): Option[DateDecoder] = parse(dateDecoderParser, s).get
// extracts the geohash encoder from a keypart
@tailrec
def extractGeohashEncoder(seq: Seq[TextFormatter], offset: Int, sepLength: Int): (Int, (Int, Int)) =
seq match {
case GeoHashTextFormatter(off, bits)::xs => (offset, (off, bits))
case x::xs => extractGeohashEncoder(xs, offset + x.numBits + sepLength, sepLength)
case Nil => (0,(0,0))
}
// builds a geohash decoder to extract the entire geohash from the parts of the index key
def ghDecoderParser = keypart ~ PART_DELIMITER ~ keypart ~ PART_DELIMITER ~ cqpart ^^ {
case rowf ~ PART_DELIMITER ~ cff ~ PART_DELIMITER ~ cqf => {
val (roffset, (ghoffset, rbits)) = extractGeohashEncoder(rowf.lf, 0, rowf.sep.length)
val (cfoffset, (ghoffset2, cfbits)) = extractGeohashEncoder(cff.lf, 0, cff.sep.length)
val (cqoffset, (ghoffset3, cqbits)) = extractGeohashEncoder(cqf.lf, 0, cqf.sep.length)
val l = List((ghoffset, RowExtractor(roffset, rbits)),
(ghoffset2, ColumnFamilyExtractor(cfoffset, cfbits)),
(ghoffset3, ColumnQualifierExtractor(cqoffset, cqbits)))
GeohashDecoder(l.sortBy { case (off, _) => off }.map { case (_, e) => e })
}
}
def buildGeohashDecoder(s: String): GeohashDecoder = parse(ghDecoderParser, s).get
def extractIdEncoder(seq: Seq[TextFormatter], offset: Int, sepLength: Int): Int =
seq match {
case IdFormatter(maxLength)::xs => maxLength
case _ => sys.error("Id must be first element of column qualifier")
}
// An id encoder. '%15#id' would pad the id out to 15 characters
def idEncoder: Parser[IdFormatter] = pattern("[0-9]*".r, ID_CODE) ^^ {
case len if len.length > 0 => IdFormatter(len.toInt)
case _ => IdFormatter(0)
}
def idDecoderParser = keypart ~ PART_DELIMITER ~ keypart ~ PART_DELIMITER ~ cqpart ^^ {
case rowf ~ PART_DELIMITER ~ cff ~ PART_DELIMITER ~ cqf => {
val bits = extractIdEncoder(cqf.lf, 0, cqf.sep.length)
IdDecoder(Seq(ColumnQualifierExtractor(0, bits)))
}
}
def buildIdDecoder(s: String) = parse(idDecoderParser, s).get
def constStringPlanner: Parser[ConstStringPlanner] = constStringPattern ^^ {
case str => ConstStringPlanner(str)
}
def indexOrDataPlanner: Parser[IndexOrDataPlanner] = indexOrDataPattern ^^ {
case _ => IndexOrDataPlanner()
}
def randPartitionPlanner: Parser[RandomPartitionPlanner] = randPartitionPattern ^^ {
case d => RandomPartitionPlanner(d.toInt)
}
def datePlanner: Parser[DatePlanner] = datePattern ^^ {
case fmt => DatePlanner(DateTimeFormat.forPattern(fmt).withZoneUTC())
}
def geohashKeyPlanner: Parser[GeoHashKeyPlanner] = geohashPattern ^^ {
case o ~ b => GeoHashKeyPlanner(o, b)
}
def keyPlanner: Parser[KeyPlanner] =
sep ~ rep(constStringPlanner | datePlanner | randPartitionPlanner | geohashKeyPlanner | indexOrDataPlanner) <~ "::.*".r ^^ {
case sep ~ list => CompositePlanner(list, sep)
}
def buildKeyPlanner(s: String) = parse(keyPlanner, s) match {
case Success(result, _) => result
case fail: NoSuccess => throw new Exception(fail.msg)
}
def geohashColumnFamilyPlanner: Parser[GeoHashColumnFamilyPlanner] = (keypart ~ PART_DELIMITER) ~> (sep ~ rep(randEncoder | geohashEncoder | dateEncoder | constantStringEncoder)) <~ (PART_DELIMITER ~ keypart) ^^ {
case sep ~ xs => xs.find(tf => tf match {
case gh: GeoHashTextFormatter => true
case _ => false
}).map(ghtf => ghtf match {
case GeoHashTextFormatter(o, n) => GeoHashColumnFamilyPlanner(o,n)
}).get
}
def buildColumnFamilyPlanner(s: String): ColumnFamilyPlanner = parse(geohashColumnFamilyPlanner, s) match {
case Success(result, _) => result
case fail: NoSuccess => throw new Exception(fail.msg)
}
// only those geometries known to contain only point data can guarantee that
// they do not contain duplicates
def mayContainDuplicates(featureType: SimpleFeatureType): Boolean =
try {
featureType == null || featureType.getGeometryDescriptor.getType.getBinding != classOf[Point]
} catch {
case e: Exception =>
logger.warn(s"Error comparing default geometry for feature type ${featureType.getName}")
true
}
// utility method to ask for the maximum allowable shard number
def maxShard(schema: String): Int = {
val (rowf, _, _) = parse(formatter, schema).get
rowf match {
case CompositeTextFormatter(Seq(PartitionTextFormatter(numPartitions), xs@_*), sep) => numPartitions
case _ => 1 // couldn't find a matching partitioner
}
}
def getIndexEntryDecoder(s: String) = {
val geohashDecoder = buildGeohashDecoder(s)
val dateDecoder = buildDateDecoder(s)
IndexEntryDecoder(geohashDecoder, dateDecoder)
}
}
/**
* Class to facilitate the building of custom index schemas.
*
* @param separator
*/
class IndexSchemaBuilder(separator: String) {
import org.locationtech.geomesa.accumulo.index.IndexSchema._
var newPart = true
val schema = new StringBuilder()
/**
* Adds a random number, useful for sharding.
*
* @param maxValue
* @return the schema builder instance
*/
def randomNumber(maxValue: Int): IndexSchemaBuilder = append(RANDOM_CODE, maxValue)
/**
* Adds an index/data flag.
*
* @return the schema builder instance
*/
def indexOrDataFlag(): IndexSchemaBuilder = append(INDEX_DATA_CODE)
/**
* Adds a constant value.
*
* @param constant
* @return the schema builder instance
*/
def constant(constant: String): IndexSchemaBuilder = append(CONSTANT_CODE, constant)
/**
* Adds a date value.
*
* @param format format to apply to the date, equivalent to SimpleDateFormat
* @return the schema builder instance
*/
def date(format: String): IndexSchemaBuilder = append(DATE_CODE, format)
/**
* Adds a geohash value.
*
* @param offset
* @param length
* @return the schema builder instance
*/
def geoHash(offset: Int, length: Int): IndexSchemaBuilder = append(GEO_HASH_CODE, offset, ',', length)
/**
* Add an ID value.
*
* @return the schema builder instance
*/
def id(): IndexSchemaBuilder = id(-1)
/**
* Add an ID value.
*
* @param length ID will be padded to this length
* @return the schema builder instance
*/
def id(length: Int): IndexSchemaBuilder = {
if (length > 0) {
append(ID_CODE, length)
} else {
append(ID_CODE)
}
}
/**
* End the current part of the schema format. Schemas consist of (in order) key part, column
* family part and column qualifier part. The schema builder starts on the key part.
*
* The schema builder does not validate parts. This method should be called exactly two times to
* build a typical schema.
*
* @return the schema builder instance
*/
def nextPart(): IndexSchemaBuilder = {
schema.append(PART_DELIMITER)
newPart = true
this
}
/**
*
* @return the formatted schema string
*/
def build(): String = schema.toString()
override def toString(): String = build
/**
* Clears internal state
*/
def reset(): Unit = {
schema.clear()
newPart = true
}
/**
* Wraps the code in the appropriate delimiters and adds the provided values
*
* @param code
* @param values
* @return
*/
private def append(code: String, values: Any*): IndexSchemaBuilder = {
if (newPart) {
schema.append(CODE_START).append(separator).append(CODE_END).append(SEPARATOR_CODE)
newPart = false
}
schema.append(CODE_START)
values.foreach(schema.append(_))
schema.append(CODE_END).append(code)
this
}
}
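// --- Illustrative usage sketch (not part of the original IndexSchema.scala) ---
// Tracing append() above, the builder calls below produce the schema string
// "%~#s%999#r%0,3#gh%yyyyMMdd#d::%~#s%3,2#gh::%~#s%#id".
object IndexSchemaBuilderExample {
  val schema: String = new IndexSchemaBuilder("~")
    .randomNumber(999)  // row key: shard component
    .geoHash(0, 3)      // row key: first three geohash characters
    .date("yyyyMMdd")   // row key: day resolution
    .nextPart()         // start the column family part
    .geoHash(3, 2)
    .nextPart()         // start the column qualifier part
    .id()               // unpadded feature id, required at the end of the qualifier
    .build()
}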
|
drackaer/geomesa
|
geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/index/IndexSchema.scala
|
Scala
|
apache-2.0
| 17,058
|
package com.github.marklister.base64
import com.github.marklister.base64.Base64._
import utest._
object Base64TestSuite extends TestSuite {
val tests = TestSuite {
'encode1 {
assert("ABCDEFG".getBytes.toBase64 == ("QUJDREVGRw=="))
}
'encode2 {
assert("homuho".getBytes.toBase64 == ("aG9tdWhv"))
}
'encode3 {
assert("hogepiyofoobar".getBytes.toBase64 == ("aG9nZXBpeW9mb29iYXI="))
}
// Decoder tests
'decode4 {
assert("aG9nZXBpeW9mb29iYXI=".toByteArray sameElements ("hogepiyofoobar".getBytes))
}
'decode5 {
assert("+/+/+/+/".toByteArray.sameElements(("-_-_-_-_").toByteArray(base64Url)))
}
//RFC 4648 Test vectors
'testBigInt {
assert(BigInt("14fb9c03d97e", 16).toByteArray.toBase64 == ("FPucA9l+"))
}
'test7 {
val testVectors = Seq("" -> "",
"f" -> "Zg==",
"fo" -> "Zm8=",
"foo" -> "Zm9v",
"foob" -> "Zm9vYg==",
"fooba" -> "Zm9vYmE=",
"foobar" -> "Zm9vYmFy"
)
for (t <- testVectors) {
//test encoding
assert(t._1.getBytes.toBase64 == (t._2))
}
for (t <- testVectors) {
//test decoding
assert(t._2.toByteArray sameElements (t._1.getBytes))
}
}
'testunpaddedDecode {
val testVectors = Seq("" -> "",
"f" -> "Zg==",
"fo" -> "Zm8=",
"foo" -> "Zm9v",
"foob" -> "Zm9vYg==",
"fooba" -> "Zm9vYmE=",
"foobar" -> "Zm9vYmFy"
)
for (t <- testVectors) {
//test decoding
assert(t._2.reverse.dropWhile(_ == '=').reverse.toByteArray(base64Url) sameElements (t._1.getBytes))
}
}
'testBase64UrlPadding {
val testVectors = Seq("" -> "",
"f" -> "Zg%3D%3D",
"fo" -> "Zm8%3D",
"foo" -> "Zm9v",
"foob" -> "Zm9vYg%3D%3D",
"fooba" -> "Zm9vYmE%3D",
"foobar" -> "Zm9vYmFy"
)
for (t <- testVectors) {
//test encoding
assert(t._1.getBytes.toBase64(base64Url) == t._2)
}
}
'testBase64UrlDecoding {
val testVectors = Seq("" -> "",
"f" -> "Zg%3D%3D",
"fo" -> "Zm8%3D",
"foo" -> "Zm9v",
"foob" -> "Zm9vYg%3D%3D",
"fooba" -> "Zm9vYmE%3D",
"foobar" -> "Zm9vYmFy"
)
for (t <- testVectors) {
//test encoding
assert(t._2.toByteArray(base64Url) sameElements (t._1.getBytes))
}
}
}
}
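// --- Illustrative usage sketch (not part of the original test file) ---
// The same implicit syntax exercised by the tests above, used outside utest; the values
// come from the RFC 4648 vectors in 'test7.
object Base64Example extends App {
  import com.github.marklister.base64.Base64._
  println("foobar".getBytes.toBase64)                   // Zm9vYmFy
  println(new String("Zm9vYmFy".toByteArray, "UTF-8"))  // foobar
}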
|
marklister/base64
|
shared/src/test/scala/Base64Spec.scala
|
Scala
|
bsd-2-clause
| 2,484
|
def f(f: => Int) = {}
println(/* applicable: false */f {})
println(/* */f {1})
|
ilinum/intellij-scala
|
testdata/resolve2/function/block/BlockToFunctionInt.scala
|
Scala
|
apache-2.0
| 79
|
package fr.renoux.gaston.model
import fr.renoux.gaston.util.Context
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
class ScheduleSpec extends AnyFlatSpec with Matchers {
import fr.renoux.gaston.SimpleTestModel.Persons._
import fr.renoux.gaston.SimpleTestModel.Problems._
import fr.renoux.gaston.SimpleTestModel.Slots._
import fr.renoux.gaston.SimpleTestModel.Solutions._
import fr.renoux.gaston.SimpleTestModel.Topics._
private implicit val problem: Problem = Complete
private implicit val context: Context = Context.Default
private val Simple = Schedule.from(
Morning(
Acting(Arthur, Bianca),
Cooking(Corwin, Daniela)
),
AfterNoon(
Bathing(Arthur, Bianca),
Dancing(Corwin, Daniela)
)
)
"planning" should "work" in {
Best.planning should be(Map(
Morning -> Set(Acting, Dancing, Grinding),
AfterNoon -> Set(Bathing, Eating, Helping),
Evening -> Set(Cooking, Fighting, Inking)
))
}
"topicToSlot" should "work" in {
Best.topicToSlot should be(Map(
Acting -> Morning,
Dancing -> Morning,
Grinding -> Morning,
Bathing -> AfterNoon,
Eating -> AfterNoon,
Helping -> AfterNoon,
Cooking -> Evening,
Fighting -> Evening,
Inking -> Evening
))
}
/*
"personsBySlot" should "work" in {
Best.personsBySlot should be(Map(
Morning -> Set(Arthur, Iago, Hercule, Daniela, Corwin, Bianca, Garion, Fiona),
AfterNoon -> Set(Bianca, Arthur, Eric, Daniela, Corwin, Hercule, Garion, Fiona),
Evening -> Set(Corwin, Bianca, Fiona, Eric, Daniela, Iago, Hercule, Garion)
))
} */
"personsByTopic" should "work" in {
Best.personsByTopic should be(Map(
Acting -> Set(Arthur, Iago, Hercule),
Dancing -> Set(Daniela, Corwin, Bianca),
Grinding -> Set(Garion, Fiona),
Bathing -> Set(Bianca, Arthur),
Eating -> Set(Eric, Daniela, Corwin),
Helping -> Set(Hercule, Garion, Fiona),
Cooking -> Set(Corwin, Bianca),
Fighting -> Set(Fiona, Eric, Daniela),
Inking -> Set(Iago, Hercule, Garion)
))
}
"personGroups" should "work" in {
Best.personGroups.toSet should be(Set(
Set(Arthur, Iago, Hercule),
Set(Daniela, Corwin, Bianca),
Set(Garion, Fiona),
Set(Bianca, Arthur),
Set(Eric, Daniela, Corwin),
Set(Hercule, Garion, Fiona),
Set(Corwin, Bianca),
Set(Fiona, Eric, Daniela),
Set(Iago, Hercule, Garion)
))
}
"add" should "replace an existing record if one already exists for that slot and topic" in {
Simple.add(Record(Morning, Acting, Set(Eric, Fiona))) should be(Schedule.from(
Morning(
Acting(Eric, Fiona),
Cooking(Corwin, Daniela)
),
AfterNoon(
Bathing(Arthur, Bianca),
Dancing(Corwin, Daniela)
)
))
}
it should "add a new record with an existing slot and a new topic" in {
Simple.add(Record(Morning, Eating, Set(Eric, Fiona))) should be(Schedule.from(
Morning(
Acting(Arthur, Bianca),
Cooking(Corwin, Daniela),
Eating(Eric, Fiona)
),
AfterNoon(
Bathing(Arthur, Bianca),
Dancing(Corwin, Daniela)
)
))
}
it should "add a new record with a new slot and a new topic" in {
Simple.add(Record(Evening, Eating, Set(Eric, Fiona))) should be(Schedule.from(
Morning(
Acting(Arthur, Bianca),
Cooking(Corwin, Daniela)
),
AfterNoon(
Bathing(Arthur, Bianca),
Dancing(Corwin, Daniela)
),
Evening(
Eating(Eric, Fiona)
)
))
}
/*
"merge" should "work with two schedules" in {
val simple2 = Schedule(
AfterNoon(
Bathing(Arthur, Eric), //new person on existing topic
Eating(Fiona, Garion) //new topic on existing slot
),
Evening( //new slot
Fighting(Arthur, Bianca)
)
)
val result = Schedule(
Morning(
Acting(Arthur, Bianca),
Cooking(Corwin, Daniela)
),
AfterNoon(
Bathing(Arthur, Bianca, Eric), //new person on existing topic
Dancing(Corwin, Daniela),
Eating(Fiona, Garion) //new topic on existing slot
),
Evening( //new slot
Fighting(Arthur, Bianca)
),
)
Simple.merge(simple2) should be(result)
simple2.merge(Simple) should be(result)
}
it should "work with an empty schedule" in {
Schedule.empty.merge(Best) should be(Best)
Best.merge(Schedule.empty) should be(Best)
} */
"addPersonToExistingTopic" should "add someone to an existing topic" in {
Simple.addPersonToExistingTopic(Morning, Acting, Eric) should be(Schedule.from(
Morning(
Acting(Arthur, Bianca, Eric),
Cooking(Corwin, Daniela)
),
AfterNoon(
Bathing(Arthur, Bianca),
Dancing(Corwin, Daniela)
)
))
}
it should "not do anything with a non-existing topic" in {
Simple.addPersonToExistingTopic(Morning, Grinding, Eric) should be(Simple)
}
"swapPersons" should "swap two persons on the same slot" in {
Simple.swapPersons(Morning, Acting -> Arthur, Cooking -> Daniela) should be(Schedule.from(
Morning(
Acting(Daniela, Bianca),
Cooking(Corwin, Arthur)
),
AfterNoon(
Bathing(Arthur, Bianca),
Dancing(Corwin, Daniela)
)
))
}
/*
it should "not do anything if the topics are missing" in {
Simple.swapPersons(Morning, Grinding -> Arthur, Cooking -> Daniela) should be (Simple)
}
it should "not do anything if the topics are wrong" in {
Simple.swapPersons(Morning, Cooking -> Arthur, Cooking -> Daniela) should be (Simple)
}
it should "not do anything if the slot is missing" in {
Simple.swapPersons(Evening, Acting -> Arthur, Cooking -> Daniela) should be(Simple)
}
*/
"movePerson" should "move a person from a source topic to a destination topic" in {
Simple.movePerson(Morning, Acting, Cooking, Arthur) should be(Schedule.from(
Morning(
Acting(Bianca),
Cooking(Corwin, Daniela, Arthur)
),
AfterNoon(
Bathing(Arthur, Bianca),
Dancing(Corwin, Daniela)
)
))
}
/*
it should "not do anything if the topics are missing" in {
Simple.movePerson(Morning, Acting, Grinding, Arthur) should be (Simple)
Simple.movePerson(Morning, Grinding, Cooking, Arthur) should be (Simple)
}
it should "not do anything if the topics are wrong" in {
Simple.movePerson(Morning, Cooking, Acting, Arthur) should be(Simple)
}*/
/*
it should "not do anything if the slot is missing" in {
Simple.movePerson(Evening, Acting, Cooking, Arthur) should be(Simple)
}*/
"isSound" should "validate a correct schedule" in {
Simple.isSound should be(true)
Best.isSound should be(true)
}
it should "reject a schedule demanding ubiquity" in {
Schedule.from(
Morning(
Acting(Arthur, Bianca),
Cooking(Arthur, Daniela)
)
).isSound should be(false)
}
it should "reject a schedule with the same topic several times" in {
Schedule.from(
Morning(
Acting(Arthur),
Cooking(Corwin)
),
AfterNoon(
Acting(Corwin),
Bathing(Arthur)
)
).isSound should be(false)
}
}
|
gaelrenoux/gaston
|
src/test/scala/fr/renoux/gaston/model/ScheduleSpec.scala
|
Scala
|
apache-2.0
| 7,406
|
/*
* Copyright 2018 Vladimir Konstantinov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.illfaku.korro.internal.server
import com.github.illfaku.korro.config.ServerConfig
import com.github.illfaku.korro.internal.common.{HttpInstructions, HttpLoggingHandler}
import akka.actor.ActorRef
import io.netty.channel.ChannelInitializer
import io.netty.channel.socket.SocketChannel
import io.netty.handler.codec.http.HttpServerCodec
import io.netty.handler.logging.{LogLevel, LoggingHandler}
private[server] class HttpChannelInitializer(
parent: ActorRef,
config: ServerConfig,
instructions: HttpInstructions
) extends ChannelInitializer[SocketChannel] {
override def initChannel(ch: SocketChannel): Unit = {
ch.pipeline.addLast("netty-http-codec", new HttpServerCodec)
ch.pipeline.addLast("netty-logger", new LoggingHandler(config.nettyLogger, LogLevel.TRACE))
ch.pipeline.addLast("korro-http-handler", new HttpChannelHandler(parent, instructions))
}
}
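// --- Illustrative usage sketch (not part of the original file) ---
// Plugging the initializer into a plain Netty bootstrap. The parent ActorRef, ServerConfig
// and HttpInstructions values are placeholders supplied by the surrounding korro module,
// and the port is an assumption. Kept in the same package because the class is private[server].
private[server] object HttpChannelInitializerExample {
  import io.netty.bootstrap.ServerBootstrap
  import io.netty.channel.nio.NioEventLoopGroup
  import io.netty.channel.socket.nio.NioServerSocketChannel
  def bind(parent: ActorRef, config: ServerConfig, instructions: HttpInstructions): Unit = {
    new ServerBootstrap()
      .group(new NioEventLoopGroup())
      .channel(classOf[NioServerSocketChannel])
      .childHandler(new HttpChannelInitializer(parent, config, instructions))
      .bind(8080)
  }
}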
|
yet-another-cafebabe/korro
|
src/main/scala/com/github/illfaku/korro/internal/server/HttpChannelInitializer.scala
|
Scala
|
lgpl-3.0
| 1,506
|
package promisering
import unfiltered.netty.cycle.Planify
import unfiltered.netty.Http
import java.net.URL
object Server {
def main(args: Array[String]) {
Http(8080)
.resources(new URL(getClass().getResource("/www/robots.txt"), "."))
.handler(Planify(Github.oauth orElse Rings.pages)).run
}
}
|
softprops/promise-ring
|
src/main/scala/server.scala
|
Scala
|
mit
| 315
|
package compiler
import java.io.File
import java.lang.{Boolean => JBoolean}
import java.util.Optional
import java.util.function.{Supplier, Function => JFunction}
import ch.qos.logback.classic.Logger
import core.cache.FsbtCache
import core.config.FsbtModule
import core.config.FsbtModule.{FsbtProjectName, FsbtProjectRef}
import sbt.internal.inc.javac.{JavaCompiler, JavaTools, Javadoc}
import sbt.internal.inc.{AnalyzingCompiler, ZincUtil}
import xsbti._
import xsbti.compile.{IncOptions, _}
class ZincCompiler {
private lazy val cp: IncrementalCompiler = ZincCompilerUtil.defaultIncrementalCompiler()
private val positionMapper =
new JFunction[Position, Position] {
override def apply(p: Position): Position = p
}
private val reporter =
ReporterUtil.getDefault(
ReporterConfig.create(
"",
Int.MaxValue,
true,
Array.empty[JFunction[String, JBoolean]],
Array.empty[JFunction[java.nio.file.Path, JBoolean]],
java.util.logging.Level.SEVERE,
positionMapper
)
)
private def zincLogger(moduleName: FsbtProjectRef)(implicit logger: Logger) = new xsbti.Logger {
override def debug(msg: Supplier[String]): Unit = ()
// logger.debug(msg.get())
override def error(msg: Supplier[String]): Unit = logger.error(s"[$moduleName] ${msg.get}")
override def warn(msg: Supplier[String]): Unit = ()
// logger.warn(msg.get())
override def trace(exception: Supplier[Throwable]): Unit = logger.trace(exception.get().getMessage, exception.get())
override def info(msg: Supplier[String]): Unit = logger.info(s"[$moduleName] ${msg.get}")
}
// TODO: consider caching mini setup between launches, so that we don't get a fresh compilation with each launch
def setup(implicit logger: Logger): Setup = Setup.create(
getPerClasspathEntryLookup,
false,
new File(FsbtModule.zincCache),
CompilerCache.fresh,
IncOptions.create(),
reporter,
Optional.empty(),
Array.empty)
def compilers(projectName: FsbtProjectName)(implicit logger: Logger): Compilers = Compilers.create(compiler(projectName), javaTools)
def compile(classPath: Array[File], sourceFiles: Array[File], config: FsbtModule)(implicit logger: Logger): Option[CompileResult] = {
val previousResult = FsbtCache.getCompileResult(config)
val inputs = Inputs.create(
compilers(config.projectName),
CompileOptions.create()
.withClasspath(classPath)
.withClassesDirectory(config.target.toJava)
.withSources(sourceFiles),
setup,
previousResult)
// try{
val sourceString = sourceFiles.foldLeft("")((x, y) => s"$x ${y.getPath}")
val cr = cp.compile(inputs, zincLogger(config.projectName))
if (cr.hasModified) {
FsbtCache.updateCache(config, cr)
}
Some(cr)
// }catch{
// case ex: Exception => logger.debug("FKC", ex)
// None
// }
}
private def getPerClasspathEntryLookup(implicit logger: Logger) = new PerClasspathEntryLookup {
override def definesClass(classpathEntry: File): DefinesClass = (className: String) => {
// logger.debug(s"checking $className on classpath")
true
}
override def analysis(classpathEntry: File): Optional[CompileAnalysis] = Optional.empty()
}
private def getBridge(projectName: FsbtProjectName)(implicit logger: Logger) = {
val qq = ZincUtil.constantBridgeProvider(ScalaLocator.scalaInstance, ScalaLocator.getJar("compiler-bridge"))
qq.fetchCompiledBridge(ScalaLocator.scalaInstance, zincLogger(projectName))
}
def compiler(projectName: FsbtProjectName)(implicit logger: Logger): AnalyzingCompiler = ZincUtil.scalaCompiler(ScalaLocator.scalaInstance, getBridge(projectName))
def javaTools = JavaTools(JavaCompiler.fork(), Javadoc.fork())
}
|
Humblehound/fsbt
|
server/src/main/scala/compiler/ZincCompiler.scala
|
Scala
|
mit
| 3,840
|
package messages.parser
abstract class AST
abstract class Data extends AST
abstract class Subjects extends AST
abstract class Operation extends AST
case class Entity(value: String) extends AST
case class Content(entity: Entity, op: String) extends Data
case class To(recipients: Seq[Entity]) extends Subjects
case class Send(entities: Seq[Content], to: To) extends Operation
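// --- Illustrative usage sketch (not part of the original file) ---
// Constructing the AST for a message that sends one piece of content to two recipients;
// the "text" operation string is a hypothetical value, not defined by this file.
object AstExample {
  val send: Send = Send(
    entities = Seq(Content(Entity("hello world"), "text")),
    to = To(Seq(Entity("alice"), Entity("bob")))
  )
}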
|
lymr/fun-chat
|
fun-chat-server/src/main/scala/messages/parser/AST.scala
|
Scala
|
mit
| 382
|
// See LICENSE.txt for license details.
package examples
import chisel3._
class LogShifter extends Module {
val io = IO(new Bundle {
val in = Input(UInt(16.W))
val shamt = Input(UInt(4.W))
val out = Output(UInt(16.W))
})
val s0 = RegInit(0.U(16.W))
when (io.shamt(3) === 1.U) {
s0 := io.in << 8.U
} .otherwise {
s0 := io.in
}
val s1 = RegInit(0.U(16.W))
when (io.shamt(2) === 1.U) {
s1 := s0 << 4.U
} .otherwise {
s1 := s0
}
val s2 = RegInit(0.U(16.W))
when (io.shamt(1) === 1.U) {
s2 := s1 << 2.U
} .otherwise {
s2 := s1
}
  // final stage: shift by 1 when bit 0 of shamt is set (the original checked bit 1 twice)
  when (io.shamt(0) === 1.U) {
io.out := s2 << 1.U
} .otherwise {
io.out := s2
}
}
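// --- Illustrative sketch (not part of the original file) ---
// Elaborating the module to Verilog. Assumes a chisel3 version that ships
// chisel3.stage.ChiselStage; older tutorial code uses chisel3.Driver instead.
object LogShifterMain extends App {
  println((new chisel3.stage.ChiselStage).emitVerilog(new LogShifter))
}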
|
timtian090/Playground
|
chiselTutorial/src/main/scala/examples/LogShifter.scala
|
Scala
|
mit
| 690
|
package me.eax.examples.thrift
import java.io.ByteArrayOutputStream
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import me.eax.examples.thrift.game._
import org.apache.thrift.protocol.TSimpleJSONProtocol
import org.apache.thrift.transport.TIOStreamTransport
object Main extends App {
def heroToJson(hero: Hero): String = {
val out = new ByteArrayOutputStream()
hero.write(new TSimpleJSONProtocol(new TIOStreamTransport(out)))
new String(ByteBuffer.wrap(out.toByteArray).array(), StandardCharsets.UTF_8)
}
val mage = Hero(
name = "afiskon", hp = 25L, xp = 1024L,
ClassSpecificInfo.Mage(MageInfo(Set(Spell.Thunderbolt, Spell.Fireball), mana = 100L))
)
val warrior = Hero(
name = "eax", hp = 50L, xp = 256L,
ClassSpecificInfo.Warrior(WarriorInfo(Some(Weapon.Sword), 0L))
)
println(s"mage = $mage")
println(s"warrior = $warrior")
println(s"weapons = ${Weapon.list}, spells = ${Spell.list}")
println(s"mageJson = ${heroToJson(mage)}")
println(s"warriorJson = ${heroToJson(warrior)}")
}
|
afiskon/scala-thrift-example
|
src/main/scala/me/eax/examples/thrift/Main.scala
|
Scala
|
mit
| 1,060
|
/*
Author: uberset
Date: 2015-12-26
Licence: GPL v2
*/
package uberset.l1_compiler
import java.io.PrintWriter
class GeneratorRpn(
val out: PrintWriter
) extends Generator {
override def begin(): Unit = out.print("Program(")
override def end(): Unit = out.print(")")
override def library(): Unit = ()
override def printStr(): Unit = out.print("printStr ")
override def printInt(): Unit = out.print("printInt ")
override def printChr(): Unit = out.print("printChr ")
override def printBoo(): Unit = out.print("printBoo ")
override def printLn(): Unit = out.print("printLn ")
override def pushStr(v: String): Unit = out.print(s"Str($v) ")
override def pushInt(v: String): Unit = out.print(s"Int($v) ")
override def pushChr(v: Char): Unit = out.print(s"Chr(${v.toInt}) ")
override def pushBoo(v: Boolean): Unit = out.print(s"Boo($v) ")
override def negI(): Unit = out.print("negI ")
override def mulI(): Unit = out.print("mulI ")
override def divI(): Unit = out.print("divI ")
override def modI(): Unit = out.print("modI ")
override def addI(): Unit = out.print("addI ")
override def subI(): Unit = out.print("subI ")
}
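// --- Illustrative usage sketch (not part of the original file) ---
// Emitting the RPN form of "1 + 2"; given the overrides above, the generator
// writes "Program(Int(1) Int(2) addI )" to the supplied PrintWriter.
object GeneratorRpnExample extends App {
  val gen = new GeneratorRpn(new java.io.PrintWriter(System.out, true))
  gen.begin()
  gen.pushInt("1")
  gen.pushInt("2")
  gen.addI()
  gen.end()
  gen.out.flush()
}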
|
uberset/L1-compiler
|
src/main/scala/uberset/l1_compiler/GeneratorRpn.scala
|
Scala
|
gpl-2.0
| 1,206
|
package com.twitter.finagle.protobuf.rpc.channel
// NOTE: imports below are presumed from the finagle-protobuf / netty 3 APIs used in this file
import com.google.protobuf.{Message, Service}
import com.twitter.conversions.storage._
import com.twitter.finagle.{Codec, CodecFactory}
import org.jboss.netty.channel.{ChannelPipelineFactory, Channels}
class ProtoBufCodec(val service: Service) extends CodecFactory[(String, Message), (String, Message)] {
val maxFrameSize = 1.megabytes.inBytes.intValue
val repo = SimpleMethodLookup(service)
def server = Function.const {
new Codec[(String, Message), (String, Message)] {
def pipelineFactory = new ChannelPipelineFactory {
def getPipeline = {
val pipeline = Channels.pipeline()
pipeline.addLast("decoder", new ServerSideDecoder(repo, service))
pipeline.addLast("encoder", new CustomProtobufEncoder(repo));
pipeline
}
}
}
}
def client = Function.const {
new Codec[(String, Message), (String, Message)] {
def pipelineFactory = new ChannelPipelineFactory {
def getPipeline = {
val pipeline = Channels.pipeline()
pipeline.addLast("encoder", new CustomProtobufEncoder(repo))
pipeline.addLast("decoder",
new ClientSideDecoder(repo, service))
pipeline
}
}
}
}
}
|
firebase/finagle
|
finagle-protobuf/src/main/scala/com/twitter/finagle/protobuf/rpc/channel/ProtoBufCodec.scala
|
Scala
|
apache-2.0
| 1,085
|
package org.psliwa.idea.composerJson.intellij
import com.intellij.codeInsight.completion.{CompletionParameters, InsertHandler, InsertionContext}
import com.intellij.codeInsight.lookup.LookupElement
import com.intellij.patterns.PsiElementPattern
import com.intellij.psi.PsiElement
import com.intellij.util.ProcessingContext
import org.psliwa.idea.composerJson.util.CharOffsetFinder._
import org.psliwa.idea.composerJson.util.OffsetFinder.ImplicitConversions._
package object codeAssist {
private[codeAssist] val EmptyNamePlaceholder = org.psliwa.idea.composerJson.EmptyPsiElementNamePlaceholder
type Capture = PsiElementPattern.Capture[_ <: PsiElement]
private[codeAssist] type InsertHandlerFinder = BaseLookupElement => Option[InsertHandler[LookupElement]]
private[codeAssist] type LookupElements = CompletionParameters => Iterable[BaseLookupElement]
private val autoPopupCondition = (context: InsertionContext) => {
val text = context.getEditor.getDocument.getCharsSequence
ensure('"' || ' ')(context.getEditor.getCaretModel.getOffset - 1)(text).isDefined
}
private[codeAssist] val StringPropertyValueInsertHandler =
    new AutoPopupInsertHandler(Some(new PropertyValueInsertHandler("\"\"")), autoPopupCondition)
private[codeAssist] val ObjectPropertyValueInsertHandler =
new AutoPopupInsertHandler(Some(new PropertyValueInsertHandler("{}")), autoPopupCondition)
private[codeAssist] val ArrayPropertyValueInsertHandler =
new AutoPopupInsertHandler(Some(new PropertyValueInsertHandler("[]")), autoPopupCondition)
private[codeAssist] val EmptyPropertyValueInsertHandler =
new AutoPopupInsertHandler(Some(new PropertyValueInsertHandler("")), autoPopupCondition)
}
|
psliwa/idea-composer-plugin
|
src/main/scala/org/psliwa/idea/composerJson/intellij/codeAssist/package.scala
|
Scala
|
mit
| 1,708
|
package sri.mobile.apis.ios
import scala.scalajs.js
@js.native
trait LinkingIOS extends js.Object {
def addEventListener(tpe: String, handler: js.Function): Unit = js.native
def removeEventListener(tpe: String, handler: js.Function): Unit = js.native
def openURL(url: String): Unit = js.native
def popInitialURL(): Unit = js.native
def canOpenURL(url: String, callback: js.Function): Unit = js.native
}
|
chandu0101/sri
|
mobile/src/main/scala/sri/mobile/apis/ios/LinkingIOS.scala
|
Scala
|
apache-2.0
| 421
|
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.connector.datasource
import quasar.api.resource.ResourcePathType
trait PhysicalDatasource[F[_], G[_], Q, R] extends Datasource[F, G, Q, R, ResourcePathType.Physical]
|
slamdata/quasar
|
connector/src/main/scala/quasar/connector/datasource/PhysicalDatasource.scala
|
Scala
|
apache-2.0
| 788
|
package lila.swiss
import com.softwaremill.macwire._
import play.api.Configuration
import scala.concurrent.duration._
import lila.common.config._
import lila.common.{ AtMost, Every, ResilientScheduler }
import lila.socket.Socket.{ GetVersion, SocketVersion }
@Module
final class Env(
appConfig: Configuration,
db: lila.db.Db,
gameRepo: lila.game.GameRepo,
userRepo: lila.user.UserRepo,
onStart: lila.round.OnStart,
remoteSocketApi: lila.socket.RemoteSocket,
chatApi: lila.chat.ChatApi,
cacheApi: lila.memo.CacheApi,
lightUserApi: lila.user.LightUserApi,
historyApi: lila.history.HistoryApi,
gameProxyRepo: lila.round.GameProxyRepo,
roundSocket: lila.round.RoundSocket,
mongoCache: lila.memo.MongoCache.Api,
baseUrl: lila.common.config.BaseUrl
)(implicit
ec: scala.concurrent.ExecutionContext,
system: akka.actor.ActorSystem,
mat: akka.stream.Materializer,
idGenerator: lila.game.IdGenerator,
mode: play.api.Mode
) {
private val colls = wire[SwissColls]
private val sheetApi = wire[SwissSheetApi]
private lazy val rankingApi: SwissRankingApi = wire[SwissRankingApi]
val trf: SwissTrf = wire[SwissTrf]
private val pairingSystem = new PairingSystem(trf, rankingApi, appConfig.get[String]("swiss.bbpairing"))
private val scoring = wire[SwissScoring]
private val director = wire[SwissDirector]
private val boardApi = wire[SwissBoardApi]
private val statsApi = wire[SwissStatsApi]
lazy val verify = wire[SwissCondition.Verify]
val api: SwissApi = wire[SwissApi]
lazy val roundPager = wire[SwissRoundPager]
private def teamOf = api.teamOf _
private lazy val socket = wire[SwissSocket]
def version(swissId: Swiss.Id): Fu[SocketVersion] =
socket.rooms.ask[SocketVersion](swissId.value)(GetVersion)
lazy val standingApi = wire[SwissStandingApi]
lazy val json = wire[SwissJson]
lazy val forms = wire[SwissForm]
lazy val feature = wire[SwissFeature]
private lazy val cache: SwissCache = wire[SwissCache]
lazy val getName = new GetSwissName(cache.name.sync)
private lazy val officialSchedule = wire[SwissOfficialSchedule]
lila.common.Bus.subscribeFun(
"finishGame",
"adjustCheater",
"adjustBooster",
"teamKick"
) {
case lila.game.actorApi.FinishGame(game, _, _) => api.finishGame(game).unit
case lila.hub.actorApi.team.KickFromTeam(teamId, userId) => api.kickFromTeam(teamId, userId).unit
case lila.hub.actorApi.mod.MarkCheater(userId, true) => api.kickLame(userId).unit
case lila.hub.actorApi.mod.MarkBooster(userId) => api.kickLame(userId).unit
}
ResilientScheduler(
every = Every(1 seconds),
timeout = AtMost(20 seconds),
initialDelay = 20 seconds
) { api.startPendingRounds }
ResilientScheduler(
every = Every(10 seconds),
timeout = AtMost(15 seconds),
initialDelay = 20 seconds
) { api.checkOngoingGames }
ResilientScheduler(
every = Every(1 hour),
timeout = AtMost(15 seconds),
initialDelay = 5 minutes
) { officialSchedule.generate }
}
private class SwissColls(db: lila.db.Db) {
val swiss = db(CollName("swiss"))
val player = db(CollName("swiss_player"))
val pairing = db(CollName("swiss_pairing"))
}
|
luanlv/lila
|
modules/swiss/src/main/Env.scala
|
Scala
|
mit
| 3,268
|
package org.dele.misc
import java.io.{BufferedInputStream, BufferedOutputStream, FileInputStream, FileOutputStream}
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream
import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream
import org.apache.commons.io.IOUtils
/**
* Created by jiaji on 11/26/2016.
*/
object TgzDecompressTest extends App {
  val bfFileInputStream = new BufferedInputStream(new FileInputStream("E:\\VMShare\\malware-161126-12.tgz"))
val tarIn = new TarArchiveInputStream(new GzipCompressorInputStream(bfFileInputStream))
var tarEntry = tarIn.getNextEntry
var tarEntryIdx = 0
while (tarEntry != null) {
val fileOrDir = if (tarEntry.isDirectory) "DIR" else "FILE"
println(s"Extracting [${tarEntry.getName}]($fileOrDir)")
if (!tarEntry.isDirectory) {
      val bfos = new BufferedOutputStream(new FileOutputStream(f"E:\\VMShare\\tmp\\$tarEntryIdx%04d.json"))
val bufSize = 4096
val buf = new Array[Byte](bufSize)
var cnt = tarIn.read(buf, 0, bufSize)
while (cnt != -1) {
bfos.write(buf, 0, cnt)
cnt = tarIn.read(buf, 0, bufSize)
}
bfos.close()
}
tarEntry = tarIn.getNextEntry
tarEntryIdx = tarEntryIdx + 1
}
tarIn.close()
}
|
new2scala/text-util
|
misc/src/test/scala/org/dele/misc/TgzDecompressTest.scala
|
Scala
|
apache-2.0
| 1,278
|
/*
* Copyright (C) 2010 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.core.workflow.task
import org.openmole.core.tools.service.{ Logger, Random }
import org.openmole.core.workflow.data._
import org.openmole.core.serializer.plugin._
import org.openmole.core.workflow.tools._
import org.openmole.core.workspace.{ Workspace, ConfigurationLocation }
import org.openmole.core.tools.service._
object Task extends Logger {
val OpenMOLEVariablePrefix = new ConfigurationLocation("Task", "OpenMOLEVariablePrefix")
Workspace += (OpenMOLEVariablePrefix, "oM")
def prefixedVariable(name: String) = Workspace.preference(OpenMOLEVariablePrefix) + name
val openMOLESeed = Prototype[Long](prefixedVariable("Seed"))
def buildRNG(context: Context) = Random.newRNG(context(Task.openMOLESeed))
}
trait Task <: InputOutputCheck {
/**
*
* Perform this task.
*
* @param context the context in which the task will be executed
*/
def perform(context: Context): Context = perform(context, process)
protected def process(context: Context): Context
/**
*
* Get the name of the task.
*
* @return the name of the task
*/
def name: String
/**
*
* Get the input data of the task.
*
* @return the input of the task
*/
def inputs: DataSet
/**
*
* Get the output data of the task.
*
* @return the output data of the task
*/
def outputs: DataSet
/**
*
* Get all the defaults configured for this task.
*
* @return the defaults configured for this task.
*/
def defaults: DefaultSet
override def toString: String = name
}
|
ISCPIF/PSEExperiments
|
openmole-src/openmole/core/org.openmole.core.workflow/src/main/scala/org/openmole/core/workflow/task/Task.scala
|
Scala
|
agpl-3.0
| 2,275
|
package julienrf.course.gameoflife
import javax.swing.JFrame
import doodle.core.{Circle, Image}
import doodle.jvm.DoodlePanel
object Ui {
def show(events: Events[Image]): Unit = {
val panel = DoodlePanel(Circle(0) lineWidth 0)
events.foreach(image => panel.image = image)
val frame = new JFrame("Game of life")
frame.getContentPane.add(panel)
frame.pack()
frame.setVisible(true)
}
}
|
julienrf/scala-lessons
|
course/code/src/main/scala/julienrf/course/gameoflife/Ui.scala
|
Scala
|
mit
| 416
|
package com.bnctech.etcd
import com.bnctech.etcd.protocol.{EtcdListResponse, EtcdResponse}
import com.bnctech.etcd.utils.Converter._
import io.vertx.core.http.{HttpClient, HttpClientOptions, HttpClientResponse, HttpMethod}
import io.vertx.core.json.Json
import io.vertx.core.{AsyncResult, Future, Handler, Vertx}
import scala.util.{Failure, Success, Try}
/**
* Etcd client
*/
object EtcdClient {
private val SLASH = "/"
private val BASE_URL: String = "/v2/keys"
private val HEADER = "application/x-www-form-urlencoded"
def prepareUrl[A](key: String = "")(implicit list: List[(String, Option[A])] = Nil): String = {
val appUrl = list filter {
_._2.isDefined
} map { case (param, value) => s"$param=${value.get}" } match {
case Nil => ""
case parametersList => "?" concat {
parametersList mkString "&"
}
}
s"$BASE_URL$SLASH$key$appUrl"
}
}
/**
* Etcd client
*
* @param host Etcd hostname
* @param port Etcd port
* @param vertx Vertx instance
*/
class EtcdClient(host: String, port: Int, vertx: Vertx) {
import EtcdClient._
private val options: HttpClientOptions = new HttpClientOptions()
.setDefaultHost(host)
.setDefaultPort(port)
private val httpClient: HttpClient = vertx.createHttpClient(options)
/**
* Get the value of a key
*
* @param key Key to retrieve
* @param handler Callback handling the response
*/
def get(key: String, handler: Handler[AsyncResult[EtcdResponse]]): Unit = {
httpClient.request(HttpMethod.GET, prepareUrl(key), executeKey(handler)).end()
}
/**
* Set a value for a key
* If the key does not exist then it will be created. Otherwise the value will be updated
*
* @param key Key which will have the value set
* @param value Value to set
* @param ttl Optional time to live for the key
* @param handler Callback handling the response
*/
def set(key: String, value: Any, ttl: Int, handler: Handler[AsyncResult[EtcdResponse]]): Unit =
set(key, value, Option(ttl), handler)
/**
* Set a value for a key
* If the key does not exist then it will be created. Otherwise the value will be updated
*
* @param key Key which will have the value set
* @param value Value to set
* @param handler Callback handling the response
*/
def set(key: String, value: Any, handler: Handler[AsyncResult[EtcdResponse]]): Unit =
set(key, value, None, handler)
/**
* Set a value for a key
* If the key does not exist then it will be created. Otherwise the value will be updated
*
* @param key Key which will have the value set
* @param value Value to set
* @param ttl Optional time to live for the key
* @param handler Callback handling the response
*/
private def set(key: String, value: Any, ttl: Option[Int], handler: Handler[AsyncResult[EtcdResponse]]): Unit =
httpClient.request(HttpMethod.PUT, prepareUrl(key), executeKey(handler))
.putHeader("Content-Type", HEADER)
.end(s"value=$value" concat (ttl map { ttl => s"&ttl=$ttl" } getOrElse ""))
/**
* Delete a key
*
* @param key Key to delete
* @param handler Callback handling the response
*/
def delete(key: String, handler: Handler[AsyncResult[EtcdResponse]]): Unit =
httpClient.request(HttpMethod.DELETE, prepareUrl(key), executeKey(handler)).end()
/**
* Watch every change on a key or a directory
*
* @param key Key or directory to watch
* @param waitIndex Start the watch from this index
* @param recursive Recursively watch a directory
* @return [[Watcher]] object for this key
*/
def watch(key: String, waitIndex: Long, recursive: Boolean): Watcher = watch(key, Some(waitIndex), recursive)
/**
* Watch every change on a key or a directory without recursive watch
*
* @param key Key or directory to watch
* @param waitIndex Start the watch from this index
* @return [[Watcher]] object for this key
*/
def watch(key: String, waitIndex: Long): Watcher = watch(key, waitIndex, false)
/**
* Watch every change on a key or a directory
*
* @param key Key or directory to watch
* @param recursive Recursively watch a directory
* @return [[Watcher]] object for this key
*/
def watch(key: String, recursive: Boolean): Watcher = watch(key, None, recursive)
/**
* Watch every change on a key or a directory without recursive watch
*
* @param key Key or directory to watch
* @return [[Watcher]] object for this key
*/
def watch(key: String): Watcher = watch(key, false)
/**
* Watch every change on a key or a directory
*
* @param key Key or directory to watch
* @param waitIndex Start the watch from this index
* @param recursive Recursively watch a directory
* @return [[Watcher]] object for this key
*/
private def watch(key: String, waitIndex: Option[Long] = None, recursive: Boolean): Watcher =
new Watcher(httpClient, key, waitIndex, recursive, vertx)
/**
* Create a directory
*
* @param dir Directory to create
* @param handler Callback handling the response
*/
def createDir(dir: String, handler: Handler[AsyncResult[EtcdListResponse]]): Unit =
httpClient.request(HttpMethod.PUT, prepareUrl(dir), executeList(handler))
.putHeader("Content-Type", HEADER)
.end("dir=true")
/**
* List a directory
*
* @param dir Directory to list
* @param recursive List the directory recursively
* @param handler Callback handling the response
*/
def listDir(dir: String, recursive: Boolean, handler: Handler[AsyncResult[EtcdListResponse]]): Unit =
listDir(dir, Option(recursive), handler)
/**
* List a directory not recursively
*
* @param dir Directory to list
* @param handler Callback handling the response
*/
def listDir(dir: String, handler: Handler[AsyncResult[EtcdListResponse]]): Unit =
listDir(dir, None, handler)
/**
* List a directory
*
* @param dir Directory to list
* @param recursive List the directory recursively
* @param handler Callback handling the response
*/
private def listDir(dir: String, recursive: Option[Boolean], handler: Handler[AsyncResult[EtcdListResponse]]): Unit = {
implicit val list = List(("recursive", recursive))
httpClient.request(HttpMethod.GET, prepareUrl(dir), executeList(handler)).end()
}
/**
* Delete a directory
*
* @param dir Directory to delete
* @param recursive Delete the directory recursively
* @param handler Callback handling the response
*/
def deleteDir(dir: String, recursive: Boolean, handler: Handler[AsyncResult[EtcdResponse]]): Unit =
deleteDir(dir, Option(recursive), handler)
/**
* Delete a directory not recursively
*
* @param dir Directory to delete
* @param handler Callback handling the response
*/
def deleteDir(dir: String, handler: Handler[AsyncResult[EtcdResponse]]): Unit =
deleteDir(dir, None, handler)
/**
* Delete a directory
*
* @param dir Directory to delete
* @param recursive Delete the directory recursively
* @param handler Callback handling the response
*/
private def deleteDir(dir: String, recursive: Option[Boolean], handler: Handler[AsyncResult[EtcdResponse]]): Unit = {
implicit val list = List(("recursive", recursive))
httpClient.request(HttpMethod.DELETE, prepareUrl(dir), executeKey(handler)).end()
}
private def executeKey(handler: Handler[AsyncResult[EtcdResponse]]): Handler[HttpClientResponse] = {
(response: HttpClientResponse) => {
response.bodyHandler(buffer =>
response.statusCode() match {
case 200 | 201 =>
Try {
Json.decodeValue(buffer.toString(), classOf[EtcdResponse])
} match {
case Success(etcdResponse) =>
Option(etcdResponse.getNode.getValue) foreach {
case value: String => etcdResponse.getNode.setValue(convertStringToObject(value))
}
handler handle Future.succeededFuture(etcdResponse)
case Failure(e) => handler handle Future.failedFuture(e)
}
case _ =>
handleError(handler, buffer)
})
}
}
private def executeList(handler: Handler[AsyncResult[EtcdListResponse]]): Handler[HttpClientResponse] = {
(response: HttpClientResponse) => {
response.bodyHandler(buffer =>
response.statusCode() match {
case 200 | 201 =>
Try {
Json.decodeValue(buffer.toString(), classOf[EtcdListResponse])
} match {
case Success(etcdListResponse) =>
convertListElement(etcdListResponse.getNode)
handler handle Future.succeededFuture[EtcdListResponse](etcdListResponse)
case Failure(e) => handler handle Future.failedFuture(e)
}
case _ =>
handleError(handler, buffer)
})
}
}
}
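// --- Illustrative usage sketch (not part of the original file) ---
// Setting and then reading back a key with the callback-based API above. The etcd
// endpoint (127.0.0.1:2379) is an assumption.
object EtcdClientExample extends App {
  import io.vertx.core.{AsyncResult, Handler, Vertx}
  import com.bnctech.etcd.protocol.EtcdResponse
  val client = new EtcdClient("127.0.0.1", 2379, Vertx.vertx())
  client.set("greeting", "hello", new Handler[AsyncResult[EtcdResponse]] {
    override def handle(result: AsyncResult[EtcdResponse]): Unit =
      if (result.succeeded())
        client.get("greeting", new Handler[AsyncResult[EtcdResponse]] {
          override def handle(r: AsyncResult[EtcdResponse]): Unit =
            if (r.succeeded()) println(r.result().getNode.getValue)
            else r.cause().printStackTrace()
        })
      else result.cause().printStackTrace()
  })
}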
|
BnC-Technologies/vertx-etcd
|
vertx-etcd/src/main/scala/com/bnctech/etcd/EtcdClient.scala
|
Scala
|
mit
| 9,168
|
package ru.wordmetrix.webcrawler
import java.net.URI
import akka.actor.{Actor, ActorRef, Props, actorRef2Scala}
import ru.wordmetrix.smartfile.SmartFile.fromFile
import ru.wordmetrix.utils.impl.URIEx
import ru.wordmetrix.utils.{CFG, CFGAware}
object WebGet {
abstract sealed trait WebGetMessage
case class WebGetRequest(seed: URI, gather: ActorRef) extends WebGetMessage
def props(cfg: CFG): Props =
Props(new WebGet()(cfg))
}
/*
* WebGet gets an information from web pages
*/
class WebGet()(implicit cfg: CFG)
extends Actor with CFGAware {
override val name = "WebGet"
import Gather._
import SeedQueue._
import WebGet._
def receive(): Receive = {
case WebGetRequest(seed, gather) => {
//log("Getting a page %s ", seed)
try {
val page = (cfg.cache / seed.toFilename).readLines.mkString("")
log("Getting from caches %s ", seed)
gather ! GatherPage(seed, page)
} catch {
case x: Throwable =>
if (cfg.isdebug) log("Getting from source %s ", seed)
time(s"Getting from source $seed") {
try {
val connection = seed.toURL.openConnection()
connection.getContentType().split(";").head match {
case "text/html" => {
val text = io.Source.fromInputStream(
connection.getInputStream()).
              getLines().mkString("\n")
(cfg.cache / seed.toFilename).write(text)
gather ! GatherPage(seed, text)
}
case _ => None
}
} catch {
case x: Throwable =>
this.log("Download fault %s because %s", seed, x)
}
}
}
sender ! SeedQueueGet
}
case SeedQueueEmpty =>
context.stop(self)
}
}
|
electricmind/webcrawler
|
src/main/scala/ru/wordmetrix/webcrawler/WebGet.scala
|
Scala
|
apache-2.0
| 1,849
|
package zio.modelchecker.example
import org.scalatest.matchers.Matcher
import org.scalatest.Assertion
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import zio.UIO._
import zio.{Semaphore, UIO}
import zio.modelchecker.Interpreter._
/**
* The dining philosophers:
* https://en.wikipedia.org/wiki/Dining_philosophers_problem
*/
object Philosophers {
private def philosopher(
firstStick: Semaphore,
secondStick: Semaphore
): UIO[Unit] =
succeed((/* THINK */ )) *>
firstStick.acquire *>
secondStick.acquire *>
succeed((/* EAT */ )) *>
secondStick.release *>
firstStick.release
def run(n: Int): UIO[Unit] =
for {
sticks <- foreach(in = 0 until n)(_ => Semaphore.make(1))
_ <- foreachPar(0 until n) { i =>
philosopher(sticks(i), sticks((i + 1) % n))
}
} yield ()
def runOk(n: Int): UIO[Unit] =
for {
sticks <- foreach(in = 0 until n)(_ => Semaphore.make(1))
_ <- foreachPar(0 until n) {
case 0 => philosopher(sticks(n - 1), sticks(0))
case i => philosopher(sticks(i), sticks((i + 1) % n))
}
} yield ()
}
class PhilosophersSpec extends AnyFlatSpec with Matchers {
implicit class Syntax[E, A](results: Set[A]) {
def could(matcher: Matcher[A]): Assertion =
atLeast(1, results) should matcher
}
behavior of "Philosophers"
they should "sometimes deadlock in wrong configuration" in {
notFailing(Philosophers.run(3)) could be(None)
}
they should "never deadlock" in {
all(notFailing(Philosophers.runOk(3))) should not(be(None))
}
}
|
Jentsch/modelchecker
|
zio/src/test/scala/zio/modelchecker/example/Philosophers.scala
|
Scala
|
mit
| 1,638
|
package monocle.internal.focus.features.selectfield
import monocle.internal.focus.FocusBase
import monocle.internal.focus.features.SelectParserBase
private[focus] trait SelectFieldParser {
this: FocusBase with SelectParserBase =>
import this.macroContext.reflect._
object SelectField extends FocusParser {
def unapply(term: Term): Option[FocusResult[(RemainingCode, FocusAction)]] = term match {
case Select(CaseClass(remainingCode), fieldName) =>
val fromType = getType(remainingCode)
val action = getFieldAction(fromType, fieldName)
val remainingCodeWithAction = action.map(a => (RemainingCode(remainingCode), a))
Some(remainingCodeWithAction)
case Select(remainingCode, fieldName) =>
Some(FocusError.NotACaseClass(remainingCode.tpe.show, fieldName).asResult)
case _ => None
}
}
private def getFieldAction(fromType: TypeRepr, fieldName: String): FocusResult[FocusAction] =
getFieldType(fromType, fieldName).flatMap { toType =>
Right(FocusAction.SelectField(fieldName, fromType, getSuppliedTypeArgs(fromType), toType))
}
}
|
julien-truffaut/Monocle
|
core/shared/src/main/scala-3.x/monocle/internal/focus/features/selectfield/SelectFieldParser.scala
|
Scala
|
mit
| 1,155
|
/**
* Copyright 2015 Otto (GmbH & Co KG)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.schedoscope.test
import java.text.SimpleDateFormat
import java.util.Date
import java.util.ArrayList
import java.util.HashMap
import scala.collection.JavaConversions._
import org.codehaus.jackson.map.ObjectMapper
import org.codehaus.jackson.map.`type`.TypeFactory
import org.schedoscope.dsl.storageformats._
import org.schedoscope.dsl.{Structure, View}
import org.slf4j.LoggerFactory
/**
* Helper for serialization/deserialization of hive data types
*
*/
object ViewSerDe {
val logger = LoggerFactory.getLogger("gna")
/**
* Recursively convert nested Java collections to Scala collections
*
* @param j the java collection to convert
* @return
*/
private def toScala(j: Any): Any = j match {
case jum: java.util.Map[_, _] => jum.map { case (k, v) => (toScala(k), toScala(v)) }.toList.toMap
case jal: java.util.List[_] => jal.map(toScala).toList
case _ => j
}
/**
* Escape data before writing it to hive.
*
* @param v
* @return
*/
def serialize(v: View with WritableView): String = {
v.storageFormat match {
case tf: TextFile => {
val fterm = if (tf.fieldTerminator == null) "\t" else tf.fieldTerminator.replaceAll("\\\\t", "\t")
val lterm = if (tf.lineTerminator == null) "\n" else tf.lineTerminator.replaceAll("\\\\n", "\n")
v.rowData.map(row =>
v.fields.map(cell => {
serializeCell(row(cell.n), false, tf)
}).mkString(fterm))
.mkString(lterm)
}
case _ => throw new RuntimeException("Can only serialize views stored as textfile")
}
}
/**
* Converts the string representation of a Field to a Value according to the type information
* provided by schedoscope
*
*/
def deserializeField[T](t: Manifest[T], v: String): Any = {
if (v == null || "null".equals(v)) {
return v
}
if (t == manifest[Int])
v.asInstanceOf[String].toInt
else if (t == manifest[Long])
v.asInstanceOf[String].toLong
else if (t == manifest[Byte])
v.asInstanceOf[String].toByte
else if (t == manifest[Boolean])
v.asInstanceOf[String].toBoolean
else if (t == manifest[Double])
v.asInstanceOf[String].toDouble
else if (t == manifest[Float])
v.asInstanceOf[String].toFloat
else if (t == manifest[String])
v.asInstanceOf[String]
else if (t == manifest[Date])
v.asInstanceOf[String] // TODO: parse date?
else if (classOf[Structure].isAssignableFrom(t.runtimeClass)) {
val res: HashMap[String, _] = new ObjectMapper().readValue(v.toString, TypeFactory.mapType(classOf[HashMap[_, _]], classOf[String], classOf[Any]))
toScala(res)
} else if (t.runtimeClass == classOf[List[_]]) {
val res: ArrayList[_] = new ObjectMapper().readValue(v.toString, TypeFactory.collectionType(classOf[ArrayList[_]], classOf[Any]))
toScala(res)
} else if (t.runtimeClass == classOf[Map[_, _]]) {
val res: HashMap[String, _] = new ObjectMapper().readValue(v.toString, TypeFactory.mapType(classOf[java.util.HashMap[_, _]], classOf[String], classOf[Any]))
toScala(res)
} else throw new RuntimeException("Could not deserialize field of type " + t + " with value " + v)
}
private def serializeCell(c: Any, inList: Boolean, format: TextFile): String = {
c match {
case null => {
"\\N"
}
case s: Structure with values => {
s.fields.map(f => serializeCell(s.fs(f.n), false, format)).mkString(if (inList) format.mapKeyTerminator else format.collectionItemTerminator)
}
case l: List[_] => {
l.map(e => serializeCell(e, true, format)).mkString(format.collectionItemTerminator)
}
case m: Map[_, _] => {
m.map(e => serializeCell(e._1, false, format) + format.mapKeyTerminator + serializeCell(e._2, false, format)).mkString(format.collectionItemTerminator)
}
case d: Date => new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSXXX").format(d)
case _ => {
c.toString
}
}
}
}
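// --- Illustrative usage sketch (not part of the original file) ---
// deserializeField picks a parser from the manifest; a couple of the simple cases handled above:
object ViewSerDeExample extends App {
  println(ViewSerDe.deserializeField(manifest[Int], "42"))       // 42
  println(ViewSerDe.deserializeField(manifest[Boolean], "true")) // true
  println(ViewSerDe.deserializeField(manifest[String], "null"))  // "null" values are passed through unchanged
}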
|
utzwestermann/schedoscope
|
schedoscope-core/src/main/scala/org/schedoscope/test/ViewSerDe.scala
|
Scala
|
apache-2.0
| 4,673
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import java.util.Random
import scala.collection.{mutable, Map}
import scala.collection.mutable.ArrayBuffer
import scala.io.Codec
import scala.language.implicitConversions
import scala.reflect.{classTag, ClassTag}
import com.clearspring.analytics.stream.cardinality.HyperLogLogPlus
import org.apache.hadoop.io.{BytesWritable, NullWritable, Text}
import org.apache.hadoop.io.compress.CompressionCodec
import org.apache.hadoop.mapred.TextOutputFormat
import org.apache.spark._
import org.apache.spark.Partitioner._
import org.apache.spark.annotation.{DeveloperApi, Since}
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.internal.Logging
import org.apache.spark.partial.BoundedDouble
import org.apache.spark.partial.CountEvaluator
import org.apache.spark.partial.GroupedCountEvaluator
import org.apache.spark.partial.PartialResult
import org.apache.spark.storage.{RDDBlockId, StorageLevel}
import org.apache.spark.util.{BoundedPriorityQueue, Utils}
import org.apache.spark.util.collection.OpenHashMap
import org.apache.spark.util.random.{BernoulliCellSampler, BernoulliSampler, PoissonSampler,
SamplingUtils}
/**
* A Resilient Distributed Dataset (RDD), the basic abstraction in Spark. Represents an immutable,
* partitioned collection of elements that can be operated on in parallel. This class contains the
* basic operations available on all RDDs, such as `map`, `filter`, and `persist`. In addition,
* [[org.apache.spark.rdd.PairRDDFunctions]] contains operations available only on RDDs of key-value
* pairs, such as `groupByKey` and `join`;
* [[org.apache.spark.rdd.DoubleRDDFunctions]] contains operations available only on RDDs of
* Doubles; and
* [[org.apache.spark.rdd.SequenceFileRDDFunctions]] contains operations available on RDDs that
* can be saved as SequenceFiles.
 * All operations are automatically available on any RDD of the right type (e.g. RDD[(Int, Int)])
 * through implicit conversions.
*
* Internally, each RDD is characterized by five main properties:
*
* - A list of partitions
* - A function for computing each split
* - A list of dependencies on other RDDs
* - Optionally, a Partitioner for key-value RDDs (e.g. to say that the RDD is hash-partitioned)
* - Optionally, a list of preferred locations to compute each split on (e.g. block locations for
* an HDFS file)
*
* All of the scheduling and execution in Spark is done based on these methods, allowing each RDD
* to implement its own way of computing itself. Indeed, users can implement custom RDDs (e.g. for
* reading data from a new storage system) by overriding these functions. Please refer to the
* <a href="http://people.csail.mit.edu/matei/papers/2012/nsdi_spark.pdf">Spark paper</a>
* for more details on RDD internals.
*/
abstract class RDD[T: ClassTag](
@transient private var _sc: SparkContext,
@transient private var deps: Seq[Dependency[_]]
) extends Serializable with Logging {
if (classOf[RDD[_]].isAssignableFrom(elementClassTag.runtimeClass)) {
// This is a warning instead of an exception in order to avoid breaking user programs that
// might have defined nested RDDs without running jobs with them.
logWarning("Spark does not support nested RDDs (see SPARK-5063)")
}
private def sc: SparkContext = {
if (_sc == null) {
throw new SparkException(
"This RDD lacks a SparkContext. It could happen in the following cases: \\n(1) RDD " +
"transformations and actions are NOT invoked by the driver, but inside of other " +
"transformations; for example, rdd1.map(x => rdd2.values.count() * x) is invalid " +
"because the values transformation and count action cannot be performed inside of the " +
"rdd1.map transformation. For more information, see SPARK-5063.\\n(2) When a Spark " +
"Streaming job recovers from checkpoint, this exception will be hit if a reference to " +
"an RDD not defined by the streaming job is used in DStream operations. For more " +
"information, See SPARK-13758.")
}
_sc
}
/** Construct an RDD with just a one-to-one dependency on one parent */
def this(@transient oneParent: RDD[_]) =
this(oneParent.context, List(new OneToOneDependency(oneParent)))
private[spark] def conf = sc.conf
// =======================================================================
// Methods that should be implemented by subclasses of RDD
// =======================================================================
/**
* :: DeveloperApi ::
* Implemented by subclasses to compute a given partition.
*/
@DeveloperApi
def compute(split: Partition, context: TaskContext): Iterator[T]
/**
* Implemented by subclasses to return the set of partitions in this RDD. This method will only
* be called once, so it is safe to implement a time-consuming computation in it.
*
* The partitions in this array must satisfy the following property:
* `rdd.partitions.zipWithIndex.forall { case (partition, index) => partition.index == index }`
*/
protected def getPartitions: Array[Partition]
/**
* Implemented by subclasses to return how this RDD depends on parent RDDs. This method will only
* be called once, so it is safe to implement a time-consuming computation in it.
*/
protected def getDependencies: Seq[Dependency[_]] = deps
/**
* Optionally overridden by subclasses to specify placement preferences.
*/
protected def getPreferredLocations(split: Partition): Seq[String] = Nil
/** Optionally overridden by subclasses to specify how they are partitioned. */
@transient val partitioner: Option[Partitioner] = None
// =======================================================================
// Methods and fields available on all RDDs
// =======================================================================
/** The SparkContext that created this RDD. */
def sparkContext: SparkContext = sc
/** A unique ID for this RDD (within its SparkContext). */
val id: Int = sc.newRddId()
/** A friendly name for this RDD */
@transient var name: String = null
/** Assign a name to this RDD */
def setName(_name: String): this.type = {
name = _name
this
}
/**
* Mark this RDD for persisting using the specified level.
*
* @param newLevel the target storage level
* @param allowOverride whether to override any existing level with the new one
*/
private def persist(newLevel: StorageLevel, allowOverride: Boolean): this.type = {
// TODO: Handle changes of StorageLevel
if (storageLevel != StorageLevel.NONE && newLevel != storageLevel && !allowOverride) {
throw new UnsupportedOperationException(
"Cannot change storage level of an RDD after it was already assigned a level")
}
// If this is the first time this RDD is marked for persisting, register it
// with the SparkContext for cleanups and accounting. Do this only once.
if (storageLevel == StorageLevel.NONE) {
sc.cleaner.foreach(_.registerRDDForCleanup(this))
sc.persistRDD(this)
}
storageLevel = newLevel
this
}
/**
* Set this RDD's storage level to persist its values across operations after the first time
* it is computed. This can only be used to assign a new storage level if the RDD does not
* have a storage level set yet. Local checkpointing is an exception.
*/
def persist(newLevel: StorageLevel): this.type = {
if (isLocallyCheckpointed) {
// This means the user previously called localCheckpoint(), which should have already
// marked this RDD for persisting. Here we should override the old storage level with
// one that is explicitly requested by the user (after adapting it to use disk).
persist(LocalRDDCheckpointData.transformStorageLevel(newLevel), allowOverride = true)
} else {
persist(newLevel, allowOverride = false)
}
}
/**
* Persist this RDD with the default storage level (`MEMORY_ONLY`).
*/
def persist(): this.type = persist(StorageLevel.MEMORY_ONLY)
/**
* Persist this RDD with the default storage level (`MEMORY_ONLY`).
*/
def cache(): this.type = persist()
/**
* Mark the RDD as non-persistent, and remove all blocks for it from memory and disk.
*
* @param blocking Whether to block until all blocks are deleted.
* @return This RDD.
*/
def unpersist(blocking: Boolean = true): this.type = {
logInfo("Removing RDD " + id + " from persistence list")
sc.unpersistRDD(id, blocking)
storageLevel = StorageLevel.NONE
this
}
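  /* A minimal usage sketch of caching, assuming a live SparkContext `sc` (e.g. in spark-shell):
   * {{{
   *   import org.apache.spark.storage.StorageLevel
   *   val doubled = sc.parallelize(1 to 100).map(_ * 2)
   *   doubled.persist(StorageLevel.MEMORY_AND_DISK)  // pick a level before the first action
   *   doubled.count()                                // materializes and caches the partitions
   *   doubled.count()                                // served from the cache
   *   doubled.unpersist()                            // drops the cached blocks again
   * }}}
   */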
/** Get the RDD's current storage level, or StorageLevel.NONE if none is set. */
def getStorageLevel: StorageLevel = storageLevel
// Our dependencies and partitions will be gotten by calling subclass's methods below, and will
// be overwritten when we're checkpointed
private var dependencies_ : Seq[Dependency[_]] = null
@transient private var partitions_ : Array[Partition] = null
/** An Option holding our checkpoint RDD, if we are checkpointed */
private def checkpointRDD: Option[CheckpointRDD[T]] = checkpointData.flatMap(_.checkpointRDD)
/**
* Get the list of dependencies of this RDD, taking into account whether the
* RDD is checkpointed or not.
*/
final def dependencies: Seq[Dependency[_]] = {
checkpointRDD.map(r => List(new OneToOneDependency(r))).getOrElse {
if (dependencies_ == null) {
dependencies_ = getDependencies
}
dependencies_
}
}
/**
* Get the array of partitions of this RDD, taking into account whether the
* RDD is checkpointed or not.
*/
final def partitions: Array[Partition] = {
checkpointRDD.map(_.partitions).getOrElse {
if (partitions_ == null) {
partitions_ = getPartitions
partitions_.zipWithIndex.foreach { case (partition, index) =>
require(partition.index == index,
s"partitions($index).partition == ${partition.index}, but it should equal $index")
}
}
partitions_
}
}
/**
* Returns the number of partitions of this RDD.
*/
@Since("1.6.0")
final def getNumPartitions: Int = partitions.length
/**
* Get the preferred locations of a partition, taking into account whether the
* RDD is checkpointed.
*/
final def preferredLocations(split: Partition): Seq[String] = {
checkpointRDD.map(_.getPreferredLocations(split)).getOrElse {
getPreferredLocations(split)
}
}
/**
* Internal method to this RDD; will read from cache if applicable, or otherwise compute it.
* This should ''not'' be called by users directly, but is available for implementors of custom
* subclasses of RDD.
*/
final def iterator(split: Partition, context: TaskContext): Iterator[T] = {
if (storageLevel != StorageLevel.NONE) {
getOrCompute(split, context)
} else {
computeOrReadCheckpoint(split, context)
}
}
/**
* Return the ancestors of the given RDD that are related to it only through a sequence of
* narrow dependencies. This traverses the given RDD's dependency tree using DFS, but maintains
* no ordering on the RDDs returned.
*/
private[spark] def getNarrowAncestors: Seq[RDD[_]] = {
val ancestors = new mutable.HashSet[RDD[_]]
def visit(rdd: RDD[_]) {
val narrowDependencies = rdd.dependencies.filter(_.isInstanceOf[NarrowDependency[_]])
val narrowParents = narrowDependencies.map(_.rdd)
val narrowParentsNotVisited = narrowParents.filterNot(ancestors.contains)
narrowParentsNotVisited.foreach { parent =>
ancestors.add(parent)
visit(parent)
}
}
visit(this)
// In case there is a cycle, do not include the root itself
ancestors.filterNot(_ == this).toSeq
}
/**
* Compute an RDD partition or read it from a checkpoint if the RDD is checkpointing.
*/
private[spark] def computeOrReadCheckpoint(split: Partition, context: TaskContext): Iterator[T] =
{
if (isCheckpointedAndMaterialized) {
firstParent[T].iterator(split, context)
} else {
compute(split, context)
}
}
/**
* Gets or computes an RDD partition. Used by RDD.iterator() when an RDD is cached.
*/
private[spark] def getOrCompute(partition: Partition, context: TaskContext): Iterator[T] = {
val blockId = RDDBlockId(id, partition.index)
var readCachedBlock = true
// This method is called on executors, so we need call SparkEnv.get instead of sc.env.
SparkEnv.get.blockManager.getOrElseUpdate(blockId, storageLevel, elementClassTag, () => {
readCachedBlock = false
computeOrReadCheckpoint(partition, context)
}) match {
case Left(blockResult) =>
if (readCachedBlock) {
val existingMetrics = context.taskMetrics().inputMetrics
existingMetrics.incBytesRead(blockResult.bytes)
new InterruptibleIterator[T](context, blockResult.data.asInstanceOf[Iterator[T]]) {
override def next(): T = {
existingMetrics.incRecordsRead(1)
delegate.next()
}
}
} else {
new InterruptibleIterator(context, blockResult.data.asInstanceOf[Iterator[T]])
}
case Right(iter) =>
new InterruptibleIterator(context, iter.asInstanceOf[Iterator[T]])
}
}
/**
* Execute a block of code in a scope such that all new RDDs created in this body will
* be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}.
*
* Note: Return statements are NOT allowed in the given body.
*/
private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](sc)(body)
// Transformations (return a new RDD)
/**
* Return a new RDD by applying a function to all elements of this RDD.
*/
def map[U: ClassTag](f: T => U): RDD[U] = withScope {
val cleanF = sc.clean(f)
new MapPartitionsRDD[U, T](this, (context, pid, iter) => iter.map(cleanF))
}
/**
* Return a new RDD by first applying a function to all elements of this
* RDD, and then flattening the results.
*/
def flatMap[U: ClassTag](f: T => TraversableOnce[U]): RDD[U] = withScope {
val cleanF = sc.clean(f)
new MapPartitionsRDD[U, T](this, (context, pid, iter) => iter.flatMap(cleanF))
}
/**
* Return a new RDD containing only the elements that satisfy a predicate.
*/
def filter(f: T => Boolean): RDD[T] = withScope {
val cleanF = sc.clean(f)
new MapPartitionsRDD[T, T](
this,
(context, pid, iter) => iter.filter(cleanF),
preservesPartitioning = true)
}
/**
* Return a new RDD containing the distinct elements in this RDD.
*/
def distinct(numPartitions: Int)(implicit ord: Ordering[T] = null): RDD[T] = withScope {
map(x => (x, null)).reduceByKey((x, y) => x, numPartitions).map(_._1)
}
/**
* Return a new RDD containing the distinct elements in this RDD.
*/
def distinct(): RDD[T] = withScope {
distinct(partitions.length)
}
/**
* Return a new RDD that has exactly numPartitions partitions.
*
* Can increase or decrease the level of parallelism in this RDD. Internally, this uses
* a shuffle to redistribute data.
*
* If you are decreasing the number of partitions in this RDD, consider using `coalesce`,
* which can avoid performing a shuffle.
*/
def repartition(numPartitions: Int)(implicit ord: Ordering[T] = null): RDD[T] = withScope {
coalesce(numPartitions, shuffle = true)
}
/**
* Return a new RDD that is reduced into `numPartitions` partitions.
*
* This results in a narrow dependency, e.g. if you go from 1000 partitions
* to 100 partitions, there will not be a shuffle, instead each of the 100
* new partitions will claim 10 of the current partitions.
*
* However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
* this may result in your computation taking place on fewer nodes than
* you like (e.g. one node in the case of numPartitions = 1). To avoid this,
* you can pass shuffle = true. This will add a shuffle step, but means the
* current upstream partitions will be executed in parallel (per whatever
* the current partitioning is).
*
* @note With shuffle = true, you can actually coalesce to a larger number
* of partitions. This is useful if you have a small number of partitions,
* say 100, potentially with a few partitions being abnormally large. Calling
* coalesce(1000, shuffle = true) will result in 1000 partitions with the
* data distributed using a hash partitioner. The optional partition coalescer
* passed in must be serializable.
*/
def coalesce(numPartitions: Int, shuffle: Boolean = false,
partitionCoalescer: Option[PartitionCoalescer] = Option.empty)
(implicit ord: Ordering[T] = null)
: RDD[T] = withScope {
require(numPartitions > 0, s"Number of partitions ($numPartitions) must be positive.")
if (shuffle) {
/** Distributes elements evenly across output partitions, starting from a random partition. */
val distributePartition = (index: Int, items: Iterator[T]) => {
var position = (new Random(index)).nextInt(numPartitions)
items.map { t =>
// Note that the hash code of the key will just be the key itself. The HashPartitioner
// will mod it with the number of total partitions.
position = position + 1
(position, t)
}
} : Iterator[(Int, T)]
// include a shuffle step so that our upstream tasks are still distributed
new CoalescedRDD(
new ShuffledRDD[Int, T, T](mapPartitionsWithIndex(distributePartition),
new HashPartitioner(numPartitions)),
numPartitions,
partitionCoalescer).values
} else {
new CoalescedRDD(this, numPartitions, partitionCoalescer)
}
}
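  /* A minimal sketch contrasting the shuffle and no-shuffle paths of `coalesce`, assuming a
   * live SparkContext `sc`:
   * {{{
   *   val rdd = sc.parallelize(1 to 10000, 100)
   *   val narrowed = rdd.coalesce(10)                    // narrow dependency, no shuffle
   *   val rebalanced = rdd.coalesce(10, shuffle = true)  // shuffles; same as rdd.repartition(10)
   *   narrowed.getNumPartitions                          // 10
   * }}}
   */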
/**
* Return a sampled subset of this RDD.
*
* @param withReplacement can elements be sampled multiple times (replaced when sampled out)
* @param fraction expected size of the sample as a fraction of this RDD's size
   * without replacement: probability that each element is chosen; fraction must be in [0, 1]
* with replacement: expected number of times each element is chosen; fraction must be greater
* than or equal to 0
* @param seed seed for the random number generator
*
* @note This is NOT guaranteed to provide exactly the fraction of the count
* of the given [[RDD]].
*/
def sample(
withReplacement: Boolean,
fraction: Double,
seed: Long = Utils.random.nextLong): RDD[T] = {
require(fraction >= 0,
s"Fraction must be nonnegative, but got ${fraction}")
withScope {
require(fraction >= 0.0, "Negative fraction value: " + fraction)
if (withReplacement) {
new PartitionwiseSampledRDD[T, T](this, new PoissonSampler[T](fraction), true, seed)
} else {
new PartitionwiseSampledRDD[T, T](this, new BernoulliSampler[T](fraction), true, seed)
}
}
}
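  /* A minimal usage sketch of `sample`, assuming a live SparkContext `sc`; the sample size is
   * only approximately `fraction` times the RDD size:
   * {{{
   *   val rdd = sc.parallelize(1 to 1000)
   *   val tenth = rdd.sample(withReplacement = false, 0.1, seed = 42L)
   *   val boosted = rdd.sample(withReplacement = true, 2.0)  // each element expected ~2 times
   *   tenth.count()                                          // close to 100, not exactly 100
   * }}}
   */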
/**
* Randomly splits this RDD with the provided weights.
*
* @param weights weights for splits, will be normalized if they don't sum to 1
* @param seed random seed
*
* @return split RDDs in an array
*/
def randomSplit(
weights: Array[Double],
seed: Long = Utils.random.nextLong): Array[RDD[T]] = {
require(weights.forall(_ >= 0),
s"Weights must be nonnegative, but got ${weights.mkString("[", ",", "]")}")
require(weights.sum > 0,
s"Sum of weights must be positive, but got ${weights.mkString("[", ",", "]")}")
withScope {
val sum = weights.sum
val normalizedCumWeights = weights.map(_ / sum).scanLeft(0.0d)(_ + _)
normalizedCumWeights.sliding(2).map { x =>
randomSampleWithRange(x(0), x(1), seed)
}.toArray
}
}
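  /* A minimal usage sketch of `randomSplit` for a train/test split, assuming a live
   * SparkContext `sc`; every element lands in exactly one of the returned RDDs:
   * {{{
   *   val Array(train, test) = sc.parallelize(1 to 1000).randomSplit(Array(0.8, 0.2), seed = 7L)
   *   train.count() + test.count()   // 1000
   * }}}
   */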
/**
* Internal method exposed for Random Splits in DataFrames. Samples an RDD given a probability
* range.
* @param lb lower bound to use for the Bernoulli sampler
* @param ub upper bound to use for the Bernoulli sampler
* @param seed the seed for the Random number generator
* @return A random sub-sample of the RDD without replacement.
*/
private[spark] def randomSampleWithRange(lb: Double, ub: Double, seed: Long): RDD[T] = {
this.mapPartitionsWithIndex( { (index, partition) =>
val sampler = new BernoulliCellSampler[T](lb, ub)
sampler.setSeed(seed + index)
sampler.sample(partition)
}, preservesPartitioning = true)
}
/**
* Return a fixed-size sampled subset of this RDD in an array
*
* @param withReplacement whether sampling is done with replacement
* @param num size of the returned sample
* @param seed seed for the random number generator
* @return sample of specified size in an array
*
* @note this method should only be used if the resulting array is expected to be small, as
* all the data is loaded into the driver's memory.
*/
def takeSample(
withReplacement: Boolean,
num: Int,
seed: Long = Utils.random.nextLong): Array[T] = withScope {
val numStDev = 10.0
require(num >= 0, "Negative number of elements requested")
require(num <= (Int.MaxValue - (numStDev * math.sqrt(Int.MaxValue)).toInt),
"Cannot support a sample size > Int.MaxValue - " +
s"$numStDev * math.sqrt(Int.MaxValue)")
if (num == 0) {
new Array[T](0)
} else {
val initialCount = this.count()
if (initialCount == 0) {
new Array[T](0)
} else {
val rand = new Random(seed)
if (!withReplacement && num >= initialCount) {
Utils.randomizeInPlace(this.collect(), rand)
} else {
val fraction = SamplingUtils.computeFractionForSampleSize(num, initialCount,
withReplacement)
var samples = this.sample(withReplacement, fraction, rand.nextInt()).collect()
// If the first sample didn't turn out large enough, keep trying to take samples;
// this shouldn't happen often because we use a big multiplier for the initial size
var numIters = 0
while (samples.length < num) {
logWarning(s"Needed to re-sample due to insufficient sample size. Repeat #$numIters")
samples = this.sample(withReplacement, fraction, rand.nextInt()).collect()
numIters += 1
}
Utils.randomizeInPlace(samples, rand).take(num)
}
}
}
}
/**
* Return the union of this RDD and another one. Any identical elements will appear multiple
* times (use `.distinct()` to eliminate them).
*/
def union(other: RDD[T]): RDD[T] = withScope {
sc.union(this, other)
}
/**
* Return the union of this RDD and another one. Any identical elements will appear multiple
* times (use `.distinct()` to eliminate them).
*/
def ++(other: RDD[T]): RDD[T] = withScope {
this.union(other)
}
/**
* Return this RDD sorted by the given key function.
*/
def sortBy[K](
f: (T) => K,
ascending: Boolean = true,
numPartitions: Int = this.partitions.length)
(implicit ord: Ordering[K], ctag: ClassTag[K]): RDD[T] = withScope {
this.keyBy[K](f)
.sortByKey(ascending, numPartitions)
.values
}
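  /* A minimal usage sketch of `sortBy`, assuming a live SparkContext `sc`:
   * {{{
   *   val words = sc.parallelize(Seq("spark", "rdd", "partition"))
   *   words.sortBy(_.length).collect()                     // Array(rdd, spark, partition)
   *   words.sortBy(_.length, ascending = false).collect()  // Array(partition, spark, rdd)
   * }}}
   */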
/**
* Return the intersection of this RDD and another one. The output will not contain any duplicate
* elements, even if the input RDDs did.
*
* @note This method performs a shuffle internally.
*/
def intersection(other: RDD[T]): RDD[T] = withScope {
this.map(v => (v, null)).cogroup(other.map(v => (v, null)))
.filter { case (_, (leftGroup, rightGroup)) => leftGroup.nonEmpty && rightGroup.nonEmpty }
.keys
}
/**
* Return the intersection of this RDD and another one. The output will not contain any duplicate
* elements, even if the input RDDs did.
*
* @note This method performs a shuffle internally.
*
* @param partitioner Partitioner to use for the resulting RDD
*/
def intersection(
other: RDD[T],
partitioner: Partitioner)(implicit ord: Ordering[T] = null): RDD[T] = withScope {
this.map(v => (v, null)).cogroup(other.map(v => (v, null)), partitioner)
.filter { case (_, (leftGroup, rightGroup)) => leftGroup.nonEmpty && rightGroup.nonEmpty }
.keys
}
/**
* Return the intersection of this RDD and another one. The output will not contain any duplicate
* elements, even if the input RDDs did. Performs a hash partition across the cluster
*
* @note This method performs a shuffle internally.
*
* @param numPartitions How many partitions to use in the resulting RDD
*/
def intersection(other: RDD[T], numPartitions: Int): RDD[T] = withScope {
intersection(other, new HashPartitioner(numPartitions))
}
/**
* Return an RDD created by coalescing all elements within each partition into an array.
*/
def glom(): RDD[Array[T]] = withScope {
new MapPartitionsRDD[Array[T], T](this, (context, pid, iter) => Iterator(iter.toArray))
}
/**
* Return the Cartesian product of this RDD and another one, that is, the RDD of all pairs of
* elements (a, b) where a is in `this` and b is in `other`.
*/
def cartesian[U: ClassTag](other: RDD[U]): RDD[(T, U)] = withScope {
new CartesianRDD(sc, this, other)
}
/**
* Return an RDD of grouped items. Each group consists of a key and a sequence of elements
* mapping to that key. The ordering of elements within each group is not guaranteed, and
* may even differ each time the resulting RDD is evaluated.
*
* @note This operation may be very expensive. If you are grouping in order to perform an
* aggregation (such as a sum or average) over each key, using `PairRDDFunctions.aggregateByKey`
* or `PairRDDFunctions.reduceByKey` will provide much better performance.
*/
def groupBy[K](f: T => K)(implicit kt: ClassTag[K]): RDD[(K, Iterable[T])] = withScope {
groupBy[K](f, defaultPartitioner(this))
}
/**
* Return an RDD of grouped elements. Each group consists of a key and a sequence of elements
* mapping to that key. The ordering of elements within each group is not guaranteed, and
* may even differ each time the resulting RDD is evaluated.
*
* @note This operation may be very expensive. If you are grouping in order to perform an
* aggregation (such as a sum or average) over each key, using `PairRDDFunctions.aggregateByKey`
* or `PairRDDFunctions.reduceByKey` will provide much better performance.
*/
def groupBy[K](
f: T => K,
numPartitions: Int)(implicit kt: ClassTag[K]): RDD[(K, Iterable[T])] = withScope {
groupBy(f, new HashPartitioner(numPartitions))
}
/**
* Return an RDD of grouped items. Each group consists of a key and a sequence of elements
* mapping to that key. The ordering of elements within each group is not guaranteed, and
* may even differ each time the resulting RDD is evaluated.
*
* @note This operation may be very expensive. If you are grouping in order to perform an
* aggregation (such as a sum or average) over each key, using `PairRDDFunctions.aggregateByKey`
* or `PairRDDFunctions.reduceByKey` will provide much better performance.
*/
def groupBy[K](f: T => K, p: Partitioner)(implicit kt: ClassTag[K], ord: Ordering[K] = null)
: RDD[(K, Iterable[T])] = withScope {
val cleanF = sc.clean(f)
this.map(t => (cleanF(t), t)).groupByKey(p)
}
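  /* A minimal usage sketch of `groupBy`, assuming a live SparkContext `sc`; for per-key sums or
   * counts, `reduceByKey` gives the same answer without materializing the groups:
   * {{{
   *   val nums = sc.parallelize(1 to 10)
   *   nums.groupBy(_ % 2).mapValues(_.sum).collect()          // (0,30) and (1,25), in some order
   *   nums.map(n => (n % 2, n)).reduceByKey(_ + _).collect()  // same result, cheaper
   * }}}
   */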
/**
* Return an RDD created by piping elements to a forked external process.
*/
def pipe(command: String): RDD[String] = withScope {
// Similar to Runtime.exec(), if we are given a single string, split it into words
// using a standard StringTokenizer (i.e. by spaces)
pipe(PipedRDD.tokenize(command))
}
/**
* Return an RDD created by piping elements to a forked external process.
*/
def pipe(command: String, env: Map[String, String]): RDD[String] = withScope {
// Similar to Runtime.exec(), if we are given a single string, split it into words
// using a standard StringTokenizer (i.e. by spaces)
pipe(PipedRDD.tokenize(command), env)
}
/**
* Return an RDD created by piping elements to a forked external process. The resulting RDD
* is computed by executing the given process once per partition. All elements
* of each input partition are written to a process's stdin as lines of input separated
* by a newline. The resulting partition consists of the process's stdout output, with
* each line of stdout resulting in one element of the output partition. A process is invoked
* even for empty partitions.
*
* The print behavior can be customized by providing two functions.
*
* @param command command to run in forked process.
* @param env environment variables to set.
* @param printPipeContext Before piping elements, this function is called as an opportunity
* to pipe context data. Print line function (like out.println) will be
* passed as printPipeContext's parameter.
* @param printRDDElement Use this function to customize how to pipe elements. This function
* will be called with each RDD element as the 1st parameter, and the
* print line function (like out.println()) as the 2nd parameter.
* An example of pipe the RDD data of groupBy() in a streaming way,
* instead of constructing a huge String to concat all the elements:
* {{{
* def printRDDElement(record:(String, Seq[String]), f:String=>Unit) =
* for (e <- record._2) {f(e)}
* }}}
* @param separateWorkingDir Use separate working directories for each task.
* @param bufferSize Buffer size for the stdin writer for the piped process.
* @param encoding Char encoding used for interacting (via stdin, stdout and stderr) with
* the piped process
* @return the result RDD
*/
def pipe(
command: Seq[String],
env: Map[String, String] = Map(),
printPipeContext: (String => Unit) => Unit = null,
printRDDElement: (T, String => Unit) => Unit = null,
separateWorkingDir: Boolean = false,
bufferSize: Int = 8192,
encoding: String = Codec.defaultCharsetCodec.name): RDD[String] = withScope {
new PipedRDD(this, command, env,
if (printPipeContext ne null) sc.clean(printPipeContext) else null,
if (printRDDElement ne null) sc.clean(printRDDElement) else null,
separateWorkingDir,
bufferSize,
encoding)
}
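  /* A minimal usage sketch of `pipe`, assuming a live SparkContext `sc` and a Unix-like `tr`
   * binary available on the executors:
   * {{{
   *   val rdd = sc.parallelize(Seq("spark", "rdd"))
   *   rdd.pipe("tr a-z A-Z").collect()   // Array(SPARK, RDD)
   * }}}
   */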
/**
* Return a new RDD by applying a function to each partition of this RDD.
*
* `preservesPartitioning` indicates whether the input function preserves the partitioner, which
* should be `false` unless this is a pair RDD and the input function doesn't modify the keys.
*/
def mapPartitions[U: ClassTag](
f: Iterator[T] => Iterator[U],
preservesPartitioning: Boolean = false): RDD[U] = withScope {
val cleanedF = sc.clean(f)
new MapPartitionsRDD(
this,
(context: TaskContext, index: Int, iter: Iterator[T]) => cleanedF(iter),
preservesPartitioning)
}
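  /* A minimal usage sketch of `mapPartitions`, doing one-time setup per partition instead of
   * per element, assuming a live SparkContext `sc`:
   * {{{
   *   val rdd = sc.parallelize(1 to 100, 4)
   *   val padded = rdd.mapPartitions { iter =>
   *     val fmt = new java.text.DecimalFormat("0000")  // created once per partition
   *     iter.map(i => fmt.format(i.toLong))
   *   }
   *   padded.first()   // "0001"
   * }}}
   */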
/**
* [performance] Spark's internal mapPartitionsWithIndex method that skips closure cleaning.
* It is a performance API to be used carefully only if we are sure that the RDD elements are
* serializable and don't require closure cleaning.
*
* @param preservesPartitioning indicates whether the input function preserves the partitioner,
* which should be `false` unless this is a pair RDD and the input function doesn't modify
* the keys.
*/
private[spark] def mapPartitionsWithIndexInternal[U: ClassTag](
f: (Int, Iterator[T]) => Iterator[U],
preservesPartitioning: Boolean = false): RDD[U] = withScope {
new MapPartitionsRDD(
this,
(context: TaskContext, index: Int, iter: Iterator[T]) => f(index, iter),
preservesPartitioning)
}
/**
* [performance] Spark's internal mapPartitions method that skips closure cleaning.
*/
private[spark] def mapPartitionsInternal[U: ClassTag](
f: Iterator[T] => Iterator[U],
preservesPartitioning: Boolean = false): RDD[U] = withScope {
new MapPartitionsRDD(
this,
(context: TaskContext, index: Int, iter: Iterator[T]) => f(iter),
preservesPartitioning)
}
/**
* Return a new RDD by applying a function to each partition of this RDD, while tracking the index
* of the original partition.
*
* `preservesPartitioning` indicates whether the input function preserves the partitioner, which
* should be `false` unless this is a pair RDD and the input function doesn't modify the keys.
*/
def mapPartitionsWithIndex[U: ClassTag](
f: (Int, Iterator[T]) => Iterator[U],
preservesPartitioning: Boolean = false): RDD[U] = withScope {
val cleanedF = sc.clean(f)
new MapPartitionsRDD(
this,
(context: TaskContext, index: Int, iter: Iterator[T]) => cleanedF(index, iter),
preservesPartitioning)
}
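  /* A minimal usage sketch of `mapPartitionsWithIndex`, tagging each element with the index of
   * the partition it lives in, assuming a live SparkContext `sc`:
   * {{{
   *   sc.parallelize(1 to 6, 3).mapPartitionsWithIndex { (idx, iter) =>
   *     iter.map(x => (idx, x))
   *   }.collect()   // Array((0,1), (0,2), (1,3), (1,4), (2,5), (2,6))
   * }}}
   */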
/**
* Zips this RDD with another one, returning key-value pairs with the first element in each RDD,
* second element in each RDD, etc. Assumes that the two RDDs have the *same number of
* partitions* and the *same number of elements in each partition* (e.g. one was made through
* a map on the other).
*/
def zip[U: ClassTag](other: RDD[U]): RDD[(T, U)] = withScope {
zipPartitions(other, preservesPartitioning = false) { (thisIter, otherIter) =>
new Iterator[(T, U)] {
def hasNext: Boolean = (thisIter.hasNext, otherIter.hasNext) match {
case (true, true) => true
case (false, false) => false
case _ => throw new SparkException("Can only zip RDDs with " +
"same number of elements in each partition")
}
def next(): (T, U) = (thisIter.next(), otherIter.next())
}
}
}
/**
* Zip this RDD's partitions with one (or more) RDD(s) and return a new RDD by
* applying a function to the zipped partitions. Assumes that all the RDDs have the
* *same number of partitions*, but does *not* require them to have the same number
* of elements in each partition.
*/
def zipPartitions[B: ClassTag, V: ClassTag]
(rdd2: RDD[B], preservesPartitioning: Boolean)
(f: (Iterator[T], Iterator[B]) => Iterator[V]): RDD[V] = withScope {
new ZippedPartitionsRDD2(sc, sc.clean(f), this, rdd2, preservesPartitioning)
}
def zipPartitions[B: ClassTag, V: ClassTag]
(rdd2: RDD[B])
(f: (Iterator[T], Iterator[B]) => Iterator[V]): RDD[V] = withScope {
zipPartitions(rdd2, preservesPartitioning = false)(f)
}
def zipPartitions[B: ClassTag, C: ClassTag, V: ClassTag]
(rdd2: RDD[B], rdd3: RDD[C], preservesPartitioning: Boolean)
(f: (Iterator[T], Iterator[B], Iterator[C]) => Iterator[V]): RDD[V] = withScope {
new ZippedPartitionsRDD3(sc, sc.clean(f), this, rdd2, rdd3, preservesPartitioning)
}
def zipPartitions[B: ClassTag, C: ClassTag, V: ClassTag]
(rdd2: RDD[B], rdd3: RDD[C])
(f: (Iterator[T], Iterator[B], Iterator[C]) => Iterator[V]): RDD[V] = withScope {
zipPartitions(rdd2, rdd3, preservesPartitioning = false)(f)
}
def zipPartitions[B: ClassTag, C: ClassTag, D: ClassTag, V: ClassTag]
(rdd2: RDD[B], rdd3: RDD[C], rdd4: RDD[D], preservesPartitioning: Boolean)
(f: (Iterator[T], Iterator[B], Iterator[C], Iterator[D]) => Iterator[V]): RDD[V] = withScope {
new ZippedPartitionsRDD4(sc, sc.clean(f), this, rdd2, rdd3, rdd4, preservesPartitioning)
}
def zipPartitions[B: ClassTag, C: ClassTag, D: ClassTag, V: ClassTag]
(rdd2: RDD[B], rdd3: RDD[C], rdd4: RDD[D])
(f: (Iterator[T], Iterator[B], Iterator[C], Iterator[D]) => Iterator[V]): RDD[V] = withScope {
zipPartitions(rdd2, rdd3, rdd4, preservesPartitioning = false)(f)
}
// Actions (launch a job to return a value to the user program)
/**
* Applies a function f to all elements of this RDD.
*/
def foreach(f: T => Unit): Unit = withScope {
val cleanF = sc.clean(f)
sc.runJob(this, (iter: Iterator[T]) => iter.foreach(cleanF))
}
/**
* Applies a function f to each partition of this RDD.
*/
def foreachPartition(f: Iterator[T] => Unit): Unit = withScope {
val cleanF = sc.clean(f)
sc.runJob(this, (iter: Iterator[T]) => cleanF(iter))
}
/**
* Return an array that contains all of the elements in this RDD.
*
* @note This method should only be used if the resulting array is expected to be small, as
* all the data is loaded into the driver's memory.
*/
def collect(): Array[T] = withScope {
val results = sc.runJob(this, (iter: Iterator[T]) => iter.toArray)
Array.concat(results: _*)
}
/**
* Return an iterator that contains all of the elements in this RDD.
*
* The iterator will consume as much memory as the largest partition in this RDD.
*
   * @note This results in multiple Spark jobs, and if the input RDD is the result
   * of a wide transformation (e.g. a join with different partitioners), it should be
   * cached first to avoid recomputing it.
*/
def toLocalIterator: Iterator[T] = withScope {
def collectPartition(p: Int): Array[T] = {
sc.runJob(this, (iter: Iterator[T]) => iter.toArray, Seq(p)).head
}
(0 until partitions.length).iterator.flatMap(i => collectPartition(i))
}
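  /* A minimal sketch contrasting `collect` and `toLocalIterator`, assuming a live
   * SparkContext `sc`:
   * {{{
   *   val rdd = sc.parallelize(1 to 1000000, 100).cache()
   *   val all = rdd.collect()          // pulls every partition into driver memory at once
   *   val local = rdd.toLocalIterator  // pulls one partition at a time, one job per partition
   *   local.take(5).toArray            // Array(1, 2, 3, 4, 5)
   * }}}
   */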
/**
* Return an RDD that contains all matching values by applying `f`.
*/
def collect[U: ClassTag](f: PartialFunction[T, U]): RDD[U] = withScope {
val cleanF = sc.clean(f)
filter(cleanF.isDefinedAt).map(cleanF)
}
/**
* Return an RDD with the elements from `this` that are not in `other`.
*
   * Uses `this` RDD's partitioner/partition size, because even if `other` is huge, the resulting
   * RDD will be no larger than this one.
*/
def subtract(other: RDD[T]): RDD[T] = withScope {
subtract(other, partitioner.getOrElse(new HashPartitioner(partitions.length)))
}
/**
* Return an RDD with the elements from `this` that are not in `other`.
*/
def subtract(other: RDD[T], numPartitions: Int): RDD[T] = withScope {
subtract(other, new HashPartitioner(numPartitions))
}
/**
* Return an RDD with the elements from `this` that are not in `other`.
*/
def subtract(
other: RDD[T],
p: Partitioner)(implicit ord: Ordering[T] = null): RDD[T] = withScope {
if (partitioner == Some(p)) {
// Our partitioner knows how to handle T (which, since we have a partitioner, is
// really (K, V)) so make a new Partitioner that will de-tuple our fake tuples
val p2 = new Partitioner() {
override def numPartitions: Int = p.numPartitions
override def getPartition(k: Any): Int = p.getPartition(k.asInstanceOf[(Any, _)]._1)
}
// Unfortunately, since we're making a new p2, we'll get ShuffleDependencies
// anyway, and when calling .keys, will not have a partitioner set, even though
// the SubtractedRDD will, thanks to p2's de-tupled partitioning, already be
// partitioned by the right/real keys (e.g. p).
this.map(x => (x, null)).subtractByKey(other.map((_, null)), p2).keys
} else {
this.map(x => (x, null)).subtractByKey(other.map((_, null)), p).keys
}
}
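  /* A minimal usage sketch of `subtract`, assuming a live SparkContext `sc`:
   * {{{
   *   val a = sc.parallelize(1 to 10)
   *   val b = sc.parallelize(5 to 15)
   *   a.subtract(b).collect().sorted   // Array(1, 2, 3, 4)
   * }}}
   */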
/**
* Reduces the elements of this RDD using the specified commutative and
* associative binary operator.
*/
def reduce(f: (T, T) => T): T = withScope {
val cleanF = sc.clean(f)
val reducePartition: Iterator[T] => Option[T] = iter => {
if (iter.hasNext) {
Some(iter.reduceLeft(cleanF))
} else {
None
}
}
var jobResult: Option[T] = None
val mergeResult = (index: Int, taskResult: Option[T]) => {
if (taskResult.isDefined) {
jobResult = jobResult match {
case Some(value) => Some(f(value, taskResult.get))
case None => taskResult
}
}
}
sc.runJob(this, reducePartition, mergeResult)
// Get the final result out of our Option, or throw an exception if the RDD was empty
jobResult.getOrElse(throw new UnsupportedOperationException("empty collection"))
}
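  /* A minimal usage sketch of `reduce`, assuming a live SparkContext `sc`; the operator must be
   * commutative and associative because partition results are merged in no fixed order:
   * {{{
   *   sc.parallelize(1 to 100).reduce(_ + _)   // 5050
   * }}}
   */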
/**
* Reduces the elements of this RDD in a multi-level tree pattern.
*
* @param depth suggested depth of the tree (default: 2)
* @see [[org.apache.spark.rdd.RDD#reduce]]
*/
def treeReduce(f: (T, T) => T, depth: Int = 2): T = withScope {
require(depth >= 1, s"Depth must be greater than or equal to 1 but got $depth.")
val cleanF = context.clean(f)
val reducePartition: Iterator[T] => Option[T] = iter => {
if (iter.hasNext) {
Some(iter.reduceLeft(cleanF))
} else {
None
}
}
val partiallyReduced = mapPartitions(it => Iterator(reducePartition(it)))
val op: (Option[T], Option[T]) => Option[T] = (c, x) => {
if (c.isDefined && x.isDefined) {
Some(cleanF(c.get, x.get))
} else if (c.isDefined) {
c
} else if (x.isDefined) {
x
} else {
None
}
}
partiallyReduced.treeAggregate(Option.empty[T])(op, op, depth)
.getOrElse(throw new UnsupportedOperationException("empty collection"))
}
/**
* Aggregate the elements of each partition, and then the results for all the partitions, using a
* given associative function and a neutral "zero value". The function
* op(t1, t2) is allowed to modify t1 and return it as its result value to avoid object
* allocation; however, it should not modify t2.
*
* This behaves somewhat differently from fold operations implemented for non-distributed
* collections in functional languages like Scala. This fold operation may be applied to
* partitions individually, and then fold those results into the final result, rather than
* apply the fold to each element sequentially in some defined ordering. For functions
* that are not commutative, the result may differ from that of a fold applied to a
* non-distributed collection.
*
* @param zeroValue the initial value for the accumulated result of each partition for the `op`
* operator, and also the initial value for the combine results from different
* partitions for the `op` operator - this will typically be the neutral
* element (e.g. `Nil` for list concatenation or `0` for summation)
* @param op an operator used to both accumulate results within a partition and combine results
* from different partitions
*/
def fold(zeroValue: T)(op: (T, T) => T): T = withScope {
// Clone the zero value since we will also be serializing it as part of tasks
var jobResult = Utils.clone(zeroValue, sc.env.closureSerializer.newInstance())
val cleanOp = sc.clean(op)
val foldPartition = (iter: Iterator[T]) => iter.fold(zeroValue)(cleanOp)
val mergeResult = (index: Int, taskResult: T) => jobResult = op(jobResult, taskResult)
sc.runJob(this, foldPartition, mergeResult)
jobResult
}
/**
* Aggregate the elements of each partition, and then the results for all the partitions, using
* given combine functions and a neutral "zero value". This function can return a different result
* type, U, than the type of this RDD, T. Thus, we need one operation for merging a T into an U
* and one operation for merging two U's, as in scala.TraversableOnce. Both of these functions are
* allowed to modify and return their first argument instead of creating a new U to avoid memory
* allocation.
*
* @param zeroValue the initial value for the accumulated result of each partition for the
* `seqOp` operator, and also the initial value for the combine results from
* different partitions for the `combOp` operator - this will typically be the
* neutral element (e.g. `Nil` for list concatenation or `0` for summation)
* @param seqOp an operator used to accumulate results within a partition
* @param combOp an associative operator used to combine results from different partitions
*/
def aggregate[U: ClassTag](zeroValue: U)(seqOp: (U, T) => U, combOp: (U, U) => U): U = withScope {
// Clone the zero value since we will also be serializing it as part of tasks
var jobResult = Utils.clone(zeroValue, sc.env.serializer.newInstance())
val cleanSeqOp = sc.clean(seqOp)
val cleanCombOp = sc.clean(combOp)
val aggregatePartition = (it: Iterator[T]) => it.aggregate(zeroValue)(cleanSeqOp, cleanCombOp)
val mergeResult = (index: Int, taskResult: U) => jobResult = combOp(jobResult, taskResult)
sc.runJob(this, aggregatePartition, mergeResult)
jobResult
}
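  /* A minimal usage sketch of `aggregate`, computing a (sum, count) pair in a single pass to
   * derive an average, assuming a live SparkContext `sc`:
   * {{{
   *   val (sum, count) = sc.parallelize(1 to 100).aggregate((0L, 0L))(
   *     (acc, x) => (acc._1 + x, acc._2 + 1),   // fold one element into a partition's accumulator
   *     (a, b) => (a._1 + b._1, a._2 + b._2))   // merge accumulators from different partitions
   *   sum.toDouble / count                      // 50.5
   * }}}
   */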
/**
* Aggregates the elements of this RDD in a multi-level tree pattern.
*
* @param depth suggested depth of the tree (default: 2)
* @see [[org.apache.spark.rdd.RDD#aggregate]]
*/
def treeAggregate[U: ClassTag](zeroValue: U)(
seqOp: (U, T) => U,
combOp: (U, U) => U,
depth: Int = 2): U = withScope {
require(depth >= 1, s"Depth must be greater than or equal to 1 but got $depth.")
if (partitions.length == 0) {
Utils.clone(zeroValue, context.env.closureSerializer.newInstance())
} else {
val cleanSeqOp = context.clean(seqOp)
val cleanCombOp = context.clean(combOp)
val aggregatePartition =
(it: Iterator[T]) => it.aggregate(zeroValue)(cleanSeqOp, cleanCombOp)
var partiallyAggregated = mapPartitions(it => Iterator(aggregatePartition(it)))
var numPartitions = partiallyAggregated.partitions.length
val scale = math.max(math.ceil(math.pow(numPartitions, 1.0 / depth)).toInt, 2)
// If creating an extra level doesn't help reduce
// the wall-clock time, we stop tree aggregation.
// Don't trigger TreeAggregation when it doesn't save wall-clock time
while (numPartitions > scale + math.ceil(numPartitions.toDouble / scale)) {
numPartitions /= scale
val curNumPartitions = numPartitions
partiallyAggregated = partiallyAggregated.mapPartitionsWithIndex {
(i, iter) => iter.map((i % curNumPartitions, _))
}.reduceByKey(new HashPartitioner(curNumPartitions), cleanCombOp).values
}
partiallyAggregated.reduce(cleanCombOp)
}
}
/**
* Return the number of elements in the RDD.
*/
def count(): Long = sc.runJob(this, Utils.getIteratorSize _).sum
/**
* Approximate version of count() that returns a potentially incomplete result
* within a timeout, even if not all tasks have finished.
*
* The confidence is the probability that the error bounds of the result will
* contain the true value. That is, if countApprox were called repeatedly
* with confidence 0.9, we would expect 90% of the results to contain the
* true count. The confidence must be in the range [0,1] or an exception will
* be thrown.
*
* @param timeout maximum time to wait for the job, in milliseconds
* @param confidence the desired statistical confidence in the result
* @return a potentially incomplete result, with error bounds
*/
def countApprox(
timeout: Long,
confidence: Double = 0.95): PartialResult[BoundedDouble] = withScope {
require(0.0 <= confidence && confidence <= 1.0, s"confidence ($confidence) must be in [0,1]")
val countElements: (TaskContext, Iterator[T]) => Long = { (ctx, iter) =>
var result = 0L
while (iter.hasNext) {
result += 1L
iter.next()
}
result
}
val evaluator = new CountEvaluator(partitions.length, confidence)
sc.runApproximateJob(this, countElements, evaluator, timeout)
}
/**
* Return the count of each unique value in this RDD as a local map of (value, count) pairs.
*
* @note This method should only be used if the resulting map is expected to be small, as
* the whole thing is loaded into the driver's memory.
* To handle very large results, consider using
*
* {{{
* rdd.map(x => (x, 1L)).reduceByKey(_ + _)
* }}}
*
   * which returns an RDD[(T, Long)] instead of a map.
*/
def countByValue()(implicit ord: Ordering[T] = null): Map[T, Long] = withScope {
map(value => (value, null)).countByKey()
}
/**
* Approximate version of countByValue().
*
* @param timeout maximum time to wait for the job, in milliseconds
* @param confidence the desired statistical confidence in the result
* @return a potentially incomplete result, with error bounds
*/
def countByValueApprox(timeout: Long, confidence: Double = 0.95)
(implicit ord: Ordering[T] = null)
: PartialResult[Map[T, BoundedDouble]] = withScope {
require(0.0 <= confidence && confidence <= 1.0, s"confidence ($confidence) must be in [0,1]")
if (elementClassTag.runtimeClass.isArray) {
throw new SparkException("countByValueApprox() does not support arrays")
}
val countPartition: (TaskContext, Iterator[T]) => OpenHashMap[T, Long] = { (ctx, iter) =>
val map = new OpenHashMap[T, Long]
iter.foreach {
t => map.changeValue(t, 1L, _ + 1L)
}
map
}
val evaluator = new GroupedCountEvaluator[T](partitions.length, confidence)
sc.runApproximateJob(this, countPartition, evaluator, timeout)
}
/**
* Return approximate number of distinct elements in the RDD.
*
* The algorithm used is based on streamlib's implementation of "HyperLogLog in Practice:
* Algorithmic Engineering of a State of The Art Cardinality Estimation Algorithm", available
* <a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
*
   * The relative accuracy is approximately `1.054 / sqrt(2^p)`. Setting a nonzero `sp` (greater
   * than `p`) triggers a sparse representation of registers, which may reduce the memory
* consumption and increase accuracy when the cardinality is small.
*
* @param p The precision value for the normal set.
* `p` must be a value between 4 and `sp` if `sp` is not zero (32 max).
* @param sp The precision value for the sparse set, between 0 and 32.
* If `sp` equals 0, the sparse representation is skipped.
*/
def countApproxDistinct(p: Int, sp: Int): Long = withScope {
require(p >= 4, s"p ($p) must be >= 4")
require(sp <= 32, s"sp ($sp) must be <= 32")
require(sp == 0 || p <= sp, s"p ($p) cannot be greater than sp ($sp)")
val zeroCounter = new HyperLogLogPlus(p, sp)
aggregate(zeroCounter)(
(hll: HyperLogLogPlus, v: T) => {
hll.offer(v)
hll
},
(h1: HyperLogLogPlus, h2: HyperLogLogPlus) => {
h1.addAll(h2)
h1
}).cardinality()
}
/**
* Return approximate number of distinct elements in the RDD.
*
* The algorithm used is based on streamlib's implementation of "HyperLogLog in Practice:
* Algorithmic Engineering of a State of The Art Cardinality Estimation Algorithm", available
* <a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
*
* @param relativeSD Relative accuracy. Smaller values create counters that require more space.
* It must be greater than 0.000017.
*/
def countApproxDistinct(relativeSD: Double = 0.05): Long = withScope {
require(relativeSD > 0.000017, s"accuracy ($relativeSD) must be greater than 0.000017")
val p = math.ceil(2.0 * math.log(1.054 / relativeSD) / math.log(2)).toInt
countApproxDistinct(if (p < 4) 4 else p, 0)
}
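  /* A minimal usage sketch of `countApproxDistinct`, assuming a live SparkContext `sc`; the
   * result is an estimate whose error shrinks as `relativeSD` does, at the cost of more memory:
   * {{{
   *   val rdd = sc.parallelize(1 to 100000).map(_ % 1000)
   *   rdd.countApproxDistinct(0.01)   // roughly 1000
   * }}}
   */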
/**
* Zips this RDD with its element indices. The ordering is first based on the partition index
* and then the ordering of items within each partition. So the first item in the first
* partition gets index 0, and the last item in the last partition receives the largest index.
*
* This is similar to Scala's zipWithIndex but it uses Long instead of Int as the index type.
   * This method needs to trigger a Spark job when this RDD contains more than one partition.
*
* @note Some RDDs, such as those returned by groupBy(), do not guarantee order of
* elements in a partition. The index assigned to each element is therefore not guaranteed,
* and may even change if the RDD is reevaluated. If a fixed ordering is required to guarantee
* the same index assignments, you should sort the RDD with sortByKey() or save it to a file.
*/
def zipWithIndex(): RDD[(T, Long)] = withScope {
new ZippedWithIndexRDD(this)
}
/**
* Zips this RDD with generated unique Long ids. Items in the kth partition will get ids k, n+k,
* 2*n+k, ..., where n is the number of partitions. So there may exist gaps, but this method
* won't trigger a spark job, which is different from [[org.apache.spark.rdd.RDD#zipWithIndex]].
*
* @note Some RDDs, such as those returned by groupBy(), do not guarantee order of
* elements in a partition. The unique ID assigned to each element is therefore not guaranteed,
* and may even change if the RDD is reevaluated. If a fixed ordering is required to guarantee
* the same index assignments, you should sort the RDD with sortByKey() or save it to a file.
*/
def zipWithUniqueId(): RDD[(T, Long)] = withScope {
val n = this.partitions.length.toLong
this.mapPartitionsWithIndex { case (k, iter) =>
Utils.getIteratorZipWithIndex(iter, 0L).map { case (item, i) =>
(item, i * n + k)
}
}
}
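  /* A minimal sketch contrasting `zipWithIndex` (consecutive ids, triggers a job) and
   * `zipWithUniqueId` (ids with gaps, no job), assuming a live SparkContext `sc`:
   * {{{
   *   val rdd = sc.parallelize(Seq("a", "b", "c", "d"), 2)  // partitions: [a, b] and [c, d]
   *   rdd.zipWithIndex().collect()     // Array((a,0), (b,1), (c,2), (d,3))
   *   rdd.zipWithUniqueId().collect()  // Array((a,0), (b,2), (c,1), (d,3))
   * }}}
   */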
/**
* Take the first num elements of the RDD. It works by first scanning one partition, and use the
* results from that partition to estimate the number of additional partitions needed to satisfy
* the limit.
*
* @note This method should only be used if the resulting array is expected to be small, as
* all the data is loaded into the driver's memory.
*
* @note Due to complications in the internal implementation, this method will raise
* an exception if called on an RDD of `Nothing` or `Null`.
*/
def take(num: Int): Array[T] = withScope {
val scaleUpFactor = Math.max(conf.getInt("spark.rdd.limit.scaleUpFactor", 4), 2)
if (num == 0) {
new Array[T](0)
} else {
val buf = new ArrayBuffer[T]
val totalParts = this.partitions.length
var partsScanned = 0
while (buf.size < num && partsScanned < totalParts) {
// The number of partitions to try in this iteration. It is ok for this number to be
// greater than totalParts because we actually cap it at totalParts in runJob.
var numPartsToTry = 1L
if (partsScanned > 0) {
// If we didn't find any rows after the previous iteration, quadruple and retry.
// Otherwise, interpolate the number of partitions we need to try, but overestimate
// it by 50%. We also cap the estimation in the end.
if (buf.isEmpty) {
numPartsToTry = partsScanned * scaleUpFactor
} else {
// the left side of max is >=1 whenever partsScanned >= 2
numPartsToTry = Math.max((1.5 * num * partsScanned / buf.size).toInt - partsScanned, 1)
numPartsToTry = Math.min(numPartsToTry, partsScanned * scaleUpFactor)
}
}
val left = num - buf.size
val p = partsScanned.until(math.min(partsScanned + numPartsToTry, totalParts).toInt)
val res = sc.runJob(this, (it: Iterator[T]) => it.take(left).toArray, p)
res.foreach(buf ++= _.take(num - buf.size))
partsScanned += p.size
}
buf.toArray
}
}
/**
* Return the first element in this RDD.
*/
def first(): T = withScope {
take(1) match {
case Array(t) => t
case _ => throw new UnsupportedOperationException("empty collection")
}
}
/**
* Returns the top k (largest) elements from this RDD as defined by the specified
* implicit Ordering[T] and maintains the ordering. This does the opposite of
* [[takeOrdered]]. For example:
* {{{
* sc.parallelize(Seq(10, 4, 2, 12, 3)).top(1)
* // returns Array(12)
*
* sc.parallelize(Seq(2, 3, 4, 5, 6)).top(2)
* // returns Array(6, 5)
* }}}
*
* @note This method should only be used if the resulting array is expected to be small, as
* all the data is loaded into the driver's memory.
*
* @param num k, the number of top elements to return
* @param ord the implicit ordering for T
* @return an array of top elements
*/
def top(num: Int)(implicit ord: Ordering[T]): Array[T] = withScope {
takeOrdered(num)(ord.reverse)
}
/**
* Returns the first k (smallest) elements from this RDD as defined by the specified
* implicit Ordering[T] and maintains the ordering. This does the opposite of [[top]].
* For example:
* {{{
* sc.parallelize(Seq(10, 4, 2, 12, 3)).takeOrdered(1)
* // returns Array(2)
*
* sc.parallelize(Seq(2, 3, 4, 5, 6)).takeOrdered(2)
* // returns Array(2, 3)
* }}}
*
* @note This method should only be used if the resulting array is expected to be small, as
* all the data is loaded into the driver's memory.
*
* @param num k, the number of elements to return
* @param ord the implicit ordering for T
* @return an array of top elements
*/
def takeOrdered(num: Int)(implicit ord: Ordering[T]): Array[T] = withScope {
if (num == 0) {
Array.empty
} else {
val mapRDDs = mapPartitions { items =>
// Priority keeps the largest elements, so let's reverse the ordering.
val queue = new BoundedPriorityQueue[T](num)(ord.reverse)
queue ++= util.collection.Utils.takeOrdered(items, num)(ord)
Iterator.single(queue)
}
if (mapRDDs.partitions.length == 0) {
Array.empty
} else {
mapRDDs.reduce { (queue1, queue2) =>
queue1 ++= queue2
queue1
}.toArray.sorted(ord)
}
}
}
/**
* Returns the max of this RDD as defined by the implicit Ordering[T].
* @return the maximum element of the RDD
* */
def max()(implicit ord: Ordering[T]): T = withScope {
this.reduce(ord.max)
}
/**
* Returns the min of this RDD as defined by the implicit Ordering[T].
* @return the minimum element of the RDD
* */
def min()(implicit ord: Ordering[T]): T = withScope {
this.reduce(ord.min)
}
/**
* @note Due to complications in the internal implementation, this method will raise an
   * exception if called on an RDD of `Nothing` or `Null`. This may come up in practice
* because, for example, the type of `parallelize(Seq())` is `RDD[Nothing]`.
* (`parallelize(Seq())` should be avoided anyway in favor of `parallelize(Seq[T]())`.)
* @return true if and only if the RDD contains no elements at all. Note that an RDD
* may be empty even when it has at least 1 partition.
*/
def isEmpty(): Boolean = withScope {
partitions.length == 0 || take(1).length == 0
}
/**
* Save this RDD as a text file, using string representations of elements.
*/
def saveAsTextFile(path: String): Unit = withScope {
// https://issues.apache.org/jira/browse/SPARK-2075
//
// NullWritable is a `Comparable` in Hadoop 1.+, so the compiler cannot find an implicit
// Ordering for it and will use the default `null`. However, it's a `Comparable[NullWritable]`
// in Hadoop 2.+, so the compiler will call the implicit `Ordering.ordered` method to create an
// Ordering for `NullWritable`. That's why the compiler will generate different anonymous
// classes for `saveAsTextFile` in Hadoop 1.+ and Hadoop 2.+.
//
// Therefore, here we provide an explicit Ordering `null` to make sure the compiler generate
// same bytecodes for `saveAsTextFile`.
val nullWritableClassTag = implicitly[ClassTag[NullWritable]]
val textClassTag = implicitly[ClassTag[Text]]
val r = this.mapPartitions { iter =>
val text = new Text()
iter.map { x =>
text.set(x.toString)
(NullWritable.get(), text)
}
}
RDD.rddToPairRDDFunctions(r)(nullWritableClassTag, textClassTag, null)
.saveAsHadoopFile[TextOutputFormat[NullWritable, Text]](path)
}
/**
* Save this RDD as a compressed text file, using string representations of elements.
*/
def saveAsTextFile(path: String, codec: Class[_ <: CompressionCodec]): Unit = withScope {
// https://issues.apache.org/jira/browse/SPARK-2075
val nullWritableClassTag = implicitly[ClassTag[NullWritable]]
val textClassTag = implicitly[ClassTag[Text]]
val r = this.mapPartitions { iter =>
val text = new Text()
iter.map { x =>
text.set(x.toString)
(NullWritable.get(), text)
}
}
RDD.rddToPairRDDFunctions(r)(nullWritableClassTag, textClassTag, null)
.saveAsHadoopFile[TextOutputFormat[NullWritable, Text]](path, codec)
}
/**
* Save this RDD as a SequenceFile of serialized objects.
*/
def saveAsObjectFile(path: String): Unit = withScope {
this.mapPartitions(iter => iter.grouped(10).map(_.toArray))
.map(x => (NullWritable.get(), new BytesWritable(Utils.serialize(x))))
.saveAsSequenceFile(path)
}
/**
* Creates tuples of the elements in this RDD by applying `f`.
*/
def keyBy[K](f: T => K): RDD[(K, T)] = withScope {
val cleanedF = sc.clean(f)
map(x => (cleanedF(x), x))
}
/** A private method for tests, to look at the contents of each partition */
private[spark] def collectPartitions(): Array[Array[T]] = withScope {
sc.runJob(this, (iter: Iterator[T]) => iter.toArray)
}
/**
* Mark this RDD for checkpointing. It will be saved to a file inside the checkpoint
* directory set with `SparkContext#setCheckpointDir` and all references to its parent
* RDDs will be removed. This function must be called before any job has been
* executed on this RDD. It is strongly recommended that this RDD is persisted in
* memory, otherwise saving it on a file will require recomputation.
*/
def checkpoint(): Unit = RDDCheckpointData.synchronized {
// NOTE: we use a global lock here due to complexities downstream with ensuring
// children RDD partitions point to the correct parent partitions. In the future
// we should revisit this consideration.
if (context.checkpointDir.isEmpty) {
throw new SparkException("Checkpoint directory has not been set in the SparkContext")
} else if (checkpointData.isEmpty) {
checkpointData = Some(new ReliableRDDCheckpointData(this))
}
}
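  /* A minimal usage sketch of reliable checkpointing, assuming a live SparkContext `sc` and a
   * writable checkpoint directory (the path below is a placeholder):
   * {{{
   *   sc.setCheckpointDir("/tmp/spark-checkpoints")
   *   val rdd = sc.parallelize(1 to 100).map(_ * 2).cache()  // persist first to avoid recompute
   *   rdd.checkpoint()    // must be called before any job has run on this RDD
   *   rdd.count()         // runs the job and writes the checkpoint
   *   rdd.isCheckpointed  // true
   * }}}
   */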
/**
* Mark this RDD for local checkpointing using Spark's existing caching layer.
*
* This method is for users who wish to truncate RDD lineages while skipping the expensive
* step of replicating the materialized data in a reliable distributed file system. This is
* useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).
*
* Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
* data is written to ephemeral local storage in the executors instead of to a reliable,
* fault-tolerant storage. The effect is that if an executor fails during the computation,
* the checkpointed data may no longer be accessible, causing an irrecoverable job failure.
*
* This is NOT safe to use with dynamic allocation, which removes executors along
* with their cached blocks. If you must use both features, you are advised to set
* `spark.dynamicAllocation.cachedExecutorIdleTimeout` to a high value.
*
* The checkpoint directory set through `SparkContext#setCheckpointDir` is not used.
*/
def localCheckpoint(): this.type = RDDCheckpointData.synchronized {
if (conf.getBoolean("spark.dynamicAllocation.enabled", false) &&
conf.contains("spark.dynamicAllocation.cachedExecutorIdleTimeout")) {
logWarning("Local checkpointing is NOT safe to use with dynamic allocation, " +
"which removes executors along with their cached blocks. If you must use both " +
"features, you are advised to set `spark.dynamicAllocation.cachedExecutorIdleTimeout` " +
"to a high value. E.g. If you plan to use the RDD for 1 hour, set the timeout to " +
"at least 1 hour.")
}
// Note: At this point we do not actually know whether the user will call persist() on
// this RDD later, so we must explicitly call it here ourselves to ensure the cached
// blocks are registered for cleanup later in the SparkContext.
//
// If, however, the user has already called persist() on this RDD, then we must adapt
// the storage level he/she specified to one that is appropriate for local checkpointing
// (i.e. uses disk) to guarantee correctness.
if (storageLevel == StorageLevel.NONE) {
persist(LocalRDDCheckpointData.DEFAULT_STORAGE_LEVEL)
} else {
persist(LocalRDDCheckpointData.transformStorageLevel(storageLevel), allowOverride = true)
}
// If this RDD is already checkpointed and materialized, its lineage is already truncated.
// We must not override our `checkpointData` in this case because it is needed to recover
// the checkpointed data. If it is overridden, next time materializing on this RDD will
// cause error.
if (isCheckpointedAndMaterialized) {
logWarning("Not marking RDD for local checkpoint because it was already " +
"checkpointed and materialized")
} else {
// Lineage is not truncated yet, so just override any existing checkpoint data with ours
checkpointData match {
case Some(_: ReliableRDDCheckpointData[_]) => logWarning(
"RDD was already marked for reliable checkpointing: overriding with local checkpoint.")
case _ =>
}
checkpointData = Some(new LocalRDDCheckpointData(this))
}
this
}
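// Illustrative usage sketch (not part of the original source), assuming a SparkContext `sc` is in scope:
//
//   val rdd = sc.parallelize(1 to 1000).map(_ + 1)
//   rdd.localCheckpoint()   // persists with a disk-capable level if nothing was persisted before
//   rdd.count()             // materializes the RDD; its lineage is truncated from then on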
/**
* Return whether this RDD is checkpointed and materialized, either reliably or locally.
*/
def isCheckpointed: Boolean = checkpointData.exists(_.isCheckpointed)
/**
* Return whether this RDD is checkpointed and materialized, either reliably or locally.
* This is introduced as an alias for `isCheckpointed` to clarify the semantics of the
* return value. Exposed for testing.
*/
private[spark] def isCheckpointedAndMaterialized: Boolean = isCheckpointed
/**
* Return whether this RDD is marked for local checkpointing.
* Exposed for testing.
*/
private[rdd] def isLocallyCheckpointed: Boolean = {
checkpointData match {
case Some(_: LocalRDDCheckpointData[T]) => true
case _ => false
}
}
/**
* Gets the name of the directory to which this RDD was checkpointed.
* This is not defined if the RDD is checkpointed locally.
*/
def getCheckpointFile: Option[String] = {
checkpointData match {
case Some(reliable: ReliableRDDCheckpointData[T]) => reliable.getCheckpointDir
case _ => None
}
}
// =======================================================================
// Other internal methods and fields
// =======================================================================
private var storageLevel: StorageLevel = StorageLevel.NONE
/** User code that created this RDD (e.g. `textFile`, `parallelize`). */
@transient private[spark] val creationSite = sc.getCallSite()
/**
* The scope associated with the operation that created this RDD.
*
* This is more flexible than the call site and can be defined hierarchically. For more
* detail, see the documentation of {{RDDOperationScope}}. This scope is not defined if the
* user instantiates this RDD himself without using any Spark operations.
*/
@transient private[spark] val scope: Option[RDDOperationScope] = {
Option(sc.getLocalProperty(SparkContext.RDD_SCOPE_KEY)).map(RDDOperationScope.fromJson)
}
private[spark] def getCreationSite: String = Option(creationSite).map(_.shortForm).getOrElse("")
private[spark] def elementClassTag: ClassTag[T] = classTag[T]
private[spark] var checkpointData: Option[RDDCheckpointData[T]] = None
// Whether to checkpoint all ancestor RDDs that are marked for checkpointing. By default,
// we stop as soon as we find the first such RDD, an optimization that allows us to write
// less data but is not safe for all workloads. E.g. in streaming we may checkpoint both
// an RDD and its parent in every batch, in which case the parent may never be checkpointed
// and its lineage never truncated, leading to OOMs in the long run (SPARK-6847).
private val checkpointAllMarkedAncestors =
Option(sc.getLocalProperty(RDD.CHECKPOINT_ALL_MARKED_ANCESTORS))
.map(_.toBoolean).getOrElse(false)
/** Returns the first parent RDD */
protected[spark] def firstParent[U: ClassTag]: RDD[U] = {
dependencies.head.rdd.asInstanceOf[RDD[U]]
}
/** Returns the jth parent RDD: e.g. rdd.parent[T](0) is equivalent to rdd.firstParent[T] */
protected[spark] def parent[U: ClassTag](j: Int) = {
dependencies(j).rdd.asInstanceOf[RDD[U]]
}
/** The [[org.apache.spark.SparkContext]] that this RDD was created on. */
def context: SparkContext = sc
/**
* Private API for changing an RDD's ClassTag.
* Used for internal Java-Scala API compatibility.
*/
private[spark] def retag(cls: Class[T]): RDD[T] = {
val classTag: ClassTag[T] = ClassTag.apply(cls)
this.retag(classTag)
}
/**
* Private API for changing an RDD's ClassTag.
* Used for internal Java-Scala API compatibility.
*/
private[spark] def retag(implicit classTag: ClassTag[T]): RDD[T] = {
this.mapPartitions(identity, preservesPartitioning = true)(classTag)
}
// Avoid handling doCheckpoint multiple times to prevent excessive recursion
@transient private var doCheckpointCalled = false
/**
* Performs the checkpointing of this RDD by saving its contents. It is called after a job using this RDD
* has completed (therefore the RDD has been materialized and potentially stored in memory).
* doCheckpoint() is called recursively on the parent RDDs.
*/
private[spark] def doCheckpoint(): Unit = {
RDDOperationScope.withScope(sc, "checkpoint", allowNesting = false, ignoreParent = true) {
if (!doCheckpointCalled) {
doCheckpointCalled = true
if (checkpointData.isDefined) {
if (checkpointAllMarkedAncestors) {
// TODO We can collect all the RDDs that need to be checkpointed, and then checkpoint
// them in parallel.
// Checkpoint parents first because our lineage will be truncated after we
// checkpoint ourselves
dependencies.foreach(_.rdd.doCheckpoint())
}
checkpointData.get.checkpoint()
} else {
dependencies.foreach(_.rdd.doCheckpoint())
}
}
}
}
/**
* Changes the dependencies of this RDD from its original parents to the new RDD
* created from the checkpoint file, and forgets its old dependencies and partitions.
*/
private[spark] def markCheckpointed(): Unit = {
clearDependencies()
partitions_ = null
deps = null // Forget the constructor argument for dependencies too
}
/**
* Clears the dependencies of this RDD. This method must ensure that all references
* to the original parent RDDs are removed to enable the parent RDDs to be garbage
* collected. Subclasses of RDD may override this method for implementing their own cleaning
* logic. See [[org.apache.spark.rdd.UnionRDD]] for an example.
*/
protected def clearDependencies() {
dependencies_ = null
}
/** A description of this RDD and its recursive dependencies for debugging. */
def toDebugString: String = {
// Get a debug description of an rdd without its children
def debugSelf(rdd: RDD[_]): Seq[String] = {
import Utils.bytesToString
val persistence = if (storageLevel != StorageLevel.NONE) storageLevel.description else ""
val storageInfo = rdd.context.getRDDStorageInfo(_.id == rdd.id).map(info =>
" CachedPartitions: %d; MemorySize: %s; ExternalBlockStoreSize: %s; DiskSize: %s".format(
info.numCachedPartitions, bytesToString(info.memSize),
bytesToString(info.externalBlockStoreSize), bytesToString(info.diskSize)))
s"$rdd [$persistence]" +: storageInfo
}
// Apply a different rule to the last child
def debugChildren(rdd: RDD[_], prefix: String): Seq[String] = {
val len = rdd.dependencies.length
len match {
case 0 => Seq.empty
case 1 =>
val d = rdd.dependencies.head
debugString(d.rdd, prefix, d.isInstanceOf[ShuffleDependency[_, _, _]], true)
case _ =>
val frontDeps = rdd.dependencies.take(len - 1)
val frontDepStrings = frontDeps.flatMap(
d => debugString(d.rdd, prefix, d.isInstanceOf[ShuffleDependency[_, _, _]]))
val lastDep = rdd.dependencies.last
val lastDepStrings =
debugString(lastDep.rdd, prefix, lastDep.isInstanceOf[ShuffleDependency[_, _, _]], true)
(frontDepStrings ++ lastDepStrings)
}
}
// The first RDD in the dependency stack has no parents, so no need for a +-
def firstDebugString(rdd: RDD[_]): Seq[String] = {
val partitionStr = "(" + rdd.partitions.length + ")"
val leftOffset = (partitionStr.length - 1) / 2
val nextPrefix = (" " * leftOffset) + "|" + (" " * (partitionStr.length - leftOffset))
debugSelf(rdd).zipWithIndex.map{
case (desc: String, 0) => s"$partitionStr $desc"
case (desc: String, _) => s"$nextPrefix $desc"
} ++ debugChildren(rdd, nextPrefix)
}
def shuffleDebugString(rdd: RDD[_], prefix: String = "", isLastChild: Boolean): Seq[String] = {
val partitionStr = "(" + rdd.partitions.length + ")"
val leftOffset = (partitionStr.length - 1) / 2
val thisPrefix = prefix.replaceAll("\\|\\s+$", "")
val nextPrefix = (
thisPrefix
+ (if (isLastChild) " " else "| ")
+ (" " * leftOffset) + "|" + (" " * (partitionStr.length - leftOffset)))
debugSelf(rdd).zipWithIndex.map{
case (desc: String, 0) => s"$thisPrefix+-$partitionStr $desc"
case (desc: String, _) => s"$nextPrefix$desc"
} ++ debugChildren(rdd, nextPrefix)
}
def debugString(
rdd: RDD[_],
prefix: String = "",
isShuffle: Boolean = true,
isLastChild: Boolean = false): Seq[String] = {
if (isShuffle) {
shuffleDebugString(rdd, prefix, isLastChild)
} else {
debugSelf(rdd).map(prefix + _) ++ debugChildren(rdd, prefix)
}
}
firstDebugString(this).mkString("\n")
}
override def toString: String = "%s%s[%d] at %s".format(
Option(name).map(_ + " ").getOrElse(""), getClass.getSimpleName, id, getCreationSite)
def toJavaRDD() : JavaRDD[T] = {
new JavaRDD(this)(elementClassTag)
}
}
/**
* Defines implicit functions that provide extra functionalities on RDDs of specific types.
*
* For example, [[RDD.rddToPairRDDFunctions]] converts an RDD into a [[PairRDDFunctions]] for
* key-value-pair RDDs, enabling extra functionality such as [[PairRDDFunctions.reduceByKey]].
*/
object RDD {
private[spark] val CHECKPOINT_ALL_MARKED_ANCESTORS =
"spark.checkpoint.checkpointAllMarkedAncestors"
// The following implicit functions were in SparkContext before 1.3 and users had to
// `import SparkContext._` to enable them. Now we move them here to make the compiler find
// them automatically. However, we still keep the old functions in SparkContext for backward
// compatibility and forward to the following functions directly.
implicit def rddToPairRDDFunctions[K, V](rdd: RDD[(K, V)])
(implicit kt: ClassTag[K], vt: ClassTag[V], ord: Ordering[K] = null): PairRDDFunctions[K, V] = {
new PairRDDFunctions(rdd)
}
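// Illustrative usage sketch (not part of the original source), assuming a SparkContext `sc` is in
// scope. Because this implicit lives in the RDD companion object, no explicit import is needed:
//
//   val counts = sc.parallelize(Seq(("a", 1), ("b", 1), ("a", 1))).reduceByKey(_ + _)
//   // counts contains ("a", 2) and ("b", 1)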
implicit def rddToAsyncRDDActions[T: ClassTag](rdd: RDD[T]): AsyncRDDActions[T] = {
new AsyncRDDActions(rdd)
}
implicit def rddToSequenceFileRDDFunctions[K, V](rdd: RDD[(K, V)])
(implicit kt: ClassTag[K], vt: ClassTag[V],
keyWritableFactory: WritableFactory[K],
valueWritableFactory: WritableFactory[V])
: SequenceFileRDDFunctions[K, V] = {
implicit val keyConverter = keyWritableFactory.convert
implicit val valueConverter = valueWritableFactory.convert
new SequenceFileRDDFunctions(rdd,
keyWritableFactory.writableClass(kt), valueWritableFactory.writableClass(vt))
}
implicit def rddToOrderedRDDFunctions[K : Ordering : ClassTag, V: ClassTag](rdd: RDD[(K, V)])
: OrderedRDDFunctions[K, V, (K, V)] = {
new OrderedRDDFunctions[K, V, (K, V)](rdd)
}
implicit def doubleRDDToDoubleRDDFunctions(rdd: RDD[Double]): DoubleRDDFunctions = {
new DoubleRDDFunctions(rdd)
}
implicit def numericRDDToDoubleRDDFunctions[T](rdd: RDD[T])(implicit num: Numeric[T])
: DoubleRDDFunctions = {
new DoubleRDDFunctions(rdd.map(x => num.toDouble(x)))
}
}
|
Panos-Bletsos/spark-cost-model-optimizer
|
core/src/main/scala/org/apache/spark/rdd/RDD.scala
|
Scala
|
apache-2.0
| 78,062
|
package cbb.cloudphylo
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
object Utils {
def split(content: String): mutable.HashMap[String, String] = {
val results = new mutable.HashMap[String, String]
content.split(">").foreach((p) => {
val lines = p.split("\\r?\\n")
results(seqId(lines(0))) = lines.tail.mkString
})
results
}
private def seqId(tag: String): String = {
val sep = if (tag.contains("\t")) {
"\t"
} else if (tag.contains(" ")) {
" "
} else {
"\\|"
}
tag.split(sep)(0)
}
def longestORF(seq: String): Int = {
Array(longestORF(seq, 0), longestORF(seq, 1), longestORF(seq, 2)).max
}
private def longestORF(seq: String, offset: Int): Int = {
var orfStart = false
var l = 0
val orfls = ListBuffer.empty[Int]
orfls.append(l)
seq.slice(offset, seq.length).sliding(3, 3).foreach( (c) => {
c match {
case "ATG" => orfStart = true
case x@("TAA" | "TAG" | "TGA") => {
if (orfStart) {
orfls.append(l+1)
l = 0
}
orfStart = false
}
case _ =>
}
if (orfStart) l += 1
})
orfls.max*3
}
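// Illustrative usage sketch (not part of the original source); the FASTA-style content below is made up.
//
//   val seqs = Utils.split(">seq1 first\nATGAAATAA\n>seq2\nCCCATGTTTTGA\n")
//   // seqs("seq1") == "ATGAAATAA"; seqs("seq2") == "CCCATGTTTTGA"
//   Utils.longestORF("ATGAAATAA")   // == 9: the ORF spans the ATG...TAA codons of frame 0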
}
|
xingjianxu/cloudphylo
|
src/main/scala/cbb/cloudphylo/Utils.scala
|
Scala
|
apache-2.0
| 1,252
|
package dokutoku.golden_thumb.crop
import cpw.mods.fml.relauncher.Side
import cpw.mods.fml.relauncher.SideOnly
import dokutoku.golden_thumb.lib.WorldType.Nether
import dokutoku.golden_thumb.lib.Rarity.Rare
import dokutoku.golden_thumb.FX.Effects.metalFXEffect
import net.minecraft.world.World
import java.util.Random
class SoulsandCrop(id: Int, germ: Int) extends GoldenCrop(id, germ) {
val worldType = Nether
val rarity = Rare
@SideOnly(Side.CLIENT)
override def randomDisplayTick(world: World, x: Int, y: Int, z: Int, rand: Random) : Unit = {
world.spawnParticle("portal", x + rand.nextFloat(), y + 0.1f, z + rand.nextFloat(),
0.0f, 0.00f, 0.0f);
}
}
|
AtomFusion/GoldenThumb
|
dokutoku/golden_thumb/crop/SoulsandCrop.scala
|
Scala
|
mit
| 693
|
import sbt._
import Keys._
import xerial.sbt.Sonatype.SonatypeKeys
import com.github.retronym.SbtOneJar
object SimpleAuthBuild extends Build {
lazy val buildSettings = Seq(
organization := "fi.pyppe",
version := "1.1-SNAPSHOT",
scalaVersion := "2.11.1",
crossScalaVersions := Seq("2.11.1", "2.10.4"),
crossVersion := CrossVersion.binary,
exportJars := true,
homepage := Some(url("https://github.com/Pyppe/play-simple-auth")),
startYear := Some(2014),
description := "Simple authentication (Facebook, Github, Google, Linkedin, Twitter)"
) ++ Publish.settings
val PlayVersion = "2.3.4"
lazy val dependencies = Seq(
"com.typesafe.play" %% "play" % PlayVersion % "provided",
"com.typesafe.play" %% "play-json" % PlayVersion % "provided",
"com.typesafe.play" %% "play-ws" % PlayVersion % "provided",
// Testing:
"org.specs2" %% "specs2" % "2.3.12" % "test"
)
lazy val webResolvers = Seq(
"Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/"
)
lazy val root = Project(
id = "play-simple-auth",
base = file("."),
settings = buildSettings ++
Seq(libraryDependencies ++= dependencies, resolvers ++= webResolvers) ++
SbtOneJar.oneJarSettings
)
}
object Publish {
lazy val settings = Seq(
publishMavenStyle := true,
publishTo <<= version { (v: String) =>
def nexusUrl(path: String) = s"https://oss.sonatype.org$path"
if (v.trim.endsWith("SNAPSHOT"))
Some("snapshots" at nexusUrl("/content/repositories/snapshots"))
else
Some("releases" at nexusUrl("/service/local/staging/deploy/maven2"))
},
scmInfo := Some(ScmInfo(url("http://github.com/Pyppe/play-simple-auth"), "https://github.com/Pyppe/play-simple-auth.git")),
credentials += Credentials(Path.userHome / ".ivy2" / ".credentials"),
publishArtifact in Test := false,
pomIncludeRepository := { _ => false },
licenses := Seq("The MIT License (MIT)" -> url("https://github.com/Pyppe/play-simple-auth/blob/master/LICENSE")),
pomExtra := (
<developers>
<developer>
<id>pyppe</id>
<name>Pyry-Samuli Lahti</name>
<url>http://www.pyppe.fi/</url>
</developer>
</developers>
)
) ++ xerial.sbt.Sonatype.sonatypeSettings
}
|
Pyppe/play-simple-auth
|
project/SimpleAuthBuild.scala
|
Scala
|
mit
| 2,407
|
import Macros.*
object Test {
def main(args: Array[String]): Unit = {
matches[Int, Int]
matches[1, Int]
matches[Int, 2]
matches[List[Int], List[Int]]
matches[List[Int], List[Double]]
}
}
|
lampepfl/dotty
|
tests/run-macros/quote-type-matcher/quoted_2.scala
|
Scala
|
apache-2.0
| 218
|
package org.soabridge.scala.breeze.resources
import akka.actor.SupervisorStrategy.Resume
import akka.actor._
/**
* Missing documentation.
*
* @author <a href="steffen.krause@soabridge.com">Steffen Krause</a>
* @since 1.0
*/
private[breeze] class ResourceActor extends Actor {
/* Importing all messages declared in companion object for processing */
import ResourceActor.Messages._
/** Supervisor strategy for the subordinate module handlers. */
override def supervisorStrategy: SupervisorStrategy = OneForOneStrategy() {
//TODO slk: implement supervisor strategy
case _ => Resume
}
/** Message processing */
def receive: Receive = initialize
val initialize: Receive = {
case Start =>
//TODO slk: implement module initialization
context become processing
case Status =>
//TODO slk: implement Status behavior
}
val processing: Receive = {
case Status =>
//TODO slk: implement Status behavior
case Stop =>
//TODO slk: implement Stop behavior
case Terminated =>
//TODO slk: implement watchdog behavior
}
}
/**
* Missing documentation.
*
* @author <a href="steffen.krause@soabridge.com">Steffen Krause</a>
* @since 1.0
*/
private[breeze] object ResourceActor {
/** Actor properties for ResourceActor */
val props: Props = Props[ResourceActor]
/** Accepted messages for ResourceActor */
object Messages {
case object Start
case object Status
case object Stop
}
}
|
SOABridge/breeze-scala
|
src/main/scala/org/soabridge/scala/breeze/resources/ResourceActor.scala
|
Scala
|
gpl-3.0
| 1,484
|
package nl.rabobank.oss.rules.facts
import nl.rabobank.oss.rules.engine.{ErrorEvaluation, Evaluation, ListFactEvaluation, SingularFactEvaluation}
import scala.language.existentials
trait Fact[+A] {
def name: String
def description: String
def toEval: Evaluation[A]
def valueType: String
override def toString: String = name
}
case class SingularFact[+A](name: String, description: String = "", valueType: String = "") extends Fact[A] {
def toEval: Evaluation[A] = new SingularFactEvaluation(this)
}
case class ListFact[+A](name: String, description: String = "", valueType: String = "") extends Fact[List[A]] {
def toEval: Evaluation[List[A]] = new ListFactEvaluation[A](this)
}
case object OriginFact extends Fact[Nothing] {
def name: String = "___meta___OriginFact___meta___"
def description: String = "Meta-fact used in graph construction"
def toEval: Evaluation[Nothing] = new ErrorEvaluation("The OriginFact is a meta-fact used in graph construction to indicate top-level constant evaluations")
def valueType: String = "Nothing"
}
case class SynthesizedFact[+A](factOriginalFact: Fact[Any], synthesizedPostfix: String, description: String = "", valueType: String = "") extends Fact[A] {
def name: String = factOriginalFact.name + "_" + synthesizedPostfix
def toEval: Evaluation[A] = new SingularFactEvaluation[A](this)
}
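// Illustrative usage sketch (not part of the original source); the fact names below are made up.
//
//   val income: Fact[BigDecimal] = SingularFact[BigDecimal]("income", "yearly income", "BigDecimal")
//   val childNames: Fact[List[String]] = ListFact[String]("childNames", "names of the children", "String")
//   income.toEval   // a SingularFactEvaluation wrapping the fact, as defined above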
|
scala-rules/scala-rules
|
engine-core/src/main/scala/nl/rabobank/oss/rules/facts/facts.scala
|
Scala
|
mit
| 1,365
|
package util
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.Try
/** Helper object that enhances Scala's default Future, allowing a list of futures to be converted to a list of options. */
object FutureEnhancements {
// add global execution context
import scala.concurrent.ExecutionContext.Implicits.global
/** enhances futures with special functions */
implicit class FutureCompanionOps(val f: Future.type) extends AnyVal {
// idea from here: http://stackoverflow.com/questions/20874186/scala-listfuture-to-futurelist-disregarding-failed-futures
def allAsOptions[T](fItems: /* future items */ List[Future[T]]): Future[List[Option[T]]] = {
val listOfFutureOptions: List[Future[Option[T]]] = fItems.map(futureToFutureOption)
Future.sequence(listOfFutureOptions)
}
def futureToFutureOption[T](f: Future[T]): Future[Option[T]] = {
f.map(Some(_)).recover({ case x => None })
}
}
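// Illustrative usage sketch (not part of the original source):
//
//   import util.FutureEnhancements._
//   val mixed = List(Future.successful(1), Future.failed[Int](new Exception("boom")))
//   Future.allAsOptions(mixed)   // completes with List(Some(1), None)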
/** adds mapAll and flatMapAll to futures */
implicit class FutureExtensions[T](f: Future[T]) {
def mapAll[Target](m: Try[T] => Target)(implicit ec: ExecutionContext): Future[Target] = {
val p = Promise[Target]()
f.onComplete { r => p success m(r) }(ec)
p.future
}
def flatMapAll[Target](m: Try[T] => Future[Target])(implicit ec: ExecutionContext): Future[Target] = {
val promise = Promise[Target]()
f.onComplete { r => m(r).onComplete { z => promise complete z }(ec) }(ec)
promise.future
}
}
}
|
Starofall/QryGraph
|
qrygraph/jvm/app/util/FutureEnhancements.scala
|
Scala
|
mit
| 1,504
|
package pl.edu.agh.iet.akka_tracing.couchdb
import java.nio.charset.StandardCharsets
import java.util.Base64
import java.util.concurrent.CompletableFuture
import org.asynchttpclient.{ Request, RequestBuilder, Response }
import org.json4s._
import org.json4s.ext.JavaTypesSerializers
import org.json4s.native.JsonMethods._
import org.slf4j.LoggerFactory
import scala.compat.java8.FutureConverters._
import scala.concurrent.{ ExecutionContext, Future }
private[couchdb] object CouchDbUtils {
private val base64Encoder = Base64.getEncoder
private val logger = LoggerFactory.getLogger(getClass)
private implicit val formats: Formats = DefaultFormats ++ JavaTypesSerializers.all
def getBase64AuthValue(user: String, password: String): String = {
base64Encoder.encodeToString(
s"$user:$password".getBytes(StandardCharsets.UTF_8)
)
}
def buildRequest(
url: String,
method: String,
userOption: Option[String],
passwordOption: Option[String],
queryParams: Option[List[(String, String)]] = None,
body: Option[JValue] = None
): Request = {
val rb = new RequestBuilder(method)
.setUrl(url)
.addHeader("Accept", "application/json")
(userOption, passwordOption) match {
case (Some(user), Some(password)) =>
val encodedAuthString = getBase64AuthValue(user, password)
rb.addHeader("Authorization", s"Basic $encodedAuthString")
case _ =>
}
body foreach { json =>
rb.addHeader("Content-type", "application/json")
.setBody(compact(render(json)))
}
queryParams foreach { params =>
params foreach { param =>
rb.addQueryParam(param._1, param._2)
}
}
rb.build()
}
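// Illustrative usage sketch (not part of the original source); the URL, credentials and query
// parameter below are made up.
//
//   val req = buildRequest(
//     url = "http://localhost:5984/tracing/_all_docs",
//     method = "GET",
//     userOption = Some("admin"),
//     passwordOption = Some("secret"),
//     queryParams = Some(List("include_docs" -> "true"))
//   )
//   // req carries an Accept: application/json header plus a Basic Authorization header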
implicit class RichCompletableFutureOfResponse(future: CompletableFuture[Response]) {
def asScala(implicit ec: ExecutionContext): Future[Response] = {
future.toScala flatMap { response =>
val statusCode = response.getStatusCode
if (statusCode < 200 || statusCode >= 300) {
Future.failed(RequestFailedException(
statusCode, response.getResponseBody(StandardCharsets.UTF_8)
))
} else {
Future.successful(response)
}
} recoverWith {
case e@RequestFailedException(status, message) =>
logger.error(s"Error while trying to perform request: $status: $message")
Future.failed(e)
case e =>
logger.error("Error while trying to perform request:", e)
Future.failed(e)
}
}
}
implicit class RichJValue(jValue: JValue) {
def isDefined: Boolean = jValue.toOption.isDefined
}
}
|
akka-tracing-tool/akka-tracing-couchdb-collector
|
src/main/scala/pl/edu/agh/iet/akka_tracing/couchdb/CouchDbUtils.scala
|
Scala
|
gpl-3.0
| 2,633
|
package com.chrisomeara.pillar
import com.datastax.driver.core.Session
import org.scalatest._
import org.scalatest.matchers.ShouldMatchers
import java.io.{ByteArrayOutputStream, PrintStream}
import java.util.Date
import org.scalatest.mock.MockitoSugar
class PrintStreamReporterSpec extends FunSpec with MockitoSugar with Matchers with OneInstancePerTest {
val session = mock[Session]
val migration = Migration("creates things table", new Date(1370489972546L), "up", Some("down"))
val output = new ByteArrayOutputStream()
val stream = new PrintStream(output)
val reporter = new PrintStreamReporter(stream)
val keyspace = "myks"
describe("#initializing") {
it("should print to the stream") {
reporter.initializing(session, keyspace, ReplicationOptions.default)
output.toString should equal("Initializing myks\\n")
}
}
describe("#migrating") {
describe("without date restriction") {
it("should print to the stream") {
reporter.migrating(session, None)
output.toString should equal("Migrating with date restriction None\\n")
}
}
}
describe("#applying") {
it("should print to the stream") {
reporter.applying(migration)
output.toString should equal("Applying 1370489972546: creates things table\\n")
}
}
describe("#reversing") {
it("should print to the stream") {
reporter.reversing(migration)
output.toString should equal("Reversing 1370489972546: creates things table\\n")
}
}
describe("#destroying") {
it("should print to the stream") {
reporter.destroying(session, keyspace)
output.toString should equal("Destroying myks\\n")
}
}
}
|
weirded/pillar
|
src/test/scala/com/chrisomeara/pillar/PrintStreamReporterSpec.scala
|
Scala
|
mit
| 1,682
|
package org.apache.predictionio.examples.friendrecommendation
import org.apache.predictionio.controller._
// For random algorithm
import scala.util.Random
class RandomAlgorithm (val ap: FriendRecommendationAlgoParams)
extends LAlgorithm[FriendRecommendationTrainingData,
RandomModel, FriendRecommendationQuery, FriendRecommendationPrediction] {
override
def train(pd: FriendRecommendationTrainingData): RandomModel = {
new RandomModel(0.5)
}
override
def predict(model: RandomModel, query: FriendRecommendationQuery):
FriendRecommendationPrediction = {
val randomConfidence = Random.nextDouble
val acceptance = randomConfidence >= model.randomThreshold
new FriendRecommendationPrediction(randomConfidence, acceptance)
}
}
|
alex9311/PredictionIO
|
examples/experimental/scala-local-friend-recommendation/src/main/scala/RandomAlgorithm.scala
|
Scala
|
apache-2.0
| 767
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.secondaryindex
import java.io.{File, IOException}
import org.apache.commons.io.FileUtils
import org.apache.spark.sql.{AnalysisException, Row}
import org.apache.spark.sql.test.TestQueryExecutor
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
/**
* Secondary index refresh and registration to the main table
*/
class TestRegisterIndexCarbonTable extends QueryTest with BeforeAndAfterAll {
override def beforeAll {
sql("drop database if exists carbon cascade")
}
private def restoreData(dblocation: String, tableName: String) = {
val destination = dblocation + CarbonCommonConstants.FILE_SEPARATOR + tableName
val source = dblocation + "_back" + CarbonCommonConstants.FILE_SEPARATOR + tableName
try {
FileUtils.copyDirectory(new File(source), new File(destination))
FileUtils.deleteDirectory(new File(source))
} catch {
case e : Exception =>
throw new IOException("carbon table data restore failed.")
} finally {
}
}
private def backUpData(dblocation: String, tableName: String) = {
val source = dblocation + CarbonCommonConstants.FILE_SEPARATOR + tableName
val destination = dblocation + "_back" + CarbonCommonConstants.FILE_SEPARATOR + tableName
try {
FileUtils.copyDirectory(new File(source), new File(destination))
} catch {
case e : Exception =>
throw new IOException("carbon table data backup failed.")
}
}
test("register tables test") {
val location = TestQueryExecutor.warehouse +
CarbonCommonConstants.FILE_SEPARATOR + "dbName"
sql("drop database if exists carbon cascade")
sql(s"create database carbon location '${location}'")
sql("use carbon")
sql("create table carbon.carbontable (" +
"c1 string,c2 int,c3 string,c5 string) STORED AS carbondata")
sql("insert into carbontable select 'a',1,'aa','aaa'")
sql("create index index_on_c3 on table carbontable (c3, c5) AS 'carbondata'")
backUpData(location, "carbontable")
backUpData(location, "index_on_c3")
sql("drop table carbontable")
restoreData(location, "carbontable")
restoreData(location, "index_on_c3")
sql("refresh table carbontable")
sql("refresh table index_on_c3")
checkAnswer(sql("select count(*) from carbontable"), Row(1))
checkAnswer(sql("select c1 from carbontable"), Seq(Row("a")))
sql("REGISTER INDEX TABLE index_on_c3 ON carbontable")
assert(sql("show indexes on carbontable").collect().nonEmpty)
}
test("test register index on unknown parent table AND index table") {
sql("use carbon")
sql("drop table if exists carbontable")
var exception = intercept[AnalysisException] {
sql("REGISTER INDEX TABLE index_on_c3 ON unknown")
}
assert(exception.getMessage().contains("Table [unknown] does " +
"not exists under database [carbon]"))
sql("create table carbontable (" +
"c1 string,c2 int,c3 string,c5 string) STORED AS carbondata")
exception = intercept[AnalysisException] {
sql("REGISTER INDEX TABLE unknown ON carbontable")
}
assert(exception.getMessage().contains("Secondary Index Table [unknown] does " +
"not exists under database [carbon]"))
sql("drop table if exists carbontable")
}
override def afterAll {
sql("drop database if exists carbon cascade")
sql("use default")
}
}
|
zzcclp/carbondata
|
index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestRegisterIndexCarbonTable.scala
|
Scala
|
apache-2.0
| 4,319
|
/*
* Copyright 2012-2013 Eligotech BV.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.eligosource.eventsourced.core
import java.lang.{Boolean => JBoolean}
import akka.japi.{Function => JFunction}
/**
* Processor-specific replay parameters.
*/
sealed abstract class ReplayParams {
/**
* Processor id.
*/
def processorId: Int
/**
* Lower sequence number bound. Sequence number, where a replay should start from.
* Only applicable if replay doesn't start from a snapshot (i.e. `snapshot` is `false`).
*/
def fromSequenceNr: Long
/**
* Upper sequence number bound. Sequence number at which a replay should end (inclusive).
* Applicable to both snapshotted and non-snapshotted replays.
*/
def toSequenceNr: Long
/**
* Whether or not replay should start from a snapshot.
*/
def snapshot: Boolean
/**
* Filter applied to saved snapshots. The filtered snapshot with the highest sequence
* number (if any) will be the replay starting point.
*/
def snapshotFilter: SnapshotMetadata => Boolean
/**
* Updates `toSequenceNr` with specified value.
*/
def withToSequenceNr(toSequenceNr: Long): ReplayParams
}
/**
* @see [[org.eligosource.eventsourced.core.ReplayParams]]
*/
object ReplayParams {
/**
* Creates processor-specific replay parameters for non-snapshotted replay with optional
* lower and upper sequence number bounds.
*/
def apply(processorId: Int, fromSequenceNr: Long = 0L, toSequenceNr: Long = Long.MaxValue): ReplayParams =
StandardReplayParams(processorId, fromSequenceNr, toSequenceNr)
/**
* Creates processor-specific replay parameters for snapshotted replay with no upper
* sequence number bound.
*/
def apply(processorId: Int, snapshotFilter: SnapshotMetadata => Boolean): ReplayParams =
SnapshotReplayParams(processorId, snapshotFilter)
/**
* Creates processor-specific replay parameters for snapshotted replay with an upper
* sequence number bound.
*/
def apply(processorId: Int, snapshotFilter: SnapshotMetadata => Boolean, toSequenceNr: Long): ReplayParams =
SnapshotReplayParams(processorId, snapshotFilter, toSequenceNr)
/**
* Creates processor-specific replay parameters for snapshotted replay if `snapshot` is
* `true`, for non-snapshotted replay otherwise. There are no lower and upper sequence
* number bounds.
*/
def apply(processorId: Int, snapshot: Boolean): ReplayParams =
if (snapshot) SnapshotReplayParams(processorId)
else StandardReplayParams(processorId)
/**
* Creates processor-specific replay parameters for snapshotted replay if `snapshot` is
* `true`, for non-snapshotted replay otherwise. There is an upper sequence number bound.
*/
def apply(processorId: Int, snapshot: Boolean, toSequenceNr: Long): ReplayParams =
if (snapshot) SnapshotReplayParams(processorId, toSequenceNr = toSequenceNr)
else StandardReplayParams(processorId, toSequenceNr = toSequenceNr)
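// Illustrative usage sketch (not part of the original source):
//
//   ReplayParams(1)                          // full, non-snapshotted replay for processor 1
//   ReplayParams(1, fromSequenceNr = 100L)   // non-snapshotted replay starting at sequence number 100
//   ReplayParams(1, (smd: SnapshotMetadata) => smd.sequenceNr <= 500L, 1000L)
//   // snapshotted replay: start from a snapshot no later than 500, stop at 1000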
/**
* Java API.
*
* Creates processor-specific replay parameters for non-snapshotted replay with no
* lower and upper sequence number bounds.
*/
def create(processorId: Int): ReplayParams =
apply(processorId)
/**
* Java API.
*
* Creates processor-specific replay parameters for non-snapshotted replay with a
* lower sequence number bound but no upper sequence number bound.
*/
def create(processorId: Int, fromSequenceNr: Long): ReplayParams =
apply(processorId, fromSequenceNr)
/**
* Java API.
*
* Creates processor-specific replay parameters for non-snapshotted replay with
* lower and upper sequence number bounds.
*/
def create(processorId: Int, fromSequenceNr: Long, toSequenceNr: Long): ReplayParams =
apply(processorId, fromSequenceNr, toSequenceNr)
/**
* Java API.
*
* Creates processor-specific replay parameters for snapshotted replay with no upper
* sequence number bound.
*/
def create(processorId: Int, snapshotFilter: JFunction[SnapshotMetadata, JBoolean]): ReplayParams =
apply(processorId, smd => snapshotFilter(smd))
/**
* Java API.
*
* Creates processor-specific replay parameters for snapshotted replay with an upper
* sequence number bound.
*/
def create(processorId: Int, snapshotFilter: JFunction[SnapshotMetadata, JBoolean], toSequenceNr: Long): ReplayParams =
apply(processorId, smd => snapshotFilter(smd), toSequenceNr)
/**
* Java API.
*
* Creates processor-specific replay parameters for snapshotted replay if `snapshot` is
* `true`, for non-snapshotted replay otherwise. There are no lower and upper sequence
* number bounds.
*/
def create(processorId: Int, snapshot: Boolean): ReplayParams =
apply(processorId, snapshot)
/**
* Java API.
*
* Creates processor-specific replay parameters for snapshotted replay if `snapshot` is
* `true`, for non-snapshotted replay otherwise. There is an upper sequence number bound.
*/
def create(processorId: Int, snapshot: Boolean, toSequenceNr: Long): ReplayParams =
apply(processorId, snapshot, toSequenceNr)
/**
* Processor-specific replay parameters for non-snapshotted replay.
*/
case class StandardReplayParams(
processorId: Int,
fromSequenceNr: Long = 0L,
toSequenceNr: Long = Long.MaxValue) extends ReplayParams {
/**
* Returns `false`.
*/
val snapshot = false
/**
* Not applicable.
*/
def snapshotFilter = _ => false
/**
* Updates `toSequenceNr` with specified value.
*/
def withToSequenceNr(toSequenceNr: Long) =
copy(toSequenceNr = toSequenceNr)
}
/**
* Processor-specific replay parameters for snapshotted replay.
*
* @param snapshotBaseFilter application defined snapshot filter.
* Selects any saved snapshot by default.
*/
case class SnapshotReplayParams(
processorId: Int,
snapshotBaseFilter: SnapshotMetadata => Boolean = _ => true,
toSequenceNr: Long = Long.MaxValue) extends ReplayParams {
/**
* Returns `0L`.
*/
val fromSequenceNr = 0L
/**
* Returns `true`.
*/
val snapshot = true
/**
* Snapshot filter that applies `snapshotBaseFilter` and a
* `<= toSequenceNr` constraint.
*/
def snapshotFilter: SnapshotMetadata => Boolean =
smd => snapshotBaseFilter(smd) && (smd.sequenceNr <= toSequenceNr)
/**
* Updates `toSequenceNr` with specified value.
*/
def withToSequenceNr(toSequenceNr: Long) =
copy(toSequenceNr = toSequenceNr)
}
}
|
CoderPaulK/eventsourced
|
es-core/src/main/scala/org/eligosource/eventsourced/core/ReplayParams.scala
|
Scala
|
apache-2.0
| 7,095
|
package scalan
import java.io.File
import scalan.compilation.{GraphVizExport, GraphVizConfig}
/**
* Base trait for testing specific rewrite rules
*/
trait RewriteRuleSuite[A] extends BaseShouldTests {
lazy val folder = new File(prefix, suiteName)
def getCtx: TestCtx
trait TestCtx extends ScalanDslExp {
def testLemma: RRewrite[A]
def testExpr(): Exp[A]
def expected: Exp[A]
lazy val rule = patternRewriteRule(testLemma)
}
"ScalanCtx" should "stage Lemma" in {
val ctx = getCtx
ctx.emitDepGraph(ctx.testLemma, folder, "testLemma")(GraphVizConfig.default)
}
it should "create LemmaRule" in {
val ctx = getCtx
import ctx._
ctx.emitDepGraph(List(testLemma, rule.lhs, rule.rhs), folder, "testRule")(GraphVizConfig.default)
}
it should "create ProjectionTree in pattern" in {
val ctx = getCtx
import ctx._
ctx.emitDepGraph(List(rule.lhs, rule.rhs), folder, "testPatternAndRhs")(GraphVizConfig.default)
}
it should "recognize pattern" in {
val ctx = getCtx
import ctx._
patternMatch(rule.lhs, testExpr()) match {
case Some(subst) =>
subst should not be(Map.empty)
case _ =>
fail("should recognize pattern")
}
}
it should "apply pattern" in {
val ctx = getCtx
import ctx._
val test = testExpr()
val rewritten = rule(test)
rewritten match {
case Some(res) =>
ctx.emitDepGraph(List(Pair(test, res)), folder, "LemmaRule/originalAndRewritten")(GraphVizConfig.default)
case _ =>
fail("should apply pattern")
}
}
it should "rewrite when registered" in {
val ctx = getCtx
import ctx._
val withoutRule = testExpr()
addRewriteRules(rule)
val withRule = testExpr()
removeRewriteRules(rule)
ctx.emitDepGraph(List(withoutRule, withRule), folder, "LemmaRule/ruleRewriting")(GraphVizConfig.default)
val expectedResult = expected
alphaEqual(withRule, expectedResult) should be(true)
alphaEqual(withoutRule, expectedResult) should be(false)
val afterRemoval = testExpr()
ctx.emitDepGraph(List(withoutRule, withRule, afterRemoval), folder, "LemmaRule/ruleRewriting")(GraphVizConfig.default)
alphaEqual(afterRemoval, withoutRule) should be(true)
}
}
|
scalan/scalan
|
core/src/test/scala/scalan/RewriteRuleSuite.scala
|
Scala
|
apache-2.0
| 2,271
|
package com.twitter.finagle.memcached.protocol.text
import com.twitter.finagle.memcached.protocol.{
Error => MemcacheError, ClientError, NonexistentCommand, ServerError}
import com.twitter.io.Charsets
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class ShowTest extends FunSuite {
val responseToEncoding = new ResponseToEncoding
test("encode errors - ERROR") {
val error = MemcacheError(new NonexistentCommand("No such command"))
val res = responseToEncoding.encode(null, null, error)
assert(res.getClass === classOf[Tokens])
val tokens = res.asInstanceOf[Tokens]
assert(tokens.tokens.size === 1)
assert(tokens.tokens.head.toString(Charsets.Utf8) === "ERROR")
}
test("encode errors - CLIENT_ERROR") {
val error = MemcacheError(new ClientError("Invalid Input"))
val res = responseToEncoding.encode(null, null, error)
assert(res.getClass === classOf[Tokens])
val tokens = res.asInstanceOf[Tokens]
assert(tokens.tokens.size === 2)
assert(tokens.tokens.head.toString(Charsets.Utf8) === "CLIENT_ERROR")
}
test("encode errors - SERVER_ERROR") {
val error = MemcacheError(new ServerError("Out of Memory"))
val res = responseToEncoding.encode(null, null, error)
assert(res.getClass === classOf[Tokens])
val tokens = res.asInstanceOf[Tokens]
assert(tokens.tokens.size === 2)
assert(tokens.tokens.head.toString(Charsets.Utf8) === "SERVER_ERROR")
}
}
|
kristofa/finagle
|
finagle-memcached/src/test/scala/com/twitter/finagle/memcached/unit/protocol/text/ShowTest.scala
|
Scala
|
apache-2.0
| 1,519
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.tools
import java.net.SocketTimeoutException
import java.text.SimpleDateFormat
import java.util
import java.util.concurrent.CountDownLatch
import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import java.util.regex.{Pattern, PatternSyntaxException}
import java.util.{Date, Properties}
import joptsimple.OptionParser
import kafka.api._
import kafka.consumer.Whitelist
import kafka.utils._
import org.apache.kafka.clients._
import org.apache.kafka.clients.admin.{ListTopicsOptions, TopicDescription}
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.network.{NetworkReceive, Selectable, Selector}
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.record.MemoryRecords
import org.apache.kafka.common.requests.AbstractRequest.Builder
import org.apache.kafka.common.requests.{AbstractRequest, FetchResponse, ListOffsetRequest, FetchRequest => JFetchRequest}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.kafka.common.utils.{LogContext, Time}
import org.apache.kafka.common.{Node, TopicPartition}
import scala.collection.JavaConverters._
/**
* For verifying the consistency among replicas.
*
* 1. start a fetcher on every broker.
* 2. each fetcher does the following
* 2.1 issues fetch request
* 2.2 puts the fetched result in a shared buffer
* 2.3 waits for all other fetchers to finish step 2.2
* 2.4 one of the fetchers verifies the consistency of fetched results among replicas
*
* The consistency verification is up to the high watermark. The tool reports the
* max lag between the verified offset and the high watermark among all partitions.
*
* If a broker goes down, the verification of the partitions on that broker is delayed
* until the broker is up again.
*
* Caveats:
* 1. The tool needs all brokers to be up at startup time.
* 2. The tool doesn't handle out-of-range offsets.
*/
object ReplicaVerificationTool extends Logging {
val clientId = "replicaVerificationTool"
val dateFormatString = "yyyy-MM-dd HH:mm:ss,SSS"
val dateFormat = new SimpleDateFormat(dateFormatString)
def getCurrentTimeString() = {
ReplicaVerificationTool.dateFormat.format(new Date(Time.SYSTEM.milliseconds))
}
def main(args: Array[String]): Unit = {
val parser = new OptionParser(false)
val brokerListOpt = parser.accepts("broker-list", "REQUIRED: The list of hostname and port of the server to connect to.")
.withRequiredArg
.describedAs("hostname:port,...,hostname:port")
.ofType(classOf[String])
val fetchSizeOpt = parser.accepts("fetch-size", "The fetch size of each request.")
.withRequiredArg
.describedAs("bytes")
.ofType(classOf[java.lang.Integer])
.defaultsTo(ConsumerConfig.DEFAULT_MAX_PARTITION_FETCH_BYTES)
val maxWaitMsOpt = parser.accepts("max-wait-ms", "The max amount of time each fetch request waits.")
.withRequiredArg
.describedAs("ms")
.ofType(classOf[java.lang.Integer])
.defaultsTo(1000)
val topicWhiteListOpt = parser.accepts("topic-white-list", "White list of topics to verify replica consistency. Defaults to all topics.")
.withRequiredArg
.describedAs("Java regex (String)")
.ofType(classOf[String])
.defaultsTo(".*")
val initialOffsetTimeOpt = parser.accepts("time", "Timestamp for getting the initial offsets.")
.withRequiredArg
.describedAs("timestamp/-1(latest)/-2(earliest)")
.ofType(classOf[java.lang.Long])
.defaultsTo(-1L)
val reportIntervalOpt = parser.accepts("report-interval-ms", "The reporting interval.")
.withRequiredArg
.describedAs("ms")
.ofType(classOf[java.lang.Long])
.defaultsTo(30 * 1000L)
if (args.length == 0)
CommandLineUtils.printUsageAndDie(parser, "Validate that all replicas for a set of topics have the same data.")
val options = parser.parse(args: _*)
CommandLineUtils.checkRequiredArgs(parser, options, brokerListOpt)
val regex = options.valueOf(topicWhiteListOpt)
val topicWhiteListFilter = new Whitelist(regex)
try Pattern.compile(regex)
catch {
case _: PatternSyntaxException =>
throw new RuntimeException(regex + " is an invalid regex.")
}
val fetchSize = options.valueOf(fetchSizeOpt).intValue
val maxWaitMs = options.valueOf(maxWaitMsOpt).intValue
val initialOffsetTime = options.valueOf(initialOffsetTimeOpt).longValue
val reportInterval = options.valueOf(reportIntervalOpt).longValue
// getting topic metadata
info("Getting topic metadata...")
val brokerList = options.valueOf(brokerListOpt)
ToolsUtils.validatePortOrDie(parser, brokerList)
val (topicsMetadata, brokerInfo) = {
val adminClient = createAdminClient(brokerList)
try ((listTopicsMetadata(adminClient), brokerDetails(adminClient)))
finally CoreUtils.swallow(adminClient.close(), this)
}
val filteredTopicMetadata = topicsMetadata.filter { topicMetaData =>
topicWhiteListFilter.isTopicAllowed(topicMetaData.name, excludeInternalTopics = false)
}
if (filteredTopicMetadata.isEmpty) {
error(s"No topics found. $topicWhiteListOpt if specified, is either filtering out all topics or there is no topic.")
Exit.exit(1)
}
val topicPartitionReplicas = filteredTopicMetadata.flatMap { topicMetadata =>
topicMetadata.partitions.asScala.flatMap { partitionMetadata =>
partitionMetadata.replicas.asScala.map { node =>
TopicPartitionReplica(topic = topicMetadata.name, partitionId = partitionMetadata.partition, replicaId = node.id)
}
}
}
debug(s"Selected topic partitions: $topicPartitionReplicas")
val brokerToTopicPartitions = topicPartitionReplicas.groupBy(_.replicaId).map { case (brokerId, partitions) =>
brokerId -> partitions.map { partition => new TopicPartition(partition.topic, partition.partitionId) }
}
debug(s"Topic partitions per broker: $brokerToTopicPartitions")
val expectedReplicasPerTopicPartition = topicPartitionReplicas.groupBy { replica =>
new TopicPartition(replica.topic, replica.partitionId)
}.map { case (topicAndPartition, replicaSet) => topicAndPartition -> replicaSet.size }
debug(s"Expected replicas per topic partition: $expectedReplicasPerTopicPartition")
val topicPartitions = filteredTopicMetadata.flatMap { topicMetaData =>
topicMetaData.partitions.asScala.map { partitionMetadata =>
new TopicPartition(topicMetaData.name, partitionMetadata.partition)
}
}
val consumerProps = consumerConfig(brokerList)
val replicaBuffer = new ReplicaBuffer(expectedReplicasPerTopicPartition,
initialOffsets(topicPartitions, consumerProps, initialOffsetTime),
brokerToTopicPartitions.size,
reportInterval)
// create all replica fetcher threads
val verificationBrokerId = brokerToTopicPartitions.head._1
val counter = new AtomicInteger(0)
val fetcherThreads: Iterable[ReplicaFetcher] = brokerToTopicPartitions.map { case (brokerId, topicPartitions) =>
new ReplicaFetcher(name = s"ReplicaFetcher-$brokerId",
sourceBroker = brokerInfo(brokerId),
topicPartitions = topicPartitions,
replicaBuffer = replicaBuffer,
socketTimeout = 30000,
socketBufferSize = 256000,
fetchSize = fetchSize,
maxWait = maxWaitMs,
minBytes = 1,
doVerification = brokerId == verificationBrokerId,
consumerProps,
fetcherId = counter.incrementAndGet())
}
Runtime.getRuntime.addShutdownHook(new Thread() {
override def run() {
info("Stopping all fetchers")
fetcherThreads.foreach(_.shutdown())
}
})
fetcherThreads.foreach(_.start())
println(ReplicaVerificationTool.getCurrentTimeString() + ": verification process is started.")
}
private def listTopicsMetadata(adminClient: admin.AdminClient): Seq[TopicDescription] = {
val topics = adminClient.listTopics(new ListTopicsOptions().listInternal(true)).names.get
adminClient.describeTopics(topics).all.get.values.asScala.toBuffer
}
private def brokerDetails(adminClient: admin.AdminClient): Map[Int, Node] = {
adminClient.describeCluster.nodes.get.asScala.map(n => (n.id, n)).toMap
}
private def createAdminClient(brokerUrl: String): admin.AdminClient = {
val props = new Properties()
props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, brokerUrl)
admin.AdminClient.create(props)
}
private def initialOffsets(topicPartitions: Seq[TopicPartition], consumerConfig: Properties,
initialOffsetTime: Long): Map[TopicPartition, Long] = {
val consumer = createConsumer(consumerConfig)
try {
if (ListOffsetRequest.LATEST_TIMESTAMP == initialOffsetTime)
consumer.endOffsets(topicPartitions.asJava).asScala.mapValues(_.longValue).toMap
else if (ListOffsetRequest.EARLIEST_TIMESTAMP == initialOffsetTime)
consumer.beginningOffsets(topicPartitions.asJava).asScala.mapValues(_.longValue).toMap
else {
val timestampsToSearch = topicPartitions.map(tp => tp -> (initialOffsetTime: java.lang.Long)).toMap
consumer.offsetsForTimes(timestampsToSearch.asJava).asScala.mapValues(v => v.offset).toMap
}
} finally consumer.close()
}
private def consumerConfig(brokerUrl: String): Properties = {
val properties = new Properties()
properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerUrl)
properties.put(ConsumerConfig.GROUP_ID_CONFIG, "ReplicaVerification")
properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer])
properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer])
properties
}
private def createConsumer(consumerConfig: Properties): KafkaConsumer[String, String] =
new KafkaConsumer(consumerConfig)
}
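// Illustrative invocation sketch (not part of the original source); host names and values are made up,
// and the kafka-run-class.sh wrapper is assumed to be available in the Kafka distribution.
//
//   bin/kafka-run-class.sh kafka.tools.ReplicaVerificationTool \
//     --broker-list broker1:9092,broker2:9092 \
//     --topic-white-list 'my-topic.*' \
//     --report-interval-ms 10000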
private case class TopicPartitionReplica(topic: String, partitionId: Int, replicaId: Int)
private case class MessageInfo(replicaId: Int, offset: Long, nextOffset: Long, checksum: Long)
private class ReplicaBuffer(expectedReplicasPerTopicPartition: Map[TopicPartition, Int],
initialOffsets: Map[TopicPartition, Long],
expectedNumFetchers: Int,
reportInterval: Long) extends Logging {
private val fetchOffsetMap = new Pool[TopicPartition, Long]
private val recordsCache = new Pool[TopicPartition, Pool[Int, FetchResponse.PartitionData[MemoryRecords]]]
private val fetcherBarrier = new AtomicReference(new CountDownLatch(expectedNumFetchers))
private val verificationBarrier = new AtomicReference(new CountDownLatch(1))
@volatile private var lastReportTime = Time.SYSTEM.milliseconds
private var maxLag: Long = -1L
private var offsetWithMaxLag: Long = -1L
private var maxLagTopicAndPartition: TopicPartition = null
initialize()
def createNewFetcherBarrier() {
fetcherBarrier.set(new CountDownLatch(expectedNumFetchers))
}
def getFetcherBarrier() = fetcherBarrier.get
def createNewVerificationBarrier() {
verificationBarrier.set(new CountDownLatch(1))
}
def getVerificationBarrier() = verificationBarrier.get
private def initialize() {
for (topicPartition <- expectedReplicasPerTopicPartition.keySet)
recordsCache.put(topicPartition, new Pool[Int, FetchResponse.PartitionData[MemoryRecords]])
setInitialOffsets()
}
private def setInitialOffsets() {
for ((tp, offset) <- initialOffsets)
fetchOffsetMap.put(tp, offset)
}
def addFetchedData(topicAndPartition: TopicPartition, replicaId: Int, partitionData: FetchResponse.PartitionData[MemoryRecords]) {
recordsCache.get(topicAndPartition).put(replicaId, partitionData)
}
def getOffset(topicAndPartition: TopicPartition) = {
fetchOffsetMap.get(topicAndPartition)
}
def verifyCheckSum(println: String => Unit) {
debug("Begin verification")
maxLag = -1L
for ((topicPartition, fetchResponsePerReplica) <- recordsCache) {
debug("Verifying " + topicPartition)
assert(fetchResponsePerReplica.size == expectedReplicasPerTopicPartition(topicPartition),
"fetched " + fetchResponsePerReplica.size + " replicas for " + topicPartition + ", but expected "
+ expectedReplicasPerTopicPartition(topicPartition) + " replicas")
val recordBatchIteratorMap = fetchResponsePerReplica.map { case (replicaId, fetchResponse) =>
replicaId -> fetchResponse.records.batches.iterator
}
val maxHw = fetchResponsePerReplica.values.map(_.highWatermark).max
// Iterate one message at a time from every replica, until high watermark is reached.
var isMessageInAllReplicas = true
while (isMessageInAllReplicas) {
var messageInfoFromFirstReplicaOpt: Option[MessageInfo] = None
for ((replicaId, recordBatchIterator) <- recordBatchIteratorMap) {
try {
if (recordBatchIterator.hasNext) {
val batch = recordBatchIterator.next()
// only verify up to the high watermark
if (batch.lastOffset >= fetchResponsePerReplica.get(replicaId).highWatermark)
isMessageInAllReplicas = false
else {
messageInfoFromFirstReplicaOpt match {
case None =>
messageInfoFromFirstReplicaOpt = Some(
MessageInfo(replicaId, batch.lastOffset, batch.nextOffset, batch.checksum))
case Some(messageInfoFromFirstReplica) =>
if (messageInfoFromFirstReplica.offset != batch.lastOffset) {
println(ReplicaVerificationTool.getCurrentTimeString + ": partition " + topicPartition
+ ": replica " + messageInfoFromFirstReplica.replicaId + "'s offset "
+ messageInfoFromFirstReplica.offset + " doesn't match replica "
+ replicaId + "'s offset " + batch.lastOffset)
Exit.exit(1)
}
if (messageInfoFromFirstReplica.checksum != batch.checksum)
println(ReplicaVerificationTool.getCurrentTimeString + ": partition "
+ topicPartition + " has unmatched checksum at offset " + batch.lastOffset + "; replica "
+ messageInfoFromFirstReplica.replicaId + "'s checksum " + messageInfoFromFirstReplica.checksum
+ "; replica " + replicaId + "'s checksum " + batch.checksum)
}
}
} else
isMessageInAllReplicas = false
} catch {
case t: Throwable =>
throw new RuntimeException("Error in processing replica %d in partition %s at offset %d."
.format(replicaId, topicPartition, fetchOffsetMap.get(topicPartition)), t)
}
}
if (isMessageInAllReplicas) {
val nextOffset = messageInfoFromFirstReplicaOpt.get.nextOffset
fetchOffsetMap.put(topicPartition, nextOffset)
debug(expectedReplicasPerTopicPartition(topicPartition) + " replicas match at offset " +
nextOffset + " for " + topicPartition)
}
}
if (maxHw - fetchOffsetMap.get(topicPartition) > maxLag) {
offsetWithMaxLag = fetchOffsetMap.get(topicPartition)
maxLag = maxHw - offsetWithMaxLag
maxLagTopicAndPartition = topicPartition
}
fetchResponsePerReplica.clear()
}
val currentTimeMs = Time.SYSTEM.milliseconds
if (currentTimeMs - lastReportTime > reportInterval) {
println(ReplicaVerificationTool.dateFormat.format(new Date(currentTimeMs)) + ": max lag is "
+ maxLag + " for partition " + maxLagTopicAndPartition + " at offset " + offsetWithMaxLag
+ " among " + recordsCache.size + " partitions")
lastReportTime = currentTimeMs
}
}
}
private class ReplicaFetcher(name: String, sourceBroker: Node, topicPartitions: Iterable[TopicPartition],
replicaBuffer: ReplicaBuffer, socketTimeout: Int, socketBufferSize: Int,
fetchSize: Int, maxWait: Int, minBytes: Int, doVerification: Boolean, consumerConfig: Properties,
fetcherId: Int)
extends ShutdownableThread(name) {
private val fetchEndpoint = new ReplicaFetcherBlockingSend(sourceBroker, new ConsumerConfig(consumerConfig), new Metrics(), Time.SYSTEM, fetcherId,
s"broker-${Request.DebuggingConsumerId}-fetcher-$fetcherId")
override def doWork() {
val fetcherBarrier = replicaBuffer.getFetcherBarrier()
val verificationBarrier = replicaBuffer.getVerificationBarrier()
val requestMap = new util.LinkedHashMap[TopicPartition, JFetchRequest.PartitionData]
for (topicPartition <- topicPartitions)
requestMap.put(topicPartition, new JFetchRequest.PartitionData(replicaBuffer.getOffset(topicPartition), 0L, fetchSize))
val fetchRequestBuilder = JFetchRequest.Builder.
forReplica(ApiKeys.FETCH.latestVersion, Request.DebuggingConsumerId, maxWait, minBytes, requestMap)
debug("Issuing fetch request ")
var fetchResponse: FetchResponse[MemoryRecords] = null
try {
val clientResponse = fetchEndpoint.sendRequest(fetchRequestBuilder)
fetchResponse = clientResponse.responseBody.asInstanceOf[FetchResponse[MemoryRecords]]
} catch {
case t: Throwable =>
if (!isRunning)
throw t
}
if (fetchResponse != null) {
fetchResponse.responseData.asScala.foreach { case (tp, partitionData) =>
replicaBuffer.addFetchedData(tp, sourceBroker.id, partitionData)
}
} else {
val emptyResponse = new FetchResponse.PartitionData(Errors.NONE, FetchResponse.INVALID_HIGHWATERMARK,
FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, null, MemoryRecords.EMPTY)
for (topicAndPartition <- topicPartitions)
replicaBuffer.addFetchedData(topicAndPartition, sourceBroker.id, emptyResponse)
}
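    // two-phase handshake: every fetcher counts down and waits on the fetcher barrier, then a single
    // designated fetcher verifies the buffered data and recreates both barriers before releasing the rest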
fetcherBarrier.countDown()
debug("Done fetching")
// wait for all fetchers to finish
fetcherBarrier.await()
debug("Ready for verification")
// one of the fetchers will do the verification
if (doVerification) {
debug("Do verification")
replicaBuffer.verifyCheckSum(println)
replicaBuffer.createNewFetcherBarrier()
replicaBuffer.createNewVerificationBarrier()
debug("Created new barrier")
verificationBarrier.countDown()
}
verificationBarrier.await()
debug("Done verification")
}
}
private class ReplicaFetcherBlockingSend(sourceNode: Node,
consumerConfig: ConsumerConfig,
metrics: Metrics,
time: Time,
fetcherId: Int,
clientId: String) {
private val socketTimeout: Int = consumerConfig.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG)
private val networkClient = {
val channelBuilder = org.apache.kafka.clients.ClientUtils.createChannelBuilder(consumerConfig)
val selector = new Selector(
NetworkReceive.UNLIMITED,
consumerConfig.getLong(ConsumerConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG),
metrics,
time,
"replica-fetcher",
Map("broker-id" -> sourceNode.id.toString, "fetcher-id" -> fetcherId.toString).asJava,
false,
channelBuilder,
new LogContext
)
new NetworkClient(
selector,
new ManualMetadataUpdater(),
clientId,
1,
0,
0,
Selectable.USE_DEFAULT_BUFFER_SIZE,
consumerConfig.getInt(ConsumerConfig.RECEIVE_BUFFER_CONFIG),
consumerConfig.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG),
time,
false,
new ApiVersions,
new LogContext
)
}
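  // Waits for the connection to be ready, sends the request, and blocks until the response arrives;
  // the connection is closed on any failure so that a subsequent call reconnects from scratch.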
def sendRequest(requestBuilder: Builder[_ <: AbstractRequest]): ClientResponse = {
try {
if (!NetworkClientUtils.awaitReady(networkClient, sourceNode, time, socketTimeout))
throw new SocketTimeoutException(s"Failed to connect within $socketTimeout ms")
else {
val clientRequest = networkClient.newClientRequest(sourceNode.id.toString, requestBuilder,
time.milliseconds(), true)
NetworkClientUtils.sendAndReceive(networkClient, clientRequest, time)
}
}
catch {
case e: Throwable =>
networkClient.close(sourceNode.id.toString)
throw e
}
}
def close(): Unit = {
networkClient.close()
}
}
|
Ishiihara/kafka
|
core/src/main/scala/kafka/tools/ReplicaVerificationTool.scala
|
Scala
|
apache-2.0
| 22,077
|
/*
Stratagem is a model checker for transition systems described using rewriting
rules and strategies.
Copyright (C) 2013 - SMV@Geneva University.
Program written by Edmundo Lopez Bobeda <edmundo [at] lopezbobeda.net>.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package ch.unige.cui.smv.stratagem.petrinets
/**
* Represents an arc of the petri net.
* @param id the unique identifier for this arc.
* @param place the target or source place of this arc.
* @param annotation the number that labels the arc.
*
* @author mundacho
*
*/
case class Arc(val id: String, val place: Place, val annotation: Int) {
  require(annotation > 0, "Impossible to create an arc with a non-positive label")
}
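// Hedged usage sketch (not part of the upstream file; `inputPlace` stands for an existing Place):
private object ArcExample {
  // an arc of weight 2 attached to the given place; the require above rejects weights <= 0
  def weightedArc(inputPlace: Place): Arc = Arc("a1", inputPlace, 2)
}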
|
didierbuchs/oldstratagem
|
src/main/scala/ch/unige/cui/smv/stratagem/petrinets/Arc.scala
|
Scala
|
gpl-2.0
| 1,330
|
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.bijection
import java.lang.{
Short => JShort,
Integer => JInt,
Long => JLong,
Float => JFloat,
Double => JDouble,
Byte => JByte
}
import java.nio.ByteBuffer
import scala.util.Success
import Inversion.{attempt, attemptWhen}
trait NumericInjections extends GeneratedTupleInjections {
implicit val byte2Short: Injection[Byte, Short] = new AbstractInjection[Byte, Short] {
def apply(i: Byte) = i.toShort
def invert(l: Short) =
attemptWhen(l)(_.isValidByte)(_.toByte)
}
implicit val short2Int: Injection[Short, Int] = new AbstractInjection[Short, Int] {
def apply(i: Short) = i.toInt
def invert(l: Int) =
attemptWhen(l)(_.isValidShort)(_.toShort)
}
implicit val int2Long: Injection[Int, Long] = new AbstractInjection[Int, Long] {
def apply(i: Int) = i.toLong
def invert(l: Long) =
attemptWhen(l)(_.isValidInt)(_.toInt)
}
implicit val long2BigInt: Injection[Long, BigInt] = new AbstractInjection[Long, BigInt] {
def apply(l: Long) = BigInt(l)
def invert(bi: BigInt) =
attemptWhen(bi)(bi => bi <= Long.MaxValue && Long.MinValue <= bi)(_.toLong)
}
// This is a loose injection: invert fails for doubles outside the Float range and may lose precision within it
implicit val float2Double: Injection[Float, Double] = new AbstractInjection[Float, Double] {
def apply(i: Float) = i.toDouble
def invert(l: Double) =
attemptWhen(l)(l => l <= Float.MaxValue && l >= Float.MinValue)(_.toFloat)
}
// This is a loose injection: invert always succeeds by truncating, so non-integral doubles do not round-trip
implicit val int2Double: Injection[Int, Double] = new AbstractInjection[Int, Double] {
def apply(i: Int) = i.toDouble
def invert(l: Double) = Success(l.toInt)
}
implicit val byte2String: Injection[Byte, String] =
new AbstractInjection[Byte, String] {
def apply(b: Byte) = b.toString
override def invert(s: String) = attempt(s)(_.toByte)
}
implicit val jbyte2String: Injection[JByte, String] =
new AbstractInjection[JByte, String] {
def apply(b: JByte) = b.toString
override def invert(s: String) = attempt(s)(JByte.valueOf(_))
}
implicit val short2String: Injection[Short, String] =
new AbstractInjection[Short, String] {
def apply(s: Short) = s.toString
override def invert(s: String) = attempt(s)(_.toShort)
}
implicit val jshort2String: Injection[JShort, String] =
new AbstractInjection[JShort, String] {
def apply(b: JShort) = b.toString
override def invert(s: String) = attempt(s)(JShort.valueOf(_))
}
implicit val int2String: Injection[Int, String] =
new AbstractInjection[Int, String] {
def apply(i: Int) = i.toString
override def invert(s: String) = attempt(s)(_.toInt)
}
implicit val jint2String: Injection[JInt, String] =
new AbstractInjection[JInt, String] {
def apply(i: JInt) = i.toString
override def invert(s: String) = attempt(s)(JInt.valueOf(_))
}
implicit val long2String: Injection[Long, String] =
new AbstractInjection[Long, String] {
def apply(l: Long) = l.toString
override def invert(s: String) = attempt(s)(_.toLong)
}
implicit val jlong2String: Injection[JLong, String] =
new AbstractInjection[JLong, String] {
def apply(l: JLong) = l.toString
override def invert(s: String) = attempt(s)(JLong.valueOf(_))
}
implicit val float2String: Injection[Float, String] =
new AbstractInjection[Float, String] {
def apply(f: Float) = f.toString
override def invert(s: String) = attempt(s)(_.toFloat)
}
implicit val jfloat2String: Injection[JFloat, String] =
new AbstractInjection[JFloat, String] {
def apply(f: JFloat) = f.toString
override def invert(s: String) = attempt(s)(JFloat.valueOf(_))
}
implicit val double2String: Injection[Double, String] =
new AbstractInjection[Double, String] {
def apply(d: Double) = d.toString
override def invert(s: String) = attempt(s)(_.toDouble)
}
implicit val jdouble2String: Injection[JDouble, String] =
new AbstractInjection[JDouble, String] {
def apply(d: JDouble) = d.toString
override def invert(s: String) = attempt(s)(JDouble.valueOf(_))
}
implicit val short2BigEndian: Injection[Short, Array[Byte]] =
new AbstractInjection[Short, Array[Byte]] {
val size = 2
def apply(value: Short) = {
val buf = ByteBuffer.allocate(size)
buf.putShort(value)
buf.array
}
override def invert(b: Array[Byte]) =
attempt(b)(ByteBuffer.wrap(_).getShort)
}
implicit val int2BigEndian: Injection[Int, Array[Byte]] =
new AbstractInjection[Int, Array[Byte]] {
val size = 4
def apply(value: Int) = {
val buf = ByteBuffer.allocate(size)
buf.putInt(value)
buf.array
}
override def invert(b: Array[Byte]) =
attempt(b)(ByteBuffer.wrap(_).getInt)
}
implicit val long2BigEndian: Injection[Long, Array[Byte]] =
new AbstractInjection[Long, Array[Byte]] {
val size = 8
def apply(value: Long) = {
val buf = ByteBuffer.allocate(size)
buf.putLong(value)
buf.array
}
override def invert(b: Array[Byte]) =
attempt(b)(ByteBuffer.wrap(_).getLong)
}
// Lazy to deal with the fact that int2BigEndian or Bijection may not be initialized yet;
// there were null pointer exceptions in the tests that this fixed.
implicit lazy val float2BigEndian: Injection[Float, Array[Byte]] =
Injection.fromBijection(Bijection.float2IntIEEE754) andThen int2BigEndian
implicit lazy val double2BigEndian: Injection[Double, Array[Byte]] =
Injection.fromBijection(Bijection.double2LongIEEE754) andThen long2BigEndian
}
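// Hedged usage sketch (not part of the upstream file): exercises the "loose" float2Double
// injection defined above -- apply always widens, while invert fails outside the Float range.
private object NumericInjectionsExample extends NumericInjections {
  def demo(): Unit = {
    val widened: Double = float2Double(1.5f)     // widening always succeeds
    val roundTrip = float2Double.invert(1.5)     // Success(1.5f)
    val overflow = float2Double.invert(1e300)    // Failure: 1e300 does not fit in a Float
    println(s"$widened $roundTrip $overflow")
  }
}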
|
twitter/bijection
|
bijection-core/src/main/scala/com/twitter/bijection/NumericInjections.scala
|
Scala
|
apache-2.0
| 6,238
|
package com.phasmid.laScala.fp
import scala.language.{higherKinds, postfixOps}
import scala.util._
/**
* This is a 2.10-specific appendix to FP
*
* @author scalaprof
*/
object FP_Cross {
/**
* This method is only required when compiling with Scala 2.10.
*
* @param ox the given Option
* @param x the value we want to compare with ox
* @tparam X the underlying type
* @return true if ox is Some(x)
*/
def contains[X](ox: Option[X], x: X): Boolean = ox.isDefined && ox.get == x
/**
* Method to map a pair of Try values (of the same underlying type) into a Try value of another type (which could, of course, be the same)
*
* NOTE: this implementation (for 2.10) simply invokes the non-optimized version of map2
*
* @param ty1 a Try[T] value
* @param ty2 a Try[T] value passed as call-by-name
* @param f function which takes two T parameters and yields a U result
* @param g (implicit) guard function which, given the first parameter's value, must be true for the second parameter (ty2) to be evaluated
* @param default (implicit) a default value
* @tparam T the input type
* @tparam U the result type
* @return a Try[U]
*/
def map2lazy[T, U](ty1: Try[T], ty2: => Try[T])(f: (T, T) => U)(implicit g: T => Boolean = { _: T => true }, default: Try[U] = Failure[U](new Exception("no default result specified"))): Try[U] =
FP.map2(ty1, ty2)(f)
/**
* Method to map three Try values (of the same underlying type) into a Try value of another type (which could, of course, be the same)
*
* @param ty1 a Try[T] value
* @param ty2 a Try[T] value passed as call-by-name
* @param ty3 a Try[T] value passed as call-by-name
* @param f function which takes two T parameters and yields a U result
* @param g (implicit) guard function which, given the first parameter's value, must be true for the second parameter (ty2) to be evaluated;
* and which, given the second parameter's value, must be true for the third parameter (ty3) to be evaluated
* @param default (implicit) a default value
* @tparam T the input type
* @tparam U the result type
* @return a Try[U]
*/
def map3lazy[T, U](ty1: Try[T], ty2: => Try[T], ty3: => Try[T])(f: (T, T, T) => U)(implicit g: T => Boolean = { _: T => true }, default: Try[U] = Failure[U](new Exception("no default result specified"))): Try[U] =
FP.map3(ty1, ty2, ty3)(f)
}
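// Hedged usage sketch (not part of the upstream file): combining two Try values with map2lazy,
// relying on the default guard and default value declared in its implicit parameter list.
private object FP_CrossExample {
  def demo(): Try[Int] = FP_Cross.map2lazy(Success(1), Success(2))(_ + _) // Success(3)
}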
|
rchillyard/LaScala
|
src/main/scala-2.10/com/phasmid/laScala/fp/FP_Cross.scala
|
Scala
|
lgpl-2.1
| 2,505
|
package com.arcusys.learn.liferay.update
import com.arcusys.learn.liferay.LiferayClasses.LUpgradeProcess
import com.arcusys.learn.liferay.update.version240.storyTree.StoryTreeTableComponent
import com.arcusys.valamis.persistence.common.SlickDBInfo
import com.arcusys.valamis.web.configuration.ioc.Configuration
import com.escalatesoft.subcut.inject.Injectable
class DBUpdater2320 extends LUpgradeProcess with Injectable {
implicit lazy val bindingModule = Configuration
override def getThreshold = 2320
override def doUpgrade(): Unit = {
val dbInfo = inject[SlickDBInfo]
new StoryTreeTableComponent {
override protected val driver = dbInfo.slickProfile
import driver.simple._
dbInfo.databaseDef.withTransaction { implicit session =>
trees.ddl.create
}
}
}
}
|
arcusys/Valamis
|
learn-portlet/src/main/scala/com/arcusys/learn/liferay/update/DBUpdater2320.scala
|
Scala
|
gpl-3.0
| 818
|
package circlepuzzles.geometry
/**
* Objects that specify computations for a particular two-dimensional plane-like geometric setting. Subclasses of this
* trait are generally singleton objects.
*
* The types `Point`, `Circle`, `Disk`, and `Arc` must correctly implement `equals` and `hashCode` methods according to
* the [[Object]] contract. These types and `ArcsOnCircle` must all be immutable.
*/
trait Geometry {
// Type members
/**
* Type of individual points.
*/
type Point <: BasePoint
/**
* Type of circles. A circle is defined as a set of points that are equidistant from a center point.
*/
type Circle <: BaseCircle
/**
* Type of disks in this geometry. A disk is defined as a closed set of points whose boundary is a circle. Depending
* on context, this can either be the interior or exterior of a circle.
*/
type Disk <: BaseDisk
/**
* Type of arcs. An arc is defined as a set of points on a circle between a given start and end point, inclusive.
*/
type Arc <: BaseArc
/**
* Type of disjoint arc segments on a single circle.
*/
type ArcsOnCircle <: BaseArcsOnCircle
// Base traits for type members
/**
* Base trait for points. Immutable.
*/
trait BasePoint {
this: Point => // Every BasePoint must also be a Point
/**
* Rotate about the given point in the counterclockwise direction.
* @param rotationCenter Center of rotation.
* @param angle Angle of rotation.
* @return Image of this under the specified rotation.
*/
def rotate(rotationCenter: Point, angle: Angle): Point
}
/**
* Base trait for circles. Immutable.
*/
trait BaseCircle extends HasCenter {
this: Circle => // Every BaseCircle must also be a Circle
/**
* Produces an empty set of arcs around this circle.
* @return Empty set of arcs around this circle.
*/
def emptyArcs: ArcsOnCircle
/**
* Produces a complete (i.e. full circle) set of arcs around this circle.
* @return Full set of arcs around this circle.
*/
def fullArcs: ArcsOnCircle
/**
* Rotate about the given point in the counterclockwise direction.
* @param rotationCenter Center of rotation.
* @param angle Angle of rotation.
* @return Image of this under the specified rotation.
*/
def rotate(rotationCenter: Point, angle: Angle): Circle
}
/**
* Base trait for disks. Immutable.
*/
trait BaseDisk extends HasCircle {
this: Disk => // Every BaseDisk must also be a Disk
/**
* Return an integer indicating the location of the given point relative to this disk.
* @param pt Point to test for membership.
* @return A negative, zero, or positive integer if the point is respectively on the interior, boundary, or
* exterior of this disk.
*/
def containsCompare(pt: Point): Int
/**
* Test if the given point is in the interior or on the boundary of this disk.
* @param pt Point to test for membership.
* @return True if and only if this disk contains the given point.
*/
def contains(pt: Point): Boolean = containsCompare(pt) <= 0
/**
* Test if the given point is in the interior of this disk.
* @param pt Point to test for membership.
* @return True if and only if this disk strictly contains the given point.
*/
def strictlyContains(pt: Point): Boolean = containsCompare(pt) < 0
/**
* Rotate about the given point in the counterclockwise direction.
* @param rotationCenter Center of rotation.
* @param angle Angle of rotation.
* @return Image of this under the specified rotation.
*/
def rotate(rotationCenter: Point, angle: Angle): Disk
}
/**
* Base trait for arcs. Immutable.
*/
trait BaseArc extends HasCircle {
this: Arc => // Every BaseArc must also be an Arc
/**
* Attempts to join two arcs that share an endpoint. If `this` and `that` belong to the same circle, this returns
* `Some(joined)`, where `joined` is the arc formed by connecting the arcs at their shared point. Otherwise,
* returns `None`.
* @param that Arc to combine that shares exactly one endpoint with this.
* @return Joining of `this` and `that`, or `None` if they can't be combined.
*/
def join(that: Arc): Option[Arc]
/**
* The point at which this arc begins, in the counterclockwise direction.
* @return Start point of this arc.
*/
def startPoint: Point
/**
* The point at which this arc ends, in the counterclockwise direction.
* @return End point of this arc.
*/
def endPoint: Point
/**
* The point on this arc which is equidistant from the start and end points.
* @return Midpoint of this arc.
*/
def midPoint: Point
/**
* Rotate about the given point in the counterclockwise direction.
* @param rotationCenter Center of rotation.
* @param angle Angle of rotation.
* @return Image of this under the specified rotation.
*/
def rotate(rotationCenter: Point, angle: Angle): Arc
}
/**
* Base trait for arcs on a circle. Immutable.
*/
trait BaseArcsOnCircle extends HasCircle {
this: ArcsOnCircle => // Every BaseArcsOnCircle must also be an ArcsOnCircle
/**
* Compute the union of this with the given arcs by joining overlapping segments.
* @param that Arcs around the same circle (i.e. requires `this.circle == that.circle`).
* @return `ArcsOnCircle` containing all arcs that belong to either `this` or `that`.
*/
def sameCircleUnion(that: ArcsOnCircle): ArcsOnCircle
/**
* Compute the difference of this with the given arcs by subtracting overlapping segments.
* @param that Arcs around the same circle (i.e. requires `this.circle == that.circle`).
* @return `ArcsOnCircle` containing all arcs that belong to `this` but not `that`.
*/
def sameCircleDifference(that: ArcsOnCircle): ArcsOnCircle
/**
* Compute the arc segments in this collection that intersect with the given disk.
* @param disk Disk with which to intersect.
* @return The intersection of `this` with the given disk.
*/
def intersection(disk: Disk): ArcsOnCircle
/**
* Test if this contains any positive-length arc segments.
* @return True if and only if this arc collection is nonempty.
*/
def nonEmpty: Boolean
/**
* Rotate about the given point in the counterclockwise direction.
* @param rotationCenter Center of rotation.
* @param angle Angle of rotation.
* @return Image of this under the specified rotation.
*/
def rotate(rotationCenter: Point, angle: Angle): ArcsOnCircle
}
// Shared traits for type members
/**
* Objects that have a well-defined center.
*/
trait HasCenter {
/**
* The natural center of this object.
* @return Center of this object.
*/
def center: Point
}
/**
* Objects that are associated with a single circle.
*/
trait HasCircle extends HasCenter {
/**
* The circle associated with this object.
* @return Circle of this object.
*/
def circle: Circle
/**
* The natural center of this object, which is the center of its circle.
* @return Center of this object.
*/
override def center: Point = circle.center
}
}
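// Hedged, self-contained sketch (not part of the upstream file): a miniature version of the
// pattern used above -- an abstract type member bounded by a base trait whose self-type forces
// every implementation to also be the concrete member type.
private object TypeMemberPatternSketch {
  trait Shapes {
    type Point <: BasePoint
    trait BasePoint { this: Point =>
      def shifted: Point
    }
  }
  object IntShapes extends Shapes {
    type Point = Point1D
    case class Point1D(x: Int) extends BasePoint {
      def shifted: Point1D = Point1D(x + 1)
    }
  }
}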
|
wkretschmer/CirclePuzzles
|
src/main/scala/circlepuzzles/geometry/Geometry.scala
|
Scala
|
mit
| 7,519
|
package com.twitter.finatra.json.tests.internal.caseclass.validation
import com.twitter.finatra.validation.NotEmpty
case class CaseClassWithTwoConstructors(id: Long, @NotEmpty name: String) {
def this(id: Long) = this(id, "New User")
}
|
syamantm/finatra
|
jackson/src/test/scala/com/twitter/finatra/json/tests/internal/caseclass/validation/CaseClassWithTwoConstructors.scala
|
Scala
|
apache-2.0
| 240
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.tree.model
import org.apache.spark.annotation.DeveloperApi
/**
* Predicted value for a node
* @param predict predicted value
* @param prob probability of the label (classification only)
*/
@DeveloperApi
class Predict(
val predict: Double,
val prob: Double = 0.0) extends Serializable {
override def toString = {
"predict = %f, prob = %f".format(predict, prob)
}
override def equals(other: Any): Boolean = {
other match {
case p: Predict => predict == p.predict && prob == p.prob
case _ => false
}
}
  // equals is overridden above, so hashCode must stay consistent with it
  override def hashCode: Int = 31 * predict.hashCode + prob.hashCode
}
|
trueyao/spark-lever
|
mllib/src/main/scala/org/apache/spark/mllib/tree/model/Predict.scala
|
Scala
|
apache-2.0
| 1,382
|
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2019 Helge Holzmann (Internet Archive) <helge@archive.org>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package org.archive.archivespark.specific.warc.implicits
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, PrintWriter}
import org.apache.hadoop.fs.Path
import org.apache.spark.rdd.RDD
import org.archive.archivespark.model.dataloads.ByteLoad
import org.archive.archivespark.sparkling.io.{GzipBytes, HdfsIO, IOUtil}
import org.archive.archivespark.sparkling.util.{Common, RddUtil, StringUtil}
import org.archive.archivespark.sparkling.warc.{
WarcHeaders,
WarcRecord,
WarcRecordMeta
}
import org.archive.archivespark.specific.warc.functions.{
HttpPayload,
WarcPayloadFields
}
import org.archive.archivespark.specific.warc.{WarcFileMeta, WarcLikeRecord}
import scala.reflect.ClassTag
object WarcRDD {
val WarcIpField = "WARC-IP-Address"
val WarcRecordIdField = "WARC-Record-ID"
}
class WarcRDD[WARC <: WarcLikeRecord: ClassTag](rdd: RDD[WARC]) {
import WarcRDD._
import org.archive.archivespark.sparkling.Sparkling._
def saveAsWarc(path: String,
meta: org.archive.archivespark.sparkling.warc.WarcFileMeta =
WarcFileMeta(),
generateCdx: Boolean = true): Long = {
HdfsIO.ensureOutDir(path)
val gz = path.toLowerCase.trim.endsWith(GzipExt)
val filePrefix =
StringUtil.stripSuffixes(new Path(path).getName, GzipExt, WarcExt, ArcExt)
val emptyLines = {
val bytes = new ByteArrayOutputStream()
val print = new PrintWriter(bytes)
print.println()
print.println()
print.flush()
bytes.toByteArray
}
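    // one WARC file (plus, when requested, one CDX index) is written per RDD partition; byte positions
    // into the (possibly gzip-compressed) output are tracked so each CDX line can point back into the WARC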
rdd.mapPartitionsWithIndex {
case (idx, records) =>
val warcPrefix = filePrefix + "-" + StringUtil.padNum(idx, 5)
val warcFile = warcPrefix + WarcExt + (if (gz) GzipExt else "")
val warcPath = new Path(path, warcFile).toString
val warcCdxPath = new Path(
path,
warcPrefix + WarcExt + CdxExt + (if (gz) GzipExt else "")
).toString
var warcPosition = 0L
val warcOut = Common.lazyValWithCleanup({
val out = HdfsIO.out(warcPath, compress = false)
val headerBytes = WarcHeaders.warcFile(meta, warcFile)
val compressedHeader = if (gz) GzipBytes(headerBytes) else headerBytes
out.write(compressedHeader)
warcPosition += compressedHeader.length
out
})(_.close)
val cdxOut = Common.lazyValWithCleanup(
IOUtil.print(HdfsIO.out(warcCdxPath))
)(_.close)
val processed = records.map { record =>
val payloadPointer = record.dataLoad(ByteLoad).get
val enriched = payloadPointer.init(record, excludeFromOutput = false)
val warcHeadersOpt = payloadPointer
.sibling[Seq[(String, String)]](WarcPayloadFields.RecordHeader)
.get(enriched)
val recordMeta = WarcRecordMeta(
enriched.get.originalUrl,
enriched.get.time.toInstant,
warcHeadersOpt.flatMap(_.find(_._1 == WarcRecordIdField).map(_._2)),
warcHeadersOpt.flatMap(_.find(_._1 == WarcIpField).map(_._2))
)
val httpStatusOpt = payloadPointer
.sibling[String](HttpPayload.StatusLineField)
.get(enriched)
val httpHeadersOpt = payloadPointer
.sibling[Seq[(String, String)]](HttpPayload.HeaderField)
.get(enriched)
val httpHeader =
if (httpStatusOpt.isDefined && httpHeadersOpt.isDefined)
WarcHeaders.http(httpStatusOpt.get, httpHeadersOpt.get)
else Array.empty[Byte]
payloadPointer.get(enriched) match {
case Some(payload) =>
val content = httpHeader ++ payload
val recordHeader =
WarcHeaders.warcResponseRecord(recordMeta, content, payload)
val recordBytes =
if (gz) GzipBytes(recordHeader ++ content ++ emptyLines)
else recordHeader ++ content ++ emptyLines
if (generateCdx) {
val warc = WarcRecord.get(new ByteArrayInputStream(recordBytes))
if (warc.isDefined) {
val cdx = warc.get.toCdx(recordBytes.length)
if (cdx.isDefined) {
warcOut.get.write(recordBytes)
val locationInfo = Array(warcPosition.toString, warcFile)
cdxOut.get.println(cdx.get.toCdxString(locationInfo))
warcPosition += recordBytes.length
1L
} else 0L
} else 0L
} else {
warcOut.get.write(recordBytes)
warcPosition += recordBytes.length
1L
}
case None => 0L
}
}.sum
warcOut.clear(true)
cdxOut.clear(true)
Iterator(processed)
}
}.reduce(_ + _)
def toCdxStrings: RDD[String] = toCdxStrings()
def toCdxStrings(includeAdditionalFields: Boolean = true): RDD[String] =
rdd.map(_.toCdxString(includeAdditionalFields))
def saveAsCdx(path: String): Unit = RddUtil.saveAsTextFile(toCdxStrings, path)
}
|
helgeho/ArchiveSpark
|
src/main/scala/org/archive/archivespark/specific/warc/implicits/WarcRDD.scala
|
Scala
|
mit
| 6,337
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.stream.sql.join
import org.apache.flink.table.plan.common.JoinReorderTestBase
import org.apache.flink.table.util.TableTestUtil
class JoinReorderTest extends JoinReorderTestBase {
override protected def getTableTestUtil: TableTestUtil = streamTestUtil()
}
|
shaoxuan-wang/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/plan/stream/sql/join/JoinReorderTest.scala
|
Scala
|
apache-2.0
| 1,101
|
package com.michaelhamrah.flickrgpx
import scala.xml._
import com.github.nscala_time.time.Imports._
import scala.language.implicitConversions
object GpxFile {
implicit def NodeSeqToDouble(value: NodeSeq):Double = {
value.text match {
case "" => 0.0
case s:String => s.toDouble
}
}
def GetWaypointsFromGpxFile(file: String): Option[Seq[Waypoint]] = {
try {
val data = XML.loadFile(file)
val waypoints = (data \\ "wpt") filter (node => (node \\ "time").nonEmpty) map BuildWaypoint
Some(waypoints)
}
catch {
case ex : Throwable => println(ex); return None
}
}
def BuildWaypoint(node: Node) = {
Waypoint(
long = (node \\ "@lon")
,lat = (node \\ "@lat")
,elev = (node \\ "ele")
,name = (node \\ "name").text
,uniqueId = (node \\ "extensions" \\ "objectid").text
,time = (node \\ "time").text.toDateTime.withZone(DateTimeZone.forID("UTC"))
)
}
}
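// Hedged usage sketch (not part of the upstream file): building a single waypoint from an
// in-memory <wpt> element instead of loading a whole GPX file from disk.
private object GpxFileExample {
  def demo() = GpxFile.BuildWaypoint(
    <wpt lat="40.7128" lon="-74.0060">
      <ele>10.0</ele>
      <name>NYC</name>
      <time>2014-01-01T00:00:00Z</time>
      <extensions><objectid>42</objectid></extensions>
    </wpt>
  )
}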
|
mhamrah/FlickrGpx
|
src/main/scala/GpxFile.scala
|
Scala
|
mit
| 967
|
package com.twitter.finagle.http
import com.twitter.conversions.DurationOps._
import com.twitter.util.Stopwatch
import java.net.URLEncoder
import java.nio.charset.StandardCharsets
import org.scalacheck.Gen
import org.scalatest.FunSuite
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
import scala.collection.JavaConverters._
class QueryParamCodecTest
extends FunSuite
with ScalaCheckDrivenPropertyChecks
with Eventually
with IntegrationPatience {
private def encode(s: String): String = URLEncoder.encode(s, StandardCharsets.UTF_8.name)
private def genNonEmptyToken: Gen[String] =
Gen.nonEmptyListOf(Gen.choose('a', 'z')).map(s => new String(s.toArray))
private def genToken: Gen[String] =
Gen.listOf(Gen.choose('a', 'z')).map(s => new String(s.toArray))
private def genKeyValuePair: Gen[(String, String)] =
for {
k <- genNonEmptyToken
v <- genToken
} yield k -> v
private def genParams: Gen[Seq[(String, String)]] = Gen.listOf(genKeyValuePair).map(_.sorted)
private def roundTrip(params: Seq[(String, String)]): Unit = {
val queryString = QueryParamEncoder.encode(params)
val result = QueryParamDecoder.decode(queryString)
val flattened = result.asScala.toSeq.flatMap {
case (key, values) =>
values.asScala.map(key -> _)
}.sorted
assert(flattened == params)
}
private val samples = Seq(
"a" -> "b",
"a" -> "b", // nobody says you can't repeat a value
"empty value" -> "", // empty value
"empty value" -> "", // nobody says you can't repeat an empty value
"$$&&%;" -> "LOL!!&&;Z",
encode("$$&&%;") -> encode("LOL!!&&;Z") // should be able to encode encoded samples
).sorted
test("Will round trip explicit samples") {
roundTrip(samples)
}
test("arbitrary keys and values") {
forAll(genParams) { params =>
roundTrip(params)
}
}
test("Decode a uri without a query string") {
assert(QueryParamDecoder.decode("foo.com").isEmpty)
assert(QueryParamDecoder.decode("foo.com/bar").isEmpty)
assert(QueryParamDecoder.decode("foo.com/bar?").isEmpty)
}
test("Encode an empty query string") {
assert(QueryParamEncoder.encode(Map.empty) == "")
}
test("Decodes both '%20' and '+' as `space`") {
Seq(
"?foo%20=bar",
"?foo+=bar"
).foreach { s =>
val result = QueryParamDecoder.decode(s)
assert(result.size == 1)
val params = result.get("foo ")
assert(params.size == 1)
assert(params.get(0) == "bar")
}
}
test("Illegal query params") {
Seq(
"?f%tf=bar", // Illegal hex char
"?foo%=bar", // missing hex chars
"?foo%d=bar" // missing hex char
).foreach { uri =>
intercept[IllegalArgumentException] {
QueryParamDecoder.decode(uri)
}
}
}
private def collisions(num: Int, length: Int): Iterator[String] = {
val equiv = Array("Aa", "BB")
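    // "Aa" and "BB" have the same String hashCode (2112), so every length-2n concatenation of them collides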
// returns a length 2n string, indexed by x
// 0 <= x < 2^n
// for a fixed n, all strings have the same hashCode
def f(x: Int, n: Int): String = n match {
case 0 => ""
case _ => equiv(x % 2) + f(x / 2, n - 1)
}
(0 until num).toIterator.map(f(_, length))
}
test("massive number of collisions isn't super slow") {
// Using a quad core laptop i7 (single threaded) this many params took 399771 ms
// for scala HashMap and 277 ms using the Java LinkedHashMap on Java 8.
val num = 100 * 1000
val cs = collisions(num, 22)
val queryString = cs.map(_ + "=a").mkString("?", "&", "")
eventually {
val stopwatch = Stopwatch.start()
val result = QueryParamDecoder.decode(queryString)
assert(result.size == num)
// we give a generous 2 seconds to complete, 10x what was observed in local
// testing because CI can be slow at times. We'd expect quadratic behavior
// to take two orders of magnitude longer, so just making sure it's below
// 2 seconds should be enough to confirm we're not vulnerable to DoS attack.
assert(stopwatch() < 2.seconds)
}
}
}
|
luciferous/finagle
|
finagle-base-http/src/test/scala/com/twitter/finagle/http/QueryParamCodecTest.scala
|
Scala
|
apache-2.0
| 4,163
|
import play.api.mvc.{SimpleResult, RequestHeader, Filter}
import play.api.Logger
import scala.concurrent.Future
import play.api.libs.concurrent.Execution.Implicits.defaultContext
object LoggingFilter extends Filter {
def apply(nextFilter: (RequestHeader) => Future[SimpleResult]
)(requestHeader: RequestHeader): Future[SimpleResult] = {
val startTime = System.currentTimeMillis
nextFilter(requestHeader).map { result =>
import play.api.Routes
val action = requestHeader.tags(play.api.Routes.ROUTE_CONTROLLER) +
"." + requestHeader.tags(Routes.ROUTE_ACTION_METHOD) +
"-" + requestHeader.uri
val endTime = System.currentTimeMillis
val requestTime = endTime - startTime
Logger.info(s"${action} took ${requestTime}ms" +
s" and returned ${result.header.status}")
result.withHeaders("Request-Time" -> requestTime.toString)
}
}
}
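// Hedged note (illustration only): with the Play 2.2.x API used here (SimpleResult), this filter
// would typically be installed through the application's Global object, e.g.
//   object Global extends play.api.mvc.WithFilters(LoggingFilter)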
|
kcis/kcis-prot
|
app/controllers/LoggingFilter.scala
|
Scala
|
apache-2.0
| 932
|
package org.jetbrains.plugins.scala.testingSupport.scalatest.scala2_11.scalatest3_0_1
import org.jetbrains.plugins.scala.SlowTests
import org.jetbrains.plugins.scala.testingSupport.scalatest.SpecialCharactersTest
import org.junit.experimental.categories.Category
/**
* @author Roman.Shein
* @since 10.03.2017
*/
@Category(Array(classOf[SlowTests]))
class Scalatest2_11_3_0_1_SpecialCharactersTest extends Scalatest2_11_3_0_1_Base with SpecialCharactersTest
|
jastice/intellij-scala
|
scala/scala-impl/test/org/jetbrains/plugins/scala/testingSupport/scalatest/scala2_11/scalatest3_0_1/Scalatest2_11_3_0_1_SpecialCharactersTest.scala
|
Scala
|
apache-2.0
| 462
|
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.filter
import java.time.temporal.ChronoUnit
import java.time.{ZoneOffset, ZonedDateTime}
import java.util.Date
import org.geotools.filter.text.ecql.ECQL
import org.geotools.filter.{IsGreaterThanImpl, IsLessThenImpl, LiteralExpressionImpl}
import org.geotools.util.Converters
import org.junit.runner.RunWith
import org.locationtech.geomesa.filter.Bounds.Bound
import org.locationtech.geomesa.filter.visitor.QueryPlanFilterVisitor
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.date.DateUtils.toInstant
import org.opengis.filter._
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class FilterHelperTest extends Specification {
val sft = SimpleFeatureTypes.createType("FilterHelperTest",
"dtg:Date,number:Int,a:Int,b:Int,c:Int,*geom:Point:srid=4326")
def updateFilter(filter: Filter): Filter = QueryPlanFilterVisitor.apply(sft, filter)
def toInterval(dt1: String, dt2: String, inclusive: Boolean = true): Bounds[ZonedDateTime] = {
val s = Option(Converters.convert(dt1, classOf[Date])).map(d => ZonedDateTime.ofInstant(toInstant(d), ZoneOffset.UTC))
val e = Option(Converters.convert(dt2, classOf[Date])).map(d => ZonedDateTime.ofInstant(toInstant(d), ZoneOffset.UTC))
Bounds(Bound(s, if (s.isDefined) inclusive else false), Bound(e, if (e.isDefined) inclusive else false))
}
"FilterHelper" should {
"evaluate functions with 0 arguments" >> {
val filter = ECQL.toFilter("dtg < currentDate()")
val updated = updateFilter(filter)
updated.asInstanceOf[IsLessThenImpl].getExpression2.isInstanceOf[LiteralExpressionImpl] mustEqual true
}
"evaluate functions with 1 argument" >> {
val filter = ECQL.toFilter("dtg > currentDate('P2D')")
val updated = updateFilter(filter)
updated.asInstanceOf[IsGreaterThanImpl].getExpression2.isInstanceOf[LiteralExpressionImpl] mustEqual true
}
"evaluate functions representing the last day" >> {
val filter = ECQL.toFilter("dtg > currentDate('-P1D') AND dtg < currentDate()")
val updated = updateFilter(filter)
val intervals = FilterHelper.extractIntervals(updated, "dtg", handleExclusiveBounds = true)
intervals.values(0).lower.value.get.until(
intervals.values(0).upper.value.get, ChronoUnit.HOURS) must beCloseTo(24l, 2)
}
"evaluate functions with math" >> {
val filter = ECQL.toFilter("number < 1+2")
val updated = updateFilter(filter)
updated.asInstanceOf[IsLessThenImpl].getExpression2.evaluate(null) mustEqual 3
}
"fix out of bounds bbox" >> {
val filter = ff.bbox(ff.property("geom"), -181, -91, 181, 91, "4326")
val updated = updateFilter(filter)
updated mustEqual Filter.INCLUDE
}
"be idempotent with bbox" >> {
val filter = ff.bbox(ff.property("geom"), -181, -91, 181, 91, "4326")
val updated = updateFilter(filter)
val reupdated = updateFilter(updated)
ECQL.toCQL(updated) mustEqual ECQL.toCQL(reupdated)
}
"be idempotent with dwithin" >> {
val filter = ff.dwithin(ff.property("geom"), ff.literal("LINESTRING (-45 0, -90 45)"), 1000, "meters")
val updated = updateFilter(filter)
val reupdated = updateFilter(updated)
ECQL.toCQL(updated) mustEqual ECQL.toCQL(reupdated)
}
"not modify valid intersects" >> {
val filter = ff.intersects(ff.property("geom"), ff.literal("POLYGON((45 23, 45 27, 48 27, 48 23, 45 23))"))
val updated = updateFilter(filter)
ECQL.toCQL(updated) mustEqual "INTERSECTS(geom, POLYGON ((45 23, 45 27, 48 27, 48 23, 45 23)))"
}
"fix IDL polygons in intersects" >> {
val filter = ff.intersects(ff.property("geom"), ff.literal("POLYGON((-150 23,-164 11,45 23,49 30,-150 23))"))
val updated = updateFilter(filter)
ECQL.toCQL(updated) mustEqual "INTERSECTS(geom, POLYGON ((-180 12.271523178807946, -180 24.304347826086957, " +
"-150 23, -164 11, -180 12.271523178807946))) OR INTERSECTS(geom, POLYGON ((180 24.304347826086957, " +
"180 12.271523178807946, 45 23, 49 30, 180 24.304347826086957)))"
}
"be idempotent with intersects" >> {
val filter = ff.intersects(ff.property("geom"), ff.literal("POLYGON((-150 23,-164 11,45 23,49 30,-150 23))"))
val updated = updateFilter(filter)
val reupdated = updateFilter(updated)
ECQL.toCQL(updated) mustEqual ECQL.toCQL(reupdated)
}
"extract interval from simple during and between" >> {
val predicates = Seq(("dtg DURING 2016-01-01T00:00:00.000Z/2016-01-02T00:00:00.000Z", false),
("dtg BETWEEN '2016-01-01T00:00:00.000Z' AND '2016-01-02T00:00:00.000Z'", true))
forall(predicates) { case (predicate, inclusive) =>
val filter = ECQL.toFilter(predicate)
val intervals = FilterHelper.extractIntervals(filter, "dtg")
intervals mustEqual FilterValues(Seq(toInterval("2016-01-01T00:00:00.000Z", "2016-01-02T00:00:00.000Z", inclusive)))
}
}
"extract interval from narrow during" >> {
val filter = ECQL.toFilter("dtg DURING 2016-01-01T00:00:00.000Z/T1S")
val intervals = FilterHelper.extractIntervals(filter, "dtg", handleExclusiveBounds = true)
intervals mustEqual FilterValues(Seq(toInterval("2016-01-01T00:00:00.000Z", "2016-01-01T00:00:01.000Z", inclusive = false)))
}
"extract interval with exclusive endpoints from simple during and between" >> {
val during = "dtg DURING 2016-01-01T00:00:00.000Z/2016-01-02T00:00:00.000Z"
val between = "dtg BETWEEN '2016-01-01T00:00:00.000Z' AND '2016-01-02T00:00:00.000Z'"
val dIntervals = FilterHelper.extractIntervals(ECQL.toFilter(during), "dtg", handleExclusiveBounds = true)
val bIntervals = FilterHelper.extractIntervals(ECQL.toFilter(between), "dtg", handleExclusiveBounds = true)
dIntervals mustEqual FilterValues(Seq(toInterval("2016-01-01T00:00:01.000Z", "2016-01-01T23:59:59.000Z")))
bIntervals mustEqual FilterValues(Seq(toInterval("2016-01-01T00:00:00.000Z", "2016-01-02T00:00:00.000Z")))
}
"extract interval from simple equals" >> {
val filters = Seq("dtg = '2016-01-01T00:00:00.000Z'", "dtg TEQUALS 2016-01-01T00:00:00.000Z")
forall(filters) { cql =>
val filter = ECQL.toFilter(cql)
val intervals = FilterHelper.extractIntervals(filter, "dtg")
intervals mustEqual FilterValues(Seq(toInterval("2016-01-01T00:00:00.000Z", "2016-01-01T00:00:00.000Z")))
}
}
"extract interval from simple after" >> {
val filters = Seq(
("dtg > '2016-01-01T00:00:00.000Z'", false),
("dtg >= '2016-01-01T00:00:00.000Z'", true),
("dtg AFTER 2016-01-01T00:00:00.000Z", false)
)
forall(filters) { case (cql, inclusive) =>
val filter = ECQL.toFilter(cql)
val intervals = FilterHelper.extractIntervals(filter, "dtg")
intervals mustEqual FilterValues(Seq(toInterval("2016-01-01T00:00:00.000Z", null, inclusive)))
}
}
"extract interval from simple after with exclusive bounds" >> {
val filters = Seq(
"dtg > '2016-01-01T00:00:00.000Z'",
"dtg >= '2016-01-01T00:00:01.000Z'",
"dtg AFTER 2016-01-01T00:00:00.000Z"
)
forall(filters) { cql =>
val filter = ECQL.toFilter(cql)
val intervals = FilterHelper.extractIntervals(filter, "dtg", handleExclusiveBounds = true)
intervals mustEqual FilterValues(Seq(toInterval("2016-01-01T00:00:01.000Z", null)))
}
}
"extract interval from simple before" >> {
val filters = Seq(
("dtg < '2016-01-01T00:00:00.000Z'", false),
("dtg <= '2016-01-01T00:00:00.000Z'", true),
("dtg BEFORE 2016-01-01T00:00:00.000Z", false)
)
forall(filters) { case (cql, inclusive) =>
val filter = ECQL.toFilter(cql)
val intervals = FilterHelper.extractIntervals(filter, "dtg")
intervals mustEqual FilterValues(Seq(toInterval(null, "2016-01-01T00:00:00.000Z", inclusive)))
}
}
"extract interval from simple before with exclusive bounds" >> {
val filters = Seq(
"dtg < '2016-01-01T00:00:00.001Z'",
"dtg <= '2016-01-01T00:00:00.000Z'",
"dtg BEFORE 2016-01-01T00:00:00.001Z"
)
forall(filters) { cql =>
val filter = ECQL.toFilter(cql)
val intervals = FilterHelper.extractIntervals(filter, "dtg", handleExclusiveBounds = true)
intervals mustEqual FilterValues(Seq(toInterval(null, "2016-01-01T00:00:00.000Z")))
}
}
"deduplicate OR filters" >> {
val filters = Seq(
("(a > 1 AND b < 2 AND c = 3) OR (c = 3 AND a > 2 AND b < 2) OR (b < 2 AND a > 3 AND c = 3)",
"(a > 3 OR a > 1 OR a > 2) AND c = 3 AND b < 2"),
("c = 3 AND ((a > 2 AND b < 2) OR (b < 2 AND a > 3))", "c = 3 AND (a > 2 OR a > 3) AND b < 2"),
("(a > 1) OR (c = 3)", "a > 1 OR c = 3"),
("(a > 1) AND (c = 3)", "a > 1 AND c = 3"),
("a > 1", "a > 1")
)
forall(filters) { case (original, expected) =>
ECQL.toCQL(FilterHelper.simplify(ECQL.toFilter(original))) mustEqual expected
}
}
"deduplicate massive OR filters without stack overflow" >> {
import scala.collection.JavaConversions._
// actual count to get the old code to stack overflow varies depending on environment
// with the fix, tested up to 100k without issue, but the specs checks take a long time with that many
val count = 1000
val a = ff.property("a")
var filter: Filter = ff.equal(a, ff.literal(0))
(1 until count).foreach { i =>
filter = ff.or(filter, ff.equal(a, ff.literal(i)))
}
val flattened = FilterHelper.simplify(filter)
flattened must beAnInstanceOf[Or]
flattened.asInstanceOf[Or].getChildren must haveLength(count)
flattened.asInstanceOf[Or].getChildren.map(_.toString) must
containTheSameElementsAs((0 until count).map(i => s"[ a equals $i ]"))
}
}
}
|
elahrvivaz/geomesa
|
geomesa-filter/src/test/scala/org/locationtech/geomesa/filter/FilterHelperTest.scala
|
Scala
|
apache-2.0
| 10,573
|