code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/**
 * Demonstrates Scala's pattern-based value definitions: binder patterns
 * (a @ b), tuple destructuring, and Array extractor patterns.
 * Created by Variant on 16/4/6.
 */
object Assinment_Internals {
def main(args: Array[String]) {
// `a @ b` is a binder pattern: both names end up bound to the same value.
val a@b = 1000
println("a = "+ a + "b =" + b)
// Tuple pattern: c and d are bound positionally from the pair.
val (c,d) = (1000,"2000")
println("c = "+ c +"d=" + d)
// e, F: in pattern matching an upper-case F is treated as a constant
// (stable identifier) to match against, but no F is defined here.
//val F = _
//val (e,F) = (1000,2000)
// Array extractor pattern binds g and h to the two array elements.
val Array(g,h) = Array(1000,2000)
//val Array(i,J) = Array(1000,2000)
println(g + " " + h)
// Compiles, but fails at runtime with a MatchError, because objects are
// initialized lazily: `val 1 = 2` only runs when Test is first referenced.
//object Test{ val 1 = 2}
//Test
}
}
| sparkLiwei/ProgrammingNote | scalaLearning/scalaInSpark/Assinment_Internals.scala | Scala | cc0-1.0 | 597 |
/* ___ _ ___ _ _ *\\
** / __| |/ (_) | | The SKilL Generator **
** \\__ \\ ' <| | | |__ (c) 2013-16 University of Stuttgart **
** |___/_|\\_\\_|_|____| see LICENSE **
\\* */
package de.ust.skill.generator.c.model
import de.ust.skill.generator.c.GeneralOutputMaker
/**
 * Emits the C header `string_access.h`, declaring the string-pool type
 * (bidirectional id<->string hash maps) and its access functions.
 *
 * @author Fabian Harth, Timm Felden
 * @todo rename skill state to skill file
 * @todo ensure 80 characters margin
 */
trait StringAccessHeaderMaker extends GeneralOutputMaker {
abstract override def make {
super.make
val out = files.open(s"model/${prefix}string_access.h")
// The include-guard macro carries the upper-cased package prefix so that
// headers generated for different packages never collide.
val prefixCapital = packagePrefix.toUpperCase
out.write(s"""#ifndef ${prefixCapital}STRING_ACCESS_H_
#define ${prefixCapital}STRING_ACCESS_H_
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <glib.h>
typedef struct ${prefix}string_access_struct *${prefix}string_access;
/**
 * We need a mapping id->string and a mapping string->id. Both lookups should
 * be fast (NOT linear time in the number of strings). Thus, two maps are
 * stored here.
 */
typedef struct ${prefix}string_access_struct {
//! Mapping <id -> string>
GHashTable *strings_by_id;
//! Mapping <string -> id>
GHashTable *ids_by_string;
int64_t current_skill_id;
} ${prefix}string_access_struct;
${prefix}string_access ${prefix}string_access_new();
void ${prefix}string_access_destroy(${prefix}string_access this);
//! Returns the string with the given id or null, if the id is invalid.
char *${prefix}string_access_get_string_by_id(${prefix}string_access this, int64_t skill_id);
//! Returns the id, this string got assigned.
int64_t ${prefix}string_access_add_string(${prefix}string_access this, char *string);
int64_t ${prefix}string_access_get_id_by_string(${prefix}string_access this, char *string);
//! Returns a list of *char. The list must be deallocated manually.
GList *${prefix}string_access_get_all_strings(${prefix}string_access this);
int64_t ${prefix}string_access_get_size(${prefix}string_access this);
#endif /* ${prefixCapital}STRING_ACCESS_H_ */
""")
out.close()
}
}
| skill-lang/skill | src/main/scala/de/ust/skill/generator/c/model/StringAccessHeaderMaker.scala | Scala | bsd-3-clause | 2,291 |
package com.criteo.dev.cluster
import java.io.{File, PrintWriter}
import org.slf4j.LoggerFactory
import scala.collection.mutable.ListBuffer
import scala.sys.process._
/**
 * Special case of SSH Multi action that runs several hive commands.
 *
 * Queued statements are written to a local temp file, copied to the target
 * node via scp, and executed there in a single `hive -f` invocation.
 */
@Public
class SshHiveAction(node: Node, ignoreError: Boolean = false) {
private final val localTmpQueryFile = s"${GeneralUtilities.getTempDir}/tmphivequery"
private final val remoteTmpQueryFile = s"~/tmphivequery" //concurrent hive actions on same node not supported for now
// Statements queued via add(), executed in insertion order by run().
private val commands = new ListBuffer[String]
private val logger = LoggerFactory.getLogger(classOf[SshHiveAction])
private val processLogger = ProcessLogger(
(e: String) => logger.info("err " + e))
// Queues a Hive statement; it is not executed until run() is called.
def add(action: String): Unit = {
commands.+=(action)
}
// Writes all queued statements to a local file, ships it to the node, and
// runs `hive -f` on it. Returns the command output.
def run(): String = {
GeneralUtilities.prepareTempDir
// NOTE(review): the path concatenates the home dir with localTmpQueryFile,
// which itself already contains getTempDir — presumably getTempDir is
// relative to the home dir; confirm against GeneralUtilities.
val localQueryFile = new File(s"${GeneralUtilities.getHomeDir}/$localTmpQueryFile")
val writer = new PrintWriter(localQueryFile)
commands.foreach(s => {
writer.write(s"$s;\\n")
logger.info(s)
})
writer.close
localQueryFile.setExecutable(true)
localQueryFile.setReadable(true)
localQueryFile.deleteOnExit()
// Copy the query file to the remote node, then execute it there.
ScpAction(None, localTmpQueryFile, Some(node), remoteTmpQueryFile)
val ignoreErrorFlag = if (ignoreError) "-hiveconf hive.cli.errors.ignore=true" else ""
val ret = SshAction(node, s"hive $ignoreErrorFlag -f $remoteTmpQueryFile", returnResult = true, ignoreError)
GeneralUtilities.cleanupTempDir
ret
}
override def toString = {
commands.mkString("\\n")
}
}
object SshHiveAction {
  /**
   * Convenience factory: queues the given statements on a fresh
   * [[SshHiveAction]] and runs them immediately, returning the output.
   */
  def apply(node: Node, statements: List[String], ignoreError: Boolean = false) = {
    val hiveRun = new SshHiveAction(node, ignoreError)
    for (statement <- statements) {
      hiveRun.add(statement)
    }
    hiveRun.run()
  }
}
| szehonCriteo/berilia | src/main/scala/com/criteo/dev/cluster/SshHiveAction.scala | Scala | apache-2.0 | 1,823 |
package pl.iterators.kebs.support
import pl.iterators.kebs.macros.CaseClass1Rep
trait EquivSupport {

  /**
   * Derives an `Equiv[A]` for any single-field wrapper type `A` with
   * underlying representation `Rep`, by unwrapping both sides and delegating
   * to the implicit `Equiv[Rep]`.
   */
  implicit def equivFromCaseClass1Rep[A, Rep](implicit cc1Rep: CaseClass1Rep[A, Rep], equivRep: Equiv[Rep]): Equiv[A] =
    Equiv.fromFunction { (left: A, right: A) =>
      equivRep.equiv(cc1Rep.unapply(left), cc1Rep.unapply(right))
    }
}
| theiterators/kebs | macro-utils/src/main/scala-3/pl/iterators/kebs/support/EquivSupport.scala | Scala | mit | 300 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.status
import java.util.Date
import java.util.concurrent.ConcurrentHashMap
import java.util.function.Function
import scala.collection.JavaConverters._
import scala.collection.mutable.HashMap
import org.apache.spark._
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.CPUS_PER_TASK
import org.apache.spark.internal.config.Status._
import org.apache.spark.scheduler._
import org.apache.spark.status.api.v1
import org.apache.spark.storage._
import org.apache.spark.ui.SparkUI
import org.apache.spark.ui.scope._
/**
* A Spark listener that writes application information to a data store. The types written to the
* store are defined in the `storeTypes.scala` file and are based on the public REST API.
*
* @param lastUpdateTime When replaying logs, the log's last update time, so that the duration of
* unfinished tasks can be more accurately calculated (see SPARK-21922).
*/
private[spark] class AppStatusListener(
kvstore: ElementTrackingStore,
conf: SparkConf,
live: Boolean,
appStatusSource: Option[AppStatusSource] = None,
lastUpdateTime: Option[Long] = None) extends SparkListener with Logging {
private var sparkVersion = SPARK_VERSION
private var appInfo: v1.ApplicationInfo = null
private var appSummary = new AppSummary(0, 0)
private var coresPerTask: Int = 1
// How often to update live entities. -1 means "never update" when replaying applications,
// meaning only the last write will happen. For live applications, this avoids a few
// operations that we can live without when rapidly processing incoming task events.
private val liveUpdatePeriodNs = if (live) conf.get(LIVE_ENTITY_UPDATE_PERIOD) else -1L
private val maxTasksPerStage = conf.get(MAX_RETAINED_TASKS_PER_STAGE)
private val maxGraphRootNodes = conf.get(MAX_RETAINED_ROOT_NODES)
// Keep track of live entities, so that task metrics can be efficiently updated (without
// causing too many writes to the underlying store, and other expensive operations).
private val liveStages = new ConcurrentHashMap[(Int, Int), LiveStage]()
private val liveJobs = new HashMap[Int, LiveJob]()
private val liveExecutors = new HashMap[String, LiveExecutor]()
private val deadExecutors = new HashMap[String, LiveExecutor]()
private val liveTasks = new HashMap[Long, LiveTask]()
private val liveRDDs = new HashMap[Int, LiveRDD]()
private val pools = new HashMap[String, SchedulerPool]()
private val SQL_EXECUTION_ID_KEY = "spark.sql.execution.id"
// Keep the active executor count as a separate variable to avoid having to do synchronization
// around liveExecutors.
@volatile private var activeExecutorCount = 0
kvstore.addTrigger(classOf[ExecutorSummaryWrapper], conf.get(MAX_RETAINED_DEAD_EXECUTORS))
{ count => cleanupExecutors(count) }
kvstore.addTrigger(classOf[JobDataWrapper], conf.get(MAX_RETAINED_JOBS)) { count =>
cleanupJobs(count)
}
kvstore.addTrigger(classOf[StageDataWrapper], conf.get(MAX_RETAINED_STAGES)) { count =>
cleanupStages(count)
}
kvstore.onFlush {
if (!live) {
flush()
}
}
// Captures the Spark version from the log-start event when replaying event
// logs; all other custom events are ignored by this listener.
override def onOtherEvent(event: SparkListenerEvent): Unit = event match {
case SparkListenerLogStart(version) => sparkVersion = version
case _ =>
}
// Records application/attempt info in the store at startup. End time and
// duration are unknown at this point, so -1 sentinels are written until
// onApplicationEnd replaces them.
override def onApplicationStart(event: SparkListenerApplicationStart): Unit = {
assert(event.appId.isDefined, "Application without IDs are not supported.")
val attempt = v1.ApplicationAttemptInfo(
event.appAttemptId,
new Date(event.time),
new Date(-1),
new Date(event.time),
-1L,
event.sparkUser,
false,
sparkVersion)
appInfo = v1.ApplicationInfo(
event.appId.get,
event.appName,
None,
None,
None,
None,
Seq(attempt))
kvstore.write(new ApplicationInfoWrapper(appInfo))
kvstore.write(appSummary)
// Update the driver block manager with logs from this event. The SparkContext initialization
// code registers the driver before this event is sent.
event.driverLogs.foreach { logs =>
val driver = liveExecutors.get(SparkContext.DRIVER_IDENTIFIER)
driver.foreach { d =>
d.executorLogs = logs.toMap
d.attributes = event.driverAttributes.getOrElse(Map.empty).toMap
update(d, System.nanoTime())
}
}
}
// Persists runtime/environment details and refreshes coresPerTask from the
// spark.task.cpus property so executor task slots are computed correctly.
override def onEnvironmentUpdate(event: SparkListenerEnvironmentUpdate): Unit = {
val details = event.environmentDetails
val jvmInfo = Map(details("JVM Information"): _*)
// Missing JVM fields are written as null rather than failing the update.
val runtime = new v1.RuntimeInfo(
jvmInfo.get("Java Version").orNull,
jvmInfo.get("Java Home").orNull,
jvmInfo.get("Scala Version").orNull)
val envInfo = new v1.ApplicationEnvironmentInfo(
runtime,
details.getOrElse("Spark Properties", Nil),
details.getOrElse("Hadoop Properties", Nil),
details.getOrElse("System Properties", Nil),
details.getOrElse("Classpath Entries", Nil))
// Keep the previous value if the property is absent from this update.
coresPerTask = envInfo.sparkProperties.toMap.get(CPUS_PER_TASK.key).map(_.toInt)
.getOrElse(coresPerTask)
kvstore.write(new ApplicationEnvironmentInfoWrapper(envInfo))
}
// Rewrites the application attempt with its real end time, duration, and
// completed=true, replacing the sentinel values written at start.
override def onApplicationEnd(event: SparkListenerApplicationEnd): Unit = {
val old = appInfo.attempts.head
val attempt = v1.ApplicationAttemptInfo(
old.attemptId,
old.startTime,
new Date(event.time),
new Date(event.time),
event.time - old.startTime.getTime(),
old.sparkUser,
true,
old.appSparkVersion)
appInfo = v1.ApplicationInfo(
appInfo.id,
appInfo.name,
None,
None,
None,
None,
Seq(attempt))
kvstore.write(new ApplicationInfoWrapper(appInfo))
}
override def onExecutorAdded(event: SparkListenerExecutorAdded): Unit = {
// This needs to be an update in case an executor re-registers after the driver has
// marked it as "dead".
val exec = getOrCreateExecutor(event.executorId, event.time)
exec.host = event.executorInfo.executorHost
exec.isActive = true
exec.totalCores = event.executorInfo.totalCores
// Task slots = cores / cores-per-task (coresPerTask comes from env update).
exec.maxTasks = event.executorInfo.totalCores / coresPerTask
exec.executorLogs = event.executorInfo.logUrlMap
exec.attributes = event.executorInfo.attributes
liveUpdate(exec, System.nanoTime())
}
// Marks the executor dead, writes its final state, and unwinds its share of
// RDD distribution/partition accounting. Executors still referenced by live
// stages are parked in deadExecutors until those stages finish.
override def onExecutorRemoved(event: SparkListenerExecutorRemoved): Unit = {
liveExecutors.remove(event.executorId).foreach { exec =>
val now = System.nanoTime()
activeExecutorCount = math.max(0, activeExecutorCount - 1)
exec.isActive = false
exec.removeTime = new Date(event.time)
exec.removeReason = event.reason
update(exec, now, last = true)
// Remove all RDD distributions that reference the removed executor, in case there wasn't
// a corresponding event.
liveRDDs.values.foreach { rdd =>
if (rdd.removeDistribution(exec)) {
update(rdd, now)
}
}
// Remove all RDD partitions that reference the removed executor
liveRDDs.values.foreach { rdd =>
rdd.getPartitions.values
.filter(_.executors.contains(event.executorId))
.foreach { partition =>
if (partition.executors.length == 1) {
// Last replica: drop the partition and reclaim all of its usage.
rdd.removePartition(partition.blockName)
rdd.memoryUsed = addDeltaToValue(rdd.memoryUsed, partition.memoryUsed * -1)
rdd.diskUsed = addDeltaToValue(rdd.diskUsed, partition.diskUsed * -1)
} else {
// Shared replica: subtract this executor's even share of the usage.
rdd.memoryUsed = addDeltaToValue(rdd.memoryUsed,
(partition.memoryUsed / partition.executors.length) * -1)
rdd.diskUsed = addDeltaToValue(rdd.diskUsed,
(partition.diskUsed / partition.executors.length) * -1)
partition.update(partition.executors
.filter(!_.equals(event.executorId)), rdd.storageLevel,
addDeltaToValue(partition.memoryUsed,
(partition.memoryUsed / partition.executors.length) * -1),
addDeltaToValue(partition.diskUsed,
(partition.diskUsed / partition.executors.length) * -1))
}
}
update(rdd, now)
}
if (isExecutorActiveForLiveStages(exec)) {
// the executor was running for a currently active stage, so save it for now in
// deadExecutors, and remove when there are no active stages overlapping with the
// executor.
deadExecutors.put(event.executorId, exec)
}
}
}
/** Was the specified executor active for any currently live stages? */
private def isExecutorActiveForLiveStages(exec: LiveExecutor): Boolean = {
  // An executor overlaps a stage when the stage was submitted before the
  // executor was removed (missing submission times count as 0).
  val removedAt = exec.removeTime.getTime
  liveStages.values.asScala
    .exists(_.info.submissionTime.getOrElse(0L) < removedAt)
}
// Marks the executor blacklisted application-wide.
override def onExecutorBlacklisted(event: SparkListenerExecutorBlacklisted): Unit = {
updateBlackListStatus(event.executorId, true)
}
// Records a per-stage blacklist: updates both the stage's executor summary
// and the executor's set of blacklisted stages.
override def onExecutorBlacklistedForStage(
event: SparkListenerExecutorBlacklistedForStage): Unit = {
val now = System.nanoTime()
Option(liveStages.get((event.stageId, event.stageAttemptId))).foreach { stage =>
setStageBlackListStatus(stage, now, event.executorId)
}
liveExecutors.get(event.executorId).foreach { exec =>
addBlackListedStageTo(exec, event.stageId, now)
}
}
override def onNodeBlacklistedForStage(event: SparkListenerNodeBlacklistedForStage): Unit = {
val now = System.nanoTime()
// Implicitly blacklist every available executor for the stage associated with this node
Option(liveStages.get((event.stageId, event.stageAttemptId))).foreach { stage =>
val executorIds = liveExecutors.values.filter(_.host == event.hostId).map(_.executorId).toSeq
setStageBlackListStatus(stage, now, executorIds: _*)
}
// NOTE(review): the filter above matches on `host` while this one matches on
// `hostname` — presumably equivalent for registered executors; confirm
// against LiveExecutor's field semantics.
liveExecutors.values.filter(_.hostname == event.hostId).foreach { exec =>
addBlackListedStageTo(exec, event.stageId, now)
}
}
// Adds the stage to the executor's blacklisted-stage set and persists it.
private def addBlackListedStageTo(exec: LiveExecutor, stageId: Int, now: Long): Unit = {
exec.blacklistedInStages += stageId
liveUpdate(exec, now)
}
// Flags the given executors as blacklisted in this stage's per-executor
// summaries and records them on the stage itself.
private def setStageBlackListStatus(stage: LiveStage, now: Long, executorIds: String*): Unit = {
executorIds.foreach { executorId =>
val executorStageSummary = stage.executorSummary(executorId)
executorStageSummary.isBlacklisted = true
maybeUpdate(executorStageSummary, now)
}
stage.blackListedExecutors ++= executorIds
maybeUpdate(stage, now)
}
// Clears the application-wide blacklist flag for the executor.
override def onExecutorUnblacklisted(event: SparkListenerExecutorUnblacklisted): Unit = {
updateBlackListStatus(event.executorId, false)
}
// Blacklists every executor on the given node.
override def onNodeBlacklisted(event: SparkListenerNodeBlacklisted): Unit = {
updateNodeBlackList(event.hostId, true)
}
// Un-blacklists every executor on the given node.
override def onNodeUnblacklisted(event: SparkListenerNodeUnblacklisted): Unit = {
updateNodeBlackList(event.hostId, false)
}
// Sets the executor's blacklist flag, bumps the matching metrics counter,
// and persists the change.
private def updateBlackListStatus(execId: String, blacklisted: Boolean): Unit = {
liveExecutors.get(execId).foreach { exec =>
exec.isBlacklisted = blacklisted
if (blacklisted) {
appStatusSource.foreach(_.BLACKLISTED_EXECUTORS.inc())
} else {
appStatusSource.foreach(_.UNBLACKLISTED_EXECUTORS.inc())
}
liveUpdate(exec, System.nanoTime())
}
}
// Applies the given blacklist state to every executor hosted on `host`.
private def updateNodeBlackList(host: String, blacklisted: Boolean): Unit = {
  val timestamp = System.nanoTime()
  // Implicitly (un)blacklist every executor associated with the node.
  for (executor <- liveExecutors.values if executor.hostname == host) {
    executor.isBlacklisted = blacklisted
    liveUpdate(executor, timestamp)
  }
}
// Registers a new live job, links it to all of its (possibly re-used)
// stages, and writes each stage's RDD operation graph to the store.
override def onJobStart(event: SparkListenerJobStart): Unit = {
val now = System.nanoTime()
// Compute (a potential over-estimate of) the number of tasks that will be run by this job.
// This may be an over-estimate because the job start event references all of the result
// stages' transitive stage dependencies, but some of these stages might be skipped if their
// output is available from earlier runs.
// See https://github.com/apache/spark/pull/3009 for a more extensive discussion.
val numTasks = {
val missingStages = event.stageInfos.filter(_.completionTime.isEmpty)
missingStages.map(_.numTasks).sum
}
// The job is named after its final (highest-id) stage.
val lastStageInfo = event.stageInfos.sortBy(_.stageId).lastOption
val jobName = lastStageInfo.map(_.name).getOrElse("")
val jobGroup = Option(event.properties)
.flatMap { p => Option(p.getProperty(SparkContext.SPARK_JOB_GROUP_ID)) }
val sqlExecutionId = Option(event.properties)
.flatMap(p => Option(p.getProperty(SQL_EXECUTION_ID_KEY)).map(_.toLong))
val job = new LiveJob(
event.jobId,
jobName,
if (event.time > 0) Some(new Date(event.time)) else None,
event.stageIds,
jobGroup,
numTasks,
sqlExecutionId)
liveJobs.put(event.jobId, job)
liveUpdate(job, now)
event.stageInfos.foreach { stageInfo =>
// A new job submission may re-use an existing stage, so this code needs to do an update
// instead of just a write.
val stage = getOrCreateStage(stageInfo)
stage.jobs :+= job
stage.jobIds += event.jobId
liveUpdate(stage, now)
}
// Create the graph data for all the job's stages.
event.stageInfos.foreach { stage =>
val graph = RDDOperationGraph.makeOperationGraph(stage, maxGraphRootNodes)
val uigraph = new RDDOperationGraphWrapper(
stage.stageId,
graph.edges,
graph.outgoingEdges,
graph.incomingEdges,
newRDDOperationCluster(graph.rootCluster))
kvstore.write(uigraph)
}
}
// Recursively converts an RDDOperationCluster (and all child clusters) into
// its serializable wrapper form for storage.
private def newRDDOperationCluster(cluster: RDDOperationCluster): RDDOperationClusterWrapper = {
new RDDOperationClusterWrapper(
cluster.id,
cluster.name,
cluster.childNodes,
cluster.childClusters.map(newRDDOperationCluster))
}
// Finalizes a job: marks still-pending stages as skipped, records the
// outcome and duration in the metrics source, and writes the final state.
override def onJobEnd(event: SparkListenerJobEnd): Unit = {
liveJobs.remove(event.jobId).foreach { job =>
val now = System.nanoTime()
// Check if there are any pending stages that match this job; mark those as skipped.
val it = liveStages.entrySet.iterator()
while (it.hasNext()) {
val e = it.next()
if (job.stageIds.contains(e.getKey()._1)) {
val stage = e.getValue()
if (v1.StageStatus.PENDING.equals(stage.status)) {
stage.status = v1.StageStatus.SKIPPED
job.skippedStages += stage.info.stageId
job.skippedTasks += stage.info.numTasks
job.activeStages -= 1
// Detach the skipped stage from its scheduler pool as well.
pools.get(stage.schedulingPool).foreach { pool =>
pool.stageIds = pool.stageIds - stage.info.stageId
update(pool, now)
}
it.remove()
update(stage, now, last = true)
}
}
}
job.status = event.jobResult match {
case JobSucceeded =>
appStatusSource.foreach{_.SUCCEEDED_JOBS.inc()}
JobExecutionStatus.SUCCEEDED
case JobFailed(_) =>
appStatusSource.foreach{_.FAILED_JOBS.inc()}
JobExecutionStatus.FAILED
}
job.completionTime = if (event.time > 0) Some(new Date(event.time)) else None
// Duration gauge is only set when both submission and completion times exist.
for {
source <- appStatusSource
submissionTime <- job.submissionTime
completionTime <- job.completionTime
} {
source.JOB_DURATION.value.set(completionTime.getTime() - submissionTime.getTime())
}
// update global app status counters
appStatusSource.foreach { source =>
source.COMPLETED_STAGES.inc(job.completedStages.size)
source.FAILED_STAGES.inc(job.failedStages)
source.COMPLETED_TASKS.inc(job.completedTasks)
source.FAILED_TASKS.inc(job.failedTasks)
source.KILLED_TASKS.inc(job.killedTasks)
source.SKIPPED_TASKS.inc(job.skippedTasks)
source.SKIPPED_STAGES.inc(job.skippedStages.size)
}
update(job, now, last = true)
if (job.status == JobExecutionStatus.SUCCEEDED) {
appSummary = new AppSummary(appSummary.numCompletedJobs + 1, appSummary.numCompletedStages)
kvstore.write(appSummary)
}
}
}
// Activates a stage: resolves its scheduler pool and owning jobs, registers
// any cached RDDs it references, and persists all touched entities.
override def onStageSubmitted(event: SparkListenerStageSubmitted): Unit = {
val now = System.nanoTime()
val stage = getOrCreateStage(event.stageInfo)
stage.status = v1.StageStatus.ACTIVE
stage.schedulingPool = Option(event.properties).flatMap { p =>
Option(p.getProperty(SparkContext.SPARK_SCHEDULER_POOL))
}.getOrElse(SparkUI.DEFAULT_POOL_NAME)
// Look at all active jobs to find the ones that mention this stage.
stage.jobs = liveJobs.values
.filter(_.stageIds.contains(event.stageInfo.stageId))
.toSeq
stage.jobIds = stage.jobs.map(_.jobId).toSet
stage.description = Option(event.properties).flatMap { p =>
Option(p.getProperty(SparkContext.SPARK_JOB_DESCRIPTION))
}
// A resubmitted stage is no longer "completed" for its jobs.
stage.jobs.foreach { job =>
job.completedStages = job.completedStages - event.stageInfo.stageId
job.activeStages += 1
liveUpdate(job, now)
}
val pool = pools.getOrElseUpdate(stage.schedulingPool, new SchedulerPool(stage.schedulingPool))
pool.stageIds = pool.stageIds + event.stageInfo.stageId
update(pool, now)
// Track every RDD in this stage that has a valid (cached) storage level.
event.stageInfo.rddInfos.foreach { info =>
if (info.storageLevel.isValid) {
liveUpdate(liveRDDs.getOrElseUpdate(info.id, new LiveRDD(info)), now)
}
}
liveUpdate(stage, now)
}
// Tracks a newly launched task and bumps active-task counts on its stage,
// jobs, and executor; schedules async task cleanup when over the cap.
override def onTaskStart(event: SparkListenerTaskStart): Unit = {
val now = System.nanoTime()
val task = new LiveTask(event.taskInfo, event.stageId, event.stageAttemptId, lastUpdateTime)
liveTasks.put(event.taskInfo.taskId, task)
liveUpdate(task, now)
Option(liveStages.get((event.stageId, event.stageAttemptId))).foreach { stage =>
stage.activeTasks += 1
stage.firstLaunchTime = math.min(stage.firstLaunchTime, event.taskInfo.launchTime)
// Count task launches per locality level for the stage summary.
val locality = event.taskInfo.taskLocality.toString()
val count = stage.localitySummary.getOrElse(locality, 0L) + 1L
stage.localitySummary = stage.localitySummary ++ Map(locality -> count)
stage.activeTasksPerExecutor(event.taskInfo.executorId) += 1
maybeUpdate(stage, now)
stage.jobs.foreach { job =>
job.activeTasks += 1
maybeUpdate(job, now)
}
// Kick off async cleanup once the retained-task cap is exceeded.
if (stage.savedTasks.incrementAndGet() > maxTasksPerStage && !stage.cleaning) {
stage.cleaning = true
kvstore.doAsync {
cleanupTasks(stage)
}
}
}
liveExecutors.get(event.taskInfo.executorId).foreach { exec =>
exec.activeTasks += 1
exec.totalTasks += 1
maybeUpdate(exec, now)
}
}
override def onTaskGettingResult(event: SparkListenerTaskGettingResult): Unit = {
// Call update on the task so that the "getting result" time is written to the store; the
// value is part of the mutable TaskInfo state that the live entity already references.
liveTasks.get(event.taskInfo.taskId).foreach { task =>
maybeUpdate(task, System.nanoTime())
}
}
// Finalizes a task: computes the metrics delta, classifies the outcome as
// completed/failed/killed, and propagates the counts to the stage, its
// jobs, the per-executor stage summary, and the executor itself.
override def onTaskEnd(event: SparkListenerTaskEnd): Unit = {
// TODO: can this really happen?
if (event.taskInfo == null) {
return
}
val now = System.nanoTime()
val metricsDelta = liveTasks.remove(event.taskInfo.taskId).map { task =>
task.info = event.taskInfo
val errorMessage = event.reason match {
case Success =>
None
case k: TaskKilled =>
Some(k.reason)
case e: ExceptionFailure => // Handle ExceptionFailure because we might have accumUpdates
Some(e.toErrorString)
case e: TaskFailedReason => // All other failure cases
Some(e.toErrorString)
case other =>
logInfo(s"Unhandled task end reason: $other")
None
}
task.errorMessage = errorMessage
val delta = task.updateMetrics(event.taskMetrics)
update(task, now, last = true)
delta
}.orNull
// One-hot deltas: (completed, failed, killed). Note TaskCommitDenied is
// counted as killed rather than failed.
val (completedDelta, failedDelta, killedDelta) = event.reason match {
case Success =>
(1, 0, 0)
case _: TaskKilled =>
(0, 0, 1)
case _: TaskCommitDenied =>
(0, 0, 1)
case _ =>
(0, 1, 0)
}
Option(liveStages.get((event.stageId, event.stageAttemptId))).foreach { stage =>
if (metricsDelta != null) {
stage.metrics = LiveEntityHelpers.addMetrics(stage.metrics, metricsDelta)
}
stage.activeTasks -= 1
stage.completedTasks += completedDelta
if (completedDelta > 0) {
stage.completedIndices.add(event.taskInfo.index)
}
stage.failedTasks += failedDelta
stage.killedTasks += killedDelta
if (killedDelta > 0) {
stage.killedSummary = killedTasksSummary(event.reason, stage.killedSummary)
}
stage.activeTasksPerExecutor(event.taskInfo.executorId) -= 1
// [SPARK-24415] Wait for all tasks to finish before removing stage from live list
val removeStage =
stage.activeTasks == 0 &&
(v1.StageStatus.COMPLETE.equals(stage.status) ||
v1.StageStatus.FAILED.equals(stage.status))
if (removeStage) {
update(stage, now, last = true)
} else {
maybeUpdate(stage, now)
}
// Store both stage ID and task index in a single long variable for tracking at job level.
val taskIndex = (event.stageId.toLong << Integer.SIZE) | event.taskInfo.index
stage.jobs.foreach { job =>
job.activeTasks -= 1
job.completedTasks += completedDelta
if (completedDelta > 0) {
job.completedIndices.add(taskIndex)
}
job.failedTasks += failedDelta
job.killedTasks += killedDelta
if (killedDelta > 0) {
job.killedSummary = killedTasksSummary(event.reason, job.killedSummary)
}
if (removeStage) {
update(job, now)
} else {
maybeUpdate(job, now)
}
}
val esummary = stage.executorSummary(event.taskInfo.executorId)
esummary.taskTime += event.taskInfo.duration
esummary.succeededTasks += completedDelta
esummary.failedTasks += failedDelta
esummary.killedTasks += killedDelta
if (metricsDelta != null) {
esummary.metrics = LiveEntityHelpers.addMetrics(esummary.metrics, metricsDelta)
}
val isLastTask = stage.activeTasksPerExecutor(event.taskInfo.executorId) == 0
// If the last task of the executor finished, then update the esummary
// for both live and history events.
if (isLastTask) {
update(esummary, now)
} else {
maybeUpdate(esummary, now)
}
if (!stage.cleaning && stage.savedTasks.get() > maxTasksPerStage) {
stage.cleaning = true
kvstore.doAsync {
cleanupTasks(stage)
}
}
if (removeStage) {
liveStages.remove((event.stageId, event.stageAttemptId))
}
}
liveExecutors.get(event.taskInfo.executorId).foreach { exec =>
exec.activeTasks -= 1
exec.completedTasks += completedDelta
exec.failedTasks += failedDelta
exec.totalDuration += event.taskInfo.duration
// Note: For resubmitted tasks, we continue to use the metrics that belong to the
// first attempt of this task. This may not be 100% accurate because the first attempt
// could have failed half-way through. The correct fix would be to keep track of the
// metrics added by each attempt, but this is much more complicated.
if (event.reason != Resubmitted) {
if (event.taskMetrics != null) {
val readMetrics = event.taskMetrics.shuffleReadMetrics
exec.totalGcTime += event.taskMetrics.jvmGCTime
exec.totalInputBytes += event.taskMetrics.inputMetrics.bytesRead
exec.totalShuffleRead += readMetrics.localBytesRead + readMetrics.remoteBytesRead
exec.totalShuffleWrite += event.taskMetrics.shuffleWriteMetrics.bytesWritten
}
}
// Force an update on both live and history applications when the number of active tasks
// reaches 0. This is checked in some tests (e.g. SQLTestUtilsBase) so it needs to be
// reliably up to date.
if (exec.activeTasks == 0) {
update(exec, now)
} else {
maybeUpdate(exec, now)
}
}
}
// Resolves the final stage status (failed/complete/skipped), updates owning
// jobs and pools, clears per-stage blacklists, and prunes dead executors
// that no longer overlap any live stage.
override def onStageCompleted(event: SparkListenerStageCompleted): Unit = {
val maybeStage =
Option(liveStages.get((event.stageInfo.stageId, event.stageInfo.attemptNumber)))
maybeStage.foreach { stage =>
val now = System.nanoTime()
stage.info = event.stageInfo
// Because of SPARK-20205, old event logs may contain valid stages without a submission time
// in their start event. In those cases, we can only detect whether a stage was skipped by
// waiting until the completion event, at which point the field would have been set.
stage.status = event.stageInfo.failureReason match {
case Some(_) => v1.StageStatus.FAILED
case _ if event.stageInfo.submissionTime.isDefined => v1.StageStatus.COMPLETE
case _ => v1.StageStatus.SKIPPED
}
stage.jobs.foreach { job =>
stage.status match {
case v1.StageStatus.COMPLETE =>
job.completedStages += event.stageInfo.stageId
case v1.StageStatus.SKIPPED =>
job.skippedStages += event.stageInfo.stageId
job.skippedTasks += event.stageInfo.numTasks
case _ =>
job.failedStages += 1
}
job.activeStages -= 1
liveUpdate(job, now)
}
pools.get(stage.schedulingPool).foreach { pool =>
pool.stageIds = pool.stageIds - event.stageInfo.stageId
update(pool, now)
}
stage.executorSummaries.values.foreach(update(_, now))
// Clear the per-stage blacklist flag from every affected executor.
val executorIdsForStage = stage.blackListedExecutors
executorIdsForStage.foreach { executorId =>
liveExecutors.get(executorId).foreach { exec =>
removeBlackListedStageFrom(exec, event.stageInfo.stageId, now)
}
}
// Remove stage only if there are no active tasks remaining
val removeStage = stage.activeTasks == 0
update(stage, now, last = removeStage)
if (removeStage) {
liveStages.remove((event.stageInfo.stageId, event.stageInfo.attemptNumber))
}
if (stage.status == v1.StageStatus.COMPLETE) {
appSummary = new AppSummary(appSummary.numCompletedJobs, appSummary.numCompletedStages + 1)
kvstore.write(appSummary)
}
}
// remove any dead executors that were not running for any currently active stages
deadExecutors.retain((execId, exec) => isExecutorActiveForLiveStages(exec))
}
// Removes the stage from the executor's blacklisted-stage set and persists it.
private def removeBlackListedStageFrom(exec: LiveExecutor, stageId: Int, now: Long) = {
exec.blacklistedInStages -= stageId
liveUpdate(exec, now)
}
override def onBlockManagerAdded(event: SparkListenerBlockManagerAdded): Unit = {
// This needs to set fields that are already set by onExecutorAdded because the driver is
// considered an "executor" in the UI, but does not have a SparkListenerExecutorAdded event.
val exec = getOrCreateExecutor(event.blockManagerId.executorId, event.time)
exec.hostPort = event.blockManagerId.hostPort
// On/off-heap split is only available when maxOnHeapMem is reported.
event.maxOnHeapMem.foreach { _ =>
exec.totalOnHeap = event.maxOnHeapMem.get
exec.totalOffHeap = event.maxOffHeapMem.get
}
exec.isActive = true
exec.maxMemory = event.maxMem
liveUpdate(exec, System.nanoTime())
}
// Intentionally a no-op: executor teardown is handled by onExecutorRemoved.
override def onBlockManagerRemoved(event: SparkListenerBlockManagerRemoved): Unit = {
// Nothing to do here. Covered by onExecutorRemoved.
}
// Drops a cached RDD: rolls its block counts and memory/disk usage back out
// of every executor that stored it, then deletes the stored RDD info.
override def onUnpersistRDD(event: SparkListenerUnpersistRDD): Unit = {
liveRDDs.remove(event.rddId).foreach { liveRDD =>
val storageLevel = liveRDD.info.storageLevel
// Use RDD partition info to update executor block info.
liveRDD.getPartitions().foreach { case (_, part) =>
part.executors.foreach { executorId =>
liveExecutors.get(executorId).foreach { exec =>
exec.rddBlocks = exec.rddBlocks - 1
}
}
}
val now = System.nanoTime()
// Use RDD distribution to update executor memory and disk usage info.
liveRDD.getDistributions().foreach { case (executorId, rddDist) =>
liveExecutors.get(executorId).foreach { exec =>
if (exec.hasMemoryInfo) {
// Subtract from the matching heap pool based on the storage level.
if (storageLevel.useOffHeap) {
exec.usedOffHeap = addDeltaToValue(exec.usedOffHeap, -rddDist.offHeapUsed)
} else {
exec.usedOnHeap = addDeltaToValue(exec.usedOnHeap, -rddDist.onHeapUsed)
}
}
exec.memoryUsed = addDeltaToValue(exec.memoryUsed, -rddDist.memoryUsed)
exec.diskUsed = addDeltaToValue(exec.diskUsed, -rddDist.diskUsed)
maybeUpdate(exec, now)
}
}
}
kvstore.delete(classOf[RDDStorageInfoWrapper], event.rddId)
}
// Applies in-flight accumulator updates to live tasks, stages, and
// per-executor summaries, and tracks executor peak memory metrics (live UI).
override def onExecutorMetricsUpdate(event: SparkListenerExecutorMetricsUpdate): Unit = {
val now = System.nanoTime()
event.accumUpdates.foreach { case (taskId, sid, sAttempt, accumUpdates) =>
liveTasks.get(taskId).foreach { task =>
val metrics = TaskMetrics.fromAccumulatorInfos(accumUpdates)
val delta = task.updateMetrics(metrics)
maybeUpdate(task, now)
// Fold the task's metrics delta into its stage and executor summary.
Option(liveStages.get((sid, sAttempt))).foreach { stage =>
stage.metrics = LiveEntityHelpers.addMetrics(stage.metrics, delta)
maybeUpdate(stage, now)
val esummary = stage.executorSummary(event.execId)
esummary.metrics = LiveEntityHelpers.addMetrics(esummary.metrics, delta)
maybeUpdate(esummary, now)
}
}
}
// check if there is a new peak value for any of the executor level memory metrics
// for the live UI. SparkListenerExecutorMetricsUpdate events are only processed
// for the live UI.
event.executorUpdates.foreach { updates =>
liveExecutors.get(event.execId).foreach { exec =>
if (exec.peakExecutorMetrics.compareAndUpdatePeakValues(updates)) {
maybeUpdate(exec, now)
}
}
}
}
override def onStageExecutorMetrics(executorMetrics: SparkListenerStageExecutorMetrics): Unit = {
val now = System.nanoTime()
// check if there is a new peak value for any of the executor level memory metrics,
// while reading from the log. SparkListenerStageExecutorMetrics are only processed
// when reading logs.
liveExecutors.get(executorMetrics.execId)
.orElse(deadExecutors.get(executorMetrics.execId)).map { exec =>
if (exec.peakExecutorMetrics.compareAndUpdatePeakValues(executorMetrics.executorMetrics)) {
update(exec, now)
}
}
}
  /** Dispatches block updates by block type; block types other than RDD/stream/broadcast are ignored. */
  override def onBlockUpdated(event: SparkListenerBlockUpdated): Unit = {
    event.blockUpdatedInfo.blockId match {
      case block: RDDBlockId => updateRDDBlock(event, block)
      case stream: StreamBlockId => updateStreamBlock(event, stream)
      case broadcast: BroadcastBlockId => updateBroadcastBlock(event, broadcast)
      case _ =>
    }
  }
  /** Flush all live entities' data to the underlying store. */
  // Unlike maybeUpdate, this writes unconditionally (no rate limiting), covering stages
  // and their per-executor summaries, jobs, executors, tasks, RDDs and pools.
  private def flush(): Unit = {
    val now = System.nanoTime()
    liveStages.values.asScala.foreach { stage =>
      update(stage, now)
      stage.executorSummaries.values.foreach(update(_, now))
    }
    liveJobs.values.foreach(update(_, now))
    liveExecutors.values.foreach(update(_, now))
    liveTasks.values.foreach(update(_, now))
    liveRDDs.values.foreach(update(_, now))
    pools.values.foreach(update(_, now))
  }
  /**
   * Shortcut to get active stages quickly in a live application, for use by the console
   * progress bar.
   */
  // Only stages that have a submission time are included; results are sorted by stage id.
  def activeStages(): Seq[v1.StageData] = {
    liveStages.values.asScala
      .filter(_.info.submissionTime.isDefined)
      .map(_.toApi())
      .toList
      .sortBy(_.stageId)
  }
/**
* Apply a delta to a value, but ensure that it doesn't go negative.
*/
private def addDeltaToValue(old: Long, delta: Long): Long = math.max(0, old + delta)
  /**
   * Applies a single RDD block update: adjusts the hosting executor's memory/disk usage,
   * the RDD's partition and per-executor distribution info, and finally the executor's
   * tracked block count.
   */
  private def updateRDDBlock(event: SparkListenerBlockUpdated, block: RDDBlockId): Unit = {
    val now = System.nanoTime()
    val executorId = event.blockUpdatedInfo.blockManagerId.executorId
    // Whether values are being added to or removed from the existing accounting.
    val storageLevel = event.blockUpdatedInfo.storageLevel
    val diskDelta = event.blockUpdatedInfo.diskSize * (if (storageLevel.useDisk) 1 else -1)
    val memoryDelta = event.blockUpdatedInfo.memSize * (if (storageLevel.useMemory) 1 else -1)
    // A valid storage level means the block is being stored; invalid means removal.
    val updatedStorageLevel = if (storageLevel.isValid) {
      Some(storageLevel.description)
    } else {
      None
    }
    // We need information about the executor to update some memory accounting values in the
    // RDD info, so read that beforehand.
    val maybeExec = liveExecutors.get(executorId)
    var rddBlocksDelta = 0
    // Update the executor stats first, since they are used to calculate the free memory
    // on tracked RDD distributions.
    maybeExec.foreach { exec =>
      updateExecutorMemoryDiskInfo(exec, storageLevel, memoryDelta, diskDelta)
    }
    // Update the block entry in the RDD info, keeping track of the deltas above so that we
    // can update the executor information too.
    liveRDDs.get(block.rddId).foreach { rdd =>
      if (updatedStorageLevel.isDefined) {
        rdd.setStorageLevel(updatedStorageLevel.get)
      }
      val partition = rdd.partition(block.name)
      val executors = if (updatedStorageLevel.isDefined) {
        val current = partition.executors
        if (current.contains(executorId)) {
          current
        } else {
          rddBlocksDelta = 1
          current :+ executorId
        }
      } else {
        rddBlocksDelta = -1
        partition.executors.filter(_ != executorId)
      }
      // Only update the partition if it's still stored in some executor, otherwise get rid of it.
      if (executors.nonEmpty) {
        partition.update(executors, rdd.storageLevel,
          addDeltaToValue(partition.memoryUsed, memoryDelta),
          addDeltaToValue(partition.diskUsed, diskDelta))
      } else {
        rdd.removePartition(block.name)
      }
      maybeExec.foreach { exec =>
        if (exec.rddBlocks + rddBlocksDelta > 0) {
          val dist = rdd.distribution(exec)
          dist.memoryUsed = addDeltaToValue(dist.memoryUsed, memoryDelta)
          dist.diskUsed = addDeltaToValue(dist.diskUsed, diskDelta)
          if (exec.hasMemoryInfo) {
            if (storageLevel.useOffHeap) {
              dist.offHeapUsed = addDeltaToValue(dist.offHeapUsed, memoryDelta)
            } else {
              dist.onHeapUsed = addDeltaToValue(dist.onHeapUsed, memoryDelta)
            }
          }
          dist.lastUpdate = null
        } else {
          rdd.removeDistribution(exec)
        }
        // Trigger an update on other RDDs so that the free memory information is updated.
        liveRDDs.values.foreach { otherRdd =>
          if (otherRdd.info.id != block.rddId) {
            otherRdd.distributionOpt(exec).foreach { dist =>
              dist.lastUpdate = null
              update(otherRdd, now)
            }
          }
        }
      }
      rdd.memoryUsed = addDeltaToValue(rdd.memoryUsed, memoryDelta)
      rdd.diskUsed = addDeltaToValue(rdd.diskUsed, diskDelta)
      update(rdd, now)
    }
    // Finish updating the executor now that we know the delta in the number of blocks.
    maybeExec.foreach { exec =>
      exec.rddBlocks += rddBlocksDelta
      maybeUpdate(exec, now)
    }
  }
  /** Returns the live executor with this id, creating it (and bumping activeExecutorCount) if absent. */
  private def getOrCreateExecutor(executorId: String, addTime: Long): LiveExecutor = {
    liveExecutors.getOrElseUpdate(executorId, {
      activeExecutorCount += 1
      new LiveExecutor(executorId, addTime)
    })
  }
  /**
   * Writes stream block metadata to the store when the storage level is valid (block stored),
   * or deletes the corresponding entry when it is not (block removed).
   */
  private def updateStreamBlock(event: SparkListenerBlockUpdated, stream: StreamBlockId): Unit = {
    val storageLevel = event.blockUpdatedInfo.storageLevel
    if (storageLevel.isValid) {
      val data = new StreamBlockData(
        stream.name,
        event.blockUpdatedInfo.blockManagerId.executorId,
        event.blockUpdatedInfo.blockManagerId.hostPort,
        storageLevel.description,
        storageLevel.useMemory,
        storageLevel.useDisk,
        storageLevel.deserialized,
        event.blockUpdatedInfo.memSize,
        event.blockUpdatedInfo.diskSize)
      kvstore.write(data)
    } else {
      // Stream block entries are keyed by (name, executorId).
      kvstore.delete(classOf[StreamBlockData],
        Array(stream.name, event.blockUpdatedInfo.blockManagerId.executorId))
    }
  }
  /** Adjusts the hosting executor's memory/disk usage for a broadcast block add or removal. */
  private def updateBroadcastBlock(
      event: SparkListenerBlockUpdated,
      broadcast: BroadcastBlockId): Unit = {
    val executorId = event.blockUpdatedInfo.blockManagerId.executorId
    liveExecutors.get(executorId).foreach { exec =>
      val now = System.nanoTime()
      val storageLevel = event.blockUpdatedInfo.storageLevel
      // Whether values are being added to or removed from the existing accounting.
      val diskDelta = event.blockUpdatedInfo.diskSize * (if (storageLevel.useDisk) 1 else -1)
      val memoryDelta = event.blockUpdatedInfo.memSize * (if (storageLevel.useMemory) 1 else -1)
      updateExecutorMemoryDiskInfo(exec, storageLevel, memoryDelta, diskDelta)
      maybeUpdate(exec, now)
    }
  }
  /**
   * Applies memory and disk deltas to an executor's usage counters, additionally splitting
   * the memory delta into on-/off-heap buckets when the executor reports memory metrics.
   */
  private def updateExecutorMemoryDiskInfo(
      exec: LiveExecutor,
      storageLevel: StorageLevel,
      memoryDelta: Long,
      diskDelta: Long): Unit = {
    if (exec.hasMemoryInfo) {
      if (storageLevel.useOffHeap) {
        exec.usedOffHeap = addDeltaToValue(exec.usedOffHeap, memoryDelta)
      } else {
        exec.usedOnHeap = addDeltaToValue(exec.usedOnHeap, memoryDelta)
      }
    }
    exec.memoryUsed = addDeltaToValue(exec.memoryUsed, memoryDelta)
    exec.diskUsed = addDeltaToValue(exec.diskUsed, diskDelta)
  }
private def getOrCreateStage(info: StageInfo): LiveStage = {
val stage = liveStages.computeIfAbsent((info.stageId, info.attemptNumber),
new Function[(Int, Int), LiveStage]() {
override def apply(key: (Int, Int)): LiveStage = new LiveStage()
})
stage.info = info
stage
}
private def killedTasksSummary(
reason: TaskEndReason,
oldSummary: Map[String, Int]): Map[String, Int] = {
reason match {
case k: TaskKilled =>
oldSummary.updated(k.reason, oldSummary.getOrElse(k.reason, 0) + 1)
case denied: TaskCommitDenied =>
val reason = denied.toErrorString
oldSummary.updated(reason, oldSummary.getOrElse(reason, 0) + 1)
case _ =>
oldSummary
}
}
  /** Persists the entity; `last` forwards to `checkTriggers`, marking a final write. */
  private def update(entity: LiveEntity, now: Long, last: Boolean = false): Unit = {
    entity.write(kvstore, now, checkTriggers = last)
  }
  /** Update a live entity only if it hasn't been updated in the last configured period. */
  // Only writes in live applications with a non-negative update period; throttled by
  // the entity's lastWriteTime.
  private def maybeUpdate(entity: LiveEntity, now: Long): Unit = {
    if (live && liveUpdatePeriodNs >= 0 && now - entity.lastWriteTime > liveUpdatePeriodNs) {
      update(entity, now)
    }
  }
  /** Update an entity only if in a live app; avoids redundant writes when replaying logs. */
  // Unlike maybeUpdate, there is no time-based throttling here.
  private def liveUpdate(entity: LiveEntity, now: Long): Unit = {
    if (live) {
      update(entity, now)
    }
  }
  /** Deletes surplus dead executors (indexed as not active) once their count exceeds the cap. */
  private def cleanupExecutors(count: Long): Unit = {
    // Because the limit is on the number of *dead* executors, we need to calculate whether
    // there are actually enough dead executors to be deleted.
    val threshold = conf.get(MAX_RETAINED_DEAD_EXECUTORS)
    val dead = count - activeExecutorCount
    if (dead > threshold) {
      val countToDelete = calculateNumberToRemove(dead, threshold)
      // The "active" index with first(false)/last(false) restricts the view to dead executors.
      val toDelete = kvstore.view(classOf[ExecutorSummaryWrapper]).index("active")
        .max(countToDelete).first(false).last(false).asScala.toSeq
      toDelete.foreach { e => kvstore.delete(e.getClass(), e.info.id) }
    }
  }
  /**
   * Removes finished jobs (by completion time) beyond the retained-jobs cap; jobs that are
   * still running or in an unknown state are never deleted here.
   */
  private def cleanupJobs(count: Long): Unit = {
    val countToDelete = calculateNumberToRemove(count, conf.get(MAX_RETAINED_JOBS))
    if (countToDelete <= 0L) {
      return
    }
    val view = kvstore.view(classOf[JobDataWrapper]).index("completionTime").first(0L)
    val toDelete = KVUtils.viewToSeq(view, countToDelete.toInt) { j =>
      j.info.status != JobExecutionStatus.RUNNING && j.info.status != JobExecutionStatus.UNKNOWN
    }
    toDelete.foreach { j => kvstore.delete(j.getClass(), j.info.jobId) }
  }
  /**
   * Deletes non-active, non-pending stages beyond the retained-stages cap, along with their
   * per-executor summaries, cached quantiles, RDD graph data (when the last attempt goes),
   * and all of their tasks.
   */
  private def cleanupStages(count: Long): Unit = {
    val countToDelete = calculateNumberToRemove(count, conf.get(MAX_RETAINED_STAGES))
    if (countToDelete <= 0L) {
      return
    }
    // As the completion time of a skipped stage is always -1, we will remove skipped stages first.
    // This is safe since the job itself contains enough information to render skipped stages in the
    // UI.
    val view = kvstore.view(classOf[StageDataWrapper]).index("completionTime")
    val stages = KVUtils.viewToSeq(view, countToDelete.toInt) { s =>
      s.info.status != v1.StageStatus.ACTIVE && s.info.status != v1.StageStatus.PENDING
    }
    stages.foreach { s =>
      // Stage entries are keyed by (stageId, attemptId).
      val key = Array(s.info.stageId, s.info.attemptId)
      kvstore.delete(s.getClass(), key)
      val execSummaries = kvstore.view(classOf[ExecutorStageSummaryWrapper])
        .index("stage")
        .first(key)
        .last(key)
        .asScala
        .toSeq
      execSummaries.foreach { e =>
        kvstore.delete(e.getClass(), e.id)
      }
      // Check whether there are remaining attempts for the same stage. If there aren't, then
      // also delete the RDD graph data.
      val remainingAttempts = kvstore.view(classOf[StageDataWrapper])
        .index("stageId")
        .first(s.info.stageId)
        .last(s.info.stageId)
        .closeableIterator()
      val hasMoreAttempts = try {
        remainingAttempts.asScala.exists { other =>
          other.info.attemptId != s.info.attemptId
        }
      } finally {
        remainingAttempts.close()
      }
      if (!hasMoreAttempts) {
        kvstore.delete(classOf[RDDOperationGraphWrapper], s.info.stageId)
      }
      cleanupCachedQuantiles(key)
    }
    // Delete tasks for all stages in one pass, as deleting them for each stage individually is slow
    val tasks = kvstore.view(classOf[TaskDataWrapper]).asScala
    val keys = stages.map { s => (s.info.stageId, s.info.attemptId) }.toSet
    tasks.foreach { t =>
      if (keys.contains((t.stageId, t.stageAttemptId))) {
        kvstore.delete(t.getClass(), t.taskId)
      }
    }
  }
  /**
   * Trims a stage's stored tasks down to the configured per-stage limit, preferring to
   * delete finished tasks; clears the stage's "cleaning" flag when done.
   */
  private def cleanupTasks(stage: LiveStage): Unit = {
    val countToDelete = calculateNumberToRemove(stage.savedTasks.get(), maxTasksPerStage).toInt
    if (countToDelete > 0) {
      val stageKey = Array(stage.info.stageId, stage.info.attemptNumber)
      val view = kvstore.view(classOf[TaskDataWrapper])
        .index(TaskIndexNames.COMPLETION_TIME)
        .parent(stageKey)
      // Try to delete finished tasks only.
      val toDelete = KVUtils.viewToSeq(view, countToDelete) { t =>
        !live || t.status != TaskState.RUNNING.toString()
      }
      toDelete.foreach { t => kvstore.delete(t.getClass(), t.taskId) }
      stage.savedTasks.addAndGet(-toDelete.size)
      // If there are more running tasks than the configured limit, delete running tasks. This
      // should be extremely rare since the limit should generally far exceed the number of tasks
      // that can run in parallel.
      val remaining = countToDelete - toDelete.size
      if (remaining > 0) {
        val runningTasksToDelete = view.max(remaining).iterator().asScala.toList
        runningTasksToDelete.foreach { t => kvstore.delete(t.getClass(), t.taskId) }
        stage.savedTasks.addAndGet(-remaining)
      }
      // On live applications, cleanup any cached quantiles for the stage. This makes sure that
      // quantiles will be recalculated after tasks are replaced with newer ones.
      //
      // This is not needed in the SHS since caching only happens after the event logs are
      // completely processed.
      if (live) {
        cleanupCachedQuantiles(stageKey)
      }
    }
    stage.cleaning = false
  }
  /** Deletes every cached quantile summary associated with the given (stageId, attemptId) key. */
  private def cleanupCachedQuantiles(stageKey: Array[Int]): Unit = {
    val cachedQuantiles = kvstore.view(classOf[CachedQuantile])
      .index("stage")
      .first(stageKey)
      .last(stageKey)
      .asScala
      .toList
    cachedQuantiles.foreach { q =>
      kvstore.delete(q.getClass(), q.id)
    }
  }
/**
* Remove at least (retainedSize / 10) items to reduce friction. Because tracking may be done
* asynchronously, this method may return 0 in case enough items have been deleted already.
*/
private def calculateNumberToRemove(dataSize: Long, retainedSize: Long): Long = {
if (dataSize > retainedSize) {
math.max(retainedSize / 10L, dataSize - retainedSize)
} else {
0L
}
}
}
| Aegeaner/spark | core/src/main/scala/org/apache/spark/status/AppStatusListener.scala | Scala | apache-2.0 | 46,000 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples
import java.util.Random
import breeze.linalg.{Vector, DenseVector, squaredDistance}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._
/**
* K-means clustering.
*/
/**
 * K-means clustering over whitespace-separated vectors read from a text file.
 */
object SparkKMeans {

  val R = 1000 // Scaling factor (not referenced by the algorithm below)
  val rand = new Random(42) // not referenced by the algorithm below

  /** Parses one space-separated line of doubles into a dense Breeze vector. */
  def parseVector(line: String): Vector[Double] = {
    val coords = line.split(' ').map(java.lang.Double.parseDouble)
    DenseVector(coords)
  }

  /** Index of the center nearest to `p` by squared Euclidean distance (first wins ties). */
  def closestPoint(p: Vector[Double], centers: Array[Vector[Double]]): Int = {
    var bestIndex = 0
    var bestDistance = Double.PositiveInfinity
    var i = 0
    while (i < centers.length) {
      val d = squaredDistance(p, centers(i))
      if (d < bestDistance) {
        bestDistance = d
        bestIndex = i
      }
      i += 1
    }
    bestIndex
  }

  def main(args: Array[String]) {
    if (args.length < 3) {
      System.err.println("Usage: SparkKMeans <file> <k> <convergeDist>")
      System.exit(1)
    }

    val conf = new SparkConf().setAppName("SparkKMeans")
    val sc = new SparkContext(conf)
    val points = sc.textFile(args(0)).map(parseVector _).cache()
    val k = args(1).toInt
    val convergeDist = args(2).toDouble

    // Initial centers: a fixed-seed sample of k input points.
    val kPoints = points.takeSample(withReplacement = false, k, 42).toArray
    var tempDist = 1.0

    while (tempDist > convergeDist) {
      // Assign every point to its nearest center, carrying a count of 1 for averaging.
      val assigned = points.map(p => (closestPoint(p, kPoints), (p, 1)))
      // Sum vectors and counts per center.
      val stats = assigned.reduceByKey { case ((s1, c1), (s2, c2)) => (s1 + s2, c1 + c2) }
      // New center = mean of its assigned points.
      val newPoints = stats.map { case (idx, (sum, count)) =>
        (idx, sum * (1.0 / count))
      }.collectAsMap()

      // Convergence metric: total squared movement of the centers.
      tempDist = 0.0
      for (i <- 0 until k) {
        tempDist += squaredDistance(kPoints(i), newPoints(i))
      }

      for ((idx, center) <- newPoints) {
        kPoints(idx) = center
      }
      println("Finished iteration (delta = " + tempDist + ")")
    }

    println("Final centers:")
    kPoints.foreach(println)
    sc.stop()
  }
}
| adobe-research/spark-cluster-deployment | initial-deployment-puppet/modules/spark/files/spark/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala | Scala | apache-2.0 | 2,781 |
package org.nkvoll.gpsd.example.location
import org.slf4j.LoggerFactory
import akka.actor.Actor
import scalikejdbc._
import scalikejdbc.SQLInterpolation._
import com.typesafe.config.{Config, ConfigFactory}
/**
 * Actor that, given a Location(lat, lon), queries PartialCity rows inside a
 * +/- `selectOffset` degree bounding box and logs the `numClosest` nearest cities.
 */
class LookupLocation(selectOffset: Float, numClosest: Int) extends Actor {
  val logger = LoggerFactory.getLogger(getClass)

  val c = PartialCity.syntax("c")

  def receive = {
    case location @ Location(lat, lon) => {
      logger.info(s"Started looking up location $location")
      val start = System.currentTimeMillis()
      DB readOnly { implicit session =>
        // Bounding-box pre-filter (capped at 100 rows) before exact sorting in memory.
        val cities = withSQL {
          select
            .from(PartialCity as c)
            .where
            .lt(c.latitude, lat + selectOffset)
            .and.gt(c.latitude, lat - selectOffset)
            .and.lt(c.longitude, lon + selectOffset)
            .and.gt(c.longitude, lon - selectOffset)
            .limit(100)
        }.map(PartialCity(c)).list()()
        val queryTook = System.currentTimeMillis() - start
        // Sort by squared Euclidean distance in degree space (the original comment said
        // "manhattan distance", but the expression below is squared Euclidean).
        val sortedCities = cities.sortBy(city => math.pow(lat - city.latitude, 2) + math.pow(lon - city.longitude, 2)).take(numClosest)
        val sortedString = sortedCities.map(c => (c.name, c.latitude, c.longitude)).mkString(" - ", "\n - ", "")
        val allTook = System.currentTimeMillis() - start
        logger.info(s"Expected ${math.min(numClosest, cities.size)} (of ${cities.size}) closest cities (in $queryTook ms/$allTook ms):\n$sortedString")
      }
    }
    case message => {
      // Bug fix: was s"Lookup received %message" — '%' is not an interpolation marker,
      // so the literal text "%message" was logged instead of the actual message.
      logger.info(s"Lookup received $message")
    }
  }
}
| nkvoll/scala-gpsd | example/src/main/scala/org/nkvoll/gpsd/example/location/LookupLocation.scala | Scala | mit | 1,626 |
package lila.gameSearch
import akka.actor._
import akka.pattern.pipe
import com.sksamuel.elastic4s.ElasticClient
import com.sksamuel.elastic4s.ElasticDsl.{ RichFuture => _, _ }
import com.sksamuel.elastic4s.mappings.FieldType._
import lila.game.actorApi.{ InsertGame, FinishGame }
import lila.game.GameRepo
import lila.search.actorApi._
import lila.search.ElasticSearch
/**
 * Actor backing the game search index: serves search/count queries, indexes finished
 * games as they arrive, and can rebuild the whole index into a temp index on Reset.
 */
private[gameSearch] final class Indexer(
    client: ElasticClient,
    indexName: String,
    typeName: String) extends Actor {

  // Receive FinishGame events published on the bus.
  context.system.lilaBus.subscribe(self, 'finishGame)

  def receive = {

    case Search(definition) => client execute definition pipeTo sender
    case Count(definition) => client execute definition pipeTo sender

    // A finished game is re-dispatched to the indexing path below.
    case FinishGame(game, _, _) => self ! InsertGame(game)

    case InsertGame(game) => if (storable(game)) {
      GameRepo isAnalysed game.id foreach { analysed =>
        client execute store(indexName, game, analysed)
      }
    }

    // Full reindex: build a fresh temp index, bulk-copy all storable games into it,
    // then atomically swap the alias over and drop the old index.
    case Reset =>
      val tempIndexName = "lila_" + ornicar.scalalib.Random.nextString(4)
      ElasticSearch.createType(client, tempIndexName, typeName)
      try {
        import Fields._
        // Explicit mapping: all fields stored unanalyzed for exact filtering.
        client.execute {
          put mapping tempIndexName / typeName as Seq(
            status typed ShortType index "not_analyzed",
            turns typed ShortType index "not_analyzed",
            rated typed BooleanType index "not_analyzed",
            variant typed ShortType index "not_analyzed",
            uids typed StringType index "not_analyzed",
            winner typed StringType index "not_analyzed",
            averageRating typed ShortType index "not_analyzed",
            ai typed ShortType index "not_analyzed",
            opening typed StringType index "not_analyzed",
            date typed DateType format ElasticSearch.Date.format index "not_analyzed",
            duration typed ShortType index "not_analyzed",
            analysed typed BooleanType index "not_analyzed"
          )
        }.await
        import scala.concurrent.Await
        import scala.concurrent.duration._
        import play.api.libs.json.Json
        import lila.db.api._
        import lila.game.tube.gameTube
        loginfo("[game search] counting games...")
        val size = SprayPimpedFuture($count($select.all)).await
        val batchSize = 1000
        // Mutable progress counters, updated as each bulk batch completes.
        var nb = 0
        var nbSkipped = 0
        var started = nowMillis
        Await.result(
          $enumerate.bulk[Option[lila.game.Game]]($query.all, batchSize) { gameOptions =>
            val games = gameOptions.flatten filter storable
            val nbGames = games.size
            (GameRepo filterAnalysed games.map(_.id).toSeq flatMap { analysedIds =>
              client execute {
                bulk {
                  games.map { g => store(tempIndexName, g, analysedIds(g.id)) }: _*
                }
              }
            }).void >>- {
              nb = nb + nbGames
              nbSkipped = nbSkipped + gameOptions.size - nbGames
              val perS = (batchSize * 1000) / math.max(1, (nowMillis - started))
              started = nowMillis
              loginfo("[game search] Indexed %d of %d, skipped %d, at %d/s".format(nb, size, nbSkipped, perS))
            }
          },
          10 hours)
        sender ! (())
      }
      catch {
        case e: Exception =>
          println(e)
          sender ! Status.Failure(e)
      }
      // NOTE(review): the old index is deleted and the alias re-pointed even when the
      // rebuild above failed — confirm this is intended.
      client.execute { deleteIndex(indexName) }.await
      client.execute {
        add alias indexName on tempIndexName
      }.await
  }

  // Only finished (or imported) games with more than 4 plies are indexed.
  private def storable(game: lila.game.Game) =
    (game.finished || game.imported) && game.playedTurns > 4

  // Builds the index request for one game; None-valued fields are dropped.
  private def store(inIndex: String, game: lila.game.Game, hasAnalyse: Boolean) = {
    import Fields._
    index into s"$inIndex/$typeName" fields {
      List(
        status -> game.status.is(_.Timeout).fold(chess.Status.Resign, game.status).id.some,
        turns -> math.ceil(game.turns.toFloat / 2).some,
        rated -> game.rated.some,
        variant -> game.variant.id.some,
        uids -> game.userIds.toArray.some.filterNot(_.isEmpty),
        winner -> (game.winner flatMap (_.userId)),
        averageRating -> game.averageUsersRating,
        ai -> game.aiLevel,
        date -> (ElasticSearch.Date.formatter print game.createdAt).some,
        duration -> game.estimateTotalTime.some,
        opening -> (game.opening map (_.code.toLowerCase)),
        analysed -> hasAnalyse.some
      ).collect {
        case (key, Some(value)) => key -> value
      }: _*
    } id game.id
  }
}
| systemovich/lila | modules/gameSearch/src/main/Indexer.scala | Scala | mit | 4,554 |
/*
* OpenURP, Open University Resouce Planning
*
* Copyright (c) 2013-2014, OpenURP Software.
*
* OpenURP is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* OpenURP is distributed in the hope that it will be useful.
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Beangle. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openurp.ws.services.teach.attendance.web.util
import org.junit.runner.RunWith
import org.scalatest.FunSpec
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class EncyptorTest extends FunSpec {

  // Shared DES key used by every encryptor instance under test.
  val key = "abcd1234"

  describe("Encyptor") {
    it("encrypt") {
      // Encrypts a card id, a date and a time, then prints the resulting query-string
      // fragment. NOTE(review): no assertions are made — this is a smoke/print test only.
      val str1 = new DesEncryptor(key).encrypt("2013121202")
      val str2 = new DesEncryptor(key).encrypt("20140327")
      val str3 = new DesEncryptor(key).encrypt("082100")
      println("&cardphyid=" + str1 + "&signindate=" + str2 + "&signintime=" + str3)
    }
    //    it("decrypt") {
    //      val str1 = new DesDecryptor(key).decrypt("80c252b0cd7b88fd8ea5cbeeb55d2029")
    //      println(str1)
    //    }
  }
}
package ersirjs
import org.scalajs.dom.Notification
object Notifications {

  /**
   * Shows a browser notification with the given text. If the user has not decided yet
   * (permission is neither "granted" nor "denied"), requests permission and retries once
   * the user responds; when denied, the message is silently dropped.
   */
  def send(str: String): Unit = {
    Notification.permission match {
      case "granted" => new Notification(str)
      case "denied"  => // user refused notifications — drop the message
      case _         => Notification.requestPermission(_ => send(str))
    }
  }
}
| guidosalva/REScala | Code/Examples/Ersir/web/src/main/scala/ersirjs/Notifications.scala | Scala | apache-2.0 | 301 |
/**
* Original work: SecureSocial (https://github.com/jaliss/securesocial)
* Copyright 2013 Jorge Aliss (jaliss at gmail dot com) - twitter: @jaliss
*
* Derivative work: Silhouette (https://github.com/mohiva/play-silhouette)
* Modifications Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.api
import com.mohiva.play.silhouette.api.Authenticator.Implicits._
import org.joda.time.DateTime
import scala.concurrent.duration.FiniteDuration
/**
 * An authenticator tracks an authenticated user.
 */
trait Authenticator {

  /**
   * The Type of the generated value an authenticator will be serialized to.
   */
  type Value

  /**
   * The type of the settings an authenticator can handle.
   */
  type Settings

  /**
   * Gets the linked login info for an identity.
   *
   * @return The linked login info for an identity.
   */
  def loginInfo: LoginInfo

  /**
   * Checks if the authenticator valid.
   *
   * @return True if the authenticator valid, false otherwise.
   */
  // Concrete meaning of validity is implementation-specific; see e.g.
  // ExpirableAuthenticator, which combines expiration and idle timeout.
  def isValid: Boolean
}
/**
 * The `Authenticator` companion object.
 */
object Authenticator {

  /**
   * Some implicits.
   */
  object Implicits {

    /**
     * Defines additional methods on an `DateTime` instance.
     *
     * @param dateTime The `DateTime` instance on which the additional methods should be defined.
     */
    implicit class RichDateTime(dateTime: DateTime) {

      /**
       * Adds a duration to a date/time.
       *
       * Note: the duration is truncated to whole seconds and to `Int` range before
       * being applied.
       *
       * @param duration The duration to add.
       * @return A date/time instance with the added duration.
       */
      def +(duration: FiniteDuration): DateTime = {
        dateTime.plusSeconds(duration.toSeconds.toInt)
      }

      /**
       * Subtracts a duration from a date/time.
       *
       * Note: the duration is truncated to whole seconds and to `Int` range before
       * being applied.
       *
       * @param duration The duration to subtract.
       * @return A date/time instance with the subtracted duration.
       */
      def -(duration: FiniteDuration): DateTime = {
        dateTime.minusSeconds(duration.toSeconds.toInt)
      }
    }
  }
}
/**
 * An authenticator which can be stored in a backing store.
 */
trait StorableAuthenticator extends Authenticator {

  /**
   * Gets the ID to reference the authenticator in the backing store.
   *
   * @return The ID to reference the authenticator in the backing store.
   */
  // This ID acts as the primary key for persistence layers.
  def id: String
}
/**
 * An authenticator that may expire.
 */
trait ExpirableAuthenticator extends Authenticator {

  /**
   * The last used date/time.
   */
  val lastUsedDateTime: DateTime

  /**
   * The expiration date/time.
   */
  val expirationDateTime: DateTime

  /**
   * The duration an authenticator can be idle before it timed out.
   */
  val idleTimeout: Option[FiniteDuration]

  /**
   * Checks if the authenticator isn't expired and isn't timed out.
   *
   * @return True if the authenticator isn't expired and isn't timed out.
   */
  override def isValid = !isExpired && !isTimedOut

  /**
   * Checks if the authenticator is expired. This is an absolute timeout since the creation of
   * the authenticator.
   *
   * @return True if the authenticator is expired, false otherwise.
   */
  def isExpired = expirationDateTime.isBeforeNow

  /**
   * Checks if the time elapsed since the last time the authenticator was used, is longer than
   * the maximum idle timeout specified in the properties.
   *
   * When no idle timeout is configured, the authenticator never times out.
   *
   * @return True if sliding window expiration is activated and the authenticator is timed out, false otherwise.
   */
  // Idiom fix: `exists` replaces the `isDefined && ....get` combination (equivalent,
  // but avoids the unsafe Option#get).
  def isTimedOut = idleTimeout.exists(timeout => (lastUsedDateTime + timeout).isBeforeNow)
}
| cemcatik/play-silhouette | silhouette/app/com/mohiva/play/silhouette/api/Authenticator.scala | Scala | apache-2.0 | 4,106 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.security.filter
import org.geotools.factory.CommonFactoryFinder
import org.geotools.feature.simple.SimpleFeatureImpl
import org.geotools.filter.identity.FeatureIdImpl
import org.junit.runner.RunWith
import org.locationtech.geomesa.security._
import org.locationtech.geomesa.security.filter.VisibilityFilterFunctionTest.TestAuthorizationsProvider
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.simple.SimpleFeature
import org.opengis.filter.Filter
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class VisibilityFilterFunctionTest extends Specification {

  import scala.collection.JavaConverters._

  val ff2 = CommonFactoryFinder.getFilterFactory2()
  val testSft = SimpleFeatureTypes.createType("test", "name:String,*geom:Point:srid=4326")

  // Feature carrying the visibility expression in its first attribute ("name").
  def featureWithAttribute(vis: String): SimpleFeature = {
    val f = new SimpleFeatureImpl(Array.ofDim(2), testSft, new FeatureIdImpl(""), false)
    f.setAttribute(0, vis)
    f
  }

  // Feature carrying the visibility expression in its user data.
  def featureWithUserData(vis: String): SimpleFeature = {
    val f = new SimpleFeatureImpl(Array.ofDim(2), testSft, new FeatureIdImpl(""), false)
    f.visibility = vis
    f
  }

  // Builds "visibility(...) == true"; with no attribute the function reads user data.
  def filter(attribute: Option[String] = None): Filter = {
    val fn = attribute match {
      case None => ff2.function(VisibilityFilterFunction.Name.getFunctionName)
      case Some(a) => ff2.function(VisibilityFilterFunction.Name.getFunctionName, ff2.property(a))
    }
    ff2.equals(fn, ff2.literal(true))
  }

  // Runs `fn` with the given authorizations installed, restoring state afterwards.
  def withAuths[T](auths: Seq[String])(fn: => T): T = {
    try {
      GEOMESA_AUTH_PROVIDER_IMPL.threadLocalValue.set(classOf[TestAuthorizationsProvider].getName)
      VisibilityFilterFunctionTest.auths.set(auths.asJava)
      fn
    } finally {
      GEOMESA_AUTH_PROVIDER_IMPL.threadLocalValue.remove()
      VisibilityFilterFunctionTest.auths.remove()
    }
  }

  "VisibilityFilter" should {
    "work with AND'd vis" in {
      withAuths(Seq("ADMIN", "USER")) {
        filter().evaluate(featureWithUserData("ADMIN&USER")) must beTrue
        filter(Some("name")).evaluate(featureWithAttribute("ADMIN&USER")) must beTrue
      }
    }
    "work with OR'd vis" in {
      withAuths(Seq("USER")) {
        filter().evaluate(featureWithUserData("ADMIN|USER")) must beTrue
        filter(Some("name")).evaluate(featureWithAttribute("ADMIN|USER")) must beTrue
      }
    }
    "evaluate to false with no vis on the feature" in {
      withAuths(Seq("ADMIN", "USER")) {
        filter().evaluate(featureWithUserData(null)) must beFalse
        filter(Some("name")).evaluate(featureWithAttribute(null)) must beFalse
      }
    }
    "evaluate to false when user does not have the right auths" in {
      withAuths(Seq("ADMIN")) {
        filter().evaluate(featureWithUserData("ADMIN&USER")) must beFalse
        filter(Some("name")).evaluate(featureWithAttribute("ADMIN&USER")) must beFalse
      }
    }
  }
}
object VisibilityFilterFunctionTest {

  // Per-thread authorizations installed by withAuths and served by the provider below.
  private val auths = new ThreadLocal[java.util.List[String]]

  // Test provider that simply returns whatever the current thread has set.
  class TestAuthorizationsProvider extends AuthorizationsProvider {
    override def configure(params: java.util.Map[String, _ <: java.io.Serializable]): Unit = {}
    override def getAuthorizations: java.util.List[String] = auths.get
  }
}
| aheyne/geomesa | geomesa-security/src/test/scala/org/locationtech/geomesa/security/filter/VisibilityFilterFunctionTest.scala | Scala | apache-2.0 | 3,799 |
package org.ferrit.core.util
object HttpUtil {

  /** Standard HTTP header name used to declare a response body's media type. */
  val ContentTypeHeader: String = "Content-Type"

  // Common UTF-8 media-type values, built from a shared charset suffix.
  private val Utf8Suffix = "; charset=UTF-8"
  val TextHtmlUtf8: String = "text/html" + Utf8Suffix
  val TextCssUtf8: String = "text/css" + Utf8Suffix
  val TextUtf8: String = "text" + Utf8Suffix
}
object Headers {

  // Pre-built (header-name -> values) pairs,
  // e.g. "Content-Type: text/html; charset=UTF-8".
  val ContentTypeTextHtmlUtf8: (String, Seq[String]) =
    HttpUtil.ContentTypeHeader -> Seq(HttpUtil.TextHtmlUtf8)
  val ContentTypeTextCssUtf8: (String, Seq[String]) =
    HttpUtil.ContentTypeHeader -> Seq(HttpUtil.TextCssUtf8)
  val ContentTypeTextUtf8: (String, Seq[String]) =
    HttpUtil.ContentTypeHeader -> Seq(HttpUtil.TextUtf8)
}
//@
package xyz.hyperreal.bvm
import org.scalatest._
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
import scala.util.parsing.input.Position
import Testing._
class LanguageSupportTests extends FreeSpec with ScalaCheckPropertyChecks with Matchers {
  // Built-in bindings exposed to interpreted programs: "write" prints its dereferenced
  // argument(s), comma separated.
  val constants =
    Map(
      "write" -> { (_: VM, apos: Position, ps: List[Position], args: Any) =>
        val list =
          args match {
            case a: ArgList => a.array toList
            case a => List(a)
          }
        println(list map (a => deref(a)) mkString ", ")
      }
    )
  "hello world" in {
    // Hand-built AST roughly equivalent to:
    //   val a = "Hello world!"; val b = "bye bye"
    //   write(a); a <- b; write(a)
    val program =
      SourceAST(
        List(
          ValAST(
            VariableStructureAST(null, "a", "a"),
            null,
            LiteralExpressionAST(VMString("Hello world!"))
          ),
          ValAST(
            VariableStructureAST(null, "b", "b"),
            null,
            LiteralExpressionAST(VMString("bye bye"))
          ),
          ApplyExpressionAST(
            null,
            VariableExpressionAST(null, "write", "write"),
            null,
            List(
              (null, VariableExpressionAST(null, "a", "a"))
            ),
            false
          ),
          SetValueExpressionAST(
            null,
            "a",
            "a",
            VariableExpressionAST(null, "b", "b")
          ),
          ApplyExpressionAST(
            null,
            VariableExpressionAST(null, "write", "write"),
            null,
            List(
              (null, VariableExpressionAST(null, "a", "a"))
            ),
            false
          )
          //, AssignmentExpressionAST( List((null, VariableExpressionAST(null, "a", "a"))), '=, null, List((null, LiteralExpressionAST( "bye bye again" ))) )
        ))
    // Captured stdout must show the value before and after the SetValue step.
    runCapture(program, constants, Map(), Map()) shouldBe
      """
        |Hello world!
        |bye bye
      """.stripMargin.trim
  }
}
| edadma/funl | bvm/src/test/scala/xyz/hyperreal/bvm/LanguageSupportTests.scala | Scala | mit | 1,919 |
package net.slozzer.babel
import _root_.cats.kernel.laws.discipline.EqTests
import _root_.cats.laws.discipline.{SemigroupKTests, TraverseTests}
import munit.DisciplineSuite
import net.slozzer.babel.cats._
import org.scalacheck.Arbitrary
/** Discipline law checks for NonEmptyTranslations: its Traverse, SemigroupK
  * and Eq instances must satisfy the corresponding cats type class laws.
  */
final class NonEmptyTranslationsLawsTest extends DisciplineSuite {
  // ScalaCheck generator for arbitrary NonEmptyTranslations values.
  implicit def arbitrary[A: Arbitrary]: Arbitrary[NonEmptyTranslations[A]] =
    Arbitrary(Generators.nonEmptyTranslations(Arbitrary.arbitrary[A]))
  // Presumably brings the Cogen instance into scope, needed to generate
  // functions over NonEmptyTranslations for the traverse laws.
  import Cogenerators.nonEmptyTranslations
  checkAll(
    "NonEmptyTranslations",
    TraverseTests[NonEmptyTranslations].traverse[Int, Double, String, Long, Option, Option]
  )
  checkAll("NonEmptyTranslations", SemigroupKTests[NonEmptyTranslations].semigroupK[Int])
  checkAll("NonEmptyTranslations", EqTests[NonEmptyTranslations[Int]].eqv)
}
| Taig/lokal | modules/tests/shared/src/test/scala/net/slozzer/babel/NonEmptyTranslationsLawsTest.scala | Scala | mit | 802 |
// Comonad instance for Store[S, ?]. `extract` applies the stored accessor to
// the current focus. `duplicate` keeps the focus but wraps the accessor, so
// `Store(accessor)` — presumably the companion's curried/partially applied
// constructor — produces a whole store per position; confirm against Store's
// definition.
implicit def storeComonad[S] = new Comonad[Store[S, ?]] {
  def extract[A](wa: Store[S, A]): A = {
    val Store(accessor, focus) = wa
    accessor(focus)
  }
  def duplicate[A](wa: Store[S, A]): Store[S, Store[S, A]] = {
    val Store(accessor, focus) = wa
    Store(Store(accessor), focus)
  }
}
package scommons.client.ui.popup
import scommons.client.ui.{Buttons, ButtonsPanel, ButtonsPanelProps}
import scommons.client.util.ActionsData
import scommons.react.test.TestSpec
import scommons.react.test.util.ShallowRendererUtils
/** Shallow-rendering spec: ModalFooter must render a single ButtonsPanel
  * that mirrors the footer's props.
  */
class ModalFooterSpec extends TestSpec with ShallowRendererUtils {

  it should "render component with correct props" in {
    //given
    val props = ModalFooterProps(
      List(Buttons.OK, Buttons.CANCEL),
      ActionsData.empty
    )
    val component = <(ModalFooter())(^.wrapped := props)()
    //when
    val result = shallowRender(component)
    //then: child is a non-grouped ButtonsPanel styled with "modal-footer",
    // passing buttons/actions/dispatch straight through.
    assertComponent(result, ButtonsPanel) {
      case ButtonsPanelProps(buttons, actions, dispatch, group, className) =>
        buttons shouldBe props.buttons
        actions shouldBe props.actions
        dispatch shouldBe props.dispatch
        group shouldBe false
        className shouldBe Some("modal-footer")
    }
  }
}
| viktor-podzigun/scommons | ui/src/test/scala/scommons/client/ui/popup/ModalFooterSpec.scala | Scala | apache-2.0 | 927 |
package com.rxbytes.splitpal.ui.main.fragments.events
import android.os.Bundle
import android.support.v7.app.AppCompatActivity
/**
* Created by pnagarjuna on 02/01/16.
*/
/** Screen for adding an event. */
class AddEventActivity extends AppCompatActivity {
  // NOTE(review): both lifecycle overrides below only delegate to super and
  // are currently no-ops — presumably placeholders for upcoming setup and
  // cleanup logic; confirm whether they should be kept.
  override def onCreate(savedInstanceState: Bundle): Unit = {
    super.onCreate(savedInstanceState)
  }
  override def onDestroy(): Unit = {
    super.onDestroy()
  }
}
| pamu/split-pal | src/main/scala/com/rxbytes/splitpal/ui/main/fragments/events/AddEventActivity.scala | Scala | apache-2.0 | 402 |
package geotrellis.op
import geotrellis._
import geotrellis.process._
import geotrellis.raster._
import geotrellis.raster.op.local.AddArray
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
/** Tests the AddArray local raster operation for Int and Double rasters. */
class AddArrayTest extends FunSuite {
  // 10x10 grid of unit-sized cells covering the extent (0,0)-(10,10).
  val e = Extent(0.0, 0.0, 10.0, 10.0)
  val re = RasterExtent(e, 1.0, 1.0, 10, 10)
  val server = TestServer()
  // Constant rasters: every one of the 100 cells holds n (Int / Double overloads).
  def r(n:Int) = Raster(Array.fill(100)(n), re)
  def r(n:Double) = Raster(Array.fill(100)(n), re)
  // Builds an AddArray op summing constant rasters made from the given values.
  def addInts(ns:Int*) = AddArray(ns.map(n => r(n)).toArray.asInstanceOf[Array[Raster]])
  def addDoubles(ns:Double*) = AddArray(ns.map(n => r(n)).toArray.asInstanceOf[Array[Raster]])
  test("add integers") {
    val a = 3
    val b = 6
    val c = 9
    val n = NODATA
    // NODATA behaves as the identity for addition (n + b == b, etc.).
    assert(server.run(addInts(a, b)) === r(c))
    assert(server.run(addInts(n, b)) === r(b))
    assert(server.run(addInts(c, n)) === r(c))
    assert(server.run(addInts(n, n)) === r(n))
  }
  test("add doubles") {
    val a = 3000000000.0
    val b = 6000000000.0
    val c = 9000000000.0
    val x = a + a + b + b + c
    // For Double rasters NaN plays the nodata role and is likewise ignored.
    val n = Double.NaN
    assert(server.run(addDoubles(a, b)) === r(c))
    assert(server.run(addDoubles(n, b)) === r(b))
    assert(server.run(addDoubles(c, n)) === r(c))
    assert(server.run(addDoubles(n, n)) === r(n))
    assert(server.run(addDoubles(a, a, b, b, c)) === r(x))
  }
}
| Tjoene/thesis | Case_Programs/geotrellis-0.7.0/src/test/scala/geotrellis/op/AddArrayTest.scala | Scala | gpl-2.0 | 1,411 |
import controllers.GreeterController
import play.api.i18n.Langs
import play.api.mvc.ControllerComponents
import services.ServicesModule
/** Compile-time DI module for the greeting feature. Mixing this trait into
  * the application components provides a wired [[controllers.GreeterController]].
  */
trait GreetingModule extends ServicesModule {
  import com.softwaremill.macwire._

  // macwire's wire[] macro materialises the constructor call, resolving the
  // controller's dependencies from the members of this trait (including those
  // inherited from ServicesModule and the abstract members below).
  lazy val greeterController = wire[GreeterController]

  // Supplied by whatever mixes this trait in (e.g. the application loader).
  def langs: Langs
  def controllerComponents: ControllerComponents
}
| play2-maven-plugin/play2-maven-test-projects | play28/scala/macwire-di-example/app/GreetingModule.scala | Scala | apache-2.0 | 348 |
package lila.mod
import lila.db.BSON.BSONJodaDateTimeHandler
import org.joda.time.DateTime
import reactivemongo.api.collections.bson.BSONBatchCommands.AggregationFramework._
import reactivemongo.bson._
import scala.concurrent.duration._
import lila.db.Implicits._
import lila.memo.AsyncCache
/** Moderator "gamification": daily/weekly/monthly leaderboards of moderator
  * activity and a month-by-month history of past champions, computed from the
  * mod log, report and history Mongo collections.
  */
final class Gamify(
    logColl: Coll,
    reportColl: Coll,
    historyColl: Coll) {

  import Gamify._

  /** Champion history, newest month first. If the stored history does not yet
    * reach last month, the missing months are computed and persisted, then the
    * method recurses exactly once with orCompute = false (so no infinite loop).
    */
  def history(orCompute: Boolean = true): Fu[List[HistoryMonth]] = {
    // The history is complete once a record for last month exists.
    val until = DateTime.now minusMonths 1 withDayOfMonth 1
    val lastId = HistoryMonth.makeId(until.getYear, until.getMonthOfYear)
    historyColl.find(BSONDocument()).sort(BSONDocument(
      "year" -> -1,
      "month" -> -1
    )).cursor[HistoryMonth]().collect[List]().flatMap { months =>
      months.headOption match {
        case Some(m) if m._id == lastId || !orCompute => fuccess(months)
        case Some(m) => buildHistoryAfter(m.year, m.month, until) >> history(false)
        // No history at all yet: rebuild everything from June 2012 onwards.
        case _ => buildHistoryAfter(2012, 6, until) >> history(false)
      }
    }
  }

  // BSON codecs for the persisted history documents.
  private implicit val modMixedBSONHandler = Macros.handler[ModMixed]
  private implicit val historyMonthBSONHandler = Macros.handler[HistoryMonth]

  /** Computes the champion of every month after (afterYear, afterMonth) up to
    * `until`, and upserts one HistoryMonth document per month that had one.
    */
  private def buildHistoryAfter(afterYear: Int, afterMonth: Int, until: DateTime): Funit =
    (afterYear to until.getYear).flatMap { year =>
      // Boolean#fold(ifTrue, ifFalse) — presumably scalaz's ternary — clamps
      // the month range for the first and last year of the span.
      ((year == afterYear).fold(afterMonth + 1, 1) to
        (year == until.getYear).fold(until.getMonthOfYear, 12)).map { month =>
          // NOTE(review): .pp(...) looks like a leftover debug print of the
          // month being computed — confirm and remove.
          mixedLeaderboard(
            after = new DateTime(year, month, 1, 0, 0).pp("compute mod history"),
            before = new DateTime(year, month, 1, 0, 0).plusMonths(1).some
          ).map {
            _.headOption.map { champ =>
              HistoryMonth(HistoryMonth.makeId(year, month), year, month, champ)
            }
          }
        }.toList
    }.toList.sequenceFu.map(_.flatten).flatMap {
      _.map { month =>
        historyColl.update(BSONDocument("_id" -> month._id), month, upsert = true)
      }.sequenceFu
    }.void

  def leaderboards = leaderboardsCache(true)

  // Day/week/month leaderboards computed together and cached for 10 seconds.
  private val leaderboardsCache = AsyncCache.single[Leaderboards](
    f = mixedLeaderboard(DateTime.now minusDays 1, none) zip
      mixedLeaderboard(DateTime.now minusWeeks 1, none) zip
      mixedLeaderboard(DateTime.now minusMonths 1, none) map {
        case ((daily, weekly), monthly) => Leaderboards(daily, weekly, monthly)
      },
    timeToLive = 10 seconds)

  // Only moderators appearing on BOTH the action and the report boards are
  // ranked; their two counts are combined into a ModMixed, best score first.
  private def mixedLeaderboard(after: DateTime, before: Option[DateTime]): Fu[List[ModMixed]] =
    actionLeaderboard(after, before) zip reportLeaderboard(after, before) map {
      case (actions, reports) => actions.map(_.modId) intersect reports.map(_.modId) map { modId =>
        ModMixed(modId,
          action = actions.find(_.modId == modId) ?? (_.count),
          report = reports.find(_.modId == modId) ?? (_.count))
      } sortBy (-_.score)
    }

  // BSON date-range criterion; the upper bound is optional.
  private def dateRange(from: DateTime, toOption: Option[DateTime]) =
    BSONDocument("$gte" -> from) ++ toOption.?? { to => BSONDocument("$lt" -> to) }

  // The "lichess" account is excluded from every board.
  private val notLichess = BSONDocument("$ne" -> "lichess")

  // Aggregation: count mod log entries per moderator, most active first.
  private def actionLeaderboard(after: DateTime, before: Option[DateTime]): Fu[List[ModCount]] =
    logColl.aggregate(Match(BSONDocument(
      "date" -> dateRange(after, before),
      "mod" -> notLichess
    )), List(
      GroupField("mod")("nb" -> SumValue(1)),
      Sort(Descending("nb")))).map {
      _.documents.flatMap { obj =>
        obj.getAs[String]("_id") |@| obj.getAs[Int]("nb") apply ModCount.apply
      }
    }

  // Aggregation: count processed reports per moderator, most active first.
  private def reportLeaderboard(after: DateTime, before: Option[DateTime]): Fu[List[ModCount]] =
    reportColl.aggregate(
      Match(BSONDocument(
        "createdAt" -> dateRange(after, before),
        "processedBy" -> notLichess
      )), List(
      GroupField("processedBy")("nb" -> SumValue(1)),
      Sort(Descending("nb")))).map {
      _.documents.flatMap { obj =>
        obj.getAs[String]("_id") |@| obj.getAs[Int]("nb") apply ModCount.apply
      }
    }
}
object Gamify {

  /** One month of history and its champion; _id has the form "year/month". */
  case class HistoryMonth(_id: String, year: Int, month: Int, champion: ModMixed) {
    def date = new DateTime(year, month, 1, 0, 0)
  }
  object HistoryMonth {
    // Month is not zero-padded, e.g. "2016/4".
    def makeId(year: Int, month: Int) = s"$year/$month"
  }

  /** Leaderboard time window. */
  sealed trait Period {
    def name = toString.toLowerCase
  }
  object Period {
    case object Day extends Period
    case object Week extends Period
    case object Month extends Period
    // Parses a period from its lowercase name; None when unknown.
    def apply(p: String) = List(Day, Week, Month).find(_.name == p)
  }

  /** The three cached boards, selectable by Period. */
  case class Leaderboards(daily: List[ModMixed], weekly: List[ModMixed], monthly: List[ModMixed]) {
    def apply(period: Period) = period match {
      case Period.Day => daily
      case Period.Week => weekly
      case Period.Month => monthly
    }
  }

  /** Number of entries one moderator has in a single collection. */
  case class ModCount(modId: String, count: Int)

  /** Combined activity of one moderator; score sums actions and reports. */
  case class ModMixed(modId: String, action: Int, report: Int) {
    def score = action + report
  }
}
| JimmyMow/lila | modules/mod/src/main/Gamify.scala | Scala | mit | 5,026 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package system.basic
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import common.rest.WskRest
/**
 * Tests sequence execution: runs the shared WskSequenceTests suite through
 * the REST implementation of the wsk client.
 */
@RunWith(classOf[JUnitRunner])
class WskRestSequenceTests extends WskSequenceTests {
  override val wsk: common.rest.WskRest = new WskRest
}
| paulcastro/openwhisk | tests/src/test/scala/system/basic/WskRestSequenceTests.scala | Scala | apache-2.0 | 1,103 |
package de.tototec.sbuild.eclipse.plugin.container
import org.eclipse.core.runtime.NullProgressMonitor
import org.eclipse.jdt.core.IClasspathEntry
import org.eclipse.jdt.core.IJavaProject
import org.eclipse.jdt.ui.wizards.IClasspathContainerPage
import org.eclipse.jdt.ui.wizards.IClasspathContainerPageExtension
import org.eclipse.jface.viewers.ArrayContentProvider
import org.eclipse.jface.viewers.CellEditor
import org.eclipse.jface.viewers.ColumnLabelProvider
import org.eclipse.jface.viewers.ComboBoxViewerCellEditor
import org.eclipse.jface.viewers.EditingSupport
import org.eclipse.jface.viewers.ISelectionChangedListener
import org.eclipse.jface.viewers.IStructuredSelection
import org.eclipse.jface.viewers.SelectionChangedEvent
import org.eclipse.jface.viewers.TableViewerColumn
import org.eclipse.jface.viewers.TextCellEditor
import org.eclipse.jface.wizard.WizardPage
import org.eclipse.swt.SWT
import org.eclipse.swt.events.ModifyEvent
import org.eclipse.swt.events.ModifyListener
import org.eclipse.swt.events.SelectionAdapter
import org.eclipse.swt.events.SelectionEvent
import org.eclipse.swt.layout.GridData
import org.eclipse.swt.widgets.Composite
import de.tototec.sbuild.eclipse.plugin.Logger.debug
import de.tototec.sbuild.eclipse.plugin.Settings
import de.tototec.sbuild.eclipse.plugin.preferences.SBuildPreferences
import de.tototec.sbuild.eclipse.plugin.preferences.WorkspaceProjectAliases
import org.eclipse.jface.viewers.ColumnViewerEditorActivationEvent
import org.eclipse.jface.viewers.ColumnViewerEditorActivationStrategy
/** Eclipse wizard page for configuring the SBuild classpath container:
  * container settings (SBuild file, exported classpath, resolution options)
  * plus an editable table of workspace-project aliases. Settings are returned
  * via getSelection(); aliases are persisted in finish().
  */
class SBuildClasspathContainerPage extends WizardPage("SBuild Libraries") with IClasspathContainerPage with IClasspathContainerPageExtension {

  // Factory + extractor so the mutable AliasEntry can be pattern matched.
  object AliasEntry {
    def apply(key: String, value: String, regex: Boolean) = new AliasEntry(key, value, regex)
    def unapply(e: AliasEntry): Option[(String, String, Boolean)] = Some(e.key, e.value, e.regex)
  }
  // Row model of the alias table: dependency key -> workspace project name,
  // optionally interpreted as a regular expression.
  class AliasEntry(var key: String, var value: String, var regex: Boolean)

  val containerPath = SBuildClasspathContainer.ContainerName

  private var project: IJavaProject = _
  private var options: Map[String, String] = Map()
  // Container settings edited by this page; converted from/to IClasspathEntry.
  private val settings: Settings = new Settings

  setDescription("Configure SBuild Libraries")
  setPageComplete(true)

  // Backing model of the alias table; replaced wholesale on add/remove.
  var aliasModel: Seq[AliasEntry] = Seq()

  // Loads both the plain and the regex aliases from the project preferences.
  override def initialize(project: IJavaProject, currentEntries: Array[IClasspathEntry]) = {
    this.project = project
    debug("Read workspace project aliases into " + getClass())
    aliasModel =
      WorkspaceProjectAliases.read(project, SBuildPreferences.Node.WorkspaceProjectAlias).toSeq.map {
        case (key, value) => new AliasEntry(key, value, false)
      } ++
        WorkspaceProjectAliases.read(project, SBuildPreferences.Node.WorkspaceProjectRegexAlias).toSeq.map {
          case (key, value) => new AliasEntry(key, value, true)
        }
  }

  override def setSelection(classpathEntry: IClasspathEntry) = settings.fromIClasspathEntry(classpathEntry)
  override def getSelection(): IClasspathEntry = settings.toIClasspathEntry

  // Persists the alias table (split back into regex / non-regex preference
  // nodes) and refreshes the container so the change takes effect immediately.
  override def finish(): Boolean = {
    debug("Write workspace project aliases from " + getClass())
    val (regex, nonRegex) = aliasModel.partition(_.regex)
    WorkspaceProjectAliases.write(project, SBuildPreferences.Node.WorkspaceProjectRegexAlias, regex.map { case AliasEntry(key, value, true) => (key, value) }.toMap)
    WorkspaceProjectAliases.write(project, SBuildPreferences.Node.WorkspaceProjectAlias, nonRegex.map { case AliasEntry(key, value, false) => (key, value) }.toMap)
    // update the classpath container to reflect changes
    SBuildClasspathContainer.getSBuildClasspathContainers(project).map(c => c.updateClasspath(new NullProgressMonitor()))
    true
  }

  // Builds the page UI: text fields / checkboxes bound to `settings`, and the
  // three-column editable alias table with add/remove buttons.
  override def createControl(parent: Composite) {
    val composite = new PageComposite(parent, SWT.NONE)
    composite.setLayoutData(new GridData(SWT.BEGINNING | SWT.TOP));

    // SBuild file location, kept in sync with the settings object.
    val sbuildFile = composite.sbuildFileText
    sbuildFile.addModifyListener(new ModifyListener {
      override def modifyText(e: ModifyEvent) {
        settings.sbuildFile = sbuildFile.getText
      }
    })
    sbuildFile.setText(settings.sbuildFile)

    // Name of the classpath exported by the SBuild project.
    val exportedClasspath = composite.exportedClasspathText
    exportedClasspath.addModifyListener(new ModifyListener {
      override def modifyText(e: ModifyEvent) {
        settings.exportedClasspath = exportedClasspath.getText
      }
    })
    exportedClasspath.setText(settings.exportedClasspath)

    // Checkbox: relaxed (non-failing) fetch of dependencies.
    val updateDependenciesButton = composite.updateDependenciesButton
    updateDependenciesButton.setSelection(settings.relaxedFetchOfDependencies)
    updateDependenciesButton.addSelectionListener(new SelectionAdapter() {
      override def widgetSelected(event: SelectionEvent) =
        settings.relaxedFetchOfDependencies = updateDependenciesButton.getSelection()
    })

    // Checkboxes: also resolve source and javadoc artifacts.
    val resolveSourcesButton = composite.resolveSourcesButton
    resolveSourcesButton.setSelection(settings.resolveSources)
    resolveSourcesButton.addSelectionListener(new SelectionAdapter() {
      override def widgetSelected(event: SelectionEvent) =
        settings.resolveSources = resolveSourcesButton.getSelection()
    })
    val resolveJavadocButton = composite.resolveJavadocButton
    resolveJavadocButton.setSelection(settings.resolveJavadoc)
    resolveJavadocButton.addSelectionListener(new SelectionAdapter() {
      override def widgetSelected(event: SelectionEvent) =
        settings.resolveJavadoc = resolveJavadocButton.getSelection()
    })

    val workspaceProjectAliases = composite.workspaceProjectAliasTable

    //    new ColumnViewerEditorActivationStrategy(workspaceProjectAliases) {
    //      override protected def isEditorActivationEvent(ev: ColumnViewerEditorActivationEvent): Boolean =
    //        ev.eventType match {
    //          case ColumnViewerEditorActivationEvent.TRAVERSAL |
    //            ColumnViewerEditorActivationEvent.MOUSE_CLICK_SELECTION |
    //            ColumnViewerEditorActivationEvent.MOUSE_DOUBLE_CLICK_SELECTION |
    //            ColumnViewerEditorActivationEvent.PROGRAMMATIC => true
    //          case ColumnViewerEditorActivationEvent.KEY_PRESSED => ev.keyCode == SWT.CR
    //          case _ => false
    //        }
    //    }

    // Column 1: dependency key, edited in place with a text cell editor.
    val col1 = new TableViewerColumn(workspaceProjectAliases, SWT.LEFT)
    col1.getColumn.setText("Dependency")
    col1.getColumn.setWidth(200)
    col1.setLabelProvider(new ColumnLabelProvider() {
      override def getText(element: Object) = element match {
        case AliasEntry(key, _, _) => key
        case _ => ""
      }
    })
    val col1EditingSupport = new EditingSupport(workspaceProjectAliases) {
      override def canEdit(o: Object): Boolean = o.isInstanceOf[AliasEntry]
      override def getCellEditor(o: Object): CellEditor = new TextCellEditor(workspaceProjectAliases.getTable)
      override def getValue(o: Object) = o match {
        case AliasEntry(key, _, _) => key
        case _ => ""
      }
      override def setValue(o: Object, newVal: Object) = o match {
        case aliasEntry: AliasEntry if newVal.isInstanceOf[String] =>
          aliasEntry.key = newVal.asInstanceOf[String]
          workspaceProjectAliases.update(o, null)
        case _ =>
      }
    }
    col1.setEditingSupport(col1EditingSupport)

    // Column 2: target workspace project, same in-place text editing.
    val col2 = new TableViewerColumn(workspaceProjectAliases, SWT.LEFT)
    col2.getColumn.setText("Workspace Project")
    col2.getColumn.setWidth(200)
    col2.setLabelProvider(new ColumnLabelProvider() {
      override def getText(element: Object) = element match {
        case AliasEntry(_, value, _) => value
        case _ => ""
      }
    })
    val col2EditingSupport = new EditingSupport(workspaceProjectAliases) {
      override def canEdit(o: Object): Boolean = o.isInstanceOf[AliasEntry]
      override def getCellEditor(o: Object): CellEditor = new TextCellEditor(workspaceProjectAliases.getTable)
      override def getValue(o: Object) = o match {
        case AliasEntry(_, value, _) => value
        case _ => ""
      }
      override def setValue(o: Object, newVal: Object) = o match {
        case aliasEntry: AliasEntry if newVal.isInstanceOf[String] =>
          aliasEntry.value = newVal.asInstanceOf[String]
          workspaceProjectAliases.update(o, null)
        case _ =>
      }
    }
    col2.setEditingSupport(col2EditingSupport)

    // Column 3: regex flag, edited through a yes/no combo box.
    val col3 = new TableViewerColumn(workspaceProjectAliases, SWT.CENTER)
    col3.getColumn.setText("Regex")
    col3.getColumn.setWidth(20)
    col3.setLabelProvider(new ColumnLabelProvider() {
      override def getText(o: Object) = o match {
        case AliasEntry(_, _, true) => "yes"
        case _ => "no"
      }
    })
    val col3EditingSupport = new EditingSupport(workspaceProjectAliases) {
      override def canEdit(o: Object): Boolean = o.isInstanceOf[AliasEntry]
      override def getCellEditor(o: Object): CellEditor = {
        val combo = new ComboBoxViewerCellEditor(workspaceProjectAliases.getTable)
        // NOTE(review): setContenProvider is presumably JFace's actual
        // (misspelled) API method name — confirm against the JFace version.
        combo.setContenProvider(new ArrayContentProvider())
        combo.setLabelProvider(new ColumnLabelProvider() {
          override def getText(element: Any) = element match {
            case java.lang.Boolean.TRUE => "yes"
            case _ => "no"
          }
        })
        combo.setInput(Array(java.lang.Boolean.FALSE, java.lang.Boolean.TRUE))
        combo
      }
      override def getValue(o: Object): Object = o match {
        case AliasEntry(_, _, regex) if regex => java.lang.Boolean.TRUE
        case _ => java.lang.Boolean.FALSE
      }
      override def setValue(o: Object, newVal: Object) = o match {
        case aliasEntry: AliasEntry =>
          newVal match {
            case regex: java.lang.Boolean => aliasEntry.regex = regex
            case _ => aliasEntry.regex = false
          }
          workspaceProjectAliases.update(o, null)
        case _ =>
      }
    }
    col3.setEditingSupport(col3EditingSupport)

    // "Remove" is only enabled while a row is selected.
    val delButton = composite.removeAliasButton
    delButton.setEnabled(false)
    workspaceProjectAliases.addSelectionChangedListener(new ISelectionChangedListener() {
      override def selectionChanged(event: SelectionChangedEvent) {
        val delEnabled = event.getSelection match {
          case sel: IStructuredSelection if !sel.isEmpty => true
          case _ => false
        }
        delButton.setEnabled(delEnabled)
      }
    })
    workspaceProjectAliases.setContentProvider(new ArrayContentProvider())
    workspaceProjectAliases.setInput(aliasModel.toArray)

    // "Add" appends a blank row; "Remove" drops the selected row. Both reset
    // the viewer input from the rebuilt model.
    composite.addAliasButton.addSelectionListener(new SelectionAdapter() {
      override def widgetSelected(event: SelectionEvent) {
        aliasModel ++= Seq(AliasEntry("", "", false))
        workspaceProjectAliases.setInput(aliasModel.toArray)
      }
    })
    delButton.addSelectionListener(new SelectionAdapter() {
      override def widgetSelected(event: SelectionEvent) {
        workspaceProjectAliases.getSelection match {
          case sel: IStructuredSelection if !sel.isEmpty =>
            sel.getFirstElement match {
              case entry: AliasEntry =>
                aliasModel = aliasModel.filter(_ != entry)
                workspaceProjectAliases.setInput(aliasModel.toArray)
            }
          case _ =>
        }
      }
    })

    setControl(composite)
  }
}
package spinoco.protocol.kafka
import java.util.Date
import shapeless.tag.@@
import scala.concurrent.duration.FiniteDuration
/**
 * Root type of all responses received from a Kafka broker. Concrete response
 * messages are defined in the companion object.
 */
trait Response
/** Concrete Kafka broker response messages and their payload types. */
object Response {

  /**
    * Response to [[spinoco.protocol.kafka.Request.MetadataRequest]]
    * @param brokers  All brokers known
    * @param topics   All topics known
    */
  case class MetadataResponse(
    brokers: Vector[Broker]
    , topics: Vector[TopicMetadata]
  ) extends Response

  /**
    * Response to [[spinoco.protocol.kafka.Request.ProduceRequest]]
    * Contains map per topic and partition.
    *
    * @param data           Contains result of each produce response. Not guaranteed to be in same order as request.
    * @param throttleTime   If the request was throttled, this contains time how long it was throttled (since kafka 0.9.0)
    */
  case class ProduceResponse(
    data: Vector[(String @@ TopicName, Vector[(Int @@ PartitionId, PartitionProduceResult)])]
    , throttleTime:Option[FiniteDuration]
  ) extends Response

  /**
    * Contains result of the produce for single partition
    *
    * @param error    If nonempty, produce failed
    * @param offset   Contains offset of first published message
    * @param time     If LogAppendTime is used for the topic, this is the timestamp assigned by the broker to the message set.
    *                 All the messages in the message set have the same timestamp. If CreateTime is used, this field is always None.
    *                 The producer can assume the timestamp of the messages in the produce request has been accepted by the broker if there is no error code returned.
    *                 Unit is milliseconds since beginning of the epoch (midnight Jan 1, 1970 (UTC)).
    *                 Available since kafka 0.10.0
    */
  case class PartitionProduceResult(
    error: Option[ErrorType.Value]
    , offset:Long @@ Offset
    , time:Option[Date]
  )

  /**
    * Response to [[spinoco.protocol.kafka.Request.FetchRequest]]
    *
    *
    * @param data           Contains data of messages fetched
    * @param throttleTime   If the request was throttled, this contains time how long it was throttled (since kafka 0.9.0)
    */
  case class FetchResponse(
    data:Vector[(String @@ TopicName, Vector[PartitionFetchResult])]
    , throttleTime:Option[FiniteDuration]
  ) extends Response

  /**
    * Contains fetch result for given partition
    *
    * @param partitionId    Id of partition
    * @param error          If nonempty, fetch resulted in error
    * @param highWMOffset   The offset at the end of the log for this partition. This can be used by the client to determine how many messages behind the end of the log they are.
    * @param messages       Messages fetched.
    */
  case class PartitionFetchResult(
    partitionId: Int @@ PartitionId
    , error: Option[ErrorType.Value]
    , highWMOffset: Long @@ Offset
    , messages:Vector[Message]
  )

  /**
    * Response to the offset query. Response for topic queries by client
    * @param data   Data containing reposne to offset query
    */
  case class OffsetResponse(
    data: Vector[(String @@ TopicName, Vector[PartitionOffsetResponse])]
  ) extends Response

  /**
    * Reposne data for Offset query for an partition
    * @param partitionId    Id of partition
    * @param error          If nonempty, reposne failed
    * @param timestamp      If query contained a timestamp, this will indicate offset for given timestamp. 0 in case of protocolV9
    * @param offsets        Offsets of chunks for given partition
    */
  case class PartitionOffsetResponse(
    partitionId: Int @@ PartitionId
    , error: Option[ErrorType.Value]
    , timestamp: Date
    , offsets: Vector[Long @@ Offset]
  )
}
| Spinoco/protocol | kafka/src/main/scala/spinoco/protocol/kafka/Response.scala | Scala | mit | 3,773 |
package com.sksamuel.elastic4s.searches.aggs
import com.sksamuel.elastic4s.ScriptBuilder
import com.sksamuel.elastic4s.searches.aggs.pipeline.PipelineAggregationBuilderFn
import org.elasticsearch.search.aggregations.AggregationBuilders
import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistanceAggregationBuilder
import scala.collection.JavaConverters._
object GeoDistanceAggregationBuilder {

  /** Converts the elastic4s geo-distance aggregation definition into the
    * native Elasticsearch builder, copying over every optional setting that
    * was supplied on the definition.
    */
  def apply(agg: GeoDistanceAggregationDefinition): GeoDistanceAggregationBuilder = {
    val geo = AggregationBuilders.geoDistance(agg.name, agg.origin)

    // Optional scalar settings.
    agg.field.foreach(geo.field)
    agg.missing.foreach(geo.missing)
    agg.format.foreach(geo.format)
    agg.keyed.foreach(geo.keyed)
    agg.distanceType.foreach(geo.distanceType)
    agg.unit.foreach(geo.unit)
    agg.script.map(ScriptBuilder.apply).foreach(geo.script)

    // Distance ranges and the two unbounded endpoints, each optionally keyed.
    agg.ranges.foreach { case (key, from, to) =>
      key.fold(geo.addRange(from, to))(k => geo.addRange(k, from, to))
    }
    agg.unboundedFrom.foreach { case (key, from) =>
      key.fold(geo.addUnboundedFrom(from))(k => geo.addUnboundedFrom(k, from))
    }
    agg.unboundedTo.foreach { case (key, to) =>
      key.fold(geo.addUnboundedTo(to))(k => geo.addUnboundedTo(k, to))
    }

    // Nested aggregations, pipeline aggregations and metadata.
    agg.subaggs.foreach(sub => geo.subAggregation(AggregationBuilder(sub)))
    agg.pipelines.foreach(p => geo.subAggregation(PipelineAggregationBuilderFn(p)))
    if (agg.metadata.nonEmpty) geo.setMetaData(agg.metadata.asJava)
    geo
  }
}
| tyth/elastic4s | elastic4s-tcp/src/main/scala/com/sksamuel/elastic4s/searches/aggs/GeoDistanceAggregationBuilder.scala | Scala | apache-2.0 | 1,617 |
package org.scalacoin.protocol.rpc.bitcoincore.networking
/**
 * Created by Tom on 1/5/2016.
 *
 * Node networking status — presumably mirrors the fields of Bitcoin Core's
 * `getnetworkinfo` JSON-RPC result (see the TODO on the implementation);
 * confirm against the node version in use.
 */
trait NetworkInfo {
  def version : Int
  def subVersion : String
  def protocolVersion: Int
  def localServices : String
  def timeOffSet : Int
  def connections : Int
  def networks : Seq[NetworkConnections]
  def relayFee : Double
  def localAddresses : Seq[Int]
}
//TODO: Change localaddresses from array[string] to array of connections. see https://bitcoin.org/en/developer-reference#getnetworkinfo
// Plain case-class implementation of NetworkInfo.
case class NetworkInfoImpl(version : Int, subVersion : String, protocolVersion : Int, localServices : String,
                    timeOffSet : Int, connections : Int, networks : Seq[NetworkConnections], relayFee : Double, localAddresses: Seq[Int]) extends NetworkInfo
| TomMcCabe/scalacoin | src/main/scala/org/scalacoin/protocol/rpc/bitcoincore/networking/NetworkInfo.scala | Scala | mit | 754 |
package sampler.abc.actor.root.state.task
import akka.actor.ActorRef
import sampler.abc.actor.root.state.task.egen.EvolvingGeneration
import sampler.abc.{ABCConfig, Population}
/** State of an ABC run as seen by the root actor: exposes the run
  * configuration and the requesting client, and decides when the in-progress
  * generation should be flushed and when the run should stop.
  */
trait Task[P] {
  val config: ABCConfig
  val client: ActorRef

  // Receiving a new evolving generation always moves the task into the
  // running state, whatever state it was in before.
  def updateEvolvingGeneration(eGen: EvolvingGeneration[P]) =
    RunningTask(config, client, eGen)

  def shouldFlush: Boolean
  def shouldTerminate: Boolean
}
/** Task state while resuming from a previously persisted population; nothing
  * can be flushed or terminated until generation building restarts.
  */
case class ResumingTask[P](
    config: ABCConfig,
    client: ActorRef,
    initialPopulation: Population[P]
  ) extends Task[P] {
  def shouldFlush = false
  def shouldTerminate = false
}
/** Task state while a generation is actively being built. */
case class RunningTask[P](
    config: ABCConfig,
    client: ActorRef,
    evolvingGeneration: EvolvingGeneration[P]
  ) extends Task[P] {

  /** True once the weighed buffer holds enough particles overall AND enough
    * locally generated ones.
    */
  def shouldFlush = {
    val weighed = evolvingGeneration.weighed
    val hasEnoughParticles = weighed.size >= config.numParticles
    val hasEnoughLocalParticles =
      weighed.seq.count(_.wasLocallyGenerated) >= config.minNumLocalParticles
    hasEnoughParticles && hasEnoughLocalParticles
  }

  /** True when the final configured generation has been reached and the run
    * is configured to stop there.
    */
  def shouldTerminate =
    evolvingGeneration.previousGen.iteration >= config.numGenerations - 1 &&
      config.terminateAtTargetGen

  //TODO empty weighing buffer?
}
| tearne/Sampler | sampler-abc/src/main/scala/sampler/abc/actor/root/state/task/Task.scala | Scala | apache-2.0 | 1,287 |
package au.com.dius.pact.consumer
object PactConsumerConfig {

  /** Mutable configuration store; the pact output directory defaults to target/pacts. */
  val config = scala.collection.mutable.Map("pactRootDir" -> "target/pacts")

  /** Root directory for generated pact files; the `pact.rootDir` system
    * property, when set, wins over the configured value.
    */
  def pactRootDir = sys.props.get("pact.rootDir").getOrElse(config("pactRootDir"))
}
| caoquendo/pact-jvm | pact-jvm-consumer/src/main/scala/au/com/dius/pact/consumer/PactConsumerConfig.scala | Scala | apache-2.0 | 222 |
/* *\\
** \\ \\ / _) \\ \\ / \\ | **
** \\ \\ / | __ \\ _ \\ __| \\ \\ / |\\/ | **
** \\ \\ / | | | __/ | \\ \\ / | | **
** \\_/ _| .__/ \\___| _| \\_/ _| _| **
** _| **
** **
** ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **
** **
** http://www.vipervm.org **
** GPLv3 **
\\* */
package org.vipervm.bindings.opencl
import org.vipervm.bindings.NativeSize
import com.sun.jna._
import java.nio.ByteBuffer
object Wrapper {
Native.register("OpenCL")
/* Platform API */
@native def clGetPlatformIDs(numEntries:Int, platforms:Pointer, numPlatforms:Pointer): Int
@native def clGetPlatformInfo(platform:Pointer, paramName:Int, paramValueSize:NativeSize, paramValue:Pointer, paramValueSizeRet:Pointer): Int
/* Device API */
@native def clGetDeviceIDs(platform:Pointer, deviceType:Int, numEntries:Int, devices:Pointer, numDevices:Pointer): Int
@native def clGetDeviceInfo(device:Pointer, paramName:Int, paramValueSize:NativeSize, paramValue:Pointer, paramValueSizeRet:Pointer): Int
/* Context API */
@native def clCreateContext(properties:Pointer, numDevices:Int, devices:Pointer, pfnNotify:Pointer, userData:Pointer, errcodeRet:Pointer): Pointer
@native def clCreateContextFromType(propeties:Pointer, deviceType:Int, pfnNotify:Pointer, userData:Pointer, errcodeRet:Pointer): Pointer
@native def clRetainContext(context:Pointer): Int
@native def clReleaseContext(context:Pointer): Int
@native def clGetContextInfo(context:Pointer, paramName:Int, paramValueSize:NativeSize, paramValue:Pointer, paramValueSizeRet:Pointer): Int
/* Command Queue APIs */
// --- OpenCL C API bindings (one @native method per OpenCL entry point) ---
// Signatures mirror the C prototypes: cl_* handles map to Pointer,
// size_t maps to NativeSize, and C out-parameters are passed as Pointer.
// NOTE(review): resolution of these @native symbols is presumably done via
// JNA direct mapping (Native.register) in this object's initializer -- the
// registration call is not visible in this part of the file; confirm.
// The trailing block comment after this object lists entry points (image,
// native-kernel, binary-program APIs) that have not been wrapped yet.
@native def clCreateCommandQueue(context:Pointer, device:Pointer, properties:Long, errcodeRet:Pointer): Pointer
@native def clRetainCommandQueue(commandQueue:Pointer): Int
@native def clReleaseCommandQueue(commandQueue:Pointer): Int
@native def clGetCommandQueueInfo(commandQueue:Pointer, paramName:Int, paramValueSize:NativeSize, paramValue:Pointer, paramValueSizeRet:Pointer): Int
@native def clSetCommandQueueProperty(commandQueue:Pointer, properties:Long, enable:Int, oldProperties:Pointer): Int
/* Memory Object APIs */
// Alternative host-pointer signature, currently disabled in favour of the
// ByteBuffer overload below:
//@native def clCreateBuffer(context:Pointer, flags:Long, size:NativeSize, hostPtr:Pointer, errcodeRet:Pointer): Pointer
@native def clCreateBuffer(context:Pointer, flags:Long, size:NativeSize, hostPtr:ByteBuffer, errcodeRet:Pointer): Pointer
@native def clRetainMemObject(memobj:Pointer): Int
@native def clReleaseMemObject(memobj:Pointer): Int
@native def clGetMemObjectInfo(memobj:Pointer, paramName:Int, paramValueSize:NativeSize, paramValue:Pointer, paramValueSizeRet:Pointer): Int
/* Program Object APIs */
@native def clCreateProgramWithSource(context:Pointer, count:Int, strings:StringArray, lengths:Pointer, errcodeRet:Pointer): Pointer
@native def clRetainProgram(program:Pointer): Int
@native def clReleaseProgram(program:Pointer): Int
@native def clBuildProgram(program:Pointer, numDevices:Int, deviceList:Pointer, options:String, pfnNotify:Pointer, userData:Pointer): Int
@native def clGetProgramInfo(program:Pointer, paramName:Int, paramValueSize:NativeSize, paramValue:Pointer, paramValueSizeRet:Pointer): Int
@native def clUnloadCompiler():Int
@native def clGetProgramBuildInfo(program:Pointer, device:Pointer, paramName:Int, paramValueSize:NativeSize, paramValue:Pointer, paramValueSizeRet:Pointer): Int
/* Kernel Object APIs */
@native def clCreateKernel(program:Pointer, kernelName:String, errcodeRet:Pointer): Pointer
@native def clRetainKernel(kernel:Pointer): Int
@native def clReleaseKernel(kernel:Pointer): Int
@native def clGetKernelInfo(kernel:Pointer, paramName:Int, paramValueSize:NativeSize, paramValue:Pointer, paramValueSizeRet:Pointer): Int
@native def clGetKernelWorkGroupInfo(kernel:Pointer, device:Pointer, paramName:Int, paramValueSize:NativeSize, paramValue:Pointer, paramValueSizeRet:Pointer): Int
@native def clCreateKernelsInProgram(program:Pointer, numKernels:Int, kernels:Pointer, numkernelsRet:Pointer): Int
@native def clSetKernelArg(kernel:Pointer, argIndex:Int, argSize:NativeSize, argValue:Pointer): Int
/* Event Object APIs */
@native def clRetainEvent(event:Pointer): Int
@native def clReleaseEvent(event:Pointer): Int
@native def clGetEventInfo(event:Pointer, paramName:Int, paramValueSize:NativeSize, paramValue:Pointer, paramValueSizeRet:Pointer): Int
@native def clWaitForEvents(numEvents:Int, eventList:Pointer): Int
/* Profiling APIs */
@native def clGetEventProfilingInfo(event:Pointer, paramName:Int, paramValueSize:NativeSize, paramValue:Pointer, paramValueSizeRet:Pointer): Int
/* Flush and Finish APIs */
@native def clFlush(commandQueue:Pointer): Int
@native def clFinish(commandQueue:Pointer): Int
/* Enqueued Commands APIs */
@native def clEnqueueReadBuffer(commandQueue:Pointer, buffer:Pointer, blockingRead:Int, offset:NativeSize, cb:NativeSize, ptr:Pointer, numEventsInWaitList:Int, eventWaitList:Pointer, event:Pointer): Int
@native def clEnqueueWriteBuffer(commandQueue:Pointer, buffer:Pointer, blockingWrite:Int, offset:NativeSize, cb:NativeSize, ptr:Pointer, numEventsInWaitList:Int, eventWaitList:Pointer, event:Pointer): Int
@native def clEnqueueCopyBuffer(commandQueue:Pointer, srcBuffer:Pointer, dstBuffer:Pointer, srcOffset:NativeSize, dstOffset:NativeSize, cb:NativeSize, numEventsInWaitList:Int, eventWaitList:Pointer, event:Pointer): Int
@native def clEnqueueMapBuffer(commandQueue:Pointer, buffer:Pointer, blockingMap:Int, mapFlags:Long, offset:NativeSize, cb:NativeSize, numEventsInWaitList:Int, eventWaitList:Pointer, event:Pointer, errcodeRet:Pointer): Pointer
@native def clEnqueueUnmapMemObject(commandQueue:Pointer, memobj:Pointer, mappedPtr:Pointer, numEventsInWaitList:Int, eventWaitList:Pointer, event:Pointer): Int
@native def clEnqueueNDRangeKernel(commandQueue:Pointer, kernel:Pointer, workDim:Int, globalWorkOffset:Pointer, globalWorkSize:Pointer, localWorkSize:Pointer, numEventsInWaitList:Int, eventWaitList:Pointer, event:Pointer): Int
@native def clEnqueueWaitForEvents(commandQueue:Pointer, numEvents:Int, eventList:Pointer): Int
@native def clEnqueueTask(commandQueue:Pointer, kernel:Pointer, numEventsInWaitList:Int, eventWaitList:Pointer, event:Pointer): Int
@native def clEnqueueMarker(commandQueue:Pointer, event:Pointer): Int
@native def clEnqueueBarrier(commandQueue:Pointer): Int
/* Image APIs */
@native def clGetImageInfo(image:Pointer, paramName:Int, paramValueSize:NativeSize, paramValue:Pointer, paramValueSizeRet:Pointer): Int
/* Sampler APIs */
@native def clCreateSampler(context:Pointer, normalizedCoords:Int, addressingMode:Int, filterMode:Int, errcodeRet:Pointer): Pointer
@native def clGetSamplerInfo(sampler:Pointer, paramName:Int, paramValueSize:NativeSize, paramValue:Pointer, paramValueSizeRet:Pointer): Int
@native def clRetainSampler(sampler:Pointer): Int
@native def clReleaseSampler(sampler:Pointer): Int
// Implicit widenings so plain Int/Long values can be passed where the C API
// expects size_t (modelled by NativeSize), and converted back to Long.
implicit def int2nativesize(i:Int): NativeSize = new NativeSize(i)
implicit def long2nativesize(i:Long): NativeSize = new NativeSize(i)
implicit def nativesize2long(i:NativeSize): Long = i.longValue()
/**
 * Marshals a sequence of JNA pointers into a freshly allocated contiguous
 * native array and returns a Pointer to its first element.
 * A null sequence maps to a null pointer.
 */
implicit def arraypointer2pointer(a: Seq[Pointer]): Pointer =
  if (a == null) null
  else {
    val block = new Memory(a.length * Pointer.SIZE)
    a.indices.foreach(i => block.setPointer(Pointer.SIZE * i, a(i)))
    block
  }
/**
 * Marshals a sequence of NativeSize values into a freshly allocated
 * contiguous native array (one slot of NativeSize.SIZE bytes per element)
 * and returns a Pointer to its first element.
 * A null sequence maps to a null pointer.
 */
implicit def arraynativesize2pointer(a: Seq[NativeSize]): Pointer =
  if (a == null) null
  else {
    val block = new Memory(a.length * NativeSize.SIZE)
    a.indices.foreach(i => block.setLong(NativeSize.SIZE * i, a(i).value))
    block
  }
/** Wraps a Scala Array[String] as a JNA StringArray; null maps to null. */
implicit def stringarray2stringarray(a: Array[String]): StringArray =
  Option(a).map(arr => new StringArray(arr)).orNull
}
/*
clCreateProgramWithBinary(cl_context /* context */,
cl_uint /* num_devices */,
const cl_device_id * /* device_list */,
const size_t * /* lengths */,
const unsigned char ** /* binaries */,
cl_int * /* binary_status */,
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
/* Enqueued Commands APIs */
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueReadImage(cl_command_queue /* command_queue */,
cl_mem /* image */,
cl_bool /* blocking_read */,
const size_t * /* origin[3] */,
const size_t * /* region[3] */,
size_t /* row_pitch */,
size_t /* slice_pitch */,
void * /* ptr */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueWriteImage(cl_command_queue /* command_queue */,
cl_mem /* image */,
cl_bool /* blocking_write */,
const size_t * /* origin[3] */,
const size_t * /* region[3] */,
size_t /* input_row_pitch */,
size_t /* input_slice_pitch */,
const void * /* ptr */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueCopyImage(cl_command_queue /* command_queue */,
cl_mem /* src_image */,
cl_mem /* dst_image */,
const size_t * /* src_origin[3] */,
const size_t * /* dst_origin[3] */,
const size_t * /* region[3] */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueCopyImageToBuffer(cl_command_queue /* command_queue */,
cl_mem /* src_image */,
cl_mem /* dst_buffer */,
const size_t * /* src_origin[3] */,
const size_t * /* region[3] */,
size_t /* dst_offset */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueCopyBufferToImage(cl_command_queue /* command_queue */,
cl_mem /* src_buffer */,
cl_mem /* dst_image */,
size_t /* src_offset */,
const size_t * /* dst_origin[3] */,
const size_t * /* region[3] */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY void * CL_API_CALL
clEnqueueMapImage(cl_command_queue /* command_queue */,
cl_mem /* image */,
cl_bool /* blocking_map */,
cl_map_flags /* map_flags */,
const size_t * /* origin[3] */,
const size_t * /* region[3] */,
size_t * /* image_row_pitch */,
size_t * /* image_slice_pitch */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */,
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueNativeKernel(cl_command_queue /* command_queue */,
void (*user_func)(void *),
void * /* args */,
size_t /* cb_args */,
cl_uint /* num_mem_objects */,
const cl_mem * /* mem_list */,
const void ** /* args_mem_loc */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateImage2D(cl_context /* context */,
cl_mem_flags /* flags */,
const cl_image_format * /* image_format */,
size_t /* image_width */,
size_t /* image_height */,
size_t /* image_row_pitch */,
void * /* host_ptr */,
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateImage3D(cl_context /* context */,
cl_mem_flags /* flags */,
const cl_image_format * /* image_format */,
size_t /* image_width */,
size_t /* image_height */,
size_t /* image_depth */,
size_t /* image_row_pitch */,
size_t /* image_slice_pitch */,
void * /* host_ptr */,
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetSupportedImageFormats(cl_context /* context */,
cl_mem_flags /* flags */,
cl_mem_object_type /* image_type */,
cl_uint /* num_entries */,
cl_image_format * /* image_formats */,
cl_uint * /* num_image_formats */) CL_API_SUFFIX__VERSION_1_0;
/* Extension function access
*
* Returns the extension function address for the given function name,
* or NULL if a valid function can not be found. The client must
* check to make sure the address is not NULL, before using or
* calling the returned function address.
*/
extern CL_API_ENTRY void * CL_API_CALL clGetExtensionFunctionAddress(const char * /* func_name */) CL_API_SUFFIX__VERSION_1_0;
*/
| hsyl20/Scala_ViperVM | src/main/scala/org/vipervm/bindings/opencl/Wrapper.scala | Scala | gpl-3.0 | 15,367 |
/** *****************************************************************************
* Copyright 2017 Capital One Services, LLC and Bitwise, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ******************************************************************************/
package hydrograph.engine.spark.components
import hydrograph.engine.core.component.entity.FilterEntity
import hydrograph.engine.expression.userfunctions.FilterForExpression
import hydrograph.engine.spark.components.base.OperationComponentBase
import hydrograph.engine.spark.components.handler.OperationHelper
import hydrograph.engine.spark.components.platform.BaseComponentParams
import hydrograph.engine.transformation.userfunctions.base.FilterBase
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{DataFrame, Row}
import org.slf4j.LoggerFactory
import scala.collection.JavaConverters._
/**
 * Spark implementation of the hydrograph Filter component: evaluates the
 * configured filter operation against every input row and routes rows to the
 * component's out/unused sockets.
 *
 * @author Bitwise
 */
class FilterComponent(filterEntity: FilterEntity, componentsParams: BaseComponentParams) extends
  OperationComponentBase with OperationHelper[FilterBase] with Serializable {

  val LOG = LoggerFactory.getLogger(classOf[FilterComponent])

  /**
   * Builds one filtered DataFrame per out socket of this component.
   *
   * Rows for which the filter's `isRemove` returns true go to non-"out"
   * (unused) sockets; the remaining rows go to "out" sockets.
   *
   * @return map from socket id to the DataFrame routed to that socket
   * @throws InitializationException if an expression filter fails during prepare
   */
  override def createComponent(): Map[String, DataFrame] = {
    LOG.info("Filter Component Called with input Schema [in the form of(Column_name,DataType,IsNullable)]: {}", componentsParams.getDataFrame().schema)
    val inputSchema: StructType = componentsParams.getDataFrame().schema
    // Filtering never reshapes rows, so output schema equals input schema.
    val outputSchema = inputSchema

    // A filter component carries exactly one operation; take its wrapper.
    val filterSparkOperations = initializeOperationList[FilterForExpression](filterEntity.getOperationsList,
      inputSchema, outputSchema).head
    val filterClass = filterSparkOperations.baseClassInstance

    // Expression-based filters need the validation API wired in and a prepare
    // call before the first row is evaluated; other FilterBase impls need neither.
    filterClass match {
      case expression: FilterForExpression =>
        expression.setValidationAPI(filterSparkOperations.validatioinAPI)
        try {
          expression.callPrepare(filterSparkOperations.fieldName, filterSparkOperations.fieldType)
        } catch {
          case e: Exception =>
            LOG.error("Exception in callPrepare method of: " + expression.getClass + " and message is " + e.getMessage, e)
            throw new InitializationException("Exception in initialization of: " + expression.getClass + " and message is " + e.getMessage, e)
        }
      case _ =>
    }

    val opProps = filterSparkOperations.operationEntity.getOperationProperties
    LOG.info("Operation Properties: " + opProps)
    if (opProps != null) FilterBase.properties.putAll(opProps)

    // The remove-predicate is identical for every socket, so build it once
    // outside the per-socket loop.
    val isFilter = (row: Row) => filterClass.isRemove(filterSparkOperations.inputRow.setRow(row))

    // Build the socket-id -> DataFrame map immutably instead of mutating a var.
    filterEntity.getOutSocketList.asScala.map { outSocket =>
      LOG.info("Creating filter Component for '" + filterEntity.getComponentId + "' for socket: '"
        + outSocket.getSocketId + "' of type: '" + outSocket.getSocketType + "'")
      val socketDF =
        if (outSocket.getSocketType.equalsIgnoreCase("out"))
          componentsParams.getDataFrame().filter(row => !isFilter(row))
        else
          componentsParams.getDataFrame().filter(row => isFilter(row))
      outSocket.getSocketId -> socketDF
    }.toMap
  }
}
| capitalone/Hydrograph | hydrograph.engine/hydrograph.engine.spark/src/main/scala/hydrograph/engine/spark/components/FilterComponent.scala | Scala | apache-2.0 | 3,789 |
package org.mbari.odss.services.timeline
import com.mongodb.casbah.Imports._
import java.io.File
/*
in ODSS
{
"__v" : 0,
"_id" : ObjectId("52203575a0b9d4224800001f"),
"abbreviation" : "RC",
"color" : "#FF6600",
"name" : "R_CARSON",
"trackingDBID" : 1,
"typeName" : "ship"
}
*/
/**
 * A tracked platform as stored in the ODSS platform collection (see the
 * sample document in the comment above; e.g. typeName "ship").
 * All fields besides `name` are optional.
 */
case class Platform(id: Option[String],
                    name: String,
                    abbreviation: Option[String],
                    typeName: Option[String],
                    color: Option[String]
                   )

/**
 * A timeline token: a state interval on one platform's timeline.
 * `start`/`end` are kept as strings -- presumably timestamps, but the exact
 * format is not enforced here (TODO confirm against the REST layer).
 */
case class Token(id: Option[String],
                 platform_id: String,
                 start: String,
                 end: String,
                 state: String,
                 description: String)

/** A named version of the timeline data with created/updated timestamps (as strings). */
case class Version(name: String,
                   description: String,
                   created: String,
                   updated: String
                   //TODO author: ... link to authors collection
                  )

/** A named time period with start and end endpoints (kept as strings). */
case class Period(id: Option[String],
                  name: String,
                  start: String,
                  end: String)

/**
 * Application context: the configuration file plus the MongoDB (Casbah)
 * collections used by the timeline service.
 */
class App(
  val configFile: File,
  val platformColl: MongoCollection,
  val tokenColl: MongoCollection,
  val periodColl: MongoCollection)
| carueda/odssplatim-rest | src/main/scala/org/mbari/odss/services/timeline/defs.scala | Scala | apache-2.0 | 1,372 |
package io.sphere.json
import com.fasterxml.jackson.databind.DeserializationFeature.{
USE_BIG_DECIMAL_FOR_FLOATS,
USE_BIG_INTEGER_FOR_INTS
}
import com.fasterxml.jackson.databind.ObjectMapper
import org.json4s.jackson.{Json4sScalaModule, JsonMethods}
// Extends the default json4s JsonMethods so we can install a differently
// configured Jackson parser: USE_BIG_INTEGER_FOR_INTS and
// USE_BIG_DECIMAL_FOR_FLOATS are both switched off.
private object SphereJsonParser extends JsonMethods {

  /** Jackson mapper with the Json4s Scala module registered and big-number coercions disabled. */
  override val mapper: ObjectMapper = {
    val jsonMapper = new ObjectMapper()
    jsonMapper.registerModule(new Json4sScalaModule)
    jsonMapper.configure(USE_BIG_DECIMAL_FOR_FLOATS, false)
    jsonMapper.configure(USE_BIG_INTEGER_FOR_INTS, false)
    jsonMapper
  }
}
| sphereio/sphere-scala-libs | json/json-core/src/main/scala/io/sphere/json/SphereJsonParser.scala | Scala | apache-2.0 | 621 |
/*
* Copyright 2019 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.emailaddress
/** An email address whose mailbox part has been (partially) masked with '*' characters. */
trait ObfuscatedEmailAddress {
  // The obfuscated address text.
  val value: String
  override def toString: String = value
}
object ObfuscatedEmailAddress {
  // Mailboxes of one or two characters are starred out entirely.
  final private val shortMailbox = "(.{1,2})".r
  // Longer mailboxes keep their first and last character.
  final private val longMailbox = "(.)(.*)(.)".r

  import EmailAddress.validEmail

  implicit def obfuscatedEmailToString(e: ObfuscatedEmailAddress): String = e.value

  /**
   * Obfuscates a valid email address by masking (part of) its mailbox:
   * 1-2 character mailboxes become all asterisks, longer ones keep their
   * first and last character around an asterisk run. The domain is untouched.
   *
   * Throws IllegalArgumentException when the input is not a valid address.
   */
  def apply(plainEmailAddress: String): ObfuscatedEmailAddress = new ObfuscatedEmailAddress {
    val value: String = plainEmailAddress match {
      case validEmail(shortMailbox(whole), domain) =>
        obscure(whole) + "@" + domain
      case validEmail(longMailbox(first, middle, last), domain) =>
        first + obscure(middle) + last + "@" + domain
      case invalid =>
        throw new IllegalArgumentException(s"Cannot obfuscate invalid email address '$invalid'")
    }
  }

  // Replace every character with an asterisk, preserving length.
  private def obscure(text: String): String = text.map(_ => '*')
}
| hmrc/emailaddress | src/main/scala/uk/gov/hmrc/emailaddress/ObfuscatedEmailAddress.scala | Scala | apache-2.0 | 1,539 |
/*
* @author Philip Stutz
*
* Copyright 2014 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.interfaces
import com.signalcollect.Vertex
import com.signalcollect.GraphEditor
import com.signalcollect.Edge
trait ExistingVertexHandler[Id, Signal] {

  /**
   * Sets the function that can intervene when a vertex with the same ID is added
   * repeatedly. The new vertex will be thrown out for sure, but some of its
   * information might be added to the existing vertex.
   *
   * @param existing the vertex already present in the graph
   * @param failedVertexAddition the newly added vertex that will be discarded
   * @param ge graph editor with which the existing vertex may be modified
   * @note By default the addition of a vertex is ignored if an existing vertex has the same ID.
   */
  // Explicit `: Unit` result type; the former procedure-style declaration
  // (no result type) is deprecated Scala syntax.
  def mergeVertices(existing: Vertex[Id, _, Id, Signal], failedVertexAddition: Vertex[Id, _, Id, Signal], ge: GraphEditor[Id, Signal]): Unit
}
trait UndeliverableSignalHandler[@specialized(Int, Long) Id, Signal] {

  /**
   * Sets the function that handles signals that could not be delivered to a vertex.
   *
   * @param signal the signal that could not be delivered
   * @param inexistentTargetId the ID of the missing target vertex
   * @param senderId the ID of the sending vertex, if known
   * @param graphEditor graph editor with which corrective action can be taken
   * @note By default an exception is thrown when a signal is not deliverable. The handler function
   * receives the signal and an instance of GraphEditor as parameters in order to take some
   * action that handles this case.
   */
  // Explicit `: Unit` result type; the former procedure-style declaration
  // (no result type) is deprecated Scala syntax.
  def vertexForSignalNotFound(signal: Signal, inexistentTargetId: Id, senderId: Option[Id], graphEditor: GraphEditor[Id, Signal]): Unit
}
trait EdgeAddedToNonExistentVertexHandler[@specialized(Int, Long) Id, Signal] {

  /**
   * Sets the handler that gets triggered, when the vertex to which an edge should be added does not exist.
   * Optionally returns the vertex that should be created and to which the edge can then be added.
   *
   * @param edge the edge that could not be added
   * @param vertexId the ID of the missing target vertex
   * @param graphEditor graph editor available to the handler
   * @return the vertex to create (the edge is then added to it), or None
   * @note By default an exception is thrown when an edge cannot be added. The handler function
   * receives the edge, the id of the vertex that does not exist and an instance of GraphEditor as parameters in order to
   * potentially create a vertex to which the edge should be added.
   */
  def handleImpossibleEdgeAddition(edge: Edge[Id], vertexId: Id, graphEditor: GraphEditor[Id, Signal]): Option[Vertex[Id, _, Id, Signal]]
}
| mageru/signal-collect | src/main/scala/com/signalcollect/interfaces/Handlers.scala | Scala | apache-2.0 | 2,590 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.logical
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.planner.plan.optimize.program.{BatchOptimizeContext, FlinkChainedProgram, FlinkHepRuleSetProgramBuilder, HEP_RULES_EXECUTION_TYPE}
import org.apache.flink.table.planner.utils.TableTestBase
import org.apache.calcite.plan.hep.HepMatchOrder
import org.apache.calcite.tools.RuleSets
import org.junit.{Before, Test}
/**
 * Tests for [[ReplaceIntersectWithSemiJoinRule]]: each case verifies the
 * optimized plan produced when INTERSECT is rewritten into a semi join.
 */
class ReplaceIntersectWithSemiJoinRuleTest extends TableTestBase {

  private val util = batchTestUtil()

  @Before
  def setup(): Unit = {
    // Install a batch optimizer program that runs only the rule under test,
    // as a single HEP rule sequence matched bottom-up.
    val ruleProgram = FlinkHepRuleSetProgramBuilder.newBuilder
      .setHepRulesExecutionType(HEP_RULES_EXECUTION_TYPE.RULE_SEQUENCE)
      .setHepMatchOrder(HepMatchOrder.BOTTOM_UP)
      .add(RuleSets.ofList(ReplaceIntersectWithSemiJoinRule.INSTANCE))
      .build()
    val programs = new FlinkChainedProgram[BatchOptimizeContext]()
    programs.addLast("rules", ruleProgram)
    util.replaceBatchProgram(programs)

    // Two three-column sources with disjoint field names.
    util.addTableSource[(Int, Long, String)]("T1", 'a, 'b, 'c)
    util.addTableSource[(Int, Long, String)]("T2", 'd, 'e, 'f)
  }

  @Test
  def testIntersect(): Unit = {
    util.verifyPlan("SELECT c FROM T1 INTERSECT SELECT f FROM T2")
  }

  @Test
  def testIntersectWithFilter(): Unit = {
    util.verifyPlan("SELECT c FROM ((SELECT * FROM T1) INTERSECT (SELECT * FROM T2)) WHERE a > 1")
  }

  @Test
  def testIntersectLeftIsEmpty(): Unit = {
    util.verifyPlan("SELECT c FROM T1 WHERE 1=0 INTERSECT SELECT f FROM T2")
  }

  @Test
  def testIntersectRightIsEmpty(): Unit = {
    util.verifyPlan("SELECT c FROM T1 INTERSECT SELECT f FROM T2 WHERE 1=0")
  }
}
| tzulitai/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/rules/logical/ReplaceIntersectWithSemiJoinRuleTest.scala | Scala | apache-2.0 | 2,559 |
package org.scaladebugger.api.virtualmachines
import org.scaladebugger.api.profiles.traits.DebugProfile
import org.scalamock.scalatest.MockFactory
import org.scalatest.{FunSpec, Matchers, ParallelTestExecution}
import org.scaladebugger.api.lowlevel.breakpoints.{DummyBreakpointManager, PendingBreakpointSupport}
import org.scaladebugger.api.lowlevel.classes.{DummyClassPrepareManager, DummyClassUnloadManager, PendingClassPrepareSupport, PendingClassUnloadSupport}
import org.scaladebugger.api.lowlevel.events.{DummyEventManager, PendingEventHandlerSupport}
import org.scaladebugger.api.lowlevel.exceptions.{DummyExceptionManager, PendingExceptionSupport}
import org.scaladebugger.api.lowlevel.methods.{DummyMethodEntryManager, DummyMethodExitManager, PendingMethodEntrySupport, PendingMethodExitSupport}
import org.scaladebugger.api.lowlevel.monitors._
import org.scaladebugger.api.lowlevel.steps.{DummyStepManager, PendingStepSupport}
import org.scaladebugger.api.lowlevel.threads.{DummyThreadDeathManager, DummyThreadStartManager, PendingThreadDeathSupport, PendingThreadStartSupport}
import org.scaladebugger.api.lowlevel.vm.{DummyVMDeathManager, PendingVMDeathSupport}
import org.scaladebugger.api.lowlevel.watchpoints.{DummyAccessWatchpointManager, DummyModificationWatchpointManager, PendingAccessWatchpointSupport, PendingModificationWatchpointSupport}
import org.scaladebugger.api.profiles.ProfileManager
import org.scaladebugger.test.helpers.ParallelMockFunSpec
/**
 * Verifies that DummyScalaVirtualMachine behaves as an inert placeholder:
 * lifecycle calls are no-ops, state queries report not-started/not-initialized,
 * the low-level container holds dummy/pending manager implementations, and
 * profile register/unregister/retrieve delegate to the underlying ProfileManager.
 */
class DummyScalaVirtualMachineSpec extends ParallelMockFunSpec
{
  private val mockScalaVirtualMachineManager = mock[ScalaVirtualMachineManager]
  private val mockProfileManager = mock[ProfileManager]
  // System under test, wired to the mocked manager and profile manager.
  private val dummyScalaVirtualMachine = new DummyScalaVirtualMachine(
    mockScalaVirtualMachineManager, mockProfileManager
  )
  describe("DummyScalaVirtualMachine") {
    describe("#initialize") {
      it("should do nothing") {
        dummyScalaVirtualMachine.initialize()
      }
    }
    describe("#startProcessingEvents") {
      it("should do nothing") {
        dummyScalaVirtualMachine.startProcessingEvents()
      }
    }
    describe("#stopProcessingEvents") {
      it("should do nothing") {
        dummyScalaVirtualMachine.stopProcessingEvents()
      }
    }
    describe("#isProcessingEvents") {
      it("should return false") {
        val expected = false
        val actual = dummyScalaVirtualMachine.isProcessingEvents
        actual should be (expected)
      }
    }
    describe("#isInitialized") {
      it("should return false") {
        val expected = false
        val actual = dummyScalaVirtualMachine.isInitialized
        actual should be (expected)
      }
    }
    describe("#isStarted") {
      it("should return false") {
        val expected = false
        val actual = dummyScalaVirtualMachine.isStarted
        actual should be (expected)
      }
    }
    describe("#cache") {
      it("should return the same cache instance") {
        val expected = dummyScalaVirtualMachine.cache
        expected should not be (null)
        val actual = dummyScalaVirtualMachine.cache
        actual should be (expected)
      }
    }
    describe("#lowlevel") {
      it("should return a container of dummy managers") {
        val managerContainer = dummyScalaVirtualMachine.lowlevel
        // Each manager must be both the dummy implementation and pending-capable.
        // TODO: Provide a less hard-coded test (this was pulled from manager container spec)
        managerContainer.accessWatchpointManager shouldBe a [DummyAccessWatchpointManager]
        managerContainer.accessWatchpointManager shouldBe a [PendingAccessWatchpointSupport]
        managerContainer.breakpointManager shouldBe a [DummyBreakpointManager]
        managerContainer.breakpointManager shouldBe a [PendingBreakpointSupport]
        managerContainer.classManager should be (null)
        managerContainer.classPrepareManager shouldBe a [DummyClassPrepareManager]
        managerContainer.classPrepareManager shouldBe a [PendingClassPrepareSupport]
        managerContainer.classUnloadManager shouldBe a [DummyClassUnloadManager]
        managerContainer.classUnloadManager shouldBe a [PendingClassUnloadSupport]
        managerContainer.eventManager shouldBe a [DummyEventManager]
        managerContainer.eventManager shouldBe a [PendingEventHandlerSupport]
        managerContainer.exceptionManager shouldBe a [DummyExceptionManager]
        managerContainer.exceptionManager shouldBe a [PendingExceptionSupport]
        managerContainer.methodEntryManager shouldBe a [DummyMethodEntryManager]
        managerContainer.methodEntryManager shouldBe a [PendingMethodEntrySupport]
        managerContainer.methodExitManager shouldBe a [DummyMethodExitManager]
        managerContainer.methodExitManager shouldBe a [PendingMethodExitSupport]
        managerContainer.modificationWatchpointManager shouldBe a [DummyModificationWatchpointManager]
        managerContainer.modificationWatchpointManager shouldBe a [PendingModificationWatchpointSupport]
        managerContainer.monitorContendedEnteredManager shouldBe a [DummyMonitorContendedEnteredManager]
        managerContainer.monitorContendedEnteredManager shouldBe a [PendingMonitorContendedEnteredSupport]
        managerContainer.monitorContendedEnterManager shouldBe a [DummyMonitorContendedEnterManager]
        managerContainer.monitorContendedEnterManager shouldBe a [PendingMonitorContendedEnterSupport]
        managerContainer.monitorWaitedManager shouldBe a [DummyMonitorWaitedManager]
        managerContainer.monitorWaitedManager shouldBe a [PendingMonitorWaitedSupport]
        managerContainer.monitorWaitManager shouldBe a [DummyMonitorWaitManager]
        managerContainer.monitorWaitManager shouldBe a [PendingMonitorWaitSupport]
        managerContainer.requestManager should be (null)
        managerContainer.stepManager shouldBe a [DummyStepManager]
        managerContainer.stepManager shouldBe a [PendingStepSupport]
        managerContainer.threadDeathManager shouldBe a [DummyThreadDeathManager]
        managerContainer.threadDeathManager shouldBe a [PendingThreadDeathSupport]
        managerContainer.threadStartManager shouldBe a [DummyThreadStartManager]
        managerContainer.threadStartManager shouldBe a [PendingThreadStartSupport]
        managerContainer.vmDeathManager shouldBe a [DummyVMDeathManager]
        managerContainer.vmDeathManager shouldBe a [PendingVMDeathSupport]
      }
      it("should return the same container each time") {
        val expected = dummyScalaVirtualMachine.lowlevel
        val actual = dummyScalaVirtualMachine.lowlevel
        actual should be (expected)
      }
    }
    describe("#uniqueId") {
      it("should return a non-empty string") {
        dummyScalaVirtualMachine.uniqueId should not be (empty)
      }
      it("should return the same id each time") {
        val expected = dummyScalaVirtualMachine.uniqueId
        val actual = dummyScalaVirtualMachine.uniqueId
        actual should be (expected)
      }
    }
    describe("#underlyingVirtualMachine") {
      it("should return null") {
        val expected = null
        val actual = dummyScalaVirtualMachine.underlyingVirtualMachine
        actual should be (expected)
      }
    }
    describe("#resume") {
      it("should do nothing") {
        dummyScalaVirtualMachine.resume()
      }
    }
    describe("#suspend") {
      it("should do nothing") {
        dummyScalaVirtualMachine.suspend()
      }
    }
    describe("#register") {
      it("should invoke the underlying profile manager") {
        val testName = "some name"
        val testProfile = mock[DebugProfile]
        val expected = Some(testProfile)
        (mockProfileManager.register _).expects(testName, testProfile)
          .returning(expected).once()
        val actual = dummyScalaVirtualMachine.register(testName, testProfile)
        actual should be (expected)
      }
    }
    describe("#unregister") {
      it("should invoke the underlying profile manager") {
        val testName = "some name"
        val testProfile = mock[DebugProfile]
        val expected = Some(testProfile)
        (mockProfileManager.unregister _).expects(testName)
          .returning(expected).once()
        val actual = dummyScalaVirtualMachine.unregister(testName)
        actual should be (expected)
      }
    }
    describe("#retrieve") {
      it("should invoke the underlying profile manager") {
        val testName = "some name"
        val testProfile = mock[DebugProfile]
        val expected = Some(testProfile)
        (mockProfileManager.retrieve _).expects(testName)
          .returning(expected).once()
        val actual = dummyScalaVirtualMachine.retrieve(testName)
        actual should be (expected)
      }
    }
  }
}
| ensime/scala-debugger | scala-debugger-api/src/test/scala/org/scaladebugger/api/virtualmachines/DummyScalaVirtualMachineSpec.scala | Scala | apache-2.0 | 8,753 |
package dotty.tools
package dotc
package transform
import MegaPhase._
import core._
import Symbols._
import SymDenotations._
import Contexts._
import Types._
import Flags._
import Decorators._
import DenotTransformers._
import core.StdNames.nme
import ast.Trees._
import reporting.trace
/** Abstract base class of ByNameClosures and ElimByName, factoring out the
 * common functionality to transform arguments of by-name parameters.
 */
abstract class TransformByNameApply extends MiniPhase { thisPhase: DenotTransformer =>
  import ast.tpd._

  /** The info of the tree's symbol before it is potentially transformed in this phase */
  private def originalDenotation(tree: Tree)(implicit ctx: Context) =
    tree.symbol.denot(ctx.withPhase(thisPhase))

  /** If denotation had an ExprType before, it now gets a function type */
  protected def exprBecomesFunction(symd: SymDenotation)(implicit ctx: Context): Boolean =
    symd.is(Param) || symd.is(ParamAccessor, butNot = Method)

  // True iff `tree` refers to a symbol that had a by-name type (ExprType)
  // before this phase and whose type is being turned into a function type.
  protected def isByNameRef(tree: Tree)(implicit ctx: Context): Boolean = {
    val origDenot = originalDenotation(tree)
    origDenot.info.isInstanceOf[ExprType] && exprBecomesFunction(origDenot)
  }

  // Subclass hook: build the closure that delays evaluation of `arg`.
  // The base implementation signals that it must be overridden.
  def mkByNameClosure(arg: Tree, argType: Type)(implicit ctx: Context): Tree = unsupported(i"mkClosure($arg)")

  // Rewrites each argument passed to a by-name (ExprType) formal parameter.
  override def transformApply(tree: Apply)(implicit ctx: Context): Tree =
    trace(s"transforming ${tree.show} at phase ${ctx.phase}", show = true) {

      def transformArg(arg: Tree, formal: Type): Tree = formal.dealias match {
        case formalExpr: ExprType =>
          var argType = arg.tpe.widenIfUnstable
          // For bottom-typed arguments fall back to the formal parameter's type.
          if (defn.isBottomType(argType)) argType = formal.widenExpr
          // Wraps a tree in a call to the compiler-defined `cbnArg` marker,
          // instantiated at `argType`.
          def wrap(arg: Tree) =
            ref(defn.cbnArg).appliedToType(argType).appliedTo(arg).withSpan(arg.span)
          arg match {
            case Apply(Select(qual, nme.apply), Nil)
            if qual.tpe.derivesFrom(defn.FunctionClass(0)) && isPureExpr(qual) =>
              // `f()` where `f` is a pure Function0: reuse `f` directly
              // instead of building a fresh closure around the application.
              wrap(qual)
            case _ =>
              // Already-by-name references and cbnArg itself pass through
              // unchanged; everything else gets wrapped in a closure.
              if (isByNameRef(arg) || arg.symbol == defn.cbnArg) arg
              else wrap(mkByNameClosure(arg, argType))
          }
        case _ =>
          arg
      }

      val mt @ MethodType(_) = tree.fun.tpe.widen
      // Transform arguments against their formal parameter types, preserving
      // tree identity where nothing changes (zipWithConserve).
      val args1 = tree.args.zipWithConserve(mt.paramInfos)(transformArg)
      cpy.Apply(tree)(tree.fun, args1)
    }
}
| som-snytt/dotty | compiler/src/dotty/tools/dotc/transform/TransformByNameApply.scala | Scala | apache-2.0 | 2,353 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v2
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalInteger, Linked}
import uk.gov.hmrc.ct.computations.CP257
case class B4(value: Option[Int]) extends CtBoxIdentifier("Trading losses brought forward claimed against profits") with CtOptionalInteger
/** Derives box B4 from computation box CP257 by copying its optional value. */
object B4 extends Linked[CP257, B4] {
  override def apply(source: CP257): B4 = B4(source.value)
}
| ahudspith-equalexperts/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v2/B4.scala | Scala | apache-2.0 | 992 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.nio.ByteBuffer
import java.lang.{Long => JLong}
import java.util.{Collections, Properties}
import java.util
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicInteger
import kafka.admin.{AdminUtils, RackAwareMode}
import kafka.api.{ApiVersion, KAFKA_0_11_0_IV0}
import kafka.cluster.Partition
import kafka.common.{OffsetAndMetadata, OffsetMetadata}
import kafka.server.QuotaFactory.{QuotaManagers, UnboundedQuota}
import kafka.controller.KafkaController
import kafka.coordinator.group.{GroupCoordinator, JoinGroupResult}
import kafka.coordinator.transaction.{InitProducerIdResult, TransactionCoordinator}
import kafka.log.{Log, LogManager, TimestampOffset}
import kafka.network.RequestChannel
import kafka.network.RequestChannel.{CloseConnectionAction, NoOpAction, SendAction}
import kafka.security.SecurityUtils
import kafka.security.auth.{Resource, _}
import kafka.utils.{CoreUtils, Logging}
import kafka.zk.{AdminZkClient, KafkaZkClient}
import org.apache.kafka.common.errors._
import org.apache.kafka.common.internals.FatalExitError
import org.apache.kafka.common.internals.Topic.{GROUP_METADATA_TOPIC_NAME, TRANSACTION_STATE_TOPIC_NAME, isInternal}
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.record.{ControlRecordType, EndTransactionMarker, MemoryRecords, RecordBatch, RecordsProcessingStats}
import org.apache.kafka.common.requests.CreateAclsResponse.AclCreationResponse
import org.apache.kafka.common.requests.DeleteAclsResponse.{AclDeletionResult, AclFilterResponse}
import org.apache.kafka.common.requests.{Resource => RResource, ResourceType => RResourceType, _}
import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse
import org.apache.kafka.common.utils.{Time, Utils}
import org.apache.kafka.common.{Node, TopicPartition}
import org.apache.kafka.common.requests.{SaslAuthenticateResponse, SaslHandshakeResponse}
import org.apache.kafka.common.resource.{Resource => AdminResource}
import org.apache.kafka.common.acl.{AccessControlEntry, AclBinding}
import DescribeLogDirsResponse.LogDirInfo
import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol}
import org.apache.kafka.common.security.token.delegation.{DelegationToken, TokenInformation}
import scala.collection._
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.util.{Failure, Success, Try}
/**
* Logic to handle the various Kafka requests
*/
class KafkaApis(val requestChannel: RequestChannel,
val replicaManager: ReplicaManager,
val adminManager: AdminManager,
val groupCoordinator: GroupCoordinator,
val txnCoordinator: TransactionCoordinator,
val controller: KafkaController,
val zkClient: KafkaZkClient,
val brokerId: Int,
val config: KafkaConfig,
val metadataCache: MetadataCache,
val metrics: Metrics,
val authorizer: Option[Authorizer],
val quotas: QuotaManagers,
val fetchManager: FetchManager,
brokerTopicStats: BrokerTopicStats,
val clusterId: String,
time: Time,
val tokenManager: DelegationTokenManager) extends Logging {
this.logIdent = "[KafkaApi-%d] ".format(brokerId)
val adminZkClient = new AdminZkClient(zkClient)
  /** Logs completion of shutdown. No resources are released here; the
   *  collaborating managers are shut down by their respective owners.
   */
  def close() {
    info("Shutdown complete.")
  }
  /**
   * Top-level method that handles all requests and multiplexes to the right api.
   *
   * Each [[ApiKeys]] value maps 1:1 to a dedicated handler. A [[FatalExitError]]
   * is rethrown so the broker process can terminate; any other exception is
   * turned into an error response via `handleError`. The local-completion
   * timestamp is recorded on the request regardless of outcome.
   */
  def handle(request: RequestChannel.Request) {
    try {
      trace(s"Handling request:${request.requestDesc(true)} from connection ${request.context.connectionId};" +
        s"securityProtocol:${request.context.securityProtocol},principal:${request.context.principal}")
      // Dispatch purely on the API key from the request header.
      request.header.apiKey match {
        case ApiKeys.PRODUCE => handleProduceRequest(request)
        case ApiKeys.FETCH => handleFetchRequest(request)
        case ApiKeys.LIST_OFFSETS => handleListOffsetRequest(request)
        case ApiKeys.METADATA => handleTopicMetadataRequest(request)
        case ApiKeys.LEADER_AND_ISR => handleLeaderAndIsrRequest(request)
        case ApiKeys.STOP_REPLICA => handleStopReplicaRequest(request)
        case ApiKeys.UPDATE_METADATA => handleUpdateMetadataRequest(request)
        case ApiKeys.CONTROLLED_SHUTDOWN => handleControlledShutdownRequest(request)
        case ApiKeys.OFFSET_COMMIT => handleOffsetCommitRequest(request)
        case ApiKeys.OFFSET_FETCH => handleOffsetFetchRequest(request)
        case ApiKeys.FIND_COORDINATOR => handleFindCoordinatorRequest(request)
        case ApiKeys.JOIN_GROUP => handleJoinGroupRequest(request)
        case ApiKeys.HEARTBEAT => handleHeartbeatRequest(request)
        case ApiKeys.LEAVE_GROUP => handleLeaveGroupRequest(request)
        case ApiKeys.SYNC_GROUP => handleSyncGroupRequest(request)
        case ApiKeys.DESCRIBE_GROUPS => handleDescribeGroupRequest(request)
        case ApiKeys.LIST_GROUPS => handleListGroupsRequest(request)
        case ApiKeys.SASL_HANDSHAKE => handleSaslHandshakeRequest(request)
        case ApiKeys.API_VERSIONS => handleApiVersionsRequest(request)
        case ApiKeys.CREATE_TOPICS => handleCreateTopicsRequest(request)
        case ApiKeys.DELETE_TOPICS => handleDeleteTopicsRequest(request)
        case ApiKeys.DELETE_RECORDS => handleDeleteRecordsRequest(request)
        case ApiKeys.INIT_PRODUCER_ID => handleInitProducerIdRequest(request)
        case ApiKeys.OFFSET_FOR_LEADER_EPOCH => handleOffsetForLeaderEpochRequest(request)
        case ApiKeys.ADD_PARTITIONS_TO_TXN => handleAddPartitionToTxnRequest(request)
        case ApiKeys.ADD_OFFSETS_TO_TXN => handleAddOffsetsToTxnRequest(request)
        case ApiKeys.END_TXN => handleEndTxnRequest(request)
        case ApiKeys.WRITE_TXN_MARKERS => handleWriteTxnMarkersRequest(request)
        case ApiKeys.TXN_OFFSET_COMMIT => handleTxnOffsetCommitRequest(request)
        case ApiKeys.DESCRIBE_ACLS => handleDescribeAcls(request)
        case ApiKeys.CREATE_ACLS => handleCreateAcls(request)
        case ApiKeys.DELETE_ACLS => handleDeleteAcls(request)
        case ApiKeys.ALTER_CONFIGS => handleAlterConfigsRequest(request)
        case ApiKeys.DESCRIBE_CONFIGS => handleDescribeConfigsRequest(request)
        case ApiKeys.ALTER_REPLICA_LOG_DIRS => handleAlterReplicaLogDirsRequest(request)
        case ApiKeys.DESCRIBE_LOG_DIRS => handleDescribeLogDirsRequest(request)
        case ApiKeys.SASL_AUTHENTICATE => handleSaslAuthenticateRequest(request)
        case ApiKeys.CREATE_PARTITIONS => handleCreatePartitionsRequest(request)
        case ApiKeys.CREATE_DELEGATION_TOKEN => handleCreateTokenRequest(request)
        case ApiKeys.RENEW_DELEGATION_TOKEN => handleRenewTokenRequest(request)
        case ApiKeys.EXPIRE_DELEGATION_TOKEN => handleExpireTokenRequest(request)
        case ApiKeys.DESCRIBE_DELEGATION_TOKEN => handleDescribeTokensRequest(request)
        case ApiKeys.DELETE_GROUPS => handleDeleteGroupsRequest(request)
      }
    } catch {
      // Fatal errors must propagate so the broker exits instead of limping on.
      case e: FatalExitError => throw e
      case e: Throwable => handleError(request, e)
    } finally {
      // Always record when local processing finished, even on error paths.
      request.apiLocalCompleteTimeNanos = time.nanoseconds
    }
  }
  /** Handles a LeaderAndIsr request from the controller: applies the leadership
   *  changes via the replica manager and notifies the group / transaction
   *  coordinators when partitions of their internal topics migrate.
   *  Requires ClusterAction on the cluster resource.
   */
  def handleLeaderAndIsrRequest(request: RequestChannel.Request) {
    // ensureTopicExists is only for client facing requests
    // We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they
    // stop serving data to clients for the topic being deleted
    val correlationId = request.header.correlationId
    val leaderAndIsrRequest = request.body[LeaderAndIsrRequest]
    def onLeadershipChange(updatedLeaders: Iterable[Partition], updatedFollowers: Iterable[Partition]) {
      // for each new leader or follower, call coordinator to handle consumer group migration.
      // this callback is invoked under the replica state change lock to ensure proper order of
      // leadership changes
      updatedLeaders.foreach { partition =>
        if (partition.topic == GROUP_METADATA_TOPIC_NAME)
          groupCoordinator.handleGroupImmigration(partition.partitionId)
        else if (partition.topic == TRANSACTION_STATE_TOPIC_NAME)
          txnCoordinator.handleTxnImmigration(partition.partitionId, partition.getLeaderEpoch)
      }
      updatedFollowers.foreach { partition =>
        if (partition.topic == GROUP_METADATA_TOPIC_NAME)
          groupCoordinator.handleGroupEmigration(partition.partitionId)
        else if (partition.topic == TRANSACTION_STATE_TOPIC_NAME)
          txnCoordinator.handleTxnEmigration(partition.partitionId, partition.getLeaderEpoch)
      }
    }
    if (authorize(request.session, ClusterAction, Resource.ClusterResource)) {
      // Controller-originated requests are exempt from client throttling.
      val response = replicaManager.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest, onLeadershipChange)
      sendResponseExemptThrottle(request, response)
    } else {
      sendResponseMaybeThrottle(request, throttleTimeMs => leaderAndIsrRequest.getErrorResponse(throttleTimeMs,
        Errors.CLUSTER_AUTHORIZATION_FAILED.exception))
    }
  }
  /** Handles a StopReplica request from the controller: stops (and optionally
   *  deletes) the given replicas, evicting group-coordinator caches for any
   *  deleted offsets-topic partitions. Requires ClusterAction on the cluster
   *  resource. Idle replica fetcher threads are shut down afterwards either way.
   */
  def handleStopReplicaRequest(request: RequestChannel.Request) {
    // ensureTopicExists is only for client facing requests
    // We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they
    // stop serving data to clients for the topic being deleted
    val stopReplicaRequest = request.body[StopReplicaRequest]
    if (authorize(request.session, ClusterAction, Resource.ClusterResource)) {
      val (result, error) = replicaManager.stopReplicas(stopReplicaRequest)
      // Clearing out the cache for groups that belong to an offsets topic partition for which this broker was the leader,
      // since this broker is no longer a replica for that offsets topic partition.
      // This is required to handle the following scenario :
      // Consider old replicas : {[1,2,3], Leader = 1} is reassigned to new replicas : {[2,3,4], Leader = 2}, broker 1 does not receive a LeaderAndIsr
      // request to become a follower due to which cache for groups that belong to an offsets topic partition for which broker 1 was the leader,
      // is not cleared.
      result.foreach { case (topicPartition, error) =>
        if (error == Errors.NONE && stopReplicaRequest.deletePartitions && topicPartition.topic == GROUP_METADATA_TOPIC_NAME) {
          groupCoordinator.handleGroupEmigration(topicPartition.partition)
        }
      }
      sendResponseExemptThrottle(request, new StopReplicaResponse(error, result.asJava))
    } else {
      // Unauthorized: report the authorization failure for every partition.
      val result = stopReplicaRequest.partitions.asScala.map((_, Errors.CLUSTER_AUTHORIZATION_FAILED)).toMap
      sendResponseMaybeThrottle(request, _ =>
        new StopReplicaResponse(Errors.CLUSTER_AUTHORIZATION_FAILED, result.asJava))
    }
    // Best-effort cleanup; exceptions are swallowed (and logged) by swallow.
    CoreUtils.swallow(replicaManager.replicaFetcherManager.shutdownIdleFetcherThreads(), this)
  }
  /** Handles an UpdateMetadata request from the controller: refreshes the
   *  metadata cache, cleans up coordinator state for deleted partitions,
   *  completes any delayed topic operations, and lets the quota callback react
   *  to the new cluster metadata. Requires ClusterAction on the cluster resource.
   */
  def handleUpdateMetadataRequest(request: RequestChannel.Request) {
    val correlationId = request.header.correlationId
    val updateMetadataRequest = request.body[UpdateMetadataRequest]
    if (authorize(request.session, ClusterAction, Resource.ClusterResource)) {
      val deletedPartitions = replicaManager.maybeUpdateMetadataCache(correlationId, updateMetadataRequest)
      if (deletedPartitions.nonEmpty)
        groupCoordinator.handleDeletedPartitions(deletedPartitions)
      if (adminManager.hasDelayedTopicOperations) {
        // New metadata may satisfy pending create/delete topic purgatory entries.
        updateMetadataRequest.partitionStates.keySet.asScala.map(_.topic).foreach { topic =>
          adminManager.tryCompleteDelayedTopicOperations(topic)
        }
      }
      quotas.clientQuotaCallback.foreach { callback =>
        // If the pluggable quota callback reports a change, refresh all quota metric configs.
        if (callback.updateClusterMetadata(metadataCache.getClusterMetadata(clusterId, request.context.listenerName))) {
          quotas.fetch.updateQuotaMetricConfigs()
          quotas.produce.updateQuotaMetricConfigs()
          quotas.request.updateQuotaMetricConfigs()
        }
      }
      sendResponseExemptThrottle(request, new UpdateMetadataResponse(Errors.NONE))
    } else {
      sendResponseMaybeThrottle(request, _ => new UpdateMetadataResponse(Errors.CLUSTER_AUTHORIZATION_FAILED))
    }
  }
  /** Handles a ControlledShutdown request: asks the controller to move
   *  leadership off the shutting-down broker and replies asynchronously with
   *  the partitions still remaining on it. Requires ClusterAction (enforced by
   *  `authorizeClusterAction`, which throws on failure).
   */
  def handleControlledShutdownRequest(request: RequestChannel.Request) {
    // ensureTopicExists is only for client facing requests
    // We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they
    // stop serving data to clients for the topic being deleted
    val controlledShutdownRequest = request.body[ControlledShutdownRequest]
    authorizeClusterAction(request)
    // Invoked by the controller once the shutdown attempt resolves.
    def controlledShutdownCallback(controlledShutdownResult: Try[Set[TopicPartition]]): Unit = {
      val response = controlledShutdownResult match {
        case Success(partitionsRemaining) =>
          new ControlledShutdownResponse(Errors.NONE, partitionsRemaining.asJava)
        case Failure(throwable) =>
          controlledShutdownRequest.getErrorResponse(throwable)
      }
      sendResponseExemptThrottle(request, response)
    }
    controller.controlledShutdown(controlledShutdownRequest.brokerId, controlledShutdownCallback)
  }
  /**
   * Handle an offset commit request.
   *
   * Group-level Read authorization is checked first; failing it rejects every
   * partition. Otherwise each partition is classified as unauthorized, unknown,
   * or committable. For api version 0 offsets are written to ZooKeeper; for
   * version 1+ they are handed to the group coordinator with a retention-based
   * expiration timestamp.
   */
  def handleOffsetCommitRequest(request: RequestChannel.Request) {
    val header = request.header
    val offsetCommitRequest = request.body[OffsetCommitRequest]
    // reject the request if not authorized to the group
    if (!authorize(request.session, Read, new Resource(Group, offsetCommitRequest.groupId))) {
      val error = Errors.GROUP_AUTHORIZATION_FAILED
      val results = offsetCommitRequest.offsetData.keySet.asScala.map { topicPartition =>
        (topicPartition, error)
      }.toMap
      sendResponseMaybeThrottle(request, requestThrottleMs => new OffsetCommitResponse(requestThrottleMs, results.asJava))
    } else {
      // Classify every partition: unauthorized topic, unknown topic, or committable.
      val unauthorizedTopicErrors = mutable.Map[TopicPartition, Errors]()
      val nonExistingTopicErrors = mutable.Map[TopicPartition, Errors]()
      val authorizedTopicRequestInfoBldr = immutable.Map.newBuilder[TopicPartition, OffsetCommitRequest.PartitionData]
      for ((topicPartition, partitionData) <- offsetCommitRequest.offsetData.asScala) {
        if (!authorize(request.session, Read, new Resource(Topic, topicPartition.topic)))
          unauthorizedTopicErrors += (topicPartition -> Errors.TOPIC_AUTHORIZATION_FAILED)
        else if (!metadataCache.contains(topicPartition))
          nonExistingTopicErrors += (topicPartition -> Errors.UNKNOWN_TOPIC_OR_PARTITION)
        else
          authorizedTopicRequestInfoBldr += (topicPartition -> partitionData)
      }
      val authorizedTopicRequestInfo = authorizedTopicRequestInfoBldr.result()
      // the callback for sending an offset commit response
      def sendResponseCallback(commitStatus: immutable.Map[TopicPartition, Errors]) {
        val combinedCommitStatus = commitStatus ++ unauthorizedTopicErrors ++ nonExistingTopicErrors
        if (isDebugEnabled)
          combinedCommitStatus.foreach { case (topicPartition, error) =>
            if (error != Errors.NONE) {
              debug(s"Offset commit request with correlation id ${header.correlationId} from client ${header.clientId} " +
                s"on partition $topicPartition failed due to ${error.exceptionName}")
            }
          }
        sendResponseMaybeThrottle(request, requestThrottleMs =>
          new OffsetCommitResponse(requestThrottleMs, combinedCommitStatus.asJava))
      }
      if (authorizedTopicRequestInfo.isEmpty)
        sendResponseCallback(Map.empty)
      else if (header.apiVersion == 0) {
        // for version 0 always store offsets to ZK
        val responseInfo = authorizedTopicRequestInfo.map {
          case (topicPartition, partitionData) =>
            try {
              if (partitionData.metadata != null && partitionData.metadata.length > config.offsetMetadataMaxSize)
                (topicPartition, Errors.OFFSET_METADATA_TOO_LARGE)
              else {
                zkClient.setOrCreateConsumerOffset(offsetCommitRequest.groupId, topicPartition, partitionData.offset)
                (topicPartition, Errors.NONE)
              }
            } catch {
              case e: Throwable => (topicPartition, Errors.forException(e))
            }
        }
        sendResponseCallback(responseInfo)
      } else {
        // for version 1 and beyond store offsets in offset manager
        // compute the retention time based on the request version:
        // if it is v1 or not specified by user, we can use the default retention
        val offsetRetention =
          if (header.apiVersion <= 1 ||
            offsetCommitRequest.retentionTime == OffsetCommitRequest.DEFAULT_RETENTION_TIME)
            groupCoordinator.offsetConfig.offsetsRetentionMs
          else
            offsetCommitRequest.retentionTime
        // commit timestamp is always set to now.
        // "default" expiration timestamp is now + retention (and retention may be overridden if v2)
        // expire timestamp is computed differently for v1 and v2.
        //   - If v1 and no explicit commit timestamp is provided we use default expiration timestamp.
        //   - If v1 and explicit commit timestamp is provided we calculate retention from that explicit commit timestamp
        //   - If v2 we use the default expiration timestamp
        val currentTimestamp = time.milliseconds
        val defaultExpireTimestamp = offsetRetention + currentTimestamp
        // NOTE(review): mapValues yields a lazy view in Scala 2.12, so this transform
        // re-runs on each traversal. Results remain deterministic because the
        // timestamps are captured in vals above — consider a strict map anyway.
        val partitionData = authorizedTopicRequestInfo.mapValues { partitionData =>
          val metadata = if (partitionData.metadata == null) OffsetMetadata.NoMetadata else partitionData.metadata
          new OffsetAndMetadata(
            offsetMetadata = OffsetMetadata(partitionData.offset, metadata),
            commitTimestamp = currentTimestamp,
            expireTimestamp = {
              if (partitionData.timestamp == OffsetCommitRequest.DEFAULT_TIMESTAMP)
                defaultExpireTimestamp
              else
                offsetRetention + partitionData.timestamp
            }
          )
        }
        // call coordinator to handle commit offset
        groupCoordinator.handleCommitOffsets(
          offsetCommitRequest.groupId,
          offsetCommitRequest.memberId,
          offsetCommitRequest.generationId,
          partitionData,
          sendResponseCallback)
      }
    }
  }
private def authorize(session: RequestChannel.Session, operation: Operation, resource: Resource): Boolean =
authorizer.forall(_.authorize(session, operation, resource))
  /**
   * Handle a produce request.
   *
   * Transactional produces require Write on the transactionalId; idempotent
   * (non-transactional) produces require IdempotentWrite on the cluster.
   * Partitions are then classified per-topic and appended via the replica
   * manager. With acks=0 no response is sent: the connection is closed on
   * error, otherwise a no-op response is recorded.
   */
  def handleProduceRequest(request: RequestChannel.Request) {
    val produceRequest = request.body[ProduceRequest]
    val numBytesAppended = request.header.toStruct.sizeOf + request.sizeOfBodyInBytes
    if (produceRequest.isTransactional) {
      if (!authorize(request.session, Write, new Resource(TransactionalId, produceRequest.transactionalId))) {
        sendErrorResponseMaybeThrottle(request, Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED.exception)
        return
      }
      // Note that authorization to a transactionalId implies ProducerId authorization
    } else if (produceRequest.isIdempotent && !authorize(request.session, IdempotentWrite, Resource.ClusterResource)) {
      sendErrorResponseMaybeThrottle(request, Errors.CLUSTER_AUTHORIZATION_FAILED.exception)
      return
    }
    // Classify every partition: unauthorized topic, unknown topic, or appendable.
    val unauthorizedTopicResponses = mutable.Map[TopicPartition, PartitionResponse]()
    val nonExistingTopicResponses = mutable.Map[TopicPartition, PartitionResponse]()
    val authorizedRequestInfo = mutable.Map[TopicPartition, MemoryRecords]()
    for ((topicPartition, memoryRecords) <- produceRequest.partitionRecordsOrFail.asScala) {
      if (!authorize(request.session, Write, new Resource(Topic, topicPartition.topic)))
        unauthorizedTopicResponses += topicPartition -> new PartitionResponse(Errors.TOPIC_AUTHORIZATION_FAILED)
      else if (!metadataCache.contains(topicPartition))
        nonExistingTopicResponses += topicPartition -> new PartitionResponse(Errors.UNKNOWN_TOPIC_OR_PARTITION)
      else
        authorizedRequestInfo += (topicPartition -> memoryRecords)
    }
    // the callback for sending a produce response
    def sendResponseCallback(responseStatus: Map[TopicPartition, PartitionResponse]) {
      val mergedResponseStatus = responseStatus ++ unauthorizedTopicResponses ++ nonExistingTopicResponses
      var errorInResponse = false
      mergedResponseStatus.foreach { case (topicPartition, status) =>
        if (status.error != Errors.NONE) {
          errorInResponse = true
          debug("Produce request with correlation id %d from client %s on partition %s failed due to %s".format(
            request.header.correlationId,
            request.header.clientId,
            topicPartition,
            status.error.exceptionName))
        }
      }
      def produceResponseCallback(bandwidthThrottleTimeMs: Int) {
        if (produceRequest.acks == 0) {
          // no operation needed if producer request.required.acks = 0; however, if there is any error in handling
          // the request, since no response is expected by the producer, the server will close socket server so that
          // the producer client will know that some error has happened and will refresh its metadata
          if (errorInResponse) {
            val exceptionsSummary = mergedResponseStatus.map { case (topicPartition, status) =>
              topicPartition -> status.error.exceptionName
            }.mkString(", ")
            info(
              s"Closing connection due to error during produce request with correlation id ${request.header.correlationId} " +
                s"from client id ${request.header.clientId} with ack=0\n" +
                s"Topic and partition to exceptions: $exceptionsSummary"
            )
            closeConnection(request, new ProduceResponse(mergedResponseStatus.asJava).errorCounts)
          } else {
            sendNoOpResponseExemptThrottle(request)
          }
        } else {
          sendResponseMaybeThrottle(request, requestThrottleMs =>
            new ProduceResponse(mergedResponseStatus.asJava, bandwidthThrottleTimeMs + requestThrottleMs))
        }
      }
      // When this callback is triggered, the remote API call has completed
      request.apiRemoteCompleteTimeNanos = time.nanoseconds
      quotas.produce.maybeRecordAndThrottle(
        request.session,
        request.header.clientId,
        numBytesAppended,
        produceResponseCallback)
    }
    // Records per-partition append conversion stats as they are reported.
    def processingStatsCallback(processingStats: Map[TopicPartition, RecordsProcessingStats]): Unit = {
      processingStats.foreach { case (tp, info) =>
        updateRecordsProcessingStats(request, tp, info)
      }
    }
    if (authorizedRequestInfo.isEmpty)
      sendResponseCallback(Map.empty)
    else {
      // Only the admin client may write to internal topics.
      val internalTopicsAllowed = request.header.clientId == AdminUtils.AdminClientId
      // call the replica manager to append messages to the replicas
      replicaManager.appendRecords(
        timeout = produceRequest.timeout.toLong,
        requiredAcks = produceRequest.acks,
        internalTopicsAllowed = internalTopicsAllowed,
        isFromClient = true,
        entriesPerPartition = authorizedRequestInfo,
        responseCallback = sendResponseCallback,
        processingStatsCallback = processingStatsCallback)
      // if the request is put into the purgatory, it will have a held reference and hence cannot be garbage collected;
      // hence we clear its data here in order to let GC reclaim its memory since it is already appended to log
      produceRequest.clearPartitionRecords()
    }
  }
  /**
   * Handle a fetch request.
   *
   * Follower fetches require ClusterAction on the cluster; consumer fetches
   * require Read per topic. Authorized partitions are read via the replica
   * manager; responses are down-converted to an older record format when the
   * client's fetch version demands it, and quota throttling is applied before
   * down-conversion to bound memory use.
   */
  def handleFetchRequest(request: RequestChannel.Request) {
    val versionId = request.header.apiVersion
    val clientId = request.header.clientId
    val fetchRequest = request.body[FetchRequest]
    // Incremental fetch session context (full or incremental, per KIP-227).
    val fetchContext = fetchManager.newContext(fetchRequest.metadata(),
      fetchRequest.fetchData(),
      fetchRequest.toForget(),
      fetchRequest.isFromFollower())
    // Partitions answered immediately with an error vs. partitions to actually read.
    val erroneous = mutable.ArrayBuffer[(TopicPartition, FetchResponse.PartitionData)]()
    val interesting = mutable.ArrayBuffer[(TopicPartition, FetchRequest.PartitionData)]()
    if (fetchRequest.isFromFollower()) {
      // The follower must have ClusterAction on ClusterResource in order to fetch partition data.
      if (authorize(request.session, ClusterAction, Resource.ClusterResource)) {
        fetchContext.foreachPartition((topicPartition, data) => {
          if (!metadataCache.contains(topicPartition)) {
            erroneous += topicPartition -> new FetchResponse.PartitionData(Errors.UNKNOWN_TOPIC_OR_PARTITION,
              FetchResponse.INVALID_HIGHWATERMARK, FetchResponse.INVALID_LAST_STABLE_OFFSET,
              FetchResponse.INVALID_LOG_START_OFFSET, null, MemoryRecords.EMPTY)
          } else {
            interesting += (topicPartition -> data)
          }
        })
      } else {
        fetchContext.foreachPartition((part, data) => {
          erroneous += part -> new FetchResponse.PartitionData(Errors.TOPIC_AUTHORIZATION_FAILED,
            FetchResponse.INVALID_HIGHWATERMARK, FetchResponse.INVALID_LAST_STABLE_OFFSET,
            FetchResponse.INVALID_LOG_START_OFFSET, null, MemoryRecords.EMPTY)
        })
      }
    } else {
      // Regular Kafka consumers need READ permission on each partition they are fetching.
      fetchContext.foreachPartition((topicPartition, data) => {
        if (!authorize(request.session, Read, new Resource(Topic, topicPartition.topic)))
          erroneous += topicPartition -> new FetchResponse.PartitionData(Errors.TOPIC_AUTHORIZATION_FAILED,
            FetchResponse.INVALID_HIGHWATERMARK, FetchResponse.INVALID_LAST_STABLE_OFFSET,
            FetchResponse.INVALID_LOG_START_OFFSET, null, MemoryRecords.EMPTY)
        else if (!metadataCache.contains(topicPartition))
          erroneous += topicPartition -> new FetchResponse.PartitionData(Errors.UNKNOWN_TOPIC_OR_PARTITION,
            FetchResponse.INVALID_HIGHWATERMARK, FetchResponse.INVALID_LAST_STABLE_OFFSET,
            FetchResponse.INVALID_LOG_START_OFFSET, null, MemoryRecords.EMPTY)
        else
          interesting += (topicPartition -> data)
      })
    }
    // Down-converts one partition's records to an older magic if the client version requires it.
    def convertedPartitionData(tp: TopicPartition, data: FetchResponse.PartitionData) = {
      // Down-conversion of the fetched records is needed when the stored magic version is
      // greater than that supported by the client (as indicated by the fetch request version). If the
      // configured magic version for the topic is less than or equal to that supported by the version of the
      // fetch request, we skip the iteration through the records in order to check the magic version since we
      // know it must be supported. However, if the magic version is changed from a higher version back to a
      // lower version, this check will no longer be valid and we will fail to down-convert the messages
      // which were written in the new format prior to the version downgrade.
      replicaManager.getMagic(tp).flatMap { magic =>
        val downConvertMagic = {
          if (magic > RecordBatch.MAGIC_VALUE_V0 && versionId <= 1 && !data.records.hasCompatibleMagic(RecordBatch.MAGIC_VALUE_V0))
            Some(RecordBatch.MAGIC_VALUE_V0)
          else if (magic > RecordBatch.MAGIC_VALUE_V1 && versionId <= 3 && !data.records.hasCompatibleMagic(RecordBatch.MAGIC_VALUE_V1))
            Some(RecordBatch.MAGIC_VALUE_V1)
          else
            None
        }
        downConvertMagic.map { magic =>
          trace(s"Down converting records from partition $tp to message format version $magic for fetch request from $clientId")
          val converted = data.records.downConvert(magic, fetchContext.getFetchOffset(tp).get, time)
          updateRecordsProcessingStats(request, tp, converted.recordsProcessingStats)
          new FetchResponse.PartitionData(data.error, data.highWatermark, FetchResponse.INVALID_LAST_STABLE_OFFSET,
            data.logStartOffset, data.abortedTransactions, converted.records)
        }
      }.getOrElse(data)
    }
    // the callback for process a fetch response, invoked before throttling
    def processResponseCallback(responsePartitionData: Seq[(TopicPartition, FetchPartitionData)]) {
      val partitions = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData]
      responsePartitionData.foreach{ case (tp, data) =>
        val abortedTransactions = data.abortedTransactions.map(_.asJava).orNull
        val lastStableOffset = data.lastStableOffset.getOrElse(FetchResponse.INVALID_LAST_STABLE_OFFSET)
        partitions.put(tp, new FetchResponse.PartitionData(data.error, data.highWatermark, lastStableOffset,
          data.logStartOffset, abortedTransactions, data.records))
      }
      erroneous.foreach{case (tp, data) => partitions.put(tp, data)}
      val unconvertedFetchResponse = fetchContext.updateAndGenerateResponseData(partitions)
      // fetch response callback invoked after any throttling
      def fetchResponseCallback(bandwidthThrottleTimeMs: Int) {
        def createResponse(requestThrottleTimeMs: Int): FetchResponse = {
          val convertedData = new util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData]
          unconvertedFetchResponse.responseData().asScala.foreach { case (tp, partitionData) =>
            if (partitionData.error != Errors.NONE)
              debug(s"Fetch request with correlation id ${request.header.correlationId} from client $clientId " +
                s"on partition $tp failed due to ${partitionData.error.exceptionName}")
            convertedData.put(tp, convertedPartitionData(tp, partitionData))
          }
          val response = new FetchResponse(unconvertedFetchResponse.error(), convertedData,
            bandwidthThrottleTimeMs + requestThrottleTimeMs, unconvertedFetchResponse.sessionId())
          response.responseData.asScala.foreach { case (topicPartition, data) =>
            // record the bytes out metrics only when the response is being sent
            brokerTopicStats.updateBytesOut(topicPartition.topic, fetchRequest.isFromFollower, data.records.sizeInBytes)
          }
          response
        }
        trace(s"Sending Fetch response with partitions.size=${unconvertedFetchResponse.responseData().size()}, " +
          s"metadata=${unconvertedFetchResponse.sessionId()}")
        if (fetchRequest.isFromFollower)
          sendResponseExemptThrottle(request, createResponse(0))
        else
          sendResponseMaybeThrottle(request, requestThrottleMs => createResponse(requestThrottleMs))
      }
      // When this callback is triggered, the remote API call has completed.
      // Record time before any byte-rate throttling.
      request.apiRemoteCompleteTimeNanos = time.nanoseconds
      if (fetchRequest.isFromFollower) {
        // We've already evaluated against the quota and are good to go. Just need to record it now.
        val responseSize = sizeOfThrottledPartitions(versionId, unconvertedFetchResponse, quotas.leader)
        quotas.leader.record(responseSize)
        fetchResponseCallback(bandwidthThrottleTimeMs = 0)
      } else {
        // Fetch size used to determine throttle time is calculated before any down conversions.
        // This may be slightly different from the actual response size. But since down conversions
        // result in data being loaded into memory, it is better to do this after throttling to avoid OOM.
        val responseStruct = unconvertedFetchResponse.toStruct(versionId)
        quotas.fetch.maybeRecordAndThrottle(request.session, clientId, responseStruct.sizeOf,
          fetchResponseCallback)
      }
    }
    if (interesting.isEmpty)
      processResponseCallback(Seq.empty)
    else {
      // call the replica manager to fetch messages from the local replica
      replicaManager.fetchMessages(
        fetchRequest.maxWait.toLong,
        fetchRequest.replicaId,
        fetchRequest.minBytes,
        fetchRequest.maxBytes,
        versionId <= 2,
        interesting,
        replicationQuota(fetchRequest),
        processResponseCallback,
        fetchRequest.isolationLevel)
    }
  }
class SelectingIterator(val partitions: util.LinkedHashMap[TopicPartition, FetchResponse.PartitionData],
val quota: ReplicationQuotaManager)
extends util.Iterator[util.Map.Entry[TopicPartition, FetchResponse.PartitionData]] {
val iter = partitions.entrySet().iterator()
var nextElement: util.Map.Entry[TopicPartition, FetchResponse.PartitionData] = null
override def hasNext: Boolean = {
while ((nextElement == null) && iter.hasNext()) {
val element = iter.next()
if (quota.isThrottled(element.getKey)) {
nextElement = element
}
}
nextElement != null
}
override def next(): util.Map.Entry[TopicPartition, FetchResponse.PartitionData] = {
if (!hasNext()) throw new NoSuchElementException()
val element = nextElement
nextElement = null
element
}
override def remove() = throw new UnsupportedOperationException()
}
private def sizeOfThrottledPartitions(versionId: Short,
unconvertedResponse: FetchResponse,
quota: ReplicationQuotaManager): Int = {
val iter = new SelectingIterator(unconvertedResponse.responseData(), quota)
FetchResponse.sizeOf(versionId, iter)
}
def replicationQuota(fetchRequest: FetchRequest): ReplicaQuota =
if (fetchRequest.isFromFollower) quotas.leader else UnboundedQuota
def handleListOffsetRequest(request: RequestChannel.Request) {
val version = request.header.apiVersion()
val mergedResponseMap = if (version == 0)
handleListOffsetRequestV0(request)
else
handleListOffsetRequestV1AndAbove(request)
sendResponseMaybeThrottle(request, requestThrottleMs => new ListOffsetResponse(requestThrottleMs, mergedResponseMap.asJava))
}
  /** Version-0 ListOffsets handling: per partition, fetches up to
   *  `maxNumOffsets` offsets for the requested timestamp, clamping consumer
   *  results to the high watermark. Unauthorized topics (Describe) get
   *  TOPIC_AUTHORIZATION_FAILED. Returns the merged per-partition result map.
   */
  private def handleListOffsetRequestV0(request : RequestChannel.Request) : Map[TopicPartition, ListOffsetResponse.PartitionData] = {
    val correlationId = request.header.correlationId
    val clientId = request.header.clientId
    val offsetRequest = request.body[ListOffsetRequest]
    val (authorizedRequestInfo, unauthorizedRequestInfo) = offsetRequest.offsetData.asScala.partition {
      case (topicPartition, _) => authorize(request.session, Describe, new Resource(Topic, topicPartition.topic))
    }
    // NOTE(review): mapValues is a lazy view in Scala 2.12; harmless here since
    // the mapped value is a fresh constant-error PartitionData each time.
    val unauthorizedResponseStatus = unauthorizedRequestInfo.mapValues(_ =>
      new ListOffsetResponse.PartitionData(Errors.TOPIC_AUTHORIZATION_FAILED, List[JLong]().asJava)
    )
    val responseMap = authorizedRequestInfo.map {case (topicPartition, partitionData) =>
      try {
        // ensure leader exists
        val localReplica = if (offsetRequest.replicaId != ListOffsetRequest.DEBUGGING_REPLICA_ID)
          replicaManager.getLeaderReplicaIfLocal(topicPartition)
        else
          replicaManager.getReplicaOrException(topicPartition)
        val offsets = {
          val allOffsets = fetchOffsets(replicaManager.logManager,
            topicPartition,
            partitionData.timestamp,
            partitionData.maxNumOffsets)
          if (offsetRequest.replicaId != ListOffsetRequest.CONSUMER_REPLICA_ID) {
            allOffsets
          } else {
            // Consumers must not see offsets beyond the high watermark.
            val hw = localReplica.highWatermark.messageOffset
            if (allOffsets.exists(_ > hw))
              hw +: allOffsets.dropWhile(_ > hw)
            else
              allOffsets
          }
        }
        (topicPartition, new ListOffsetResponse.PartitionData(Errors.NONE, offsets.map(new JLong(_)).asJava))
      } catch {
        // NOTE: UnknownTopicOrPartitionException and NotLeaderForPartitionException are special cased since these error messages
        // are typically transient and there is no value in logging the entire stack trace for the same
        case e @ (_ : UnknownTopicOrPartitionException |
                  _ : NotLeaderForPartitionException |
                  _ : KafkaStorageException) =>
          debug("Offset request with correlation id %d from client %s on partition %s failed due to %s".format(
            correlationId, clientId, topicPartition, e.getMessage))
          (topicPartition, new ListOffsetResponse.PartitionData(Errors.forException(e), List[JLong]().asJava))
        case e: Throwable =>
          error("Error while responding to offset request", e)
          (topicPartition, new ListOffsetResponse.PartitionData(Errors.forException(e), List[JLong]().asJava))
      }
    }
    responseMap ++ unauthorizedResponseStatus
  }
  /**
   * ListOffsets v1+: resolves a single (timestamp, offset) pair per partition.
   * Handles per-topic Describe authorization, duplicate partitions in the request,
   * and — for consumer clients — caps results at the last fetchable offset
   * (high watermark or last stable offset, depending on the isolation level).
   */
  private def handleListOffsetRequestV1AndAbove(request : RequestChannel.Request): Map[TopicPartition, ListOffsetResponse.PartitionData] = {
    val correlationId = request.header.correlationId
    val clientId = request.header.clientId
    val offsetRequest = request.body[ListOffsetRequest]
    val (authorizedRequestInfo, unauthorizedRequestInfo) = offsetRequest.partitionTimestamps.asScala.partition {
      case (topicPartition, _) => authorize(request.session, Describe, new Resource(Topic, topicPartition.topic))
    }
    val unauthorizedResponseStatus = unauthorizedRequestInfo.mapValues(_ => {
      new ListOffsetResponse.PartitionData(Errors.TOPIC_AUTHORIZATION_FAILED,
                                           ListOffsetResponse.UNKNOWN_TIMESTAMP,
                                           ListOffsetResponse.UNKNOWN_OFFSET)
    })
    val responseMap = authorizedRequestInfo.map { case (topicPartition, timestamp) =>
      if (offsetRequest.duplicatePartitions().contains(topicPartition)) {
        // A partition listed more than once in one request is rejected outright.
        debug(s"OffsetRequest with correlation id $correlationId from client $clientId on partition $topicPartition " +
            s"failed because the partition is duplicated in the request.")
        (topicPartition, new ListOffsetResponse.PartitionData(Errors.INVALID_REQUEST,
                                                              ListOffsetResponse.UNKNOWN_TIMESTAMP,
                                                              ListOffsetResponse.UNKNOWN_OFFSET))
      } else {
        try {
          // ensure leader exists
          // Debugging replica id may target a non-leader replica; normal requests require the leader.
          val localReplica = if (offsetRequest.replicaId != ListOffsetRequest.DEBUGGING_REPLICA_ID)
            replicaManager.getLeaderReplicaIfLocal(topicPartition)
          else
            replicaManager.getReplicaOrException(topicPartition)
          val fromConsumer = offsetRequest.replicaId == ListOffsetRequest.CONSUMER_REPLICA_ID
          val found = if (fromConsumer) {
            // Consumers are limited to offsets they are allowed to fetch: LSO for
            // READ_COMMITTED, HW for READ_UNCOMMITTED.
            val lastFetchableOffset = offsetRequest.isolationLevel match {
              case IsolationLevel.READ_COMMITTED => localReplica.lastStableOffset.messageOffset
              case IsolationLevel.READ_UNCOMMITTED => localReplica.highWatermark.messageOffset
            }
            if (timestamp == ListOffsetRequest.LATEST_TIMESTAMP)
              TimestampOffset(RecordBatch.NO_TIMESTAMP, lastFetchableOffset)
            else {
              // EARLIEST is always visible; any other timestamp must resolve below the fetchable bound.
              def allowed(timestampOffset: TimestampOffset): Boolean =
                timestamp == ListOffsetRequest.EARLIEST_TIMESTAMP || timestampOffset.offset < lastFetchableOffset
              fetchOffsetForTimestamp(topicPartition, timestamp)
                .filter(allowed).getOrElse(TimestampOffset.Unknown)
            }
          } else {
            fetchOffsetForTimestamp(topicPartition, timestamp)
              .getOrElse(TimestampOffset.Unknown)
          }
          (topicPartition, new ListOffsetResponse.PartitionData(Errors.NONE, found.timestamp, found.offset))
        } catch {
          // NOTE: These exceptions are special cased since these error messages are typically transient or the client
          // would have received a clear exception and there is no value in logging the entire stack trace for the same
          case e @ (_ : UnknownTopicOrPartitionException |
                    _ : NotLeaderForPartitionException |
                    _ : KafkaStorageException |
                    _ : UnsupportedForMessageFormatException) =>
            debug(s"Offset request with correlation id $correlationId from client $clientId on " +
                s"partition $topicPartition failed due to ${e.getMessage}")
            (topicPartition, new ListOffsetResponse.PartitionData(Errors.forException(e),
                                                                  ListOffsetResponse.UNKNOWN_TIMESTAMP,
                                                                  ListOffsetResponse.UNKNOWN_OFFSET))
          case e: Throwable =>
            error("Error while responding to offset request", e)
            (topicPartition, new ListOffsetResponse.PartitionData(Errors.forException(e),
                                                                  ListOffsetResponse.UNKNOWN_TIMESTAMP,
                                                                  ListOffsetResponse.UNKNOWN_OFFSET))
        }
      }
    }
    responseMap ++ unauthorizedResponseStatus
  }
def fetchOffsets(logManager: LogManager, topicPartition: TopicPartition, timestamp: Long, maxNumOffsets: Int): Seq[Long] = {
logManager.getLog(topicPartition) match {
case Some(log) =>
fetchOffsetsBefore(log, timestamp, maxNumOffsets)
case None =>
if (timestamp == ListOffsetRequest.LATEST_TIMESTAMP || timestamp == ListOffsetRequest.EARLIEST_TIMESTAMP)
Seq(0L)
else
Nil
}
}
private def fetchOffsetForTimestamp(topicPartition: TopicPartition, timestamp: Long): Option[TimestampOffset] = {
replicaManager.getLog(topicPartition) match {
case Some(log) =>
log.fetchOffsetsByTimestamp(timestamp)
case None =>
throw new UnknownTopicOrPartitionException(s"$topicPartition does not exist on the broker.")
}
}
private[server] def fetchOffsetsBefore(log: Log, timestamp: Long, maxNumOffsets: Int): Seq[Long] = {
// Cache to avoid race conditions. `toBuffer` is faster than most alternatives and provides
// constant time access while being safe to use with concurrent collections unlike `toArray`.
val segments = log.logSegments.toBuffer
val lastSegmentHasSize = segments.last.size > 0
val offsetTimeArray =
if (lastSegmentHasSize)
new Array[(Long, Long)](segments.length + 1)
else
new Array[(Long, Long)](segments.length)
for (i <- segments.indices)
offsetTimeArray(i) = (math.max(segments(i).baseOffset, log.logStartOffset), segments(i).lastModified)
if (lastSegmentHasSize)
offsetTimeArray(segments.length) = (log.logEndOffset, time.milliseconds)
var startIndex = -1
timestamp match {
case ListOffsetRequest.LATEST_TIMESTAMP =>
startIndex = offsetTimeArray.length - 1
case ListOffsetRequest.EARLIEST_TIMESTAMP =>
startIndex = 0
case _ =>
var isFound = false
debug("Offset time array = " + offsetTimeArray.foreach(o => "%d, %d".format(o._1, o._2)))
startIndex = offsetTimeArray.length - 1
while (startIndex >= 0 && !isFound) {
if (offsetTimeArray(startIndex)._2 <= timestamp)
isFound = true
else
startIndex -= 1
}
}
val retSize = maxNumOffsets.min(startIndex + 1)
val ret = new Array[Long](retSize)
for (j <- 0 until retSize) {
ret(j) = offsetTimeArray(startIndex)._1
startIndex -= 1
}
// ensure that the returned seq is in descending order of offsets
ret.toSeq.sortBy(-_)
}
private def createTopic(topic: String,
numPartitions: Int,
replicationFactor: Int,
properties: Properties = new Properties()): MetadataResponse.TopicMetadata = {
try {
adminZkClient.createTopic(topic, numPartitions, replicationFactor, properties, RackAwareMode.Safe)
info("Auto creation of topic %s with %d partitions and replication factor %d is successful"
.format(topic, numPartitions, replicationFactor))
new MetadataResponse.TopicMetadata(Errors.LEADER_NOT_AVAILABLE, topic, isInternal(topic),
java.util.Collections.emptyList())
} catch {
case _: TopicExistsException => // let it go, possibly another broker created this topic
new MetadataResponse.TopicMetadata(Errors.LEADER_NOT_AVAILABLE, topic, isInternal(topic),
java.util.Collections.emptyList())
case ex: Throwable => // Catch all to prevent unhandled errors
new MetadataResponse.TopicMetadata(Errors.forException(ex), topic, isInternal(topic),
java.util.Collections.emptyList())
}
}
private def createInternalTopic(topic: String): MetadataResponse.TopicMetadata = {
if (topic == null)
throw new IllegalArgumentException("topic must not be null")
val aliveBrokers = metadataCache.getAliveBrokers
topic match {
case GROUP_METADATA_TOPIC_NAME =>
if (aliveBrokers.size < config.offsetsTopicReplicationFactor) {
error(s"Number of alive brokers '${aliveBrokers.size}' does not meet the required replication factor " +
s"'${config.offsetsTopicReplicationFactor}' for the offsets topic (configured via " +
s"'${KafkaConfig.OffsetsTopicReplicationFactorProp}'). This error can be ignored if the cluster is starting up " +
s"and not all brokers are up yet.")
new MetadataResponse.TopicMetadata(Errors.COORDINATOR_NOT_AVAILABLE, topic, true, java.util.Collections.emptyList())
} else {
createTopic(topic, config.offsetsTopicPartitions, config.offsetsTopicReplicationFactor.toInt,
groupCoordinator.offsetsTopicConfigs)
}
case TRANSACTION_STATE_TOPIC_NAME =>
if (aliveBrokers.size < config.transactionTopicReplicationFactor) {
error(s"Number of alive brokers '${aliveBrokers.size}' does not meet the required replication factor " +
s"'${config.transactionTopicReplicationFactor}' for the transactions state topic (configured via " +
s"'${KafkaConfig.TransactionsTopicReplicationFactorProp}'). This error can be ignored if the cluster is starting up " +
s"and not all brokers are up yet.")
new MetadataResponse.TopicMetadata(Errors.COORDINATOR_NOT_AVAILABLE, topic, true, java.util.Collections.emptyList())
} else {
createTopic(topic, config.transactionTopicPartitions, config.transactionTopicReplicationFactor.toInt,
txnCoordinator.transactionTopicConfigs)
}
case _ => throw new IllegalArgumentException(s"Unexpected internal topic name: $topic")
}
}
private def getOrCreateInternalTopic(topic: String, listenerName: ListenerName): MetadataResponse.TopicMetadata = {
val topicMetadata = metadataCache.getTopicMetadata(Set(topic), listenerName)
topicMetadata.headOption.getOrElse(createInternalTopic(topic))
}
  /**
   * Fetches metadata for `topics` from the cache; topics not present are either
   * auto-created (internal topics always; others when allowed and enabled) or reported
   * as UNKNOWN_TOPIC_OR_PARTITION.
   *
   * @param errorUnavailableEndpoints v0-compatibility flag: report an error instead of
   *        omitting brokers whose endpoint is unavailable
   */
  private def getTopicMetadata(allowAutoTopicCreation: Boolean, topics: Set[String], listenerName: ListenerName,
                               errorUnavailableEndpoints: Boolean): Seq[MetadataResponse.TopicMetadata] = {
    val topicResponses = metadataCache.getTopicMetadata(topics, listenerName, errorUnavailableEndpoints)
    if (topics.isEmpty || topicResponses.size == topics.size) {
      topicResponses
    } else {
      val nonExistentTopics = topics -- topicResponses.map(_.topic).toSet
      val responsesForNonExistentTopics = nonExistentTopics.map { topic =>
        if (isInternal(topic)) {
          val topicMetadata = createInternalTopic(topic)
          // Surface under-replication of internal topics as INVALID_REPLICATION_FACTOR
          // rather than the coordinator-oriented error used internally.
          if (topicMetadata.error == Errors.COORDINATOR_NOT_AVAILABLE)
            new MetadataResponse.TopicMetadata(Errors.INVALID_REPLICATION_FACTOR, topic, true, java.util.Collections.emptyList())
          else
            topicMetadata
        } else if (allowAutoTopicCreation && config.autoCreateTopicsEnable) {
          createTopic(topic, config.numPartitions, config.defaultReplicationFactor)
        } else {
          new MetadataResponse.TopicMetadata(Errors.UNKNOWN_TOPIC_OR_PARTITION, topic, false, java.util.Collections.emptyList())
        }
      }
      topicResponses ++ responsesForNonExistentTopics
    }
  }
  /**
   * Handle a topic metadata request.
   *
   * Resolves the requested topic set (version-dependent "all topics" semantics),
   * partitions it by Describe authorization, optionally revokes auto-creatable topics
   * the session may not Create, and returns metadata plus broker/controller info.
   */
  def handleTopicMetadataRequest(request: RequestChannel.Request) {
    val metadataRequest = request.body[MetadataRequest]
    val requestVersion = request.header.apiVersion
    val topics =
      // Handle old metadata request logic. Version 0 has no way to specify "no topics".
      if (requestVersion == 0) {
        if (metadataRequest.topics() == null || metadataRequest.topics.isEmpty)
          metadataCache.getAllTopics()
        else
          metadataRequest.topics.asScala.toSet
      } else {
        if (metadataRequest.isAllTopics)
          metadataCache.getAllTopics()
        else
          metadataRequest.topics.asScala.toSet
      }
    // `var`s: non-existing topics may be moved from `authorizedTopics` to the
    // create-unauthorized set below, before metadata is fetched.
    var (authorizedTopics, unauthorizedForDescribeTopics) =
      topics.partition(topic => authorize(request.session, Describe, new Resource(Topic, topic)))
    var unauthorizedForCreateTopics = Set[String]()
    if (authorizedTopics.nonEmpty) {
      val nonExistingTopics = metadataCache.getNonExistingTopics(authorizedTopics)
      if (metadataRequest.allowAutoTopicCreation && config.autoCreateTopicsEnable && nonExistingTopics.nonEmpty) {
        // Auto-creating a topic requires cluster-level Create permission.
        if (!authorize(request.session, Create, Resource.ClusterResource)) {
          authorizedTopics --= nonExistingTopics
          unauthorizedForCreateTopics ++= nonExistingTopics
        }
      }
    }
    val unauthorizedForCreateTopicMetadata = unauthorizedForCreateTopics.map(topic =>
      new MetadataResponse.TopicMetadata(Errors.TOPIC_AUTHORIZATION_FAILED, topic, isInternal(topic),
        java.util.Collections.emptyList()))
    // do not disclose the existence of topics unauthorized for Describe, so we've not even checked if they exist or not
    val unauthorizedForDescribeTopicMetadata =
      // In case of all topics, don't include topics unauthorized for Describe
      if ((requestVersion == 0 && (metadataRequest.topics == null || metadataRequest.topics.isEmpty)) || metadataRequest.isAllTopics)
        Set.empty[MetadataResponse.TopicMetadata]
      else
        unauthorizedForDescribeTopics.map(topic =>
          new MetadataResponse.TopicMetadata(Errors.TOPIC_AUTHORIZATION_FAILED, topic, false, java.util.Collections.emptyList()))
    // In version 0, we returned an error when brokers with replicas were unavailable,
    // while in higher versions we simply don't include the broker in the returned broker list
    val errorUnavailableEndpoints = requestVersion == 0
    val topicMetadata =
      if (authorizedTopics.isEmpty)
        Seq.empty[MetadataResponse.TopicMetadata]
      else
        getTopicMetadata(metadataRequest.allowAutoTopicCreation, authorizedTopics, request.context.listenerName,
          errorUnavailableEndpoints)
    val completeTopicMetadata = topicMetadata ++ unauthorizedForCreateTopicMetadata ++ unauthorizedForDescribeTopicMetadata
    val brokers = metadataCache.getAliveBrokers
    trace("Sending topic metadata %s and brokers %s for correlation id %d to client %s".format(completeTopicMetadata.mkString(","),
      brokers.mkString(","), request.header.correlationId, request.header.clientId))
    sendResponseMaybeThrottle(request, requestThrottleMs =>
       new MetadataResponse(
        requestThrottleMs,
        brokers.flatMap(_.getNode(request.context.listenerName)).asJava,
        clusterId,
        metadataCache.getControllerId.getOrElse(MetadataResponse.NO_CONTROLLER_ID),
        completeTopicMetadata.asJava
      ))
  }
  /**
   * Handle an offset fetch request.
   *
   * Version 0 reads committed offsets from ZooKeeper; versions 1+ read them from the
   * group coordinator (Kafka-backed). Group-level Describe authorization gates the whole
   * request; per-topic Describe authorization gates individual partitions.
   */
  def handleOffsetFetchRequest(request: RequestChannel.Request) {
    val header = request.header
    val offsetFetchRequest = request.body[OffsetFetchRequest]
    def authorizeTopicDescribe(partition: TopicPartition) =
      authorize(request.session, Describe, new Resource(Topic, partition.topic))
    def createResponse(requestThrottleMs: Int): AbstractResponse = {
      val offsetFetchResponse =
        // reject the request if not authorized to the group
        if (!authorize(request.session, Describe, new Resource(Group, offsetFetchRequest.groupId)))
          offsetFetchRequest.getErrorResponse(requestThrottleMs, Errors.GROUP_AUTHORIZATION_FAILED)
        else {
          if (header.apiVersion == 0) {
            val (authorizedPartitions, unauthorizedPartitions) = offsetFetchRequest.partitions.asScala
              .partition(authorizeTopicDescribe)
            // version 0 reads offsets from ZK
            val authorizedPartitionData = authorizedPartitions.map { topicPartition =>
              try {
                if (!metadataCache.contains(topicPartition))
                  (topicPartition, OffsetFetchResponse.UNKNOWN_PARTITION)
                else {
                  val payloadOpt = zkClient.getConsumerOffset(offsetFetchRequest.groupId, topicPartition)
                  payloadOpt match {
                    case Some(payload) =>
                      (topicPartition, new OffsetFetchResponse.PartitionData(
                        payload.toLong, OffsetFetchResponse.NO_METADATA, Errors.NONE))
                    case None =>
                      (topicPartition, OffsetFetchResponse.UNKNOWN_PARTITION)
                  }
                }
              } catch {
                case e: Throwable =>
                  (topicPartition, new OffsetFetchResponse.PartitionData(
                    OffsetFetchResponse.INVALID_OFFSET, OffsetFetchResponse.NO_METADATA, Errors.forException(e)))
              }
            }.toMap
            val unauthorizedPartitionData = unauthorizedPartitions.map(_ -> OffsetFetchResponse.UNAUTHORIZED_PARTITION).toMap
            new OffsetFetchResponse(requestThrottleMs, Errors.NONE, (authorizedPartitionData ++ unauthorizedPartitionData).asJava)
          } else {
            // versions 1 and above read offsets from Kafka
            if (offsetFetchRequest.isAllPartitions) {
              val (error, allPartitionData) = groupCoordinator.handleFetchOffsets(offsetFetchRequest.groupId)
              if (error != Errors.NONE)
                offsetFetchRequest.getErrorResponse(requestThrottleMs, error)
              else {
                // clients are not allowed to see offsets for topics that are not authorized for Describe
                val authorizedPartitionData = allPartitionData.filter { case (topicPartition, _) => authorizeTopicDescribe(topicPartition) }
                new OffsetFetchResponse(requestThrottleMs, Errors.NONE, authorizedPartitionData.asJava)
              }
            } else {
              val (authorizedPartitions, unauthorizedPartitions) = offsetFetchRequest.partitions.asScala
                .partition(authorizeTopicDescribe)
              val (error, authorizedPartitionData) = groupCoordinator.handleFetchOffsets(offsetFetchRequest.groupId,
                Some(authorizedPartitions))
              if (error != Errors.NONE)
                offsetFetchRequest.getErrorResponse(requestThrottleMs, error)
              else {
                val unauthorizedPartitionData = unauthorizedPartitions.map(_ -> OffsetFetchResponse.UNAUTHORIZED_PARTITION).toMap
                new OffsetFetchResponse(requestThrottleMs, Errors.NONE, (authorizedPartitionData ++ unauthorizedPartitionData).asJava)
              }
            }
          }
        }
      trace(s"Sending offset fetch response $offsetFetchResponse for correlation id ${header.correlationId} to client ${header.clientId}.")
      offsetFetchResponse
    }
    sendResponseMaybeThrottle(request, createResponse)
  }
  /**
   * Handles FindCoordinator: locates the broker hosting the group or transaction
   * coordinator for the given key, creating the backing internal topic if necessary.
   * Authorization is Describe on the group or on the transactional id, respectively.
   */
  def handleFindCoordinatorRequest(request: RequestChannel.Request) {
    val findCoordinatorRequest = request.body[FindCoordinatorRequest]
    if (findCoordinatorRequest.coordinatorType == FindCoordinatorRequest.CoordinatorType.GROUP &&
        !authorize(request.session, Describe, new Resource(Group, findCoordinatorRequest.coordinatorKey)))
      sendErrorResponseMaybeThrottle(request, Errors.GROUP_AUTHORIZATION_FAILED.exception)
    else if (findCoordinatorRequest.coordinatorType == FindCoordinatorRequest.CoordinatorType.TRANSACTION &&
        !authorize(request.session, Describe, new Resource(TransactionalId, findCoordinatorRequest.coordinatorKey)))
      sendErrorResponseMaybeThrottle(request, Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED.exception)
    else {
      // get metadata (and create the topic if necessary)
      val (partition, topicMetadata) = findCoordinatorRequest.coordinatorType match {
        case FindCoordinatorRequest.CoordinatorType.GROUP =>
          val partition = groupCoordinator.partitionFor(findCoordinatorRequest.coordinatorKey)
          val metadata = getOrCreateInternalTopic(GROUP_METADATA_TOPIC_NAME, request.context.listenerName)
          (partition, metadata)
        case FindCoordinatorRequest.CoordinatorType.TRANSACTION =>
          val partition = txnCoordinator.partitionFor(findCoordinatorRequest.coordinatorKey)
          val metadata = getOrCreateInternalTopic(TRANSACTION_STATE_TOPIC_NAME, request.context.listenerName)
          (partition, metadata)
        case _ =>
          throw new InvalidRequestException("Unknown coordinator type in FindCoordinator request")
      }
      def createResponse(requestThrottleMs: Int): AbstractResponse = {
        val responseBody = if (topicMetadata.error != Errors.NONE) {
          new FindCoordinatorResponse(requestThrottleMs, Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode)
        } else {
          // The coordinator is the leader of the internal topic partition the key hashes to.
          val coordinatorEndpoint = topicMetadata.partitionMetadata.asScala
            .find(_.partition == partition)
            .map(_.leader)
            .flatMap(p => Option(p))
          coordinatorEndpoint match {
            case Some(endpoint) if !endpoint.isEmpty =>
              new FindCoordinatorResponse(requestThrottleMs, Errors.NONE, endpoint)
            case _ =>
              // Leader unknown or unavailable: the client should retry later.
              new FindCoordinatorResponse(requestThrottleMs, Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode)
          }
        }
        trace("Sending FindCoordinator response %s for correlation id %d to client %s."
          .format(responseBody, request.header.correlationId, request.header.clientId))
        responseBody
      }
      sendResponseMaybeThrottle(request, createResponse)
    }
  }
def handleDescribeGroupRequest(request: RequestChannel.Request) {
val describeRequest = request.body[DescribeGroupsRequest]
val groups = describeRequest.groupIds.asScala.map { groupId =>
if (!authorize(request.session, Describe, new Resource(Group, groupId))) {
groupId -> DescribeGroupsResponse.GroupMetadata.forError(Errors.GROUP_AUTHORIZATION_FAILED)
} else {
val (error, summary) = groupCoordinator.handleDescribeGroup(groupId)
val members = summary.members.map { member =>
val metadata = ByteBuffer.wrap(member.metadata)
val assignment = ByteBuffer.wrap(member.assignment)
new DescribeGroupsResponse.GroupMember(member.memberId, member.clientId, member.clientHost, metadata, assignment)
}
groupId -> new DescribeGroupsResponse.GroupMetadata(error, summary.state, summary.protocolType,
summary.protocol, members.asJava)
}
}.toMap
sendResponseMaybeThrottle(request, requestThrottleMs => new DescribeGroupsResponse(requestThrottleMs, groups.asJava))
}
def handleListGroupsRequest(request: RequestChannel.Request) {
if (!authorize(request.session, Describe, Resource.ClusterResource)) {
sendResponseMaybeThrottle(request, requestThrottleMs =>
request.body[ListGroupsRequest].getErrorResponse(requestThrottleMs, Errors.CLUSTER_AUTHORIZATION_FAILED.exception))
} else {
val (error, groups) = groupCoordinator.handleListGroups()
val allGroups = groups.map { group => new ListGroupsResponse.Group(group.groupId, group.protocolType) }
sendResponseMaybeThrottle(request, requestThrottleMs =>
new ListGroupsResponse(requestThrottleMs, error, allGroups.asJava))
}
}
  /**
   * Handles JoinGroup: after a group Read authorization check, delegates the join to the
   * group coordinator, which completes asynchronously via `sendResponseCallback`.
   */
  def handleJoinGroupRequest(request: RequestChannel.Request) {
    val joinGroupRequest = request.body[JoinGroupRequest]
    // the callback for sending a join-group response
    def sendResponseCallback(joinResult: JoinGroupResult) {
      val members = joinResult.members map { case (memberId, metadataArray) => (memberId, ByteBuffer.wrap(metadataArray)) }
      def createResponse(requestThrottleMs: Int): AbstractResponse = {
        val responseBody = new JoinGroupResponse(requestThrottleMs, joinResult.error, joinResult.generationId,
          joinResult.subProtocol, joinResult.memberId, joinResult.leaderId, members.asJava)
        trace("Sending join group response %s for correlation id %d to client %s."
          .format(responseBody, request.header.correlationId, request.header.clientId))
        responseBody
      }
      sendResponseMaybeThrottle(request, createResponse)
    }
    if (!authorize(request.session, Read, new Resource(Group, joinGroupRequest.groupId()))) {
      // Unauthorized: answer directly with placeholder generation/protocol/member ids.
      sendResponseMaybeThrottle(request, requestThrottleMs =>
        new JoinGroupResponse(
          requestThrottleMs,
          Errors.GROUP_AUTHORIZATION_FAILED,
          JoinGroupResponse.UNKNOWN_GENERATION_ID,
          JoinGroupResponse.UNKNOWN_PROTOCOL,
          JoinGroupResponse.UNKNOWN_MEMBER_ID, // memberId
          JoinGroupResponse.UNKNOWN_MEMBER_ID, // leaderId
          Collections.emptyMap())
      )
    } else {
      // let the coordinator to handle join-group
      val protocols = joinGroupRequest.groupProtocols().asScala.map(protocol =>
        (protocol.name, Utils.toArray(protocol.metadata))).toList
      groupCoordinator.handleJoinGroup(
        joinGroupRequest.groupId,
        joinGroupRequest.memberId,
        request.header.clientId,
        request.session.clientAddress.toString,
        joinGroupRequest.rebalanceTimeout,
        joinGroupRequest.sessionTimeout,
        joinGroupRequest.protocolType,
        protocols,
        sendResponseCallback)
    }
  }
def handleSyncGroupRequest(request: RequestChannel.Request) {
val syncGroupRequest = request.body[SyncGroupRequest]
def sendResponseCallback(memberState: Array[Byte], error: Errors) {
sendResponseMaybeThrottle(request, requestThrottleMs =>
new SyncGroupResponse(requestThrottleMs, error, ByteBuffer.wrap(memberState)))
}
if (!authorize(request.session, Read, new Resource(Group, syncGroupRequest.groupId()))) {
sendResponseCallback(Array[Byte](), Errors.GROUP_AUTHORIZATION_FAILED)
} else {
groupCoordinator.handleSyncGroup(
syncGroupRequest.groupId,
syncGroupRequest.generationId,
syncGroupRequest.memberId,
syncGroupRequest.groupAssignment().asScala.mapValues(Utils.toArray),
sendResponseCallback
)
}
}
def handleDeleteGroupsRequest(request: RequestChannel.Request): Unit = {
val deleteGroupsRequest = request.body[DeleteGroupsRequest]
var groups = deleteGroupsRequest.groups.asScala.toSet
val (authorizedGroups, unauthorizedGroups) = groups.partition { group =>
authorize(request.session, Delete, new Resource(Group, group))
}
val groupDeletionResult = groupCoordinator.handleDeleteGroups(authorizedGroups) ++
unauthorizedGroups.map(_ -> Errors.GROUP_AUTHORIZATION_FAILED)
sendResponseMaybeThrottle(request, requestThrottleMs =>
new DeleteGroupsResponse(requestThrottleMs, groupDeletionResult.asJava))
}
def handleHeartbeatRequest(request: RequestChannel.Request) {
val heartbeatRequest = request.body[HeartbeatRequest]
// the callback for sending a heartbeat response
def sendResponseCallback(error: Errors) {
def createResponse(requestThrottleMs: Int): AbstractResponse = {
val response = new HeartbeatResponse(requestThrottleMs, error)
trace("Sending heartbeat response %s for correlation id %d to client %s."
.format(response, request.header.correlationId, request.header.clientId))
response
}
sendResponseMaybeThrottle(request, createResponse)
}
if (!authorize(request.session, Read, new Resource(Group, heartbeatRequest.groupId))) {
sendResponseMaybeThrottle(request, requestThrottleMs =>
new HeartbeatResponse(requestThrottleMs, Errors.GROUP_AUTHORIZATION_FAILED))
} else {
// let the coordinator to handle heartbeat
groupCoordinator.handleHeartbeat(
heartbeatRequest.groupId,
heartbeatRequest.memberId,
heartbeatRequest.groupGenerationId,
sendResponseCallback)
}
}
def handleLeaveGroupRequest(request: RequestChannel.Request) {
val leaveGroupRequest = request.body[LeaveGroupRequest]
// the callback for sending a leave-group response
def sendResponseCallback(error: Errors) {
def createResponse(requestThrottleMs: Int): AbstractResponse = {
val response = new LeaveGroupResponse(requestThrottleMs, error)
trace("Sending leave group response %s for correlation id %d to client %s."
.format(response, request.header.correlationId, request.header.clientId))
response
}
sendResponseMaybeThrottle(request, createResponse)
}
if (!authorize(request.session, Read, new Resource(Group, leaveGroupRequest.groupId))) {
sendResponseMaybeThrottle(request, requestThrottleMs =>
new LeaveGroupResponse(requestThrottleMs, Errors.GROUP_AUTHORIZATION_FAILED))
} else {
// let the coordinator to handle leave-group
groupCoordinator.handleLeaveGroup(
leaveGroupRequest.groupId,
leaveGroupRequest.memberId,
sendResponseCallback)
}
}
def handleSaslHandshakeRequest(request: RequestChannel.Request) {
sendResponseMaybeThrottle(request, _ => new SaslHandshakeResponse(Errors.ILLEGAL_SASL_STATE, Collections.emptySet()))
}
def handleSaslAuthenticateRequest(request: RequestChannel.Request) {
sendResponseMaybeThrottle(request, _ => new SaslAuthenticateResponse(Errors.ILLEGAL_SASL_STATE,
"SaslAuthenticate request received after successful authentication"))
}
def handleApiVersionsRequest(request: RequestChannel.Request) {
// Note that broker returns its full list of supported ApiKeys and versions regardless of current
// authentication state (e.g., before SASL authentication on an SASL listener, do note that no
// Kafka protocol requests may take place on a SSL listener before the SSL handshake is finished).
// If this is considered to leak information about the broker version a workaround is to use SSL
// with client authentication which is performed at an earlier stage of the connection where the
// ApiVersionRequest is not available.
def createResponseCallback(requestThrottleMs: Int): ApiVersionsResponse = {
val apiVersionRequest = request.body[ApiVersionsRequest]
if (apiVersionRequest.hasUnsupportedRequestVersion)
apiVersionRequest.getErrorResponse(requestThrottleMs, Errors.UNSUPPORTED_VERSION.exception)
else
ApiVersionsResponse.apiVersionsResponse(requestThrottleMs,
config.interBrokerProtocolVersion.messageFormatVersion.value)
}
sendResponseMaybeThrottle(request, createResponseCallback)
}
  /**
   * Handles CreateTopics. Only the controller may create topics (NOT_CONTROLLER
   * otherwise); creation requires cluster-level Create permission. Topics duplicated
   * within one request are answered with INVALID_REQUEST instead of being created.
   */
  def handleCreateTopicsRequest(request: RequestChannel.Request) {
    val createTopicsRequest = request.body[CreateTopicsRequest]
    def sendResponseCallback(results: Map[String, ApiError]): Unit = {
      def createResponse(requestThrottleMs: Int): AbstractResponse = {
        val responseBody = new CreateTopicsResponse(requestThrottleMs, results.asJava)
        trace(s"Sending create topics response $responseBody for correlation id ${request.header.correlationId} to client ${request.header.clientId}.")
        responseBody
      }
      sendResponseMaybeThrottle(request, createResponse)
    }
    if (!controller.isActive) {
      val results = createTopicsRequest.topics.asScala.map { case (topic, _) =>
        (topic, new ApiError(Errors.NOT_CONTROLLER, null))
      }
      sendResponseCallback(results)
    } else if (!authorize(request.session, Create, Resource.ClusterResource)) {
      val results = createTopicsRequest.topics.asScala.map { case (topic, _) =>
        (topic, new ApiError(Errors.CLUSTER_AUTHORIZATION_FAILED, null))
      }
      sendResponseCallback(results)
    } else {
      val (validTopics, duplicateTopics) = createTopicsRequest.topics.asScala.partition { case (topic, _) =>
        !createTopicsRequest.duplicateTopics.contains(topic)
      }
      // Special handling to add duplicate topics to the response
      def sendResponseWithDuplicatesCallback(results: Map[String, ApiError]): Unit = {
        val duplicatedTopicsResults =
          if (duplicateTopics.nonEmpty) {
            val errorMessage = s"Create topics request from client `${request.header.clientId}` contains multiple entries " +
              s"for the following topics: ${duplicateTopics.keySet.mkString(",")}"
            // We can send the error message in the response for version 1, so we don't have to log it any more
            if (request.header.apiVersion == 0)
              warn(errorMessage)
            duplicateTopics.keySet.map((_, new ApiError(Errors.INVALID_REQUEST, errorMessage))).toMap
          } else Map.empty
        val completeResults = results ++ duplicatedTopicsResults
        sendResponseCallback(completeResults)
      }
      // Asynchronous: the admin manager invokes the callback when creation completes or times out.
      adminManager.createTopics(
        createTopicsRequest.timeout,
        createTopicsRequest.validateOnly,
        validTopics,
        sendResponseWithDuplicatesCallback
      )
    }
  }
  /**
   * Handles CreatePartitions. Only the controller may alter partition counts.
   * Duplicated topics, topics the session may not Alter, and topics queued for deletion
   * are answered with errors; the remainder is forwarded to the admin manager.
   */
  def handleCreatePartitionsRequest(request: RequestChannel.Request): Unit = {
    val createPartitionsRequest = request.body[CreatePartitionsRequest]
    def sendResponseCallback(results: Map[String, ApiError]): Unit = {
      def createResponse(requestThrottleMs: Int): AbstractResponse = {
        val responseBody = new CreatePartitionsResponse(requestThrottleMs, results.asJava)
        trace(s"Sending create partitions response $responseBody for correlation id ${request.header.correlationId} to " +
          s"client ${request.header.clientId}.")
        responseBody
      }
      sendResponseMaybeThrottle(request, createResponse)
    }
    if (!controller.isActive) {
      val result = createPartitionsRequest.newPartitions.asScala.map { case (topic, _) =>
        (topic, new ApiError(Errors.NOT_CONTROLLER, null))
      }
      sendResponseCallback(result)
    } else {
      // Special handling to add duplicate topics to the response
      val dupes = createPartitionsRequest.duplicates.asScala
      val notDuped = createPartitionsRequest.newPartitions.asScala -- dupes
      val (authorized, unauthorized) = notDuped.partition { case (topic, _) =>
        authorize(request.session, Alter, new Resource(Topic, topic))
      }
      val (queuedForDeletion, valid) = authorized.partition { case (topic, _) =>
        controller.topicDeletionManager.isTopicQueuedUpForDeletion(topic)
      }
      // Pre-computed errors are merged into the admin manager's results in the callback.
      val errors = dupes.map(_ -> new ApiError(Errors.INVALID_REQUEST, "Duplicate topic in request.")) ++
        unauthorized.keySet.map(_ -> new ApiError(Errors.TOPIC_AUTHORIZATION_FAILED, "The topic authorization is failed.")) ++
        queuedForDeletion.keySet.map(_ -> new ApiError(Errors.INVALID_TOPIC_EXCEPTION, "The topic is queued for deletion."))
      adminManager.createPartitions(createPartitionsRequest.timeout, valid, createPartitionsRequest.validateOnly,
        request.context.listenerName, result => sendResponseCallback(result ++ errors))
    }
  }
  /**
   * Handles DeleteTopics. Only the controller may delete topics. Requested topics are
   * split three ways up front: unauthorized for Delete, unknown to the metadata cache,
   * and deletable; the admin manager handles the deletable set asynchronously.
   */
  def handleDeleteTopicsRequest(request: RequestChannel.Request) {
    val deleteTopicRequest = request.body[DeleteTopicsRequest]
    val unauthorizedTopicErrors = mutable.Map[String, Errors]()
    val nonExistingTopicErrors = mutable.Map[String, Errors]()
    val authorizedForDeleteTopics = mutable.Set[String]()
    for (topic <- deleteTopicRequest.topics.asScala) {
      if (!authorize(request.session, Delete, new Resource(Topic, topic)))
        unauthorizedTopicErrors += topic -> Errors.TOPIC_AUTHORIZATION_FAILED
      else if (!metadataCache.contains(topic))
        nonExistingTopicErrors += topic -> Errors.UNKNOWN_TOPIC_OR_PARTITION
      else
        authorizedForDeleteTopics.add(topic)
    }
    // Merges the admin manager's per-topic results with the pre-computed errors above.
    def sendResponseCallback(authorizedTopicErrors: Map[String, Errors]): Unit = {
      def createResponse(requestThrottleMs: Int): AbstractResponse = {
        val completeResults = unauthorizedTopicErrors ++ nonExistingTopicErrors ++ authorizedTopicErrors
        val responseBody = new DeleteTopicsResponse(requestThrottleMs, completeResults.asJava)
        trace(s"Sending delete topics response $responseBody for correlation id ${request.header.correlationId} to client ${request.header.clientId}.")
        responseBody
      }
      sendResponseMaybeThrottle(request, createResponse)
    }
    if (!controller.isActive) {
      val results = deleteTopicRequest.topics.asScala.map { topic =>
        (topic, Errors.NOT_CONTROLLER)
      }.toMap
      sendResponseCallback(results)
    } else {
      // If no authorized topics return immediately
      if (authorizedForDeleteTopics.isEmpty)
        sendResponseCallback(Map())
      else {
        adminManager.deleteTopics(
          deleteTopicRequest.timeout.toInt,
          authorizedForDeleteTopics,
          sendResponseCallback
        )
      }
    }
  }
  /**
   * Handles a DeleteRecordsRequest: advances the log start offset of each authorized,
   * existing partition, discarding records below the requested cutoff offset.
   */
  def handleDeleteRecordsRequest(request: RequestChannel.Request) {
    val deleteRecordsRequest = request.body[DeleteRecordsRequest]

    // Partitions are split three ways: not authorized for Delete, unknown to this broker's
    // metadata cache, and deletable (partition -> requested cutoff offset).
    val unauthorizedTopicResponses = mutable.Map[TopicPartition, DeleteRecordsResponse.PartitionResponse]()
    val nonExistingTopicResponses = mutable.Map[TopicPartition, DeleteRecordsResponse.PartitionResponse]()
    val authorizedForDeleteTopicOffsets = mutable.Map[TopicPartition, Long]()
    for ((topicPartition, offset) <- deleteRecordsRequest.partitionOffsets.asScala) {
      if (!authorize(request.session, Delete, new Resource(Topic, topicPartition.topic)))
        unauthorizedTopicResponses += topicPartition -> new DeleteRecordsResponse.PartitionResponse(
          DeleteRecordsResponse.INVALID_LOW_WATERMARK, Errors.TOPIC_AUTHORIZATION_FAILED)
      else if (!metadataCache.contains(topicPartition))
        nonExistingTopicResponses += topicPartition -> new DeleteRecordsResponse.PartitionResponse(
          DeleteRecordsResponse.INVALID_LOW_WATERMARK, Errors.UNKNOWN_TOPIC_OR_PARTITION)
      else
        authorizedForDeleteTopicOffsets += (topicPartition -> offset)
    }

    // the callback for sending a DeleteRecordsResponse
    def sendResponseCallback(authorizedTopicResponses: Map[TopicPartition, DeleteRecordsResponse.PartitionResponse]) {
      // Merge the replica manager's per-partition results with the pre-computed failures.
      val mergedResponseStatus = authorizedTopicResponses ++ unauthorizedTopicResponses ++ nonExistingTopicResponses
      mergedResponseStatus.foreach { case (topicPartition, status) =>
        if (status.error != Errors.NONE) {
          debug("DeleteRecordsRequest with correlation id %d from client %s on partition %s failed due to %s".format(
            request.header.correlationId,
            request.header.clientId,
            topicPartition,
            status.error.exceptionName))
        }
      }
      sendResponseMaybeThrottle(request, requestThrottleMs =>
        new DeleteRecordsResponse(requestThrottleMs, mergedResponseStatus.asJava))
    }

    if (authorizedForDeleteTopicOffsets.isEmpty)
      sendResponseCallback(Map.empty)
    else {
      // call the replica manager to append messages to the replicas
      replicaManager.deleteRecords(
        deleteRecordsRequest.timeout.toLong,
        authorizedForDeleteTopicOffsets,
        sendResponseCallback)
    }
  }
def handleInitProducerIdRequest(request: RequestChannel.Request): Unit = {
val initProducerIdRequest = request.body[InitProducerIdRequest]
val transactionalId = initProducerIdRequest.transactionalId
if (transactionalId != null) {
if (!authorize(request.session, Write, new Resource(TransactionalId, transactionalId))) {
sendErrorResponseMaybeThrottle(request, Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED.exception)
return
}
} else if (!authorize(request.session, IdempotentWrite, Resource.ClusterResource)) {
sendErrorResponseMaybeThrottle(request, Errors.CLUSTER_AUTHORIZATION_FAILED.exception)
return
}
def sendResponseCallback(result: InitProducerIdResult): Unit = {
def createResponse(requestThrottleMs: Int): AbstractResponse = {
val responseBody = new InitProducerIdResponse(requestThrottleMs, result.error, result.producerId, result.producerEpoch)
trace(s"Completed $transactionalId's InitProducerIdRequest with result $result from client ${request.header.clientId}.")
responseBody
}
sendResponseMaybeThrottle(request, createResponse)
}
txnCoordinator.handleInitProducerId(transactionalId, initProducerIdRequest.transactionTimeoutMs, sendResponseCallback)
}
def handleEndTxnRequest(request: RequestChannel.Request): Unit = {
ensureInterBrokerVersion(KAFKA_0_11_0_IV0)
val endTxnRequest = request.body[EndTxnRequest]
val transactionalId = endTxnRequest.transactionalId
if (authorize(request.session, Write, new Resource(TransactionalId, transactionalId))) {
def sendResponseCallback(error: Errors) {
def createResponse(requestThrottleMs: Int): AbstractResponse = {
val responseBody = new EndTxnResponse(requestThrottleMs, error)
trace(s"Completed ${endTxnRequest.transactionalId}'s EndTxnRequest with command: ${endTxnRequest.command}, errors: $error from client ${request.header.clientId}.")
responseBody
}
sendResponseMaybeThrottle(request, createResponse)
}
txnCoordinator.handleEndTransaction(endTxnRequest.transactionalId,
endTxnRequest.producerId,
endTxnRequest.producerEpoch,
endTxnRequest.command,
sendResponseCallback)
} else
sendResponseMaybeThrottle(request, requestThrottleMs =>
new EndTxnResponse(requestThrottleMs, Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED))
}
  /**
   * Handles a WriteTxnMarkersRequest (inter-broker, sent by the transaction coordinator):
   * appends a COMMIT or ABORT control marker to every data partition touched by each
   * transaction. The response is assembled asynchronously once all appends complete.
   */
  def handleWriteTxnMarkersRequest(request: RequestChannel.Request): Unit = {
    ensureInterBrokerVersion(KAFKA_0_11_0_IV0)
    authorizeClusterAction(request)
    val writeTxnMarkersRequest = request.body[WriteTxnMarkersRequest]
    // Per-producer error maps; filled in concurrently by the append completion callbacks.
    val errors = new ConcurrentHashMap[java.lang.Long, util.Map[TopicPartition, Errors]]()
    val markers = writeTxnMarkersRequest.markers
    // Countdown of outstanding marker appends; the response is sent when this hits zero.
    val numAppends = new AtomicInteger(markers.size)

    if (numAppends.get == 0) {
      sendResponseExemptThrottle(request, new WriteTxnMarkersResponse(errors))
      return
    }

    // Merge currentErrors into the producer's error map (first caller installs the map).
    def updateErrors(producerId: Long, currentErrors: ConcurrentHashMap[TopicPartition, Errors]): Unit = {
      val previousErrors = errors.putIfAbsent(producerId, currentErrors)
      if (previousErrors != null)
        previousErrors.putAll(currentErrors)
    }

    /**
     * This is the call back invoked when a log append of transaction markers succeeds. This can be called multiple
     * times when handling a single WriteTxnMarkersRequest because there is one append per TransactionMarker in the
     * request, so there could be multiple appends of markers to the log. The final response will be sent only
     * after all appends have returned.
     */
    def maybeSendResponseCallback(producerId: Long, result: TransactionResult)(responseStatus: Map[TopicPartition, PartitionResponse]): Unit = {
      trace(s"End transaction marker append for producer id $producerId completed with status: $responseStatus")
      val currentErrors = new ConcurrentHashMap[TopicPartition, Errors](responseStatus.mapValues(_.error).asJava)
      updateErrors(producerId, currentErrors)
      // Offsets-topic partitions whose marker landed successfully need follow-up work below.
      val successfulOffsetsPartitions = responseStatus.filter { case (topicPartition, partitionResponse) =>
        topicPartition.topic == GROUP_METADATA_TOPIC_NAME && partitionResponse.error == Errors.NONE
      }.keys
      if (successfulOffsetsPartitions.nonEmpty) {
        // as soon as the end transaction marker has been written for a transactional offset commit,
        // call to the group coordinator to materialize the offsets into the cache
        try {
          groupCoordinator.handleTxnCompletion(producerId, successfulOffsetsPartitions, result)
        } catch {
          case e: Exception =>
            error(s"Received an exception while trying to update the offsets cache on transaction marker append", e)
            val updatedErrors = new ConcurrentHashMap[TopicPartition, Errors]()
            successfulOffsetsPartitions.foreach(updatedErrors.put(_, Errors.UNKNOWN_SERVER_ERROR))
            updateErrors(producerId, updatedErrors)
        }
      }
      // Last completed append sends the accumulated response.
      if (numAppends.decrementAndGet() == 0)
        sendResponseExemptThrottle(request, new WriteTxnMarkersResponse(errors))
    }

    // TODO: The current append API makes doing separate writes per producerId a little easier, but it would
    // be nice to have only one append to the log. This requires pushing the building of the control records
    // into Log so that we only append those having a valid producer epoch, and exposing a new appendControlRecord
    // API in ReplicaManager. For now, we've done the simpler approach
    var skippedMarkers = 0
    for (marker <- markers.asScala) {
      val producerId = marker.producerId
      // Only message-format V2 partitions can carry control records; older formats are rejected.
      val partitionsWithCompatibleMessageFormat = new mutable.ArrayBuffer[TopicPartition]
      val currentErrors = new ConcurrentHashMap[TopicPartition, Errors]()
      marker.partitions.asScala.foreach { partition =>
        replicaManager.getMagic(partition) match {
          case Some(magic) =>
            if (magic < RecordBatch.MAGIC_VALUE_V2)
              currentErrors.put(partition, Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT)
            else
              partitionsWithCompatibleMessageFormat += partition
          case None =>
            currentErrors.put(partition, Errors.UNKNOWN_TOPIC_OR_PARTITION)
        }
      }
      if (!currentErrors.isEmpty)
        updateErrors(producerId, currentErrors)

      if (partitionsWithCompatibleMessageFormat.isEmpty) {
        // No append will happen for this marker, so account for it here instead of in the callback.
        numAppends.decrementAndGet()
        skippedMarkers += 1
      } else {
        val controlRecords = partitionsWithCompatibleMessageFormat.map { partition =>
          val controlRecordType = marker.transactionResult match {
            case TransactionResult.COMMIT => ControlRecordType.COMMIT
            case TransactionResult.ABORT => ControlRecordType.ABORT
          }
          val endTxnMarker = new EndTransactionMarker(controlRecordType, marker.coordinatorEpoch)
          partition -> MemoryRecords.withEndTransactionMarker(producerId, marker.producerEpoch, endTxnMarker)
        }.toMap
        replicaManager.appendRecords(
          timeout = config.requestTimeoutMs.toLong,
          requiredAcks = -1,
          internalTopicsAllowed = true,
          isFromClient = false,
          entriesPerPartition = controlRecords,
          responseCallback = maybeSendResponseCallback(producerId, marker.transactionResult))
      }
    }

    // No log appends were written as all partitions had incorrect log format
    // so we need to send the error response
    if (skippedMarkers == markers.size())
      sendResponseExemptThrottle(request, new WriteTxnMarkersResponse(errors))
  }
def ensureInterBrokerVersion(version: ApiVersion): Unit = {
if (config.interBrokerProtocolVersion < version)
throw new UnsupportedVersionException(s"inter.broker.protocol.version: ${config.interBrokerProtocolVersion.version} is less than the required version: ${version.version}")
}
  /**
   * Handles an AddPartitionsToTxnRequest: registers the given partitions with the ongoing
   * transaction so commit/abort markers are later written to them. Requires Write on the
   * transactional id and Write on every partition's topic; any failed partition check
   * fails the whole request.
   */
  def handleAddPartitionToTxnRequest(request: RequestChannel.Request): Unit = {
    ensureInterBrokerVersion(KAFKA_0_11_0_IV0)
    val addPartitionsToTxnRequest = request.body[AddPartitionsToTxnRequest]
    val transactionalId = addPartitionsToTxnRequest.transactionalId
    val partitionsToAdd = addPartitionsToTxnRequest.partitions.asScala
    if (!authorize(request.session, Write, new Resource(TransactionalId, transactionalId)))
      sendResponseMaybeThrottle(request, requestThrottleMs =>
        addPartitionsToTxnRequest.getErrorResponse(requestThrottleMs, Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED.exception))
    else {
      // Classify partitions: internal/unauthorized topics, unknown partitions, valid ones.
      val unauthorizedTopicErrors = mutable.Map[TopicPartition, Errors]()
      val nonExistingTopicErrors = mutable.Map[TopicPartition, Errors]()
      val authorizedPartitions = mutable.Set[TopicPartition]()
      for (topicPartition <- partitionsToAdd) {
        // Internal topics (e.g. the offsets topic) may never be written to directly.
        if (org.apache.kafka.common.internals.Topic.isInternal(topicPartition.topic) ||
            !authorize(request.session, Write, new Resource(Topic, topicPartition.topic)))
          unauthorizedTopicErrors += topicPartition -> Errors.TOPIC_AUTHORIZATION_FAILED
        else if (!metadataCache.contains(topicPartition))
          nonExistingTopicErrors += topicPartition -> Errors.UNKNOWN_TOPIC_OR_PARTITION
        else
          authorizedPartitions.add(topicPartition)
      }

      if (unauthorizedTopicErrors.nonEmpty || nonExistingTopicErrors.nonEmpty) {
        // Any failed partition check causes the entire request to fail. We send the appropriate error codes for the
        // partitions which failed, and an 'OPERATION_NOT_ATTEMPTED' error code for the partitions which succeeded
        // the authorization check to indicate that they were not added to the transaction.
        val partitionErrors = unauthorizedTopicErrors ++ nonExistingTopicErrors ++
          authorizedPartitions.map(_ -> Errors.OPERATION_NOT_ATTEMPTED)
        sendResponseMaybeThrottle(request, requestThrottleMs =>
          new AddPartitionsToTxnResponse(requestThrottleMs, partitionErrors.asJava))
      } else {
        // Completion callback: the coordinator's single error code applies to every partition.
        def sendResponseCallback(error: Errors): Unit = {
          def createResponse(requestThrottleMs: Int): AbstractResponse = {
            val responseBody: AddPartitionsToTxnResponse = new AddPartitionsToTxnResponse(requestThrottleMs,
              partitionsToAdd.map{tp => (tp, error)}.toMap.asJava)
            trace(s"Completed $transactionalId's AddPartitionsToTxnRequest with partitions $partitionsToAdd: errors: $error from client ${request.header.clientId}")
            responseBody
          }
          sendResponseMaybeThrottle(request, createResponse)
        }

        txnCoordinator.handleAddPartitionsToTransaction(transactionalId,
          addPartitionsToTxnRequest.producerId,
          addPartitionsToTxnRequest.producerEpoch,
          authorizedPartitions,
          sendResponseCallback)
      }
    }
  }
def handleAddOffsetsToTxnRequest(request: RequestChannel.Request): Unit = {
ensureInterBrokerVersion(KAFKA_0_11_0_IV0)
val addOffsetsToTxnRequest = request.body[AddOffsetsToTxnRequest]
val transactionalId = addOffsetsToTxnRequest.transactionalId
val groupId = addOffsetsToTxnRequest.consumerGroupId
val offsetTopicPartition = new TopicPartition(GROUP_METADATA_TOPIC_NAME, groupCoordinator.partitionFor(groupId))
if (!authorize(request.session, Write, new Resource(TransactionalId, transactionalId)))
sendResponseMaybeThrottle(request, requestThrottleMs =>
new AddOffsetsToTxnResponse(requestThrottleMs, Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED))
else if (!authorize(request.session, Read, new Resource(Group, groupId)))
sendResponseMaybeThrottle(request, requestThrottleMs =>
new AddOffsetsToTxnResponse(requestThrottleMs, Errors.GROUP_AUTHORIZATION_FAILED))
else {
def sendResponseCallback(error: Errors): Unit = {
def createResponse(requestThrottleMs: Int): AbstractResponse = {
val responseBody: AddOffsetsToTxnResponse = new AddOffsetsToTxnResponse(requestThrottleMs, error)
trace(s"Completed $transactionalId's AddOffsetsToTxnRequest for group $groupId on partition " +
s"$offsetTopicPartition: errors: $error from client ${request.header.clientId}")
responseBody
}
sendResponseMaybeThrottle(request, createResponse)
}
txnCoordinator.handleAddPartitionsToTransaction(transactionalId,
addOffsetsToTxnRequest.producerId,
addOffsetsToTxnRequest.producerEpoch,
Set(offsetTopicPartition),
sendResponseCallback)
}
}
  /**
   * Handles a TxnOffsetCommitRequest: stages transactional consumer-offset commits in the
   * group coordinator; they become visible only once the transaction's commit marker lands.
   */
  def handleTxnOffsetCommitRequest(request: RequestChannel.Request): Unit = {
    ensureInterBrokerVersion(KAFKA_0_11_0_IV0)
    val header = request.header
    val txnOffsetCommitRequest = request.body[TxnOffsetCommitRequest]
    // authorize for the transactionalId and the consumer group. Note that we skip producerId authorization
    // since it is implied by transactionalId authorization
    if (!authorize(request.session, Write, new Resource(TransactionalId, txnOffsetCommitRequest.transactionalId)))
      sendErrorResponseMaybeThrottle(request, Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED.exception)
    else if (!authorize(request.session, Read, new Resource(Group, txnOffsetCommitRequest.consumerGroupId)))
      sendErrorResponseMaybeThrottle(request, Errors.GROUP_AUTHORIZATION_FAILED.exception)
    else {
      // Classify partitions: Read-unauthorized, unknown, and committable ones.
      val unauthorizedTopicErrors = mutable.Map[TopicPartition, Errors]()
      val nonExistingTopicErrors = mutable.Map[TopicPartition, Errors]()
      val authorizedTopicCommittedOffsets = mutable.Map[TopicPartition, TxnOffsetCommitRequest.CommittedOffset]()
      for ((topicPartition, commitedOffset) <- txnOffsetCommitRequest.offsets.asScala) {
        if (!authorize(request.session, Read, new Resource(Topic, topicPartition.topic)))
          unauthorizedTopicErrors += topicPartition -> Errors.TOPIC_AUTHORIZATION_FAILED
        else if (!metadataCache.contains(topicPartition))
          nonExistingTopicErrors += topicPartition -> Errors.UNKNOWN_TOPIC_OR_PARTITION
        else
          authorizedTopicCommittedOffsets += (topicPartition -> commitedOffset)
      }

      // the callback for sending an offset commit response
      def sendResponseCallback(authorizedTopicErrors: Map[TopicPartition, Errors]) {
        // Merge the coordinator's results with the pre-computed failures.
        val combinedCommitStatus = authorizedTopicErrors ++ unauthorizedTopicErrors ++ nonExistingTopicErrors
        if (isDebugEnabled)
          combinedCommitStatus.foreach { case (topicPartition, error) =>
            if (error != Errors.NONE) {
              debug(s"TxnOffsetCommit with correlation id ${header.correlationId} from client ${header.clientId} " +
                s"on partition $topicPartition failed due to ${error.exceptionName}")
            }
          }
        sendResponseMaybeThrottle(request, requestThrottleMs =>
          new TxnOffsetCommitResponse(requestThrottleMs, combinedCommitStatus.asJava))
      }

      if (authorizedTopicCommittedOffsets.isEmpty)
        sendResponseCallback(Map.empty)
      else {
        // Convert wire-format offsets into coordinator metadata before handing off.
        val offsetMetadata = convertTxnOffsets(authorizedTopicCommittedOffsets.toMap)
        groupCoordinator.handleTxnCommitOffsets(
          txnOffsetCommitRequest.consumerGroupId,
          txnOffsetCommitRequest.producerId,
          txnOffsetCommitRequest.producerEpoch,
          offsetMetadata,
          sendResponseCallback)
      }
    }
  }
private def convertTxnOffsets(offsetsMap: immutable.Map[TopicPartition, TxnOffsetCommitRequest.CommittedOffset]): immutable.Map[TopicPartition, OffsetAndMetadata] = {
val offsetRetention = groupCoordinator.offsetConfig.offsetsRetentionMs
val currentTimestamp = time.milliseconds
val defaultExpireTimestamp = offsetRetention + currentTimestamp
offsetsMap.map { case (topicPartition, partitionData) =>
val metadata = if (partitionData.metadata == null) OffsetMetadata.NoMetadata else partitionData.metadata
topicPartition -> new OffsetAndMetadata(
offsetMetadata = OffsetMetadata(partitionData.offset, metadata),
commitTimestamp = currentTimestamp,
expireTimestamp = defaultExpireTimestamp)
}
}
def handleDescribeAcls(request: RequestChannel.Request): Unit = {
authorizeClusterDescribe(request)
val describeAclsRequest = request.body[DescribeAclsRequest]
authorizer match {
case None =>
sendResponseMaybeThrottle(request, requestThrottleMs =>
new DescribeAclsResponse(requestThrottleMs,
new ApiError(Errors.SECURITY_DISABLED, "No Authorizer is configured on the broker"), Collections.emptySet()))
case Some(auth) =>
val filter = describeAclsRequest.filter()
val returnedAcls = auth.getAcls.toSeq.flatMap { case (resource, acls) =>
acls.flatMap { acl =>
val fixture = new AclBinding(new AdminResource(resource.resourceType.toJava, resource.name),
new AccessControlEntry(acl.principal.toString, acl.host.toString, acl.operation.toJava, acl.permissionType.toJava))
if (filter.matches(fixture)) Some(fixture)
else None
}
}
sendResponseMaybeThrottle(request, requestThrottleMs =>
new DescribeAclsResponse(requestThrottleMs, ApiError.NONE, returnedAcls.asJava))
}
}
  /**
   * Handles a CreateAclsRequest: adds each requested ACL binding via the configured
   * authorizer. Requires Alter on the cluster. Each creation is validated and applied
   * independently; per-entry failures are reported in the response, not the whole request.
   */
  def handleCreateAcls(request: RequestChannel.Request): Unit = {
    authorizeClusterAlter(request)
    val createAclsRequest = request.body[CreateAclsRequest]
    authorizer match {
      case None =>
        // ACLs cannot be created when no authorizer is plugged in.
        sendResponseMaybeThrottle(request, requestThrottleMs =>
          createAclsRequest.getErrorResponse(requestThrottleMs,
            new SecurityDisabledException("No Authorizer is configured on the broker.")))
      case Some(auth) =>
        val aclCreationResults = createAclsRequest.aclCreations.asScala.map { aclCreation =>
          SecurityUtils.convertToResourceAndAcl(aclCreation.acl.toFilter) match {
            case Left(apiError) => new AclCreationResponse(apiError)
            case Right((resource, acl)) => try {
              // The CLUSTER resource has exactly one legal name; reject anything else.
              if (resource.resourceType.equals(Cluster) &&
                  !resource.name.equals(Resource.ClusterResourceName))
                throw new InvalidRequestException("The only valid name for the CLUSTER resource is " +
                  Resource.ClusterResourceName)
              if (resource.name.isEmpty)
                throw new InvalidRequestException("Invalid empty resource name")
              auth.addAcls(immutable.Set(acl), resource)
              debug(s"Added acl $acl to $resource")
              new AclCreationResponse(ApiError.NONE)
            } catch {
              case throwable: Throwable =>
                debug(s"Failed to add acl $acl to $resource", throwable)
                new AclCreationResponse(ApiError.fromThrowable(throwable))
            }
          }
        }
        sendResponseMaybeThrottle(request, requestThrottleMs =>
          new CreateAclsResponse(requestThrottleMs, aclCreationResults.asJava))
    }
  }
  /**
   * Handles a DeleteAclsRequest: removes ACLs matching each filter. Filters that can match
   * at most one binding are resolved directly; broader filters are matched against the
   * authorizer's full ACL set. Results are grouped by input filter index.
   */
  def handleDeleteAcls(request: RequestChannel.Request): Unit = {
    authorizeClusterAlter(request)
    val deleteAclsRequest = request.body[DeleteAclsRequest]
    authorizer match {
      case None =>
        // ACLs cannot be deleted when no authorizer is plugged in.
        sendResponseMaybeThrottle(request, requestThrottleMs =>
          deleteAclsRequest.getErrorResponse(requestThrottleMs,
            new SecurityDisabledException("No Authorizer is configured on the broker.")))
      case Some(auth) =>
        val filters = deleteAclsRequest.filters.asScala
        // Response per filter index, and the (resource, acl) pairs each filter selected.
        val filterResponseMap = mutable.Map[Int, AclFilterResponse]()
        val toDelete = mutable.Map[Int, ArrayBuffer[(Resource, Acl)]]()

        if (filters.forall(_.matchesAtMostOne)) {
          // Delete based on a list of ACL fixtures.
          for ((filter, i) <- filters.zipWithIndex) {
            SecurityUtils.convertToResourceAndAcl(filter) match {
              case Left(apiError) => filterResponseMap.put(i, new AclFilterResponse(apiError, Seq.empty.asJava))
              case Right(binding) => toDelete.put(i, ArrayBuffer(binding))
            }
          }
        } else {
          // Delete based on filters that may match more than one ACL.
          val aclMap = auth.getAcls()
          val filtersWithIndex = filters.zipWithIndex
          for ((resource, acls) <- aclMap; acl <- acls) {
            val binding = new AclBinding(
              new AdminResource(resource.resourceType.toJava, resource.name),
              new AccessControlEntry(acl.principal.toString, acl.host.toString, acl.operation.toJava,
                acl.permissionType.toJava))
            for ((filter, i) <- filtersWithIndex if filter.matches(binding))
              toDelete.getOrElseUpdate(i, ArrayBuffer.empty) += ((resource, acl))
          }
        }

        for ((i, acls) <- toDelete) {
          val deletionResults = acls.flatMap { case (resource, acl) =>
            val aclBinding = SecurityUtils.convertToAclBinding(resource, acl)
            try {
              // removeAcls returns false when the ACL was already gone; such entries are
              // simply omitted from the result list.
              if (auth.removeAcls(immutable.Set(acl), resource))
                Some(new AclDeletionResult(aclBinding))
              else None
            } catch {
              case throwable: Throwable =>
                Some(new AclDeletionResult(ApiError.fromThrowable(throwable), aclBinding))
            }
          }.asJava
          filterResponseMap.put(i, new AclFilterResponse(deletionResults))
        }

        // Filters that selected nothing still get an (empty) response entry.
        val filterResponses = filters.indices.map { i =>
          filterResponseMap.getOrElse(i, new AclFilterResponse(Seq.empty.asJava))
        }.asJava
        sendResponseMaybeThrottle(request, requestThrottleMs => new DeleteAclsResponse(requestThrottleMs, filterResponses))
    }
  }
def handleOffsetForLeaderEpochRequest(request: RequestChannel.Request): Unit = {
val offsetForLeaderEpoch = request.body[OffsetsForLeaderEpochRequest]
val requestInfo = offsetForLeaderEpoch.epochsByTopicPartition()
authorizeClusterAction(request)
val lastOffsetForLeaderEpoch = replicaManager.lastOffsetForLeaderEpoch(requestInfo.asScala).asJava
sendResponseExemptThrottle(request, new OffsetsForLeaderEpochResponse(lastOffsetForLeaderEpoch))
}
def handleAlterConfigsRequest(request: RequestChannel.Request): Unit = {
val alterConfigsRequest = request.body[AlterConfigsRequest]
val (authorizedResources, unauthorizedResources) = alterConfigsRequest.configs.asScala.partition { case (resource, _) =>
resource.`type` match {
case RResourceType.BROKER =>
authorize(request.session, AlterConfigs, Resource.ClusterResource)
case RResourceType.TOPIC =>
authorize(request.session, AlterConfigs, new Resource(Topic, resource.name))
case rt => throw new InvalidRequestException(s"Unexpected resource type $rt")
}
}
val authorizedResult = adminManager.alterConfigs(authorizedResources, alterConfigsRequest.validateOnly)
val unauthorizedResult = unauthorizedResources.keys.map { resource =>
resource -> configsAuthorizationApiError(request.session, resource)
}
sendResponseMaybeThrottle(request, requestThrottleMs =>
new AlterConfigsResponse(requestThrottleMs, (authorizedResult ++ unauthorizedResult).asJava))
}
private def configsAuthorizationApiError(session: RequestChannel.Session, resource: RResource): ApiError = {
val error = resource.`type` match {
case RResourceType.BROKER => Errors.CLUSTER_AUTHORIZATION_FAILED
case RResourceType.TOPIC => Errors.TOPIC_AUTHORIZATION_FAILED
case rt => throw new InvalidRequestException(s"Unexpected resource type $rt for resource ${resource.name}")
}
new ApiError(error, null)
}
def handleDescribeConfigsRequest(request: RequestChannel.Request): Unit = {
val describeConfigsRequest = request.body[DescribeConfigsRequest]
val (authorizedResources, unauthorizedResources) = describeConfigsRequest.resources.asScala.partition { resource =>
resource.`type` match {
case RResourceType.BROKER => authorize(request.session, DescribeConfigs, Resource.ClusterResource)
case RResourceType.TOPIC =>
authorize(request.session, DescribeConfigs, new Resource(Topic, resource.name))
case rt => throw new InvalidRequestException(s"Unexpected resource type $rt for resource ${resource.name}")
}
}
val authorizedConfigs = adminManager.describeConfigs(authorizedResources.map { resource =>
resource -> Option(describeConfigsRequest.configNames(resource)).map(_.asScala.toSet)
}.toMap, describeConfigsRequest.includeSynonyms)
val unauthorizedConfigs = unauthorizedResources.map { resource =>
val error = configsAuthorizationApiError(request.session, resource)
resource -> new DescribeConfigsResponse.Config(error, Collections.emptyList[DescribeConfigsResponse.ConfigEntry])
}
sendResponseMaybeThrottle(request, requestThrottleMs =>
new DescribeConfigsResponse(requestThrottleMs, (authorizedConfigs ++ unauthorizedConfigs).asJava))
}
def handleAlterReplicaLogDirsRequest(request: RequestChannel.Request): Unit = {
val alterReplicaDirsRequest = request.body[AlterReplicaLogDirsRequest]
val responseMap = {
if (authorize(request.session, Alter, Resource.ClusterResource))
replicaManager.alterReplicaLogDirs(alterReplicaDirsRequest.partitionDirs.asScala)
else
alterReplicaDirsRequest.partitionDirs.asScala.keys.map((_, Errors.CLUSTER_AUTHORIZATION_FAILED)).toMap
}
sendResponseMaybeThrottle(request, requestThrottleMs => new AlterReplicaLogDirsResponse(requestThrottleMs, responseMap.asJava))
}
def handleDescribeLogDirsRequest(request: RequestChannel.Request): Unit = {
val describeLogDirsDirRequest = request.body[DescribeLogDirsRequest]
val logDirInfos = {
if (authorize(request.session, Describe, Resource.ClusterResource)) {
val partitions =
if (describeLogDirsDirRequest.isAllTopicPartitions)
replicaManager.logManager.allLogs.map(_.topicPartition).toSet
else
describeLogDirsDirRequest.topicPartitions().asScala
replicaManager.describeLogDirs(partitions)
} else {
Map.empty[String, LogDirInfo]
}
}
sendResponseMaybeThrottle(request, throttleTimeMs => new DescribeLogDirsResponse(throttleTimeMs, logDirInfos.asJava))
}
def handleCreateTokenRequest(request: RequestChannel.Request) {
val createTokenRequest = request.body[CreateDelegationTokenRequest]
// the callback for sending a create token response
def sendResponseCallback(createResult: CreateTokenResult) {
trace("Sending create token response for correlation id %d to client %s."
.format(request.header.correlationId, request.header.clientId))
sendResponseMaybeThrottle(request, requestThrottleMs =>
new CreateDelegationTokenResponse(requestThrottleMs, createResult.error, request.session.principal, createResult.issueTimestamp,
createResult.expiryTimestamp, createResult.maxTimestamp, createResult.tokenId, ByteBuffer.wrap(createResult.hmac)))
}
if (!allowTokenRequests(request))
sendResponseMaybeThrottle(request, requestThrottleMs =>
new CreateDelegationTokenResponse(requestThrottleMs, Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED, request.session.principal))
else {
val renewerList = createTokenRequest.renewers().asScala.toList
if (renewerList.exists(principal => principal.getPrincipalType != KafkaPrincipal.USER_TYPE)) {
sendResponseMaybeThrottle(request, requestThrottleMs =>
new CreateDelegationTokenResponse(requestThrottleMs, Errors.INVALID_PRINCIPAL_TYPE, request.session.principal))
}
else {
tokenManager.createToken(
request.session.principal,
createTokenRequest.renewers().asScala.toList,
createTokenRequest.maxLifeTime(),
sendResponseCallback
)
}
}
}
def handleRenewTokenRequest(request: RequestChannel.Request) {
val renewTokenRequest = request.body[RenewDelegationTokenRequest]
// the callback for sending a renew token response
def sendResponseCallback(error: Errors, expiryTimestamp: Long) {
trace("Sending renew token response %s for correlation id %d to client %s."
.format(request.header.correlationId, request.header.clientId))
sendResponseMaybeThrottle(request, requestThrottleMs =>
new RenewDelegationTokenResponse(requestThrottleMs, error, expiryTimestamp))
}
if (!allowTokenRequests(request))
sendResponseCallback(Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED, DelegationTokenManager.ErrorTimestamp)
else {
tokenManager.renewToken(
request.session.principal,
renewTokenRequest.hmac,
renewTokenRequest.renewTimePeriod(),
sendResponseCallback
)
}
}
def handleExpireTokenRequest(request: RequestChannel.Request) {
val expireTokenRequest = request.body[ExpireDelegationTokenRequest]
// the callback for sending a expire token response
def sendResponseCallback(error: Errors, expiryTimestamp: Long) {
trace("Sending expire token response for correlation id %d to client %s."
.format(request.header.correlationId, request.header.clientId))
sendResponseMaybeThrottle(request, requestThrottleMs =>
new ExpireDelegationTokenResponse(requestThrottleMs, error, expiryTimestamp))
}
if (!allowTokenRequests(request))
sendResponseCallback(Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED, DelegationTokenManager.ErrorTimestamp)
else {
tokenManager.expireToken(
request.session.principal,
expireTokenRequest.hmac(),
expireTokenRequest.expiryTimePeriod(),
sendResponseCallback
)
}
}
def handleDescribeTokensRequest(request: RequestChannel.Request) {
val describeTokenRequest = request.body[DescribeDelegationTokenRequest]
// the callback for sending a describe token response
def sendResponseCallback(error: Errors, tokenDetails: List[DelegationToken]) {
sendResponseMaybeThrottle(request, requestThrottleMs =>
new DescribeDelegationTokenResponse(requestThrottleMs, error, tokenDetails.asJava))
trace("Sending describe token response for correlation id %d to client %s."
.format(request.header.correlationId, request.header.clientId))
}
if (!allowTokenRequests(request))
sendResponseCallback(Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED, List.empty)
else if (!config.tokenAuthEnabled)
sendResponseCallback(Errors.DELEGATION_TOKEN_AUTH_DISABLED, List.empty)
else {
val requestPrincipal = request.session.principal
if (describeTokenRequest.ownersListEmpty()) {
sendResponseCallback(Errors.NONE, List())
}
else {
val owners = if (describeTokenRequest.owners == null) None else Some(describeTokenRequest.owners.asScala.toList)
def authorizeToken(tokenId: String) = authorize(request.session, Describe, new Resource(kafka.security.auth.DelegationToken, tokenId))
def eligible(token: TokenInformation) = DelegationTokenManager.filterToken(requestPrincipal, owners, token, authorizeToken)
val tokens = tokenManager.getTokens(eligible)
sendResponseCallback(Errors.NONE, tokens)
}
}
}
def allowTokenRequests(request: RequestChannel.Request): Boolean = {
val protocol = request.context.securityProtocol
if (request.session.principal.tokenAuthenticated ||
protocol == SecurityProtocol.PLAINTEXT ||
// disallow requests from 1-way SSL
(protocol == SecurityProtocol.SSL && request.session.principal == KafkaPrincipal.ANONYMOUS))
false
else
true
}
def authorizeClusterAction(request: RequestChannel.Request): Unit = {
if (!authorize(request.session, ClusterAction, Resource.ClusterResource))
throw new ClusterAuthorizationException(s"Request $request is not authorized.")
}
def authorizeClusterAlter(request: RequestChannel.Request): Unit = {
if (!authorize(request.session, Alter, Resource.ClusterResource))
throw new ClusterAuthorizationException(s"Request $request is not authorized.")
}
def authorizeClusterDescribe(request: RequestChannel.Request): Unit = {
if (!authorize(request.session, Describe, Resource.ClusterResource))
throw new ClusterAuthorizationException(s"Request $request is not authorized.")
}
  /**
   * Records per-topic and aggregate metrics for message-format conversions performed while
   * processing partition `tp` of a Produce or Fetch request, plus temporary memory usage.
   */
  private def updateRecordsProcessingStats(request: RequestChannel.Request, tp: TopicPartition,
                                           processingStats: RecordsProcessingStats): Unit = {
    val conversionCount = processingStats.numRecordsConverted
    if (conversionCount > 0) {
      // Conversion metrics exist only for the two record-carrying APIs.
      request.header.apiKey match {
        case ApiKeys.PRODUCE =>
          brokerTopicStats.topicStats(tp.topic).produceMessageConversionsRate.mark(conversionCount)
          brokerTopicStats.allTopicsStats.produceMessageConversionsRate.mark(conversionCount)
        case ApiKeys.FETCH =>
          brokerTopicStats.topicStats(tp.topic).fetchMessageConversionsRate.mark(conversionCount)
          brokerTopicStats.allTopicsStats.fetchMessageConversionsRate.mark(conversionCount)
        case _ =>
          throw new IllegalStateException("Message conversion info is recorded only for Produce/Fetch requests")
      }
      // Conversion time is only meaningful when conversions actually happened.
      request.messageConversionsTimeNanos = processingStats.conversionTimeNanos
    }
    request.temporaryMemoryBytes = processingStats.temporaryMemoryBytes
  }
private def handleError(request: RequestChannel.Request, e: Throwable) {
val mayThrottle = e.isInstanceOf[ClusterAuthorizationException] || !request.header.apiKey.clusterAction
error("Error when handling request %s".format(request.body[AbstractRequest]), e)
if (mayThrottle)
sendErrorResponseMaybeThrottle(request, e)
else
sendErrorResponseExemptThrottle(request, e)
}
  // Records the request against the request quota and sends the response built with
  // the resulting client throttle time.
  private def sendResponseMaybeThrottle(request: RequestChannel.Request, createResponse: Int => AbstractResponse): Unit = {
    quotas.request.maybeRecordAndThrottle(request,
      throttleTimeMs => sendResponse(request, Some(createResponse(throttleTimeMs))))
  }
  // Quota-throttled variant of the error path: the computed throttle time is passed
  // through to sendErrorOrCloseConnection.
  private def sendErrorResponseMaybeThrottle(request: RequestChannel.Request, error: Throwable) {
    quotas.request.maybeRecordAndThrottle(request, sendErrorOrCloseConnection(request, error))
  }
  // Sends a response without throttling; the request is still recorded against the quota.
  private def sendResponseExemptThrottle(request: RequestChannel.Request, response: AbstractResponse): Unit = {
    quotas.request.maybeRecordExempt(request)
    sendResponse(request, Some(response))
  }
  // Error-path variant of the throttle-exempt send: throttle time is fixed at 0.
  private def sendErrorResponseExemptThrottle(request: RequestChannel.Request, error: Throwable): Unit = {
    quotas.request.maybeRecordExempt(request)
    sendErrorOrCloseConnection(request, error)(throttleMs = 0)
  }
private def sendErrorOrCloseConnection(request: RequestChannel.Request, error: Throwable)(throttleMs: Int): Unit = {
val requestBody = request.body[AbstractRequest]
val response = requestBody.getErrorResponse(throttleMs, error)
if (response == null)
closeConnection(request, requestBody.errorCounts(error))
else
sendResponse(request, Some(response))
}
  // Completes the request with no response on the wire (e.g. produce with acks=0),
  // exempt from throttling.
  private def sendNoOpResponseExemptThrottle(request: RequestChannel.Request): Unit = {
    quotas.request.maybeRecordExempt(request)
    sendResponse(request, None)
  }
  // Records the error metrics and asks the network layer to close the connection.
  private def closeConnection(request: RequestChannel.Request, errorCounts: java.util.Map[Errors, Integer]): Unit = {
    // This case is used when the request handler has encountered an error, but the client
    // does not expect a response (e.g. when produce request has acks set to 0)
    requestChannel.updateErrorMetrics(request.header.apiKey, errorCounts.asScala)
    requestChannel.sendResponse(new RequestChannel.Response(request, None, CloseConnectionAction, None))
  }
private def sendResponse(request: RequestChannel.Request, responseOpt: Option[AbstractResponse]): Unit = {
// Update error metrics for each error code in the response including Errors.NONE
responseOpt.foreach(response => requestChannel.updateErrorMetrics(request.header.apiKey, response.errorCounts.asScala))
responseOpt match {
case Some(response) =>
val responseSend = request.context.buildResponse(response)
val responseString =
if (RequestChannel.isRequestLoggingEnabled) Some(response.toString(request.context.apiVersion))
else None
requestChannel.sendResponse(new RequestChannel.Response(request, Some(responseSend), SendAction, responseString))
case None =>
requestChannel.sendResponse(new RequestChannel.Response(request, None, NoOpAction, None))
}
}
}
| sebadiaz/kafka | core/src/main/scala/kafka/server/KafkaApis.scala | Scala | apache-2.0 | 114,083 |
package gitbucket.core.util
import java.io.{ByteArrayOutputStream, File, FileInputStream, InputStream}
import gitbucket.core.service.RepositoryService
import org.eclipse.jgit.api.Git
import Directory._
import StringUtil._
import SyntaxSugars._
import scala.annotation.tailrec
import scala.collection.JavaConverters._
import org.eclipse.jgit.lib._
import org.eclipse.jgit.revwalk._
import org.eclipse.jgit.revwalk.filter._
import org.eclipse.jgit.treewalk._
import org.eclipse.jgit.treewalk.filter._
import org.eclipse.jgit.diff.DiffEntry.ChangeType
import org.eclipse.jgit.errors.{ConfigInvalidException, IncorrectObjectTypeException, MissingObjectException}
import org.eclipse.jgit.transport.RefSpec
import java.util.Date
import java.util.concurrent.TimeUnit
import java.util.function.Consumer
import org.cache2k.Cache2kBuilder
import org.eclipse.jgit.api.errors._
import org.eclipse.jgit.diff.{DiffEntry, DiffFormatter, RawTextComparator}
import org.eclipse.jgit.dircache.DirCacheEntry
import org.eclipse.jgit.util.io.DisabledOutputStream
import org.slf4j.LoggerFactory
/**
* Provides complex JGit operations.
*/
object JGitUtil {
  // Logger for this utility object (used e.g. to report .gitmodules parse failures).
  private val logger = LoggerFactory.getLogger(JGitUtil.getClass)
  /**
   * The repository data.
   *
   * @param owner the user name of the repository owner
   * @param name the repository name
   * @param branchList the list of branch names
   * @param tags the list of tags
   */
  case class RepositoryInfo(owner: String, name: String, branchList: List[String], tags: List[TagInfo]) {
    // Convenience constructor for a repository with no branches or tags yet.
    def this(owner: String, name: String) = this(owner, name, Nil, Nil)
  }
  /**
   * The file data for the file list of the repository viewer.
   *
   * @param id the object id
   * @param isDirectory whether is it directory
   * @param name the file (or directory) name
   * @param path the file (or directory) complete path
   * @param message the last commit message
   * @param commitId the last commit id
   * @param time the last modified time
   * @param author the last committer name
   * @param mailAddress the committer's mail address
   * @param linkUrl the url of submodule
   */
  case class FileInfo(
    id: ObjectId,
    isDirectory: Boolean,
    name: String,
    path: String,
    message: String,
    commitId: String,
    time: Date,
    author: String,
    mailAddress: String,
    linkUrl: Option[String]
  )
  /**
   * The commit data.
   *
   * @param id the commit id
   * @param shortMessage the short message
   * @param fullMessage the full message
   * @param parents the list of parent commit id
   * @param authorTime the author time
   * @param authorName the author name
   * @param authorEmailAddress the mail address of the author
   * @param commitTime the commit time
   * @param committerName the committer name
   * @param committerEmailAddress the mail address of the committer
   */
  case class CommitInfo(
    id: String,
    shortMessage: String,
    fullMessage: String,
    parents: List[String],
    authorTime: Date,
    authorName: String,
    authorEmailAddress: String,
    commitTime: Date,
    committerName: String,
    committerEmailAddress: String
  ) {
    def this(rev: org.eclipse.jgit.revwalk.RevCommit) =
      this(
        rev.getName,
        rev.getShortMessage,
        rev.getFullMessage,
        rev.getParents().map(_.name).toList,
        rev.getAuthorIdent.getWhen,
        rev.getAuthorIdent.getName,
        rev.getAuthorIdent.getEmailAddress,
        rev.getCommitterIdent.getWhen,
        rev.getCommitterIdent.getName,
        rev.getCommitterIdent.getEmailAddress
      )
    // One-line summary: the first line of the full message, or the short message when shorter.
    val summary = getSummaryMessage(fullMessage, shortMessage)
    // Message body after the first line, if the message has more than one line.
    val description = defining(fullMessage.trim.indexOf('\\n')) { i =>
      if (i >= 0) {
        Some(fullMessage.trim.substring(i).trim)
      } else None
    }
    // True when committer identity differs from author identity.
    def isDifferentFromAuthor: Boolean = authorName != committerName || authorEmailAddress != committerEmailAddress
  }
  /**
   * A single entry of a diff between two commits.
   *
   * @param changeType ADD/MODIFY/DELETE/RENAME/COPY
   * @param oldPath the path on the old side
   * @param newPath the path on the new side
   * @param oldContent the old text content, when fetched and textual
   * @param newContent the new text content, when fetched and textual
   * @param oldIsImage whether the old path looks like an image file
   * @param newIsImage whether the new path looks like an image file
   * @param oldObjectId the old blob id, if any
   * @param newObjectId the new blob id, if any
   * @param oldMode the old file mode string
   * @param newMode the new file mode string
   * @param tooLarge true when the diff was truncated because it contains too many files
   * @param patch the unified-diff patch text, when requested
   */
  case class DiffInfo(
    changeType: ChangeType,
    oldPath: String,
    newPath: String,
    oldContent: Option[String],
    newContent: Option[String],
    oldIsImage: Boolean,
    newIsImage: Boolean,
    oldObjectId: Option[String],
    newObjectId: Option[String],
    oldMode: String,
    newMode: String,
    tooLarge: Boolean,
    patch: Option[String]
  )
  /**
   * The file content data for the file content view of the repository viewer.
   *
   * @param viewType "image", "large" or "other"
   * @param size total size of object in bytes
   * @param content the string content
   * @param charset the character encoding
   */
  case class ContentInfo(viewType: String, size: Option[Long], content: Option[String], charset: Option[String]) {
    /**
     * the line separator of this content ("LF" or "CRLF")
     */
    val lineSeparator: String = if (content.exists(_.indexOf("\\r\\n") >= 0)) "CRLF" else "LF"
  }
  /**
   * The tag data.
   *
   * @param name the tag name
   * @param time the tagged date
   * @param id the commit id
   * @param message the message of the tagged commit
   */
  case class TagInfo(name: String, time: Date, id: String, message: String)
  /**
   * The submodule data
   *
   * @param name the module name
   * @param path the path in the repository
   * @param repositoryUrl the repository url of this module
   * @param viewerUrl the repository viewer url of this module
   */
  case class SubmoduleInfo(name: String, path: String, repositoryUrl: String, viewerUrl: String)
  /** Ahead/behind commit counts of a branch relative to its comparison base, and whether it is merged. */
  case class BranchMergeInfo(ahead: Int, behind: Int, isMerged: Boolean)
  /**
   * Branch data for the branch list view.
   *
   * @param name the branch name
   * @param committerName the last committer name
   * @param commitTime the last commit time
   * @param committerEmailAddress the last committer's mail address
   * @param mergeInfo merge status relative to the default branch, when computed
   * @param commitId the head commit id of the branch
   */
  case class BranchInfo(
    name: String,
    committerName: String,
    commitTime: Date,
    committerEmailAddress: String,
    mergeInfo: Option[BranchMergeInfo],
    commitId: String
  )
  /**
   * One blame region: the commit that last touched a set of lines.
   *
   * @param id the commit id
   * @param authorName the author name
   * @param authorEmailAddress the author's mail address
   * @param authorTime the author time
   * @param prev the previous commit id, if any
   * @param prevPath the path in the previous commit, if any
   * @param commitTime the commit time
   * @param message the commit message
   * @param lines the line numbers attributed to this commit
   */
  case class BlameInfo(
    id: String,
    authorName: String,
    authorEmailAddress: String,
    authorTime: java.util.Date,
    prev: Option[String],
    prevPath: Option[String],
    commitTime: java.util.Date,
    message: String,
    lines: Set[Int]
  )
/**
* Returns RevCommit from the commit or tag id.
*
* @param git the Git object
* @param objectId the ObjectId of the commit or tag
* @return the RevCommit for the specified commit or tag
*/
def getRevCommitFromId(git: Git, objectId: ObjectId): RevCommit = {
val revWalk = new RevWalk(git.getRepository)
val revCommit = revWalk.parseAny(objectId) match {
case r: RevTag => revWalk.parseCommit(r.getObject)
case _ => revWalk.parseCommit(objectId)
}
revWalk.dispose
revCommit
}
  // Commit-count cache keyed by "<repository dir>@<branch>"; entries expire after a day
  // and are invalidated explicitly via removeCache when the repository changes.
  private val cache = new Cache2kBuilder[String, Int]() {}
    .name("commit-count")
    .expireAfterWrite(24, TimeUnit.HOURS)
    .entryCapacity(10000)
    .build()
def removeCache(git: Git): Unit = {
val dir = git.getRepository.getDirectory
val keyPrefix = dir.getAbsolutePath + "@"
cache.keys.forEach(key => {
if (key.startsWith(keyPrefix)) {
cache.remove(key)
}
})
}
/**
* Returns the number of commits in the specified branch or commit.
* If the specified branch has over 10000 commits, this method returns 100001.
*/
def getCommitCount(owner: String, repository: String, branch: String): Int = {
val dir = getRepositoryDir(owner, repository)
val key = dir.getAbsolutePath + "@" + branch
val entry = cache.getEntry(key)
if (entry == null) {
using(Git.open(dir)) { git =>
val commitId = git.getRepository.resolve(branch)
val commitCount = git.log.add(commitId).call.iterator.asScala.take(10001).size
cache.put(key, commitCount)
commitCount
}
} else {
entry.getValue
}
}
  /**
   * Returns the repository information. It contains branch names and tag names.
   */
  def getRepositoryInfo(owner: String, repository: String): RepositoryInfo = {
    using(Git.open(getRepositoryDir(owner, repository))) { git =>
      try {
        RepositoryInfo(
          owner,
          repository,
          // branches
          git.branchList.call.asScala.map { ref =>
            ref.getName.stripPrefix("refs/heads/")
          }.toList,
          // tags
          git.tagList.call.asScala
            .flatMap { ref =>
              try {
                val revCommit = getRevCommitFromId(git, ref.getObjectId)
                Some(
                  TagInfo(
                    ref.getName.stripPrefix("refs/tags/"),
                    revCommit.getCommitterIdent.getWhen,
                    revCommit.getName,
                    revCommit.getShortMessage
                  )
                )
              } catch {
                // Tags that do not point at a commit (e.g. tagged trees/blobs) are skipped.
                case _: IncorrectObjectTypeException =>
                  None
              }
            }
            .sortBy(_.time)
            .toList
        )
      } catch {
        // not initialized
        case e: NoHeadException => RepositoryInfo(owner, repository, Nil, Nil)
      }
    }
  }
  /**
   * Returns the file list of the specified path.
   *
   * @param git the Git object
   * @param revision the branch name or commit id
   * @param path the directory path (optional)
   * @param baseUrl the base url of GitBucket instance. This parameter is used to generate links of submodules (optional)
   * @return HTML of the file list
   */
  def getFileList(git: Git, revision: String, path: String = ".", baseUrl: Option[String] = None): List[FileInfo] = {
    using(new RevWalk(git.getRepository)) { revWalk =>
      val objectId = git.getRepository.resolve(revision)
      if (objectId == null) return Nil
      val revCommit = revWalk.parseCommit(objectId)
      // Runs f over a TreeWalk positioned at the requested directory of the given commit.
      def useTreeWalk(rev: RevCommit)(f: TreeWalk => Any): Unit =
        if (path == ".") {
          val treeWalk = new TreeWalk(git.getRepository)
          treeWalk.addTree(rev.getTree)
          using(treeWalk)(f)
        } else {
          val treeWalk = TreeWalk.forPath(git.getRepository, path, rev.getTree)
          if (treeWalk != null) {
            treeWalk.enterSubtree
            using(treeWalk)(f)
          }
        }
      // Collapses chains of single-child directories ("a" -> "a/b/c") like the GitHub viewer does.
      @tailrec
      def simplifyPath(
        tuple: (ObjectId, FileMode, String, String, Option[String], RevCommit)
      ): (ObjectId, FileMode, String, String, Option[String], RevCommit) = tuple match {
        case (oid, FileMode.TREE, name, path, _, commit) =>
          (using(new TreeWalk(git.getRepository)) { walk =>
            walk.addTree(oid)
            // single tree child, or None
            if (walk.next() && walk.getFileMode(0) == FileMode.TREE) {
              Some(
                (
                  walk.getObjectId(0),
                  walk.getFileMode(0),
                  name + "/" + walk.getNameString,
                  path + "/" + walk.getNameString,
                  None,
                  commit
                )
              ).filterNot(_ => walk.next())
            } else {
              None
            }
          }) match {
            case Some(child) => simplifyPath(child)
            case _ => tuple
          }
        case _ => tuple
      }
      def tupleAdd(tuple: (ObjectId, FileMode, String, String, Option[String]), rev: RevCommit) = tuple match {
        case (oid, fmode, name, path, opt) => (oid, fmode, name, path, opt, rev)
      }
      // Walks history once, attributing to every entry the most recent commit that
      // last changed it (entries drop out of restList as soon as they are attributed).
      @tailrec
      def findLastCommits(
        result: List[(ObjectId, FileMode, String, String, Option[String], RevCommit)],
        restList: List[((ObjectId, FileMode, String, String, Option[String]), Map[RevCommit, RevCommit])],
        revIterator: java.util.Iterator[RevCommit]
      ): List[(ObjectId, FileMode, String, String, Option[String], RevCommit)] = {
        if (restList.isEmpty) {
          result
        } else if (!revIterator.hasNext) { // maybe, revCommit has only 1 log. other case, restList be empty
          result ++ restList.map { case (tuple, map) => tupleAdd(tuple, map.values.headOption.getOrElse(revCommit)) }
        } else {
          val newCommit = revIterator.next
          val (thisTimeChecks, skips) = restList.partition {
            case (tuple, parentsMap) => parentsMap.contains(newCommit)
          }
          if (thisTimeChecks.isEmpty) {
            findLastCommits(result, restList, revIterator)
          } else {
            var nextRest = skips
            var nextResult = result
            // Map[(name, oid), (tuple, parentsMap)]
            val rest = scala.collection.mutable.Map(thisTimeChecks.map { t =>
              (t._1._3 -> t._1._1) -> t
            }: _*)
            lazy val newParentsMap = newCommit.getParents.map(_ -> newCommit).toMap
            useTreeWalk(newCommit) { walk =>
              while (walk.next) {
                rest.remove(walk.getNameString -> walk.getObjectId(0)).foreach {
                  case (tuple, _) =>
                    if (newParentsMap.isEmpty) {
                      nextResult +:= tupleAdd(tuple, newCommit)
                    } else {
                      nextRest +:= tuple -> newParentsMap
                    }
                }
              }
            }
            rest.values.foreach {
              case (tuple, parentsMap) =>
                val restParentsMap = parentsMap - newCommit
                if (restParentsMap.isEmpty) {
                  nextResult +:= tupleAdd(tuple, parentsMap(newCommit))
                } else {
                  nextRest +:= tuple -> restParentsMap
                }
            }
            findLastCommits(nextResult, nextRest, revIterator)
          }
        }
      }
      var fileList: List[(ObjectId, FileMode, String, String, Option[String])] = Nil
      useTreeWalk(revCommit) { treeWalk =>
        while (treeWalk.next()) {
          // GITLINK entries are submodules; resolve their viewer url from .gitmodules.
          val linkUrl = if (treeWalk.getFileMode(0) == FileMode.GITLINK) {
            getSubmodules(git, revCommit.getTree, baseUrl).find(_.path == treeWalk.getPathString).map(_.viewerUrl)
          } else None
          fileList +:= (treeWalk.getObjectId(0), treeWalk.getFileMode(0), treeWalk.getNameString, treeWalk.getPathString, linkUrl)
        }
      }
      revWalk.markStart(revCommit)
      val it = revWalk.iterator
      val lastCommit = it.next
      val nextParentsMap = Option(lastCommit).map(_.getParents.map(_ -> lastCommit).toMap).getOrElse(Map())
      // Directories first, then case-sensitive name order.
      findLastCommits(List.empty, fileList.map(a => a -> nextParentsMap), it)
        .map(simplifyPath)
        .map {
          case (objectId, fileMode, name, path, linkUrl, commit) =>
            FileInfo(
              objectId,
              fileMode == FileMode.TREE || fileMode == FileMode.GITLINK,
              name,
              path,
              getSummaryMessage(commit.getFullMessage, commit.getShortMessage),
              commit.getName,
              commit.getAuthorIdent.getWhen,
              commit.getAuthorIdent.getName,
              commit.getAuthorIdent.getEmailAddress,
              linkUrl
            )
        }
        .sortWith { (file1, file2) =>
          (file1.isDirectory, file2.isDirectory) match {
            case (true, false) => true
            case (false, true) => false
            case _ => file1.name.compareTo(file2.name) < 0
          }
        }
    }
  }
/**
* Returns the first line of the commit message.
*/
private def getSummaryMessage(fullMessage: String, shortMessage: String): String = {
defining(fullMessage.trim.indexOf('\\n')) { i =>
defining(if (i >= 0) fullMessage.trim.substring(0, i).trim else fullMessage) { firstLine =>
if (firstLine.length > shortMessage.length) shortMessage else firstLine
}
}
}
/**
* get all file list by revision. only file.
*/
def getTreeId(git: Git, revision: String): Option[String] = {
using(new RevWalk(git.getRepository)) { revWalk =>
val objectId = git.getRepository.resolve(revision)
if (objectId == null) return None
val revCommit = revWalk.parseCommit(objectId)
Some(revCommit.getTree.name)
}
}
/**
* get all file list by tree object id.
*/
def getAllFileListByTreeId(git: Git, treeId: String): List[String] = {
using(new RevWalk(git.getRepository)) { revWalk =>
val objectId = git.getRepository.resolve(treeId + "^{tree}")
if (objectId == null) return Nil
using(new TreeWalk(git.getRepository)) { treeWalk =>
treeWalk.addTree(objectId)
treeWalk.setRecursive(true)
var ret: List[String] = Nil
if (treeWalk != null) {
while (treeWalk.next()) {
ret +:= treeWalk.getPathString
}
}
ret.reverse
}
}
}
  /**
   * Returns the commit list of the specified branch.
   *
   * @param git the Git object
   * @param revision the branch name or commit id
   * @param page the page number (1-)
   * @param limit the number of commit info per page. 0 (default) means unlimited.
   * @param path filters by this path. default is no filter.
   * @return a tuple of the commit list and whether has next, or the error message
   */
  def getCommitLog(
    git: Git,
    revision: String,
    page: Int = 1,
    limit: Int = 0,
    path: String = ""
  ): Either[String, (List[CommitInfo], Boolean)] = {
    // Guard against page <= 0 so the skip arithmetic below stays non-negative.
    val fixedPage = if (page <= 0) 1 else page
    // Walks the iterator, skipping the first (page-1)*limit commits and keeping
    // at most `limit` of the rest; also reports whether more commits remain.
    @scala.annotation.tailrec
    def getCommitLog(
      i: java.util.Iterator[RevCommit],
      count: Int,
      logs: List[CommitInfo]
    ): (List[CommitInfo], Boolean) =
      i.hasNext match {
        case true if (limit <= 0 || logs.size < limit) => {
          val commit = i.next
          getCommitLog(
            i,
            count + 1,
            if (limit <= 0 || (fixedPage - 1) * limit <= count) logs :+ new CommitInfo(commit) else logs
          )
        }
        case _ => (logs, i.hasNext)
      }
    using(new RevWalk(git.getRepository)) { revWalk =>
      defining(git.getRepository.resolve(revision)) { objectId =>
        if (objectId == null) {
          Left(s"${revision} can't be resolved.")
        } else {
          revWalk.markStart(revWalk.parseCommit(objectId))
          if (path.nonEmpty) {
            // Restrict the walk to commits that touch the given path.
            revWalk.setTreeFilter(AndTreeFilter.create(PathFilter.create(path), TreeFilter.ANY_DIFF))
          }
          Right(getCommitLog(revWalk.iterator, 0, Nil))
        }
      }
    }
  }
  // Walks the history from `begin` (oldest first in the result) until endCondition
  // matches; the matching commit itself is included only when includesLastCommit is true.
  def getCommitLogs(git: Git, begin: String, includesLastCommit: Boolean = false)(
    endCondition: RevCommit => Boolean
  ): List[CommitInfo] = {
    @scala.annotation.tailrec
    def getCommitLog(i: java.util.Iterator[RevCommit], logs: List[CommitInfo]): List[CommitInfo] =
      i.hasNext match {
        case true => {
          val revCommit = i.next
          if (endCondition(revCommit)) {
            if (includesLastCommit) logs :+ new CommitInfo(revCommit) else logs
          } else {
            getCommitLog(i, logs :+ new CommitInfo(revCommit))
          }
        }
        case false => logs
      }
    using(new RevWalk(git.getRepository)) { revWalk =>
      revWalk.markStart(revWalk.parseCommit(git.getRepository.resolve(begin)))
      // Reversed so the oldest commit comes first.
      getCommitLog(revWalk.iterator, Nil).reverse
    }
  }
  /**
   * Returns the commit list between two revisions.
   *
   * @param git the Git object
   * @param from the from revision (exclusive)
   * @param to the to revision (inclusive)
   * @return the commit list
   */
  // TODO swap parameters 'from' and 'to'!?
  def getCommitLog(git: Git, from: String, to: String): List[CommitInfo] =
    getCommitLogs(git, to)(_.getName == from)
  /**
   * Returns the latest RevCommit of the specified path.
   *
   * @param git the Git object
   * @param path the path
   * @param revision the branch name or commit id
   * @return the latest commit, or None when the path has no history
   */
  def getLatestCommitFromPath(git: Git, path: String, revision: String): Option[RevCommit] =
    getLatestCommitFromPaths(git, List(path), revision).get(path)
/**
* Returns the list of latest RevCommit of the specified paths.
*
* @param git the Git object
* @param paths the list of paths
* @param revision the branch name or commit id
* @return the list of latest commit
*/
def getLatestCommitFromPaths(git: Git, paths: List[String], revision: String): Map[String, RevCommit] = {
val start = getRevCommitFromId(git, git.getRepository.resolve(revision))
paths.map { path =>
val commit = git.log.add(start).addPath(path).setMaxCount(1).call.iterator.next
(path, commit)
}.toMap
}
def getPatch(git: Git, from: Option[String], to: String): String = {
val out = new ByteArrayOutputStream()
val df = new DiffFormatter(out)
df.setRepository(git.getRepository)
df.setDiffComparator(RawTextComparator.DEFAULT)
df.setDetectRenames(true)
getDiffEntries(git, from, to)
.map { entry =>
df.format(entry)
new String(out.toByteArray, "UTF-8")
}
.mkString("\\n")
}
private def getDiffEntries(git: Git, from: Option[String], to: String): Seq[DiffEntry] = {
using(new RevWalk(git.getRepository)) { revWalk =>
val df = new DiffFormatter(DisabledOutputStream.INSTANCE)
df.setRepository(git.getRepository)
val toCommit = revWalk.parseCommit(git.getRepository.resolve(to))
from match {
case None => {
toCommit.getParentCount match {
case 0 =>
df.scan(
new EmptyTreeIterator(),
new CanonicalTreeParser(null, git.getRepository.newObjectReader(), toCommit.getTree)
)
.asScala
case _ => df.scan(toCommit.getParent(0), toCommit.getTree).asScala
}
}
case Some(from) => {
val fromCommit = revWalk.parseCommit(git.getRepository.resolve(from))
df.scan(fromCommit.getTree, toCommit.getTree).asScala
}
}
}
}
def getParentCommitId(git: Git, id: String): Option[String] = {
using(new RevWalk(git.getRepository)) { revWalk =>
val commit = revWalk.parseCommit(git.getRepository.resolve(id))
commit.getParentCount match {
case 0 => None
case _ => Some(commit.getParent(0).getName)
}
}
}
def getDiffs(
git: Git,
from: Option[String],
to: String,
fetchContent: Boolean,
makePatch: Boolean
): List[DiffInfo] = {
val diffs = getDiffEntries(git, from, to)
diffs.map { diff =>
if (diffs.size > 100) {
DiffInfo(
changeType = diff.getChangeType,
oldPath = diff.getOldPath,
newPath = diff.getNewPath,
oldContent = None,
newContent = None,
oldIsImage = false,
newIsImage = false,
oldObjectId = Option(diff.getOldId).map(_.name),
newObjectId = Option(diff.getNewId).map(_.name),
oldMode = diff.getOldMode.toString,
newMode = diff.getNewMode.toString,
tooLarge = true,
patch = None
)
} else {
val oldIsImage = FileUtil.isImage(diff.getOldPath)
val newIsImage = FileUtil.isImage(diff.getNewPath)
if (!fetchContent || oldIsImage || newIsImage) {
DiffInfo(
changeType = diff.getChangeType,
oldPath = diff.getOldPath,
newPath = diff.getNewPath,
oldContent = None,
newContent = None,
oldIsImage = oldIsImage,
newIsImage = newIsImage,
oldObjectId = Option(diff.getOldId).map(_.name),
newObjectId = Option(diff.getNewId).map(_.name),
oldMode = diff.getOldMode.toString,
newMode = diff.getNewMode.toString,
tooLarge = false,
patch = (if (makePatch) Some(makePatchFromDiffEntry(git, diff)) else None) // TODO use DiffFormatter
)
} else {
DiffInfo(
changeType = diff.getChangeType,
oldPath = diff.getOldPath,
newPath = diff.getNewPath,
oldContent = JGitUtil
.getContentFromId(git, diff.getOldId.toObjectId, false)
.filter(FileUtil.isText)
.map(convertFromByteArray),
newContent = JGitUtil
.getContentFromId(git, diff.getNewId.toObjectId, false)
.filter(FileUtil.isText)
.map(convertFromByteArray),
oldIsImage = oldIsImage,
newIsImage = newIsImage,
oldObjectId = Option(diff.getOldId).map(_.name),
newObjectId = Option(diff.getNewId).map(_.name),
oldMode = diff.getOldMode.toString,
newMode = diff.getNewMode.toString,
tooLarge = false,
patch = (if (makePatch) Some(makePatchFromDiffEntry(git, diff)) else None) // TODO use DiffFormatter
)
}
}
}.toList
}
  // Formats a single diff entry and strips the 4 header lines ("diff --git", index,
  // ---/+++ markers) so only the hunk body remains.
  private def makePatchFromDiffEntry(git: Git, diff: DiffEntry): String = {
    val out = new ByteArrayOutputStream()
    using(new DiffFormatter(out)) { formatter =>
      formatter.setRepository(git.getRepository)
      formatter.format(diff)
      val patch = new String(out.toByteArray) // TODO charset???
      patch.split("\\n").drop(4).mkString("\\n")
    }
  }
/**
* Returns the list of branch names of the specified commit.
*/
def getBranchesOfCommit(git: Git, commitId: String): List[String] =
using(new RevWalk(git.getRepository)) { revWalk =>
defining(revWalk.parseCommit(git.getRepository.resolve(commitId + "^0"))) { commit =>
git.getRepository.getRefDatabase
.getRefsByPrefix(Constants.R_HEADS)
.asScala
.filter { e =>
(revWalk.isMergedInto(
commit,
revWalk.parseCommit(e.getObjectId)
))
}
.map { e =>
e.getName.substring(Constants.R_HEADS.length)
}
.toList
.sorted
}
}
  /**
   * Returns the list of tags which pointed on the specified commit.
   */
  def getTagsOnCommit(git: Git, commitId: String): List[String] = {
    // NOTE(review): getAllRefsByPeeledObjectId is deprecated in newer JGit; consider
    // RefDatabase-based lookup when upgrading.
    git.getRepository.getAllRefsByPeeledObjectId.asScala
      .get(git.getRepository.resolve(commitId + "^0"))
      .map {
        _.asScala
          .collect {
            case x if x.getName.startsWith(Constants.R_TAGS) =>
              x.getName.substring(Constants.R_TAGS.length)
          }
          .toList
          .sorted
      }
      .getOrElse {
        List.empty
      }
  }
  /**
   * Returns the list of tags which contains the specified commit.
   */
  def getTagsOfCommit(git: Git, commitId: String): List[String] =
    using(new RevWalk(git.getRepository)) { revWalk =>
      defining(revWalk.parseCommit(git.getRepository.resolve(commitId + "^0"))) { commit =>
        git.getRepository.getRefDatabase
          .getRefsByPrefix(Constants.R_TAGS)
          .asScala
          .filter { e =>
            // Keep tags whose target is a descendant of (or equal to) the commit.
            (revWalk.isMergedInto(
              commit,
              revWalk.parseCommit(e.getObjectId)
            ))
          }
          .map { e =>
            e.getName.substring(Constants.R_TAGS.length)
          }
          .toList
          .sorted
          .reverse
      }
    }
  // Creates a bare repository at the given directory and enables HTTP receive-pack on it.
  def initRepository(dir: java.io.File): Unit =
    using(new RepositoryBuilder().setGitDir(dir).setBare.build) { repository =>
      repository.create(true)
      setReceivePack(repository)
    }
  // Clones `from` into a new bare repository at `to` and enables HTTP receive-pack.
  def cloneRepository(from: java.io.File, to: java.io.File): Unit =
    using(Git.cloneRepository.setURI(from.toURI.toString).setDirectory(to).setBare(true).call) { git =>
      setReceivePack(git.getRepository)
    }
  // True when the repository has no commits yet (HEAD cannot be resolved).
  def isEmpty(git: Git): Boolean = git.getRepository.resolve(Constants.HEAD) == null
  // Persists http.receivepack=true so pushes over HTTP are accepted.
  private def setReceivePack(repository: org.eclipse.jgit.lib.Repository): Unit =
    defining(repository.getConfig) { config =>
      config.setBoolean("http", null, "receivepack", true)
      config.save
    }
  // Resolves the revision to use for the repository viewer: the requested revstr
  // (or the configured default branch when empty), falling back to the first branch.
  // Returns the resolved ObjectId together with the revision string, or None when
  // nothing resolves (resolve returns null for unknown revisions, filtered below).
  def getDefaultBranch(
    git: Git,
    repository: RepositoryService.RepositoryInfo,
    revstr: String = ""
  ): Option[(ObjectId, String)] = {
    Seq(
      Some(if (revstr.isEmpty) repository.repository.defaultBranch else revstr),
      repository.branchList.headOption
    ).flatMap {
      case Some(rev) => Some((git.getRepository.resolve(rev), rev))
      case None => None
    }
    .find(_._1 != null)
  }
def createTag(git: Git, name: String, message: Option[String], commitId: String) = {
try {
val objectId: ObjectId = git.getRepository.resolve(commitId)
using(new RevWalk(git.getRepository)) { walk =>
val tagCommand = git.tag().setName(name).setObjectId(walk.parseCommit(objectId))
message.foreach { message =>
tagCommand.setMessage(message)
}
tagCommand.call()
}
Right("Tag added.")
} catch {
case e: GitAPIException => Left("Sorry, some Git operation error occurs.")
case e: ConcurrentRefUpdateException => Left("Sorry some error occurs.")
case e: InvalidTagNameException => Left("Sorry, that name is invalid.")
case e: NoHeadException => Left("Sorry, this repo doesn't have HEAD reference")
}
}
  // Creates newBranch starting at fromBranch; returns Right(message) on success or
  // Left(user-facing error message) on failure.
  def createBranch(git: Git, fromBranch: String, newBranch: String) = {
    try {
      git.branchCreate().setStartPoint(fromBranch).setName(newBranch).call()
      Right("Branch created.")
    } catch {
      case e: RefAlreadyExistsException => Left("Sorry, that branch already exists.")
      // JGitInternalException occurs when new branch name is 'a' and the branch whose name is 'a/*' exists.
      case _: InvalidRefNameException | _: JGitInternalException => Left("Sorry, that name is invalid.")
    }
  }
def createDirCacheEntry(path: String, mode: FileMode, objectId: ObjectId): DirCacheEntry = {
val entry = new DirCacheEntry(path)
entry.setFileMode(mode)
entry.setObjectId(objectId)
entry
}
  // Inserts a new commit with the given tree and (optional) parent, updates the ref
  // to point at it, and invalidates the commit-count cache. Returns the new commit id.
  def createNewCommit(
    git: Git,
    inserter: ObjectInserter,
    headId: AnyObjectId,
    treeId: AnyObjectId,
    ref: String,
    fullName: String,
    mailAddress: String,
    message: String
  ): ObjectId = {
    val newCommit = new CommitBuilder()
    newCommit.setCommitter(new PersonIdent(fullName, mailAddress))
    newCommit.setAuthor(new PersonIdent(fullName, mailAddress))
    newCommit.setMessage(message)
    // A null headId means this is the repository's first (parentless) commit.
    if (headId != null) {
      newCommit.setParentIds(List(headId).asJava)
    }
    newCommit.setTreeId(treeId)
    val newHeadId = inserter.insert(newCommit)
    inserter.flush()
    inserter.close()
    val refUpdate = git.getRepository.updateRef(ref)
    refUpdate.setNewObjectId(newHeadId)
    refUpdate.update()
    // The branch moved, so cached commit counts for this repository are stale.
    removeCache(git)
    newHeadId
  }
  /**
   * Read submodule information from .gitmodules
   */
  def getSubmodules(git: Git, tree: RevTree, baseUrl: Option[String]): List[SubmoduleInfo] = {
    val repository = git.getRepository
    getContentFromPath(git, tree, ".gitmodules", true).map { bytes =>
      (try {
        val config = new BlobBasedConfig(repository.getConfig(), bytes)
        config.getSubsections("submodule").asScala.map { module =>
          val path = config.getString("submodule", module, "path")
          val url = config.getString("submodule", module, "url")
          SubmoduleInfo(module, path, url, StringUtil.getRepositoryViewerUrl(url, baseUrl))
        }
      } catch {
        // A malformed .gitmodules is logged and treated as "no submodules".
        case e: ConfigInvalidException => {
          logger.error("Failed to load .gitmodules file for " + repository.getDirectory(), e)
          Nil
        }
      }).toList
    } getOrElse Nil
  }
  /**
   * Get object content of the given path as byte array from the Git repository.
   *
   * @param git the Git object
   * @param revTree the rev tree
   * @param path the path
   * @param fetchLargeFile if false then returns None for the large file
   * @return the byte array of content or None if object does not exist
   */
  def getContentFromPath(git: Git, revTree: RevTree, path: String, fetchLargeFile: Boolean): Option[Array[Byte]] = {
    // Linear scan of the recursive tree walk until the exact path matches.
    @scala.annotation.tailrec
    def getPathObjectId(path: String, walk: TreeWalk): Option[ObjectId] = walk.next match {
      case true if (walk.getPathString == path) => Some(walk.getObjectId(0))
      case true => getPathObjectId(path, walk)
      case false => None
    }
    using(new TreeWalk(git.getRepository)) { treeWalk =>
      treeWalk.addTree(revTree)
      treeWalk.setRecursive(true)
      getPathObjectId(path, treeWalk)
    } flatMap { objectId =>
      getContentFromId(git, objectId, fetchLargeFile)
    }
  }
def getLfsObjects(text: String): Map[String, String] = {
if (text.startsWith("version https://git-lfs.github.com/spec/v1")) {
// LFS objects
text
.split("\\n")
.map { line =>
val dim = line.split(" ")
dim(0) -> dim(1)
}
.toMap
} else {
Map.empty
}
}
def getContentSize(loader: ObjectLoader): Long = {
if (loader.isLarge) {
loader.getSize
} else {
val bytes = loader.getCachedBytes
val text = new String(bytes, "UTF-8")
val attr = getLfsObjects(text)
attr.get("size") match {
case Some(size) => size.toLong
case None => loader.getSize
}
}
}
  // True when the (small) object content starts with the Git LFS pointer header.
  def isLfsPointer(loader: ObjectLoader): Boolean = {
    !loader.isLarge && new String(loader.getBytes(), "UTF-8").startsWith("version https://git-lfs.github.com/spec/v1")
  }
  // Classifies the object for the content viewer: "image" (by file extension),
  // "large" (over the size threshold), "text" (decodable content), or "binary".
  // Content bytes are only loaded for the "other" case.
  def getContentInfo(git: Git, path: String, objectId: ObjectId): ContentInfo = {
    // Viewer
    using(git.getRepository.getObjectDatabase) { db =>
      val loader = db.open(objectId)
      val isLfs = isLfsPointer(loader)
      val large = FileUtil.isLarge(loader.getSize)
      val viewer = if (FileUtil.isImage(path)) "image" else if (large) "large" else "other"
      val bytes = if (viewer == "other") JGitUtil.getContentFromId(git, objectId, false) else None
      val size = Some(getContentSize(loader))
      if (viewer == "other") {
        // LFS pointers are shown as binary, not as their pointer text.
        if (!isLfs && bytes.isDefined && FileUtil.isText(bytes.get)) {
          // text
          ContentInfo(
            "text",
            size,
            Some(StringUtil.convertFromByteArray(bytes.get)),
            Some(StringUtil.detectEncoding(bytes.get))
          )
        } else {
          // binary
          ContentInfo("binary", size, None, None)
        }
      } else {
        // image or large
        ContentInfo(viewer, size, None, None)
      }
    }
  }
  /**
   * Get object content of the given object id as byte array from the Git repository.
   *
   * @param git the Git object
   * @param id the object id
   * @param fetchLargeFile if false then returns None for the large file
   * @return the byte array of content or None if object does not exist
   */
  def getContentFromId(git: Git, id: ObjectId, fetchLargeFile: Boolean): Option[Array[Byte]] =
    try {
      using(git.getRepository.getObjectDatabase) { db =>
        val loader = db.open(id)
        // Streaming-only objects (loader.isLarge) cannot be returned as a byte array.
        if (loader.isLarge || (fetchLargeFile == false && FileUtil.isLarge(loader.getSize))) {
          None
        } else {
          Some(loader.getBytes)
        }
      }
    } catch {
      case e: MissingObjectException => None
    }
  /**
   * Get objectLoader of the given object id from the Git repository.
   *
   * @param git the Git object
   * @param id the object id
   * @param f the function process ObjectLoader
   * @return None if object does not exist
   */
  def getObjectLoaderFromId[A](git: Git, id: ObjectId)(f: ObjectLoader => A): Option[A] =
    try {
      using(git.getRepository.getObjectDatabase) { db =>
        Some(f(db.open(id)))
      }
    } catch {
      case e: MissingObjectException => None
    }
/**
* Returns all commit id in the specified repository.
*/
def getAllCommitIds(git: Git): Seq[String] =
if (isEmpty(git)) {
Nil
} else {
val existIds = new scala.collection.mutable.ListBuffer[String]()
val i = git.log.all.call.iterator
while (i.hasNext) {
existIds += i.next.name
}
existIds.toSeq
}
  /**
   * Walks every entry of the tree reachable from the given object id (recursively)
   * and applies f to each (path, tree parser) pair, collecting the results.
   *
   * @param git the Git object
   * @param id  id of a commit/tree to walk (resolved via RevWalk.parseTree)
   * @param f   callback receiving the entry path and its CanonicalTreeParser
   */
  def processTree[T](git: Git, id: ObjectId)(f: (String, CanonicalTreeParser) => T): Seq[T] = {
    using(new RevWalk(git.getRepository)) { revWalk =>
      using(new TreeWalk(git.getRepository)) { treeWalk =>
        val index = treeWalk.addTree(revWalk.parseTree(id))
        treeWalk.setRecursive(true)
        // Mutable buffer is local only; the TreeWalk cursor forces imperative iteration.
        val result = new collection.mutable.ListBuffer[T]()
        while (treeWalk.next) {
          result += f(treeWalk.getPathString, treeWalk.getTree(index, classOf[CanonicalTreeParser]))
        }
        result.toSeq
      }
    }
  }
  /**
   * Returns the identifier of the root commit (or latest merge commit) of the specified branch.
   *
   * Scans the request branch's log for the first commit that already exists in the
   * base repository AND is reachable from the base branch.
   * NOTE(review): `.head` throws NoSuchElementException when no such commit exists —
   * presumably callers guarantee a common history; confirm.
   */
  def getForkedCommitId(
    oldGit: Git,
    newGit: Git,
    userName: String,
    repositoryName: String,
    branch: String,
    requestUserName: String,
    requestRepositoryName: String,
    requestBranch: String
  ): String =
    defining(getAllCommitIds(oldGit)) { existIds =>
      getCommitLogs(newGit, requestBranch, true) { commit =>
        existIds.contains(commit.name) && getBranchesOfCommit(oldGit, commit.getName).contains(branch)
      }.head.id
    }
  /**
   * Fetch pull request contents into refs/pull/${issueId}/head and return (commitIdTo, commitIdFrom)
   *
   * The request branch is force-fetched from the request repository into the base
   * repository's pull ref; commitIdTo is the fetched head and commitIdFrom is the
   * fork point computed by getForkedCommitId.
   */
  def updatePullRequest(
    userName: String,
    repositoryName: String,
    branch: String,
    issueId: Int,
    requestUserName: String,
    requestRepositoryName: String,
    requestBranch: String
  ): (String, String) =
    using(
      Git.open(Directory.getRepositoryDir(userName, repositoryName)),
      Git.open(Directory.getRepositoryDir(requestUserName, requestRepositoryName))
    ) { (oldGit, newGit) =>
      // Force-update so repeated pushes to the request branch refresh the pull ref.
      oldGit.fetch
        .setRemote(Directory.getRepositoryDir(requestUserName, requestRepositoryName).toURI.toString)
        .setRefSpecs(new RefSpec(s"refs/heads/${requestBranch}:refs/pull/${issueId}/head").setForceUpdate(true))
        .call
      val commitIdTo = oldGit.getRepository.resolve(s"refs/pull/${issueId}/head").getName
      val commitIdFrom = getForkedCommitId(
        oldGit,
        newGit,
        userName,
        repositoryName,
        branch,
        requestUserName,
        requestRepositoryName,
        requestBranch
      )
      (commitIdTo, commitIdFrom)
    }
  /**
   * Returns the last modified commit of specified path
   *
   * @param git the Git object
   * @param startCommit the search base commit id
   * @param path the path of target file or directory
   * @return the last modified commit of specified path
   *
   * NOTE(review): `iterator.next` throws NoSuchElementException when no commit under
   * startCommit touches the path — presumably callers only pass existing paths; confirm.
   */
  def getLastModifiedCommit(git: Git, startCommit: RevCommit, path: String): RevCommit = {
    git.log.add(startCommit).addPath(path).setMaxCount(1).call.iterator.next
  }
  /**
   * Lists every branch of the repository with committer metadata and, for branches
   * other than the origin default branch, ahead/behind/merged information relative
   * to the default branch.
   *
   * @param owner         repository owner
   * @param name          repository name
   * @param defaultBranch branch to compare each branch against
   * @param origin        when true, the default branch itself gets no merge info
   */
  def getBranches(owner: String, name: String, defaultBranch: String, origin: Boolean): Seq[BranchInfo] = {
    using(Git.open(getRepositoryDir(owner, name))) { git =>
      val repo = git.getRepository
      val defaultObject = repo.resolve(defaultBranch)
      git.branchList.call.asScala.map { ref =>
        // A fresh walk per branch: the merge-base computation below mutates walk state.
        val walk = new RevWalk(repo)
        try {
          val defaultCommit = walk.parseCommit(defaultObject)
          val branchName = ref.getName.stripPrefix("refs/heads/")
          val branchCommit = walk.parseCommit(ref.getObjectId)
          val when = branchCommit.getCommitterIdent.getWhen
          val committer = branchCommit.getCommitterIdent.getName
          val committerEmail = branchCommit.getCommitterIdent.getEmailAddress
          val mergeInfo = if (origin && branchName == defaultBranch) {
            None
          } else {
            // First pass: find the merge base of branch and default.
            walk.reset()
            walk.setRevFilter(RevFilter.MERGE_BASE)
            walk.markStart(branchCommit)
            walk.markStart(defaultCommit)
            val mergeBase = walk.next()
            // Second pass: count commits on each side of the merge base.
            walk.reset()
            walk.setRevFilter(RevFilter.ALL)
            Some(
              BranchMergeInfo(
                ahead = RevWalkUtils.count(walk, branchCommit, mergeBase),
                behind = RevWalkUtils.count(walk, defaultCommit, mergeBase),
                isMerged = walk.isMergedInto(branchCommit, defaultCommit)
              )
            )
          }
          BranchInfo(branchName, committer, when, committerEmail, mergeInfo, ref.getObjectId.name)
        } finally {
          walk.dispose()
        }
      }
    }
  }
  /**
   * Runs git-blame for the given path at revision `id` and returns one BlameInfo per
   * distinct source commit, each carrying the set of result line numbers it owns.
   * Returns an empty collection when the revision cannot be resolved.
   */
  def getBlame(git: Git, id: String, path: String): Iterable[BlameInfo] = {
    Option(git.getRepository.resolve(id))
      .map { commitId =>
        val blamer = new org.eclipse.jgit.api.BlameCommand(git.getRepository)
        blamer.setStartCommit(commitId)
        blamer.setFilePath(path)
        val blame = blamer.call()
        var blameMap = Map[String, JGitUtil.BlameInfo]()
        var idLine = List[(String, Int)]()
        // NOTE(review): this map is executed purely for its side effects on blameMap
        // and idLine; the `commits` val itself is never read.
        val commits = 0.to(blame.getResultContents().size() - 1).map { i =>
          val c = blame.getSourceCommit(i)
          if (!blameMap.contains(c.name)) {
            blameMap += c.name -> JGitUtil.BlameInfo(
              c.name,
              c.getAuthorIdent.getName,
              c.getAuthorIdent.getEmailAddress,
              c.getAuthorIdent.getWhen,
              // Prior commit touching the same source path, if any (skip(1) skips c itself).
              Option(git.log.add(c).addPath(blame.getSourcePath(i)).setSkip(1).setMaxCount(2).call.iterator.next)
                .map(_.name),
              if (blame.getSourcePath(i) == path) { None } else { Some(blame.getSourcePath(i)) },
              c.getCommitterIdent.getWhen,
              c.getShortMessage,
              Set.empty
            )
          }
          idLine :+= (c.name, i)
        }
        // Group result line indices by owning commit id.
        val limeMap = idLine.groupBy(_._1).mapValues(_.map(_._2).toSet)
        blameMap.values.map { b =>
          b.copy(lines = limeMap(b.id))
        }
      }
      .getOrElse(Seq.empty)
  }
/**
* Returns sha1
*
* @param owner repository owner
* @param name repository name
* @param revstr A git object references expression
* @return sha1
*/
def getShaByRef(owner: String, name: String, revstr: String): Option[String] = {
using(Git.open(getRepositoryDir(owner, name))) { git =>
Option(git.getRepository.resolve(revstr)).map(ObjectId.toString(_))
}
}
def getFileSize(git: Git, repository: RepositoryService.RepositoryInfo, treeWalk: TreeWalk): Long = {
val attrs = treeWalk.getAttributes
val loader = git.getRepository.open(treeWalk.getObjectId(0))
if (attrs.containsKey("filter") && attrs.get("filter").getValue == "lfs") {
val lfsAttrs = getLfsAttributes(loader)
lfsAttrs.get("size").map(_.toLong).get
} else {
loader.getSize
}
}
  /**
   * Returns the effective size of the file at `path` within the given tree,
   * delegating to the TreeWalk-based overload.
   * NOTE(review): TreeWalk.forPath returns null when the path is absent, which would
   * NPE inside the overload — presumably callers only pass existing paths; confirm.
   */
  def getFileSize(git: Git, repository: RepositoryService.RepositoryInfo, tree: RevTree, path: String): Long = {
    using(TreeWalk.forPath(git.getRepository, path, tree)) { treeWalk =>
      getFileSize(git, repository, treeWalk)
    }
  }
def openFile[T](git: Git, repository: RepositoryService.RepositoryInfo, treeWalk: TreeWalk)(
f: InputStream => T
): T = {
val attrs = treeWalk.getAttributes
val loader = git.getRepository.open(treeWalk.getObjectId(0))
if (attrs.containsKey("filter") && attrs.get("filter").getValue == "lfs") {
val lfsAttrs = getLfsAttributes(loader)
if (lfsAttrs.nonEmpty) {
val oid = lfsAttrs("oid").split(":")(1)
val file = new File(FileUtil.getLfsFilePath(repository.owner, repository.name, oid))
using(new FileInputStream(FileUtil.getLfsFilePath(repository.owner, repository.name, oid))) { in =>
f(in)
}
} else {
throw new NoSuchElementException("LFS attribute is empty.")
}
} else {
using(loader.openStream()) { in =>
f(in)
}
}
}
  /**
   * Opens the file at `path` within the given tree and applies f to its stream,
   * delegating to the TreeWalk-based overload.
   * NOTE(review): TreeWalk.forPath returns null for a missing path — confirm callers
   * only pass existing paths.
   */
  def openFile[T](git: Git, repository: RepositoryService.RepositoryInfo, tree: RevTree, path: String)(
    f: InputStream => T
  ): T = {
    using(TreeWalk.forPath(git.getRepository, path, tree)) { treeWalk =>
      openFile(git, repository, treeWalk)(f)
    }
  }
private def getLfsAttributes(loader: ObjectLoader): Map[String, String] = {
val bytes = loader.getCachedBytes
val text = new String(bytes, "UTF-8")
JGitUtil.getLfsObjects(text)
}
}
| mann-ed/gitbucket | src/main/scala/gitbucket/core/util/JGitUtil.scala | Scala | apache-2.0 | 43,981 |
package applicant.ml.rnn
import java.io.{File, IOException}
import java.nio.charset.Charset
import java.nio.file.Files
import java.util.{Collections, HashMap, LinkedList, List, Map, NoSuchElementException, Random}
import org.deeplearning4j.datasets.iterator.DataSetIterator
import org.nd4j.linalg.dataset.DataSet
import org.nd4j.linalg.dataset.api.DataSetPreProcessor
import org.nd4j.linalg.factory.Nd4j
import org.slf4j.{Logger, LoggerFactory}
import scala.collection.JavaConverters._
/**
* DataSetIterator used to pull String minibatches from an input file.
*/
class CharacterIterator(file: String, miniBatchSize: Int, exampleLength: Int) extends DataSetIterator {
  val log: Logger = LoggerFactory.getLogger(getClass())
  // Start offsets (into fileCharacters) of the examples not yet served this epoch.
  val exampleStartOffsets: LinkedList[Integer] = new LinkedList()
  // Mapping character -> one-hot index, populated in init().
  var charToIdxMap: Map[Character, Integer] = new HashMap()
  var validCharacters: Array[Char] = Array[Char]()
  // Entire input file reduced to its valid characters, populated in init().
  var fileCharacters: Array[Char] = Array[Char]()
  // NOTE(review): never read or written after initialization — appears to be dead state.
  var examplesSoFar = 0
  initValidation(file, miniBatchSize)
  init(file, miniBatchSize)
  /**
   * Validates input parameters.
   *
   * @param file input file
   * @param miniBatchSize batch size
   */
  private def initValidation(file: String, miniBatchSize: Int) {
    if (!new File(file).exists()) {
      val msg = s"Could not access file (does not exist): $file"
      throw new IOException(msg)
    }
    if (miniBatchSize <= 0) {
      val msg = "Invalid miniBatchSize (must be > 0)"
      throw new IllegalArgumentException(msg)
    }
  }
  /**
   * Initializes the iterator.
   *
   * @param file input file
   * @param miniBatchSize batch size
   */
  private def init(file: String, miniBatchSize: Int) {
    charToIdxMap = Characters.getIndices()
    validCharacters = Characters.getCharacters()
    // Load file and convert contents to a char[]
    val newLineValid: Boolean = charToIdxMap.containsKey('\\n')
    val lines = Files.readAllLines(new File(file).toPath, Charset.forName(Characters.getCharsetName())).asScala
    // Add lines.size() to account for newline characters at end of each line
    val maxSize: Int = lines.map(_.length).fold(lines.size)(_ + _ )
    // Keep only characters present in the vocabulary; re-append '\n' per line if valid.
    fileCharacters = lines.flatMap({ s =>
      val filtered = s.filter(charToIdxMap.containsKey(_)).toString
      if (newLineValid) filtered + "\\n" else filtered
    }).toArray
    if (exampleLength >= fileCharacters.length) {
      val msg = s"exampleLength=$exampleLength cannot exceed number of valid characters in file (${fileCharacters.length})"
      throw new IllegalArgumentException(msg)
    }
    val nRemoved = maxSize - fileCharacters.length
    val msg = s"Loaded and converted file: ${fileCharacters.length} valid characters of ${maxSize} total characters (${nRemoved}) removed"
    log.info(msg)
    //This defines the order in which parts of the file are fetched
    // NOTE(review): offsets here advance by exampleLength, while reset() and
    // totalExamples() use miniBatchSize — the two stride computations disagree; confirm.
    val nMinibatchesPerEpoch = (fileCharacters.length-1) / exampleLength - 2 //-2: for end index, and for partial example
    (0 until nMinibatchesPerEpoch).foreach { i =>
      exampleStartOffsets.add(i * exampleLength)
    }
    // Shuffle batch order to prevent bias in the ml algorithm
    Collections.shuffle(exampleStartOffsets, Characters.getRandom())
  }
  /**
   * {@inheritDoc}
   */
  def hasNext: Boolean = {
    return exampleStartOffsets.size() > 0
  }
  /**
   * {@inheritDoc}
   */
  def next(): DataSet = {
    return next(miniBatchSize)
  }
  /**
   * {@inheritDoc}
   * Builds a one-hot-encoded minibatch of up to `num` examples; each example predicts
   * the next character at every time step.
   */
  def next(num: Int): DataSet = {
    if (exampleStartOffsets.size() == 0) {
      throw new NoSuchElementException()
    }
    val currMinibatchSize = Math.min(num, exampleStartOffsets.size())
    // Note the order here:
    // dimension 0 = number of examples in minibatch
    // dimension 1 = size of each vector (i.e., number of characters)
    // dimension 2 = length of each time series/example
    val input = Nd4j.create(Array(currMinibatchSize, validCharacters.length, exampleLength), 'f')
    val labels = Nd4j.create(Array(currMinibatchSize, validCharacters.length, exampleLength), 'f')
    (0 until currMinibatchSize).foreach { i =>
      val startIdx = exampleStartOffsets.removeFirst()
      val endIdx = startIdx + exampleLength
      var currCharIdx = charToIdxMap.get(fileCharacters(startIdx)) //Current input
      var c = 0
      (startIdx + 1 until endIdx).foreach { j =>
        val nextCharIdx = charToIdxMap.get(fileCharacters(j)) //Next character to predict
        input.putScalar(Array[Int](i, currCharIdx, c), 1.0)
        labels.putScalar(Array[Int](i, nextCharIdx, c), 1.0)
        currCharIdx = nextCharIdx
        c += 1
      }
    }
    return new DataSet(input, labels)
  }
  /**
   * {@inheritDoc}
   */
  def totalExamples(): Int = {
    return (fileCharacters.length - 1) / miniBatchSize - 2
  }
  /**
   * {@inheritDoc}
   */
  def inputColumns(): Int = {
    return validCharacters.length
  }
  /**
   * {@inheritDoc}
   */
  def totalOutcomes(): Int = {
    return validCharacters.length
  }
  /**
   * {@inheritDoc}
   * Rebuilds and reshuffles the epoch's example offsets (stride miniBatchSize here,
   * unlike init() — see NOTE in init()).
   */
  def reset() {
    exampleStartOffsets.clear()
    val nMinibatchesPerEpoch = totalExamples()
    (0 until nMinibatchesPerEpoch).foreach { i =>
      exampleStartOffsets.add(i * miniBatchSize)
    }
    // Shuffle batch order to prevent bias in the ml algorithm
    Collections.shuffle(exampleStartOffsets, Characters.getRandom())
  }
  /**
   * {@inheritDoc}
   */
  def batch(): Int = {
    return miniBatchSize
  }
  /**
   * {@inheritDoc}
   */
  def cursor(): Int = {
    return totalExamples() - exampleStartOffsets.size()
  }
  /**
   * {@inheritDoc}
   */
  def numExamples(): Int = {
    return totalExamples()
  }
  /**
   * {@inheritDoc}
   */
  def setPreProcessor(preProcessor: DataSetPreProcessor) {
    throw new UnsupportedOperationException("Not implemented")
  }
  /**
   * {@inheritDoc}
   */
  def getLabels: List[String] = {
    throw new UnsupportedOperationException("Not implemented")
  }
  /**
   * {@inheritDoc}
   */
  def remove() {
    throw new UnsupportedOperationException("Not implemented")
  }
}
| dataworks/internship-2016 | etl/src/scala/applicant/ml/rnn/CharacterIterator.scala | Scala | apache-2.0 | 6,568 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.feeder
import io.gatling.BaseSpec
import io.gatling.core.CoreComponents
import io.gatling.core.structure.ScenarioContext
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.json.JsonParsers
import org.mockito.Mockito._
class JsonFeederSpec extends BaseSpec with FeederSupport {
  implicit val configuration = GatlingConfiguration.loadForTest()
  implicit val jsonParsers = JsonParsers()
  // Builds a mocked ScenarioContext whose core components expose the test configuration.
  def scenarioContext = {
    val ctx = mock[ScenarioContext]
    val coreComponents = mock[CoreComponents]
    when(coreComponents.configuration) thenReturn configuration
    when(ctx.coreComponents) thenReturn coreComponents
    ctx
  }
  "jsonFile" should "handle proper JSON file" in {
    val data = jsonFile("test.json").build(scenarioContext).toArray
    data.size shouldBe 2
    // First record of the classpath fixture test.json.
    data(0)("id") shouldBe 19434
  }
  "jsonUrl" should "retrieve and handle proper JSON file" in {
    // Same fixture, but loaded through its URL form.
    val data = jsonUrl(getClass.getClassLoader.getResource("test.json").toString).build(scenarioContext).toArray
    data.size shouldBe 2
    data(0)("id") shouldBe 19434
  }
  "JsonFeederFileParser" should "throw an exception when provided with bad resource" in {
    // "empty.json" does not exist on the classpath, so the stream argument is null.
    an[IllegalArgumentException] should be thrownBy
      new JsonFeederFileParser().stream(this.getClass.getClassLoader.getResourceAsStream("empty.json"))
  }
}
| timve/gatling | gatling-core/src/test/scala/io/gatling/core/feeder/JsonFeederSpec.scala | Scala | apache-2.0 | 1,970 |
package org.genericConfig.admin.shared.config
import play.api.libs.functional.syntax.unlift
import play.api.libs.json.{Format, JsPath}
import play.api.libs.functional.syntax._
/**
* Copyright (C) 2016 Gennadi Heimann genaheimann@gmail.com
*
* Created by Gennadi Heimann 23.03.2020
*/
// Transfer object for configuration requests; every field is optional and
// serialized as JSON null when absent.
case class ConfigParamsDTO (
  userId: Option[String] = None,
  configId : Option[String] = None,
  configUrl: Option[String] = None,
  configurationCourse : Option[String] = None,
  update : Option[ConfigUpdateDTO] = None
)
object ConfigParamsDTO {
  // Combinator order must match the case-class field order for apply/unapply to line up.
  implicit val format : Format[ConfigParamsDTO] = (
    (JsPath \\ "userId").format(Format.optionWithNull[String]) and
    (JsPath \\ "configId").format(Format.optionWithNull[String]) and
    (JsPath \\ "configUrl").format(Format.optionWithNull[String]) and
    (JsPath \\ "configurationCourse").format(Format.optionWithNull[String]) and
    (JsPath \\ "update").format(Format.optionWithNull[ConfigUpdateDTO])
  )(ConfigParamsDTO.apply, unlift(ConfigParamsDTO.unapply))
}
} | gennadij/admin | shared/src/main/scala/org/genericConfig/admin/shared/config/ConfigParamsDTO.scala | Scala | apache-2.0 | 1,173 |
package giter8
import java.io.ByteArrayInputStream
import java.net.URI
import org.scalatest.{FlatSpec, Matchers}
import scala.language.implicitConversions
class JGitIgnoreTest extends FlatSpec with Matchers {
  // Lets plain string paths be passed where the API expects a URI.
  implicit def toURI(s: String): URI = new URI(s)
  "JGitIgnore" can "be created from Seq of string patterns" in {
    val patterns = Seq(".test")
    JGitIgnore(patterns: _*).getPatterns should contain theSameElementsAs patterns
  }
  it can "be created from InputStream" in {
    val patterns = Seq(".test")
    // One pattern per line, mirroring a .gitignore file.
    val stream = new ByteArrayInputStream(patterns.mkString("\n").getBytes())
    JGitIgnore(stream).getPatterns should contain theSameElementsAs patterns
  }
  it should "check if file is ignored" in {
    val ignore = JGitIgnore("iAmIgnored")
    ignore.isIgnored("iAmNotIgnored") shouldBe false
    ignore.isIgnored("iAmIgnored") shouldBe true
  }
  it should "support wildcards" in {
    val ignore = JGitIgnore("*.test")
    ignore.isIgnored("foo.test") shouldBe true
    ignore.isIgnored("bar.test") shouldBe true
  }
  it should "support nested directories" in {
    val ignore = JGitIgnore("*.test")
    ignore.isIgnored("foo/foo.test") shouldBe true
    ignore.isIgnored("bar/bar.test") shouldBe true
  }
  it should "support negation" in {
    val ignore = JGitIgnore("*", "!foo")
    ignore.isIgnored("foo") shouldBe false
    ignore.isIgnored("bar") shouldBe true
  }
  it should "support precedence" in {
    // Earlier patterns win: the negation listed first is overridden by "*".
    val ignore = JGitIgnore("!foo", "*")
    ignore.isIgnored("foo") shouldBe true
    ignore.isIgnored("bar") shouldBe true
  }
  it should "support relativised files" in {
    // With a base of "foo", the path is relativised before matching "foo/".
    val ignore = JGitIgnore("foo/")
    ignore.isIgnored("foo/bar") shouldBe true
    ignore.isIgnored("foo/bar", isDir = false, Some("foo")) shouldBe false
    ignore.isIgnored("foo/bar", isDir = true, Some("foo")) shouldBe false
  }
}
| wolfendale/giter8 | library/src/test/scala/giter8/JGitIgnoreTest.scala | Scala | apache-2.0 | 1,863 |
package torture
import scala.collection.mutable.ArrayBuffer
import Rand._
/**
 * Generates randomized RISC-V compressed (C-extension) branch/jump instruction
 * sequences for torture testing. Each helper prepares a pair of source registers
 * with a known equal/unequal relationship; the reversible test table pairs each
 * helper with the branch opcode and the label it must reach if the generator's
 * assumption about the operands holds.
 *
 * NOTE(review): helper names encode intended operand relationships (sameval,
 * diffval, sign) that depend on register-pool semantics not visible here —
 * confirm against HWRegPool before changing any pairing in reversible_tests_c.
 */
class SeqCBranch(xregs: HWRegPool) extends InstSeq
{
  override val seqname = "xbranch"
  // Branch target patched later by the framework; falling through to a crash label
  // signals a mispredicted (incorrect) branch decision.
  val taken = Label("__needs_branch_patch")
  val nottakens = ArrayBuffer[Label](Label("crash_backward"), Label("crash_forward"))
  val nottaken = rand_pick(nottakens)
  def reverse_label(l: Label) = if(l == taken) nottaken else taken
  def helper_two_srcs_sameval_samereg_any_c_x8_x15() = () =>
  {
    val reg_src = reg_read_x8_x15(xregs)
    (reg_src, reg_src)
  }
  // NOTE(review): identical body to the _any_ variant above — the "zero" case is
  // presumably meant to pick a zero-valued register; confirm.
  def helper_two_srcs_sameval_samereg_zero_c_x8_x15() = () =>
  {
    val reg_src = reg_read_x8_x15(xregs)
    (reg_src, reg_src)
  }
  //update
  //*********************************************************
  def helper_two_srcs_sameval_diffreg_any_c_x8_x15() = () =>
  {
    val reg_dst1 = reg_read_x8_x15(xregs)
    val reg_dst2 = reg_read_x8_x15(xregs)
    insts += C_ADDI(reg_dst1, Imm(rand_imm_32_0_31))
    insts += C_ADDI(reg_dst2, Imm(rand_imm_32_0_31))
    (reg_dst1, reg_dst2)
  }
  def helper_two_srcs_sameval_diffreg_zero_c_x8_x15() = () =>
  {
    val reg_dst1 = reg_write_visible_x8_x15(xregs)
    val reg_dst2 = reg_write_x8_x15(xregs)
    insts += C_ADDI(reg_dst1, Imm(rand_imm_32_0_31))
    insts += C_ADDI(reg_dst2, Imm(rand_imm_32_0_31))
    (reg_dst1, reg_dst2)
  }
  def helper_two_srcs_diffval_diffreg_bothpos_c_x8_x15() = () =>
  {
    val reg_dst1 = reg_write_visible_x8_x15(xregs)
    val reg_dst2 = reg_write_x8_x15(xregs)
    insts += C_ADDI(reg_dst1, Imm(rand_imm_32_0_31))
    insts += C_ADDI(reg_dst2, Imm(rand_imm_32_0_31))
    // signed (+, ++), unsigned (+, ++)
    (reg_dst1, reg_dst2)
  }
  def helper_two_srcs_diffval_diffreg_bothneg_c_x8_x15() = () =>
  {
    val reg_dst1 = reg_write_visible_x8_x15(xregs)
    val reg_dst2 = reg_write_visible_x8_x15(xregs)
    insts += C_ADDI(reg_dst1, Imm(rand_imm_32_0_31))
    insts += C_ADDI(reg_dst2, Imm(rand_imm_32_0_31))
    // signed (-, --), unsigned (++++, +++)
    (reg_dst1, reg_dst2)
  }
  // def helper_two_srcs_sameval_diffreg_oppositesign_c_x8_x15() = () =>
  // {
  //   val reg_src = reg_read_x8_x15(xregs)
  //   val reg_dst1 = reg_write_x8x15(xregs, reg_src)
  //   val reg_dst2 = reg_write_x8x15(xregs, reg_src)
  //   val reg_one = reg_write_visible_x8_x15(xregs)
  //   val reg_mask = reg_write_visible_x8_x15(xregs)
  //   insts += ADDI(reg_one, reg_read_x8_x15(xregs), Imm(1))
  //   insts += SLL(reg_one, reg_one, Imm(31))
  //   insts += ADDI(reg_mask, reg_read_x8_x15(xregs), Imm(-1))
  //   insts += XOR(reg_mask, reg_mask, reg_one)
  //   insts += AND(reg_dst1, reg_src, reg_mask)
  //   insts += OR(reg_dst2, reg_dst1, reg_one)
  //   // reg_dest1 sign bit 0, reg_dest2 sign bit 1
  //   (reg_dst1, reg_dst2)
  // }
  // Builds a 0x80000000 sign mask in-place with compressed ops, then clears the
  // sign bit in one register and sets it in the other.
  def helper_two_srcs_sameval_diffreg_oppositesign_c_x8_x15() = () =>
  {
    val reg_src = reg_read_x8_x15(xregs)
    val reg_one = reg_write_visible_x8_x15(xregs)
    val reg_mask = reg_write_visible_x8_x15(xregs)
    insts += C_ADDI(reg_one, Imm(1))
    insts += C_SLLI(reg_one, Imm(31))
    insts += C_ADDI(reg_mask, Imm(-1))
    insts += C_XOR(reg_mask, reg_one)
    insts += C_AND(reg_src, reg_mask)
    insts += C_OR(reg_one, reg_src)
    // reg_dest1 sign bit 0, reg_dest2 sign bit 1
    (reg_one, reg_src)
  }
  // def helper_two_srcs_diffval_diffreg_oppositesign_c_x8_x15() = () =>
  // {
  //   val reg_src1 = reg_read_x8_x15(xregs)
  //   val reg_src2 = reg_read_x8_x15(xregs)
  //   val reg_dst1 = reg_write_x8x15(xregs, reg_src1)
  //   val reg_dst2 = reg_write_x8x15(xregs, reg_src2)
  //   val reg_one = reg_write_visible_x8_x15(xregs)
  //   val reg_mask = reg_write_visible_x8_x15(xregs)
  //   insts += ADDI(reg_one, reg_read_x8_x15(xregs), Imm(1))
  //   insts += SLL(reg_one, reg_one, Imm(31))
  //   insts += ADDI(reg_mask, reg_read_x8_x15(xregs), Imm(-1))
  //   insts += XOR(reg_mask, reg_mask, reg_one)
  //   insts += AND(reg_dst1, reg_src1, reg_mask)
  //   insts += OR(reg_dst2, reg_src2, reg_one)
  //   // reg_dest1 sign bit 0, reg_dest2 sign bit 1
  //   (reg_dst1, reg_dst2)
  // }
  def helper_two_srcs_diffval_diffreg_oppositesign_c_x8_x15() = () =>
  {
    val reg_src1 = reg_read_x8_x15(xregs)
    val reg_src2 = reg_read_x8_x15(xregs)
    val reg_one = reg_write_visible_x8_x15(xregs)
    val reg_mask = reg_write_visible_x8_x15(xregs)
    insts += C_ADDI(reg_one, Imm(1))
    insts += C_SLLI(reg_one, Imm(31))
    insts += C_ADDI(reg_mask, Imm(-1))
    insts += C_XOR(reg_mask, reg_one)
    insts += C_AND(reg_src1, reg_mask)
    insts += C_OR(reg_src2, reg_one)
    // reg_dest1 sign bit 0, reg_dest2 sign bit 1
    (reg_src1, reg_src2)
  }
  // Unconditional jumps always reach `taken`.
  def seq_taken_c_j() = () =>
  {
    insts += C_J(taken)
  }
  def seq_taken_c_jal() = () =>
  {
    val reg_x1 = reg_write_ra(xregs)
    insts += C_JAL(taken)
  }
  def seq_taken_c_jalr() = () =>
  {
    val reg_x1 = reg_write_ra(xregs)
    val reg_src1 = reg_read_zero(xregs)
    val reg_dst1 = reg_write_hidden(xregs)
    val reg_dst2 = reg_write_hidden(xregs)
    //insts += LA(reg_dst1, Label("__needs_jalr_patch1"))
    insts += C_JALR(reg_dst2, reg_dst1, Label("__needs_jalr_patch2"))
  }
  //tianchuan:We construct c instruction architecture
  //*************************************************
  // Emits one compressed branch using one register from the helper-prepared pair.
  def get_one_regs_and_branch_with_label( op: Opcode, helper: () => (Operand, Operand), label: Label, flip_ops:Boolean = false) = () =>
  {
    val regs = helper()
    if(!flip_ops) insts += op(regs._1, label) else insts += op(regs._2, label)
  }
  //tianchuan:We construct c instruction architecture
  //*************************************************
  // (opcode, operand-setup helper, label the branch must reach) triples.
  val reversible_tests_c = List(
    (C_BEQZ, helper_two_srcs_sameval_samereg_any_c_x8_x15, taken),
    (C_BEQZ, helper_two_srcs_sameval_samereg_zero_c_x8_x15, taken),
    (C_BEQZ, helper_two_srcs_sameval_diffreg_any_c_x8_x15, taken),
    (C_BEQZ, helper_two_srcs_sameval_diffreg_zero_c_x8_x15, taken),
    (C_BEQZ, helper_two_srcs_diffval_diffreg_bothpos_c_x8_x15, nottaken),
    (C_BEQZ, helper_two_srcs_diffval_diffreg_bothneg_c_x8_x15, nottaken),
    (C_BEQZ, helper_two_srcs_sameval_diffreg_oppositesign_c_x8_x15, nottaken),
    (C_BEQZ, helper_two_srcs_diffval_diffreg_oppositesign_c_x8_x15, nottaken),
    (C_BNEZ, helper_two_srcs_sameval_samereg_any_c_x8_x15, nottaken),
    (C_BNEZ, helper_two_srcs_sameval_samereg_zero_c_x8_x15, nottaken),
    (C_BNEZ, helper_two_srcs_sameval_diffreg_any_c_x8_x15, nottaken),
    (C_BNEZ, helper_two_srcs_sameval_diffreg_zero_c_x8_x15, nottaken),
    (C_BNEZ, helper_two_srcs_diffval_diffreg_bothpos_c_x8_x15, taken),
    (C_BNEZ, helper_two_srcs_diffval_diffreg_bothneg_c_x8_x15, taken),
    (C_BNEZ, helper_two_srcs_sameval_diffreg_oppositesign_c_x8_x15, taken),
    (C_BNEZ, helper_two_srcs_diffval_diffreg_oppositesign_c_x8_x15, taken)
  )
  val candidates = new ArrayBuffer[() => insts.type]
  candidates += seq_taken_c_j()
  candidates += seq_taken_c_jal()
  candidates += seq_taken_c_jalr()
  reversible_tests_c.foreach( t => candidates += get_one_regs_and_branch_with_label(t._1, t._2, t._3, false))
  // Pick and run one candidate generator; it appends its instructions to `insts`.
  rand_pick(candidates)()
}
| SI-RISCV/e200_opensource | riscv-tools/riscv-torture/generator/src/main/scala/SeqCBranch.scala | Scala | apache-2.0 | 7,186 |
package com.ibm.techempower
import scala.collection.mutable.ListBuffer
import scala.language.implicitConversions
import scala.xml._
import com.ibm.plain.rest.{ Resource, Html }
import com.ibm.plain.jdbc.withConnection
import com.ibm.plain.jdbc.ConnectionHelper._
// TechEmpower "Fortunes" benchmark endpoint: loads all fortunes from the database,
// appends one fortune at request time, sorts by message, and renders an HTML table.
final class Fortunes
  extends Resource {
  Get {
    val list = new ListBuffer[(Int, String)]
    withConnection(datasource) { implicit c => for (row <- sql ! asRow) { list += row } }
    // Benchmark requirement: one extra fortune added per request, not stored in the DB.
    list += ((0, "Additional fortune added at request time."))
    html(rows(list.sortBy(_._2)))
  }
  // Maps one result-set row to an (id, message) pair.
  @inline private[this] final def asRow = (r: RichResultSet) => (r.nextInt, r.nextString)
  @inline private[this] final def rows(list: ListBuffer[(Int, String)]) = list.map { e => row(e._1, e._2) }
  @inline private[this] final def row(id: Int, message: String) = <tr><td>{ id }</td><td>{ message }</td></tr>
  @inline private[this] final def html(rows: ListBuffer[Elem]): Html =
    <html>
      <head><title>Fortunes</title></head>
      <body> <table>
        <tr><th>id</th><th>message</th></tr>
        { rows }
      </table> </body>
    </html>
  private[this] final val sql = "select id, message from Fortune"
  private[this] final val datasource = "MysqlBenchmark"
}
| seem-sky/FrameworkBenchmarks | plain/src/main/scala/com/ibm/techempower/Fortunes.scala | Scala | bsd-3-clause | 1,264 |
package controllers
import play.api._
import play.api.mvc._
import play.api.libs.json._
import play.api.libs.json.Json
import models.Task
import play.api.libs.functional.syntax._
object Tasks extends Controller {

  /** Returns every task as a JSON array. */
  def getAll = Action {
    Ok(Json.toJson(Task.getAll))
  }

  /**
   * Returns the task with the given id as JSON.
   * Responds with the plain text "No task" when it does not exist — same response
   * as before, but handled with an Option match instead of calling `.get` and
   * catching the resulting NoSuchElementException (exceptions as control flow).
   */
  def getById(taskId: Long) = Action {
    Task.getById(taskId) match {
      case Some(task) => Ok(Json.toJson(task))
      case None => Ok("No task")
    }
  }

  // Reads the expected request body: {"taskId": ..., "title": ..., "text": ...}.
  implicit val readTaskJson = (
    (__ \\ 'taskId).read[String] and
    (__ \\ 'title).read[String] and
    (__ \\ 'text).read[String]) tupled

  /**
   * Creates a new task from a JSON body and returns the new id as plain text.
   * Responds with 400 when the body is not JSON or fails validation.
   * NOTE(review): the submitted taskId is required by the reader but never used —
   * confirm this is intended.
   */
  def newTask = Action { request =>
    request.body.asJson.map { json =>
      json.validate[(String, String, String)].map {
        case (taskId, title, text) => Ok(Task.create(Task(title, text)).toString)
      }.recoverTotal {
        e => BadRequest(Json.obj("status" -> "KO", "message" -> JsError.toFlatJson(e)))
      }
    }.getOrElse {
      BadRequest("Expecting Json data")
    }
  }

  /** Deletes the task with the given id and acknowledges with JSON "Ok". */
  def remove(taskId: Long) = Action {
    Task.delete(taskId)
    Ok(Json.toJson("Ok"))
  }
}
package de.kasoki.trierbustimetracker2.adapter
import scala.collection.mutable.Buffer
import android.content.Context
import android.view.LayoutInflater
import android.view.View
import android.view.ViewGroup
import android.widget.BaseAdapter
import android.widget.TextView
import de.kasoki.swtrealtime.BusTime
import de.kasoki.trierbustimetracker2.R
/** ListView adapter rendering upcoming bus departures as rows. */
class BusTimeAdapter(val context:Context) extends BaseAdapter {

  // Bus times currently backing this adapter.
  var items: List[BusTime] = List[BusTime]()

  override def getCount(): Int = items.length

  override def getItem(position: Int): Object = items(position)

  override def getItemId(position: Int): Long = position

  override def getView(position: Int, convertView: View, parent: ViewGroup): View = {
    // Reuse the recycled view when Android provides one, otherwise inflate a fresh row.
    val view =
      if (convertView != null) {
        convertView
      } else {
        val inflater = context.getSystemService(Context.LAYOUT_INFLATER_SERVICE).asInstanceOf[LayoutInflater]
        inflater.inflate(R.layout.list_item_bustimes, null)
      }

    val numberText = view.findViewById(R.id.number_text).asInstanceOf[TextView]
    val destinationText = view.findViewById(R.id.destination_text).asInstanceOf[TextView]
    val timeText = view.findViewById(R.id.time_text).asInstanceOf[TextView]

    val time = items(position)

    // A delay of zero is rendered without the "+<n>m" suffix.
    val (op, delay, minute) =
      if (time.delay == 0) ("", "", "") else ("+", time.delay.toString, "m")

    numberText.setText("%02d".format(time.number))
    destinationText.setText(time.destination)
    timeText.setText(context.getResources().getString(R.string.bustime_arrival_text,
      time.arrivalTimeAsString, op, delay, minute))

    view
  }
}
| kasoki/TrierBusTimeTracker | src/main/scala/de/kasoki/trierbustimetracker2/adapter/BusTimeAdapter.scala | Scala | mit | 1,733 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package expressions
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
import scala.annotation.tailrec
// Parses a Scala 3 indentation-based block: consumes block statements until the
// indentation returns to (or above) the level in effect when parsing started,
// or a closing token / `case` / EOF is reached.
object BlockInIndentationRegion extends ParsingRule {
  override def parse(implicit builder: ScalaPsiBuilder): Boolean = {
    assert(builder.isScala3 && builder.isScala3IndentationBasedSyntaxEnabled)
    val blockMarker = builder.mark()
    val indentionWidthBefore = builder.currentIndentationWidth
    // True when the next line is dedented back to the enclosing region's level.
    def hasOutdent: Boolean = {
      builder.findPreviousIndent.exists(_ <= indentionWidthBefore)
    }
    @tailrec
    def parseNext(): Unit = {
      builder.getTokenType match {
        case _ if hasOutdent || builder.eof() =>
          return
        case ScalaTokenTypes.tRPARENTHESIS | ScalaTokenTypes.tRBRACE =>
          return
        case ScalaTokenTypes.kCASE =>
          return
        case ScalaTokenTypes.tSEMICOLON =>
          builder.advanceLexer()
        case _ =>
          // Guarantee progress even when no block statement can be parsed.
          if (!BlockStat()) {
            builder.advanceLexer() // ate something
          }
      }
      parseNext()
    }
    parseNext()
    blockMarker.done(ScalaElementType.BLOCK)
    true
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions.aggregate
import org.apache.spark.SparkException
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.util.ArrayData
import org.apache.spark.sql.types._
import org.apache.spark.util.collection.OpenHashMap
class PercentileSuite extends SparkFunSuite {
  // Unseeded RNG: `data` differs between runs, so tests using it must not depend
  // on exact values.
  private val random = new java.util.Random()

  // 10000 random ints in [0, 10000) used as generic buffer contents.
  private val data = (0 until 10000).map { _ =>
    random.nextInt(10000)
  }
  // Round-trips the aggregation buffer through serialize/deserialize, both empty
  // and populated with random frequency counts.
  test("serialize and de-serialize") {
    val agg = new Percentile(BoundReference(0, IntegerType, true), Literal(0.5))

    // Check empty serialize and deserialize
    val buffer = new OpenHashMap[AnyRef, Long]()
    assert(compareEquals(agg.deserialize(agg.serialize(buffer)), buffer))

    // Check non-empty buffer serialize and deserialize.
    data.foreach { key =>
      buffer.changeValue(Integer.valueOf(key), 1L, _ + 1L)
    }
    assert(compareEquals(agg.deserialize(agg.serialize(buffer)), buffer))
  }
  // Exercises Percentile end-to-end (update/merge/eval) over 1..N inputs, both
  // without a frequency column and with Int/Long frequency columns, plus the
  // equivalent flattened (repeated-row) representation.
  test("class Percentile, high level interface, update, merge, eval...") {
    val count = 10000
    val percentages = Seq(0, 0.25, 0.5, 0.75, 1)
    val expectedPercentiles = Seq(1, 2500.75, 5000.5, 7500.25, 10000)
    val childExpression = Cast(BoundReference(0, IntegerType, nullable = false), DoubleType)
    val percentageExpression = CreateArray(percentages.toSeq.map(Literal(_)))
    val agg = new Percentile(childExpression, percentageExpression)

    // Test with rows without frequency
    val rows = (1 to count).map(x => Seq(x))
    runTest(agg, rows, expectedPercentiles)

    // Test with row with frequency. Second and third columns are frequency in Int and Long
    val countForFrequencyTest = 1000
    val rowsWithFrequency = (1 to countForFrequencyTest).map(x => Seq(x, x):+ x.toLong)
    val expectedPercentilesWithFrquency = Seq(1.0, 500.0, 707.0, 866.0, 1000.0)

    val frequencyExpressionInt = BoundReference(1, IntegerType, nullable = false)
    val aggInt = new Percentile(childExpression, percentageExpression, frequencyExpressionInt)
    runTest(aggInt, rowsWithFrequency, expectedPercentilesWithFrquency)

    val frequencyExpressionLong = BoundReference(2, LongType, nullable = false)
    val aggLong = new Percentile(childExpression, percentageExpression, frequencyExpressionLong)
    runTest(aggLong, rowsWithFrequency, expectedPercentilesWithFrquency)

    // Run test with Flatten data
    val flattenRows = (1 to countForFrequencyTest).flatMap(current =>
      (1 to current).map(y => current )).map(Seq(_))
    runTest(agg, flattenRows, expectedPercentilesWithFrquency)
  }
private def runTest(agg: Percentile,
rows : Seq[Seq[Any]],
expectedPercentiles : Seq[Double]): Unit = {
assert(agg.nullable)
val group1 = (0 until rows.length / 2)
val group1Buffer = agg.createAggregationBuffer()
group1.foreach { index =>
val input = InternalRow(rows(index): _*)
agg.update(group1Buffer, input)
}
val group2 = (rows.length / 2 until rows.length)
val group2Buffer = agg.createAggregationBuffer()
group2.foreach { index =>
val input = InternalRow(rows(index): _*)
agg.update(group2Buffer, input)
}
val mergeBuffer = agg.createAggregationBuffer()
agg.merge(mergeBuffer, group1Buffer)
agg.merge(mergeBuffer, group2Buffer)
agg.eval(mergeBuffer) match {
case arrayData: ArrayData =>
val percentiles = arrayData.toDoubleArray()
assert(percentiles.zip(expectedPercentiles)
.forall(pair => pair._1 == pair._2))
}
}
test("class Percentile, low level interface, update, merge, eval...") {
val childExpression = Cast(BoundReference(0, IntegerType, nullable = true), DoubleType)
val inputAggregationBufferOffset = 1
val mutableAggregationBufferOffset = 2
val percentage = 0.5
// Phase one, partial mode aggregation
val agg = new Percentile(childExpression, Literal(percentage))
.withNewInputAggBufferOffset(inputAggregationBufferOffset)
.withNewMutableAggBufferOffset(mutableAggregationBufferOffset)
val mutableAggBuffer = new GenericInternalRow(
new Array[Any](mutableAggregationBufferOffset + 1))
agg.initialize(mutableAggBuffer)
val dataCount = 10
(1 to dataCount).foreach { data =>
agg.update(mutableAggBuffer, InternalRow(data))
}
agg.serializeAggregateBufferInPlace(mutableAggBuffer)
// Serialize the aggregation buffer
val serialized = mutableAggBuffer.getBinary(mutableAggregationBufferOffset)
val inputAggBuffer = new GenericInternalRow(Array[Any](null, serialized))
// Phase 2: final mode aggregation
// Re-initialize the aggregation buffer
agg.initialize(mutableAggBuffer)
agg.merge(mutableAggBuffer, inputAggBuffer)
val expectedPercentile = 5.5
assert(agg.eval(mutableAggBuffer).asInstanceOf[Double] == expectedPercentile)
}
test("fail analysis if childExpression is invalid") {
val validDataTypes = Seq(ByteType, ShortType, IntegerType, LongType, FloatType, DoubleType)
val percentage = Literal(0.5)
validDataTypes.foreach { dataType =>
val child = AttributeReference("a", dataType)()
val percentile = new Percentile(child, percentage)
assertEqual(percentile.checkInputDataTypes(), TypeCheckSuccess)
}
val validFrequencyTypes = Seq(ByteType, ShortType, IntegerType, LongType)
for (dataType <- validDataTypes;
frequencyType <- validFrequencyTypes) {
val child = AttributeReference("a", dataType)()
val frq = AttributeReference("frq", frequencyType)()
val percentile = new Percentile(child, percentage, frq)
assertEqual(percentile.checkInputDataTypes(), TypeCheckSuccess)
}
val invalidDataTypes = Seq(BooleanType, StringType, DateType, TimestampType,
CalendarIntervalType, NullType)
invalidDataTypes.foreach { dataType =>
val child = AttributeReference("a", dataType)()
val percentile = new Percentile(child, percentage)
assertEqual(percentile.checkInputDataTypes(),
TypeCheckFailure(s"argument 1 requires numeric type, however, " +
s"'`a`' is of ${dataType.simpleString} type."))
}
val invalidFrequencyDataTypes = Seq(FloatType, DoubleType, BooleanType,
StringType, DateType, TimestampType,
CalendarIntervalType, NullType)
for(dataType <- invalidDataTypes;
frequencyType <- validFrequencyTypes) {
val child = AttributeReference("a", dataType)()
val frq = AttributeReference("frq", frequencyType)()
val percentile = new Percentile(child, percentage, frq)
assertEqual(percentile.checkInputDataTypes(),
TypeCheckFailure(s"argument 1 requires numeric type, however, " +
s"'`a`' is of ${dataType.simpleString} type."))
}
for(dataType <- validDataTypes;
frequencyType <- invalidFrequencyDataTypes) {
val child = AttributeReference("a", dataType)()
val frq = AttributeReference("frq", frequencyType)()
val percentile = new Percentile(child, percentage, frq)
assertEqual(percentile.checkInputDataTypes(),
TypeCheckFailure(s"argument 3 requires integral type, however, " +
s"'`frq`' is of ${frequencyType.simpleString} type."))
}
}
test("fails analysis if percentage(s) are invalid") {
val child = Cast(BoundReference(0, IntegerType, nullable = false), DoubleType)
val input = InternalRow(1)
val validPercentages = Seq(Literal(0D), Literal(0.5), Literal(1D),
CreateArray(Seq(0, 0.5, 1).map(Literal(_))))
validPercentages.foreach { percentage =>
val percentile1 = new Percentile(child, percentage)
assertEqual(percentile1.checkInputDataTypes(), TypeCheckSuccess)
}
val invalidPercentages = Seq(Literal(-0.5), Literal(1.5), Literal(2D),
CreateArray(Seq(-0.5, 0, 2).map(Literal(_))))
invalidPercentages.foreach { percentage =>
val percentile2 = new Percentile(child, percentage)
assertEqual(percentile2.checkInputDataTypes(),
TypeCheckFailure(s"Percentage(s) must be between 0.0 and 1.0, " +
s"but got ${percentage.simpleString(100)}"))
}
val nonFoldablePercentage = Seq(NonFoldableLiteral(0.5),
CreateArray(Seq(0, 0.5, 1).map(NonFoldableLiteral(_))))
nonFoldablePercentage.foreach { percentage =>
val percentile3 = new Percentile(child, percentage)
assertEqual(percentile3.checkInputDataTypes(),
TypeCheckFailure(s"The percentage(s) must be a constant literal, " +
s"but got ${percentage}"))
}
val invalidDataTypes = Seq(ByteType, ShortType, IntegerType, LongType, FloatType,
BooleanType, StringType, DateType, TimestampType, CalendarIntervalType, NullType)
invalidDataTypes.foreach { dataType =>
val percentage = Literal.default(dataType)
val percentile4 = new Percentile(child, percentage)
val checkResult = percentile4.checkInputDataTypes()
assert(checkResult.isFailure)
Seq("argument 2 requires double type, however, ",
s"is of ${dataType.simpleString} type.").foreach { errMsg =>
assert(checkResult.asInstanceOf[TypeCheckFailure].message.contains(errMsg))
}
}
}
test("null handling") {
// Percentile without frequency column
val childExpression = Cast(BoundReference(0, IntegerType, nullable = true), DoubleType)
val agg = new Percentile(childExpression, Literal(0.5))
val buffer = new GenericInternalRow(new Array[Any](1))
agg.initialize(buffer)
// Empty aggregation buffer
assert(agg.eval(buffer) == null)
// Empty input row
agg.update(buffer, InternalRow(null))
assert(agg.eval(buffer) == null)
// Percentile with Frequency column
val frequencyExpression = Cast(BoundReference(1, IntegerType, nullable = true), IntegerType)
val aggWithFrequency = new Percentile(childExpression, Literal(0.5), frequencyExpression)
val bufferWithFrequency = new GenericInternalRow(new Array[Any](2))
aggWithFrequency.initialize(bufferWithFrequency)
// Empty aggregation buffer
assert(aggWithFrequency.eval(bufferWithFrequency) == null)
// Empty input row
aggWithFrequency.update(bufferWithFrequency, InternalRow(null, null))
assert(aggWithFrequency.eval(bufferWithFrequency) == null)
// Add some non-empty row with empty frequency column
aggWithFrequency.update(bufferWithFrequency, InternalRow(0, null))
assert(aggWithFrequency.eval(bufferWithFrequency) == null)
// Add some non-empty row with zero frequency
aggWithFrequency.update(bufferWithFrequency, InternalRow(1, 0))
assert(aggWithFrequency.eval(bufferWithFrequency) == null)
// Add some non-empty row with positive frequency
aggWithFrequency.update(bufferWithFrequency, InternalRow(0, 1))
assert(aggWithFrequency.eval(bufferWithFrequency) != null)
}
test("negatives frequency column handling") {
val childExpression = Cast(BoundReference(0, IntegerType, nullable = true), DoubleType)
val freqExpression = Cast(BoundReference(1, IntegerType, nullable = true), IntegerType)
val agg = new Percentile(childExpression, Literal(0.5), freqExpression)
val buffer = new GenericInternalRow(new Array[Any](2))
agg.initialize(buffer)
val caught =
intercept[SparkException]{
// Add some non-empty row with negative frequency
agg.update(buffer, InternalRow(1, -5))
agg.eval(buffer)
}
assert(caught.getMessage.startsWith("Negative values found in "))
}
private def compareEquals(
left: OpenHashMap[AnyRef, Long], right: OpenHashMap[AnyRef, Long]): Boolean = {
left.size == right.size && left.forall { case (key, count) =>
right.apply(key) == count
}
}
private def assertEqual[T](left: T, right: T): Unit = {
assert(left == right)
}
}
| caneGuy/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/aggregate/PercentileSuite.scala | Scala | apache-2.0 | 12,802 |
package net.fehmicansaglam.tepkin.protocol
import java.nio.ByteOrder
import akka.util.ByteString
import net.fehmicansaglam.bson.BsonDocument
import net.fehmicansaglam.bson.BsonDsl._
import net.fehmicansaglam.bson.Implicits._
import net.fehmicansaglam.tepkin.protocol.message._
import org.scalatest.{FlatSpec, Matchers}
class MessageSpec extends FlatSpec with Matchers {
  // Every expected message below is assembled by hand as: a 16-byte header
  // (messageLength, requestID, responseTo, opCode) followed by the body.
  // The opCode constants (2001-2007) presumably follow the MongoDB wire
  // protocol — confirm against the protocol specification.
  implicit val byteOrder = ByteOrder.LITTLE_ENDIAN
  val fullCollectionName = "tepkin.message_spec"
  "Message" should "construct correct DeleteMessage" in {
    val selector: BsonDocument = "age" := 18
    val actual = DeleteMessage(fullCollectionName, selector)
    val expected = {
      // Body: ZERO, cstring collection name, flags, selector document.
      val body = ByteString.newBuilder
        .putInt(0) // ZERO
        .putBytes(fullCollectionName.getBytes("utf-8"))
        .putByte(0)
        .putInt(0)
        .append(selector.encode)
        .result()
      ByteString.newBuilder
        .putInt(body.size + 16)
        .putInt(actual.requestID)
        .putInt(0)
        .putInt(2006)
        .append(body)
        .result()
    }
    actual.encode should be(expected)
  }
  it should "construct correct GetMoreMessage" in {
    val cursorID = 1L
    val numberToReturn = 0
    val actual = GetMoreMessage(fullCollectionName, cursorID, numberToReturn)
    val expected = {
      // Body: ZERO, cstring collection name, numberToReturn, 64-bit cursor id.
      val body = ByteString.newBuilder
        .putInt(0) // ZERO
        .putBytes(fullCollectionName.getBytes("utf-8"))
        .putByte(0)
        .putInt(numberToReturn)
        .putLong(cursorID)
        .result()
      ByteString.newBuilder
        .putInt(body.size + 16)
        .putInt(actual.requestID)
        .putInt(0)
        .putInt(2005)
        .append(body)
        .result()
    }
    actual.encode should be(expected)
  }
  it should "construct correct InsertMessage" in {
    val document: BsonDocument = "age" := 18
    val actual = InsertMessage(fullCollectionName, Seq(document))
    val expected = {
      // Body: flags, cstring collection name, then the document(s) to insert.
      val body = ByteString.newBuilder
        .putInt(0) // flags
        .putBytes(fullCollectionName.getBytes("utf-8"))
        .putByte(0)
        .append(document.encode)
        .result()
      ByteString.newBuilder
        .putInt(body.size + 16)
        .putInt(actual.requestID)
        .putInt(0)
        .putInt(2002)
        .append(body)
        .result()
    }
    actual.encode should be(expected)
  }
  it should "construct correct KillCursorsMessage" in {
    val cursorIDs = Seq(1L, 2L)
    val actual = KillCursorsMessage(cursorIDs: _*)
    val expected = {
      // Body: ZERO, count of cursor ids, then each 64-bit cursor id in order.
      val builder = ByteString.newBuilder
        .putInt(0) // ZERO
        .putInt(cursorIDs.size)
      cursorIDs.foreach(builder.putLong)
      val body = builder.result()
      ByteString.newBuilder
        .putInt(body.size + 16)
        .putInt(actual.requestID)
        .putInt(0)
        .putInt(2007)
        .append(body)
        .result()
    }
    actual.encode should be(expected)
  }
  it should "construct correct QueryMessage" in {
    val query: BsonDocument = "age" := 18
    val fields: BsonDocument = "age" := 1
    val numberToSkip = 0
    val numberToReturn = 0
    val actual = QueryMessage(fullCollectionName, query, Some(fields), numberToSkip, numberToReturn)
    val expected = {
      // Body: flags, cstring collection name, skip/return counts, query document,
      // and the optional field selector document appended last.
      val builder = ByteString.newBuilder
        .putInt(0) //flags
        .putBytes(fullCollectionName.getBytes("utf-8"))
        .putByte(0)
        .putInt(numberToSkip)
        .putInt(numberToReturn)
        .append(query.encode)
      builder.append(fields.encode)
      val body = builder.result()
      ByteString.newBuilder
        .putInt(body.size + 16)
        .putInt(actual.requestID)
        .putInt(0)
        .putInt(2004)
        .append(body)
        .result()
    }
    actual.encode should be(expected)
  }
  it should "construct correct UpdateMessage" in {
    val selector: BsonDocument = "age" := 18
    val update: BsonDocument = $set("age" := 33)
    val actual = UpdateMessage(fullCollectionName, selector, update)
    val expected = {
      // Body: ZERO, cstring collection name, flags, selector, then update document.
      val body = ByteString.newBuilder
        .putInt(0) // ZERO
        .putBytes(fullCollectionName.getBytes("utf-8"))
        .putByte(0)
        .putInt(0) // flags
        .append(selector.encode)
        .append(update.encode)
        .result()
      ByteString.newBuilder
        .putInt(body.size + 16)
        .putInt(actual.requestID)
        .putInt(0)
        .putInt(2001)
        .append(body)
        .result()
    }
    actual.encode should be(expected)
  }
}
| danielwegener/tepkin | tepkin/src/test/scala/net/fehmicansaglam/tepkin/protocol/MessageSpec.scala | Scala | apache-2.0 | 4,480 |
package com.twitter.summingbird.storm.builder
import java.util.concurrent.atomic.AtomicLong
import com.twitter.algebird.util.summer.Incrementor
/**
* @author pnaramsetti
*/
case class Counter(name: String) extends Incrementor {
  // Thread-safe running total backing this counter.
  private val tally = new AtomicLong()

  /** Increment the counter by exactly one. */
  override def incr: Unit = {
    tally.incrementAndGet()
    ()
  }

  /** Increment the counter by an arbitrary delta. */
  override def incrBy(amount: Long): Unit = {
    tally.addAndGet(amount)
    ()
  }

  /** Current value of the counter. */
  def size = tally.get()

  /** Human-readable rendering, e.g. "myCounter: size:3". */
  override def toString: String = s"$name: size:$size"
}
| twitter/summingbird | summingbird-storm/src/test/scala/com/twitter/summingbird/storm/builder/Counter.scala | Scala | apache-2.0 | 484 |
package com.rocketfuel.sdbc.base
import com.rocketfuel.sdbc.base
trait Selectable[Key, Value, Connection, Select <: base.Select[Connection, Value]] {
  // Builds the Select statement that retrieves the Value(s) identified by `key`.
  def select(key: Key): Select
}
trait SelectableMethods[Connection, Select[T] <: base.Select[Connection, T]] {

  /** Runs the Select derived from `key` and returns an iterator over its results. */
  def iterator[Key, Value](
    key: Key
  )(implicit selectable: base.Selectable[Key, Value, Connection, Select[Value]],
    connection: Connection
  ): Iterator[Value] = {
    selectable.select(key).iterator()
  }

  /** Runs the Select derived from `key` and returns the first result, if any.
    *
    * Pulls at most one element directly from the iterator instead of the previous
    * `iterator(key).toStream.headOption`, which allocated a memoizing Stream cell
    * just to read the head.
    */
  def option[Key, Value](
    key: Key
  )(implicit selectable: base.Selectable[Key, Value, Connection, Select[Value]],
    connection: Connection
  ): Option[Value] = {
    val results = iterator(key)
    if (results.hasNext) Some(results.next()) else None
  }
}
| wdacom/sdbc | base/src/main/scala/com/rocketfuel/sdbc/base/Selectable.scala | Scala | bsd-3-clause | 702 |
package web.actors
import akka.actor.Actor
import core._
trait ParentSentimentOutput extends SentimentOutput { actor: Actor =>
  // Sends the category counts to this actor's parent, wrapped in a
  // TrackWorker.Publish message (fire-and-forget).
  def outputCount(values: List[Iterable[(Category, Int)]]): Unit =
    actor.context.parent ! TrackWorker.Publish(values)
}
| OmniaGM/activator-spray-twitter | web/app/web/actors/ParentSentimentOutput.scala | Scala | apache-2.0 | 253 |
/* *\\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2015, Gary Keorkunian **
** **
\\* */
package squants.photo
import squants._
/**
* @author garyKeorkunian
* @since 0.1
*
* @param value Double
*/
final class Luminance private (val value: Double, val unit: LuminanceUnit)
    extends Quantity[Luminance] {
  def dimension = Luminance
  // Luminance (cd/m²) times an area (m²) yields a luminous intensity in candelas.
  def *(that: Area): LuminousIntensity = Candelas(this.value * that.toSquareMeters)
  def toCandelasPerSquareMeters = to(CandelasPerSquareMeter)
}
object Luminance extends Dimension[Luminance] {
  // Internal factory used by the unit objects; converts any Numeric to Double.
  private[photo] def apply[A](n: A, unit: LuminanceUnit)(implicit num: Numeric[A]) = new Luminance(num.toDouble(n), unit)
  // Eta-expanded parse function, enabling e.g. Luminance("1.0 cd/m²").
  def apply = parse _
  def name = "Luminance"
  def primaryUnit = CandelasPerSquareMeter
  def siUnit = CandelasPerSquareMeter
  def units = Set(CandelasPerSquareMeter)
}
}
trait LuminanceUnit extends UnitOfMeasure[Luminance] {
  // Lets each unit object act as a factory, e.g. CandelasPerSquareMeter(3).
  def apply[A](n: A)(implicit num: Numeric[A]) = Luminance(num.toDouble(n), this)
}
// The sole (primary, SI) unit of luminance in this dimension.
object CandelasPerSquareMeter extends LuminanceUnit with PrimaryUnit with SiUnit {
  val symbol = "cd/m²"
}
object LuminanceConversions {
  lazy val candelaPerSquareMeter = CandelasPerSquareMeter(1)
  // Pimp-my-library syntax: `3.candelasPerSquareMeter` for any Numeric value.
  implicit class LuminanceConversions[A](n: A)(implicit num: Numeric[A]) {
    def candelasPerSquareMeter = CandelasPerSquareMeter(n)
  }
  // Numeric instance so Luminance values can be summed, sorted, etc. in the primary unit.
  implicit object LuminanceNumeric extends AbstractQuantityNumeric[Luminance](Luminance.primaryUnit)
}
| underscorenico/squants | shared/src/main/scala/squants/photo/Luminance.scala | Scala | apache-2.0 | 1,860 |
package edu.oregonstate.mutation.statementHistory
/** Immutable pairing of a commit SHA with the action performed in it. */
class CommitInfo(val sha: String, val action: String) {

  /** Renders as "[sha,action]". */
  override def toString: String = s"[$sha,$action]"

  override def equals(other: Any): Boolean = {
    other match {
      case info: CommitInfo => info.sha == sha && info.action == action
      case _ => false
    }
  }

  // Original overrode equals without hashCode, breaking the equals/hashCode
  // contract for hash-based collections; keep both consistent.
  override def hashCode: Int = (sha, action).##
}
| caiusb/statement-history | src/main/scala/edu/oregonstate/mutation/statementHistory/CommitInfo.scala | Scala | mit | 362 |
package slick.lifted
import slick.util.ConstArray
import scala.language.higherKinds
import scala.language.experimental.macros
import scala.annotation.implicitNotFound
import scala.reflect.macros.blackbox.Context
import slick.ast.{Join => AJoin, _}
import FunctionSymbolExtensionMethods._
import ScalaBaseType._
sealed trait QueryBase[T] extends Rep[T]
/** An instance of Query represents a query or view, i.e. a computation of a
* collection type (Rep[Seq[T]]). It is parameterized with both, the mixed
* type (the type of values you see e.g. when you call map()) and the unpacked
* type (the type of values that you get back when you run the query).
*
* Additional extension methods for queries containing a single column are
* defined in [[slick.lifted.SingleColumnQueryExtensionMethods]].
*/
sealed abstract class Query[+E, U, C[_]] extends QueryBase[C[U]] { self =>
  def shaped: ShapedValue[_ <: E, U]
  // The packed AST node of this query's projection.
  final lazy val packed = shaped.toNode
  /** Build a new query by applying a function to all elements of this query
    * and using the elements of the resulting queries. This corresponds to an
    * implicit inner join in SQL. */
  def flatMap[F, T, D[_]](f: E => Query[F, T, D]): Query[F, T, C] = {
    val generator = new AnonSymbol
    val aliased = shaped.encodeRef(Ref(generator)).value
    val fv = f(aliased)
    new WrappingQuery[F, T, C](new Bind(generator, toNode, fv.toNode), fv.shaped)
  }
  /** Build a new query by applying a function to all elements of this query. */
  def map[F, G, T](f: E => F)(implicit shape: Shape[_ <: FlatShapeLevel, F, T, G]): Query[G, T, C] =
    flatMap(v => Query[F, T, G](f(v)))
  /** Select all elements of this query which satisfy a predicate. */
  // Shared implementation of filter/filterNot/withFilter; `wrapExpr`
  // post-processes the compiled predicate node (identity or negation).
  private def filterHelper[T](f: E => T, wrapExpr: Node => Node)
                             (implicit wt: CanBeQueryCondition[T]): Query[E, U, C] = {
    val generator = new AnonSymbol
    val aliased = shaped.encodeRef(Ref(generator))
    val fv = f(aliased.value)
    new WrappingQuery[E, U, C](Filter.ifRefutable(generator, toNode, wrapExpr(wt(fv).toNode)), shaped)
  }
  /** Select all elements of this query which satisfy a predicate. Unlike
    * `withFilter, this method only allows `Rep`-valued predicates, so it
    * guards against the accidental use plain Booleans. */
  def filter[T <: Rep[_]](f: E => T)(implicit wt: CanBeQueryCondition[T]): Query[E, U, C] =
    withFilter(f)
  // Complement of `filter`: keeps the rows for which the predicate is false.
  def filterNot[T <: Rep[_]](f: E => T)(implicit wt: CanBeQueryCondition[T]): Query[E, U, C] =
    filterHelper(f, node => Library.Not.typed(node.nodeType, node) )
  /** Select all elements of this query which satisfy a predicate. This method
    * is used when desugaring for-comprehensions over queries. There is no
    * reason to call it directly because it is the same as `filter`. */
  def withFilter[T : CanBeQueryCondition](f: E => T) = filterHelper(f, identity)
  /** Join two queries with a cross join or inner join.
    * An optional join predicate can be specified later by calling `on`. */
  def join[E2, U2, D[_]](q2: Query[E2, U2, D]) = {
    val leftGen, rightGen = new AnonSymbol
    val aliased1 = shaped.encodeRef(Ref(leftGen))
    val aliased2 = q2.shaped.encodeRef(Ref(rightGen))
    new BaseJoinQuery[E, E2, U, U2, C, E, E2](leftGen, rightGen, toNode, q2.toNode, JoinType.Inner,
      aliased1.zip(aliased2), aliased1.value, aliased2.value)
  }
  /** Join two queries with a left outer join.
    * An optional join predicate can be specified later by calling `on`.
    * The right side of the join is lifted to an `Option`. If at least one element on the right
    * matches, all matching elements are returned as `Some`, otherwise a single `None` row is
    * returned. */
  def joinLeft[E2, U2, D[_], O2](q2: Query[E2, _, D])(implicit ol: OptionLift[E2, O2], sh: Shape[FlatShapeLevel, O2, U2, _]) = {
    val leftGen, rightGen = new AnonSymbol
    val aliased1 = shaped.encodeRef(Ref(leftGen))
    val aliased2 = ShapedValue(ol.lift(q2.shaped.value), sh).encodeRef(Ref(rightGen))
    new BaseJoinQuery[E, O2, U, U2, C, E, E2](leftGen, rightGen, toNode, q2.toNode, JoinType.LeftOption,
      aliased1.zip(aliased2), aliased1.value, q2.shaped.encodeRef(Ref(rightGen)).value)
  }
  /** Join two queries with a right outer join.
    * An optional join predicate can be specified later by calling `on`.
    * The left side of the join is lifted to an `Option`. If at least one element on the left
    * matches, all matching elements are returned as `Some`, otherwise a single `None` row is
    * returned. */
  def joinRight[E1 >: E, E2, U2, D[_], O1, U1](q2: Query[E2, U2, D])(implicit ol: OptionLift[E1, O1], sh: Shape[FlatShapeLevel, O1, U1, _]) = {
    val leftGen, rightGen = new AnonSymbol
    val aliased1 = ShapedValue(ol.lift(shaped.value), sh).encodeRef(Ref(leftGen))
    val aliased2 = q2.shaped.encodeRef(Ref(rightGen))
    new BaseJoinQuery[O1, E2, U1, U2, C, E, E2](leftGen, rightGen, toNode, q2.toNode, JoinType.RightOption,
      aliased1.zip(aliased2), shaped.encodeRef(Ref(leftGen)).value, aliased2.value)
  }
  /** Join two queries with a full outer join.
    * An optional join predicate can be specified later by calling `on`.
    * Both sides of the join are lifted to an `Option`. If at least one element on either side
    * matches the other side, all matching elements are returned as `Some`, otherwise a single
    * `None` row is returned. */
  def joinFull[E1 >: E, E2, U2, D[_], O1, U1, O2](q2: Query[E2, _, D])(implicit ol1: OptionLift[E1, O1], sh1: Shape[FlatShapeLevel, O1, U1, _], ol2: OptionLift[E2, O2], sh2: Shape[FlatShapeLevel, O2, U2, _]) = {
    val leftGen, rightGen = new AnonSymbol
    val aliased1 = ShapedValue(ol1.lift(shaped.value), sh1).encodeRef(Ref(leftGen))
    val aliased2 = ShapedValue(ol2.lift(q2.shaped.value), sh2).encodeRef(Ref(rightGen))
    new BaseJoinQuery[O1, O2, U1, U2, C, E, E2](leftGen, rightGen, toNode, q2.toNode, JoinType.OuterOption,
      aliased1.zip(aliased2), shaped.encodeRef(Ref(leftGen)).value, q2.shaped.encodeRef(Ref(rightGen)).value)
  }
  // Internal helper for zip/zipWith/zipWithIndex: builds a join of the given
  // type with both sides aliased and no user-specified predicate.
  private[this] def standardJoin[E2, U2, D[_]](q2: Query[E2, U2, D], jt: JoinType) = {
    val leftGen, rightGen = new AnonSymbol
    val aliased1 = shaped.encodeRef(Ref(leftGen))
    val aliased2 = q2.shaped.encodeRef(Ref(rightGen))
    new BaseJoinQuery[E, E2, U, U2, C, E, E2](leftGen, rightGen, toNode, q2.toNode, jt,
      aliased1.zip(aliased2), aliased1.value, aliased2.value)
  }
  /** Return a query formed from this query and another query by combining
    * corresponding elements in pairs. */
  def zip[E2, U2, D[_]](q2: Query[E2, U2, D]): Query[(E, E2), (U, U2), C] = standardJoin(q2, JoinType.Zip)
  /** Return a query formed from this query and another query by combining
    * corresponding elements with the specified function. */
  def zipWith[E2, U2, F, G, T, D[_]](q2: Query[E2, U2, D], f: (E, E2) => F)(implicit shape: Shape[_ <: FlatShapeLevel, F, T, G]): Query[G, T, C] =
    standardJoin(q2, JoinType.Zip).map[F, G, T](x => f(x._1, x._2))
  /** Zip this query with its indices (starting at 0). */
  def zipWithIndex = {
    val leftGen, rightGen = new AnonSymbol
    val aliased1 = shaped.encodeRef(Ref(leftGen))
    val aliased2 = ShapedValue(Rep.forNode[Long](Ref(rightGen)), Shape.repColumnShape[Long, FlatShapeLevel])
    new BaseJoinQuery[E, Rep[Long], U, Long, C, E, Rep[Long]](leftGen, rightGen, toNode, RangeFrom(0L), JoinType.Zip, aliased1.zip(aliased2), aliased1.value, aliased2.value)
  }
  /** Sort this query according to a function which extracts the ordering
    * criteria from the query's elements. */
  def sortBy[T](f: E => T)(implicit ev: T => Ordered): Query[E, U, C] = {
    val generator = new AnonSymbol
    val aliased = shaped.encodeRef(Ref(generator))
    new WrappingQuery[E, U, C](SortBy(generator, toNode, ConstArray.from(f(aliased.value).columns)), shaped)
  }
  /** Sort this query according to a the ordering of its elements. */
  def sorted(implicit ev: (E => Ordered)): Query[E, U, C] = sortBy(identity)
  /** Partition this query into a query of pairs of a key and a nested query
    * containing the elements for the key, according to some discriminator
    * function. */
  def groupBy[K, T, G, P](f: E => K)(implicit kshape: Shape[_ <: FlatShapeLevel, K, T, G], vshape: Shape[_ <: FlatShapeLevel, E, _, P]): Query[(G, Query[P, U, Seq]), (T, Query[P, U, Seq]), C] = {
    val sym = new AnonSymbol
    val key = ShapedValue(f(shaped.encodeRef(Ref(sym)).value), kshape).packedValue
    val value = ShapedValue(pack.to[Seq], RepShape[FlatShapeLevel, Query[P, U, Seq], Query[P, U, Seq]])
    val group = GroupBy(sym, toNode, key.toNode)
    new WrappingQuery[(G, Query[P, U, Seq]), (T, Query[P, U, Seq]), C](group, key.zip(value))
  }
  /** Specify part of a select statement for update and marked for row level locking */
  def forUpdate: Query[E, U, C] = {
    val generator = new AnonSymbol
    new WrappingQuery[E, U, C](ForUpdate(generator, toNode), shaped)
  }
  // Re-point this query at the given AST path: the result's node is `path` and
  // its projection is re-encoded relative to that path.
  def encodeRef(path: Node): Query[E, U, C] = new Query[E, U, C] {
    val shaped = self.shaped.encodeRef(path)
    def toNode = path
  }
  /** Return a new query containing the elements from both operands. Duplicate
    * elements are eliminated from the result. */
  def union[O >: E, R, D[_]](other: Query[O, U, D]): Query[O, U, C] =
    new WrappingQuery[O, U, C](Union(toNode, other.toNode, false), shaped)
  /** Return a new query containing the elements from both operands. Duplicate
    * elements are preserved. */
  def unionAll[O >: E, R, D[_]](other: Query[O, U, D]): Query[O, U, C] =
    new WrappingQuery[O, U, C](Union(toNode, other.toNode, true), shaped)
  /** Return a new query containing the elements from both operands. Duplicate
    * elements are preserved. */
  def ++[O >: E, R, D[_]](other: Query[O, U, D]) = unionAll(other)
  /** The total number of elements (i.e. rows). */
  def length: Rep[Int] = Library.CountAll.column(toNode)
  /** The total number of elements (i.e. rows). */
  def size = length
  /** The number of distinct elements of the query. */
  @deprecated("Use `length` on `distinct` or `distinctOn` instead of `countDistinct`", "3.2")
  def countDistinct: Rep[Int] = Library.CountDistinct.column(toNode)
  /** Test whether this query is non-empty. */
  def exists = Library.Exists.column[Boolean](toNode)
  // Pack the projection to its packed representation; the query AST is unchanged.
  def pack[R](implicit packing: Shape[_ <: FlatShapeLevel, E, _, R]): Query[R, U, C] =
    new Query[R, U, C] {
      val shaped: ShapedValue[_ <: R, U] = self.shaped.packedValue(packing)
      def toNode = self.toNode
    }
  /** Select the first `num` elements. */
  def take(num: ConstColumn[Long]): Query[E, U, C] = new WrappingQuery[E, U, C](Take(toNode, num.toNode), shaped)
  /** Select the first `num` elements. */
  def take(num: Long): Query[E, U, C] = take(LiteralColumn(num))
  /** Select the first `num` elements. */
  def take(num: Int): Query[E, U, C] = take(num.toLong)
  /** Select all elements except the first `num` ones. */
  def drop(num: ConstColumn[Long]): Query[E, U, C] = new WrappingQuery[E, U, C](Drop(toNode, num.toNode), shaped)
  /** Select all elements except the first `num` ones. */
  def drop(num: Long): Query[E, U, C] = drop(LiteralColumn(num))
  /** Select all elements except the first `num` ones. */
  def drop(num: Int): Query[E, U, C] = drop(num.toLong)
  /** Remove duplicate elements. When used on an ordered Query, there is no guarantee in which
    * order duplicates are removed. This method is equivalent to `distinctOn(identity)`. */
  def distinct: Query[E, U, C] =
    distinctOn[E, U](identity)(shaped.shape.asInstanceOf[Shape[FlatShapeLevel, E, U, _]])
  /** Remove duplicate elements which are the same in the given projection. When used on an
    * ordered Query, there is no guarantee in which order duplicates are removed. */
  def distinctOn[F, T](f: E => F)(implicit shape: Shape[_ <: FlatShapeLevel, F, T, _]): Query[E, U, C] = {
    val generator = new AnonSymbol
    val aliased = shaped.encodeRef(Ref(generator)).value
    val fv = f(aliased)
    new WrappingQuery[E, U, C](Distinct(generator, toNode, shape.toNode(fv)), shaped)
  }
  /** Change the collection type to build when executing the query. */
  def to[D[_]](implicit ctc: TypedCollectionTypeConstructor[D]): Query[E, U, D] = new Query[E, U, D] {
    val shaped = self.shaped
    def toNode = CollectionCast(self.toNode, ctc)
  }
  /** Force a subquery to be created when using this Query as part of a larger Query. This method
    * should never be necessary for correctness. If a query works with an explicit `.subquery` call
    * but fails without, this should be considered a bug in Slick. The method is exposed in the API
    * to enable workarounds to be written in such cases. */
  def subquery: Query[E, U, C] = new WrappingQuery[E, U, C](Subquery(toNode, Subquery.Default), shaped)
}
/** The companion object for Query contains factory methods for creating queries. */
object Query {
  /** Lift a scalar value to a Query. */
  def apply[E, U, R](value: E)(implicit unpack: Shape[_ <: FlatShapeLevel, E, U, R]): Query[R, U, Seq] = {
    val shaped = ShapedValue(value, unpack).packedValue
    new WrappingQuery[R, U, Seq](Pure(shaped.toNode), shaped)
  }
  /** The empty Query. */
  def empty: Query[Unit, Unit, Seq] = new Query[Unit, Unit, Seq] {
    val toNode = shaped.toNode
    def shaped = ShapedValue((), Shape.unitShape[FlatShapeLevel])
  }
  // Any QueryBase that is also a Rep[T] gets the standard Rep shape.
  @inline implicit def queryShape[Level >: NestedShapeLevel <: ShapeLevel, T, Q <: QueryBase[_]](implicit ev: Q <:< Rep[T]) = RepShape[Level, Q, T]
}
/** A typeclass for types that can be used as predicates in `filter` calls. */
// Fixed the unbalanced parenthesis at the end of the compiler error message.
@implicitNotFound("Type ${T} cannot be a query condition (only Boolean, Rep[Boolean] and Rep[Option[Boolean]] are allowed)")
trait CanBeQueryCondition[-T] extends (T => Rep[_])
object CanBeQueryCondition {
  // Using implicits with explicit type annotation here (instead of previously implicit objects)
  // because otherwise they would not be found in this file above this line.
  // See https://github.com/slick/slick/pull/217
  implicit val BooleanColumnCanBeQueryCondition : CanBeQueryCondition[Rep[Boolean]] =
    new CanBeQueryCondition[Rep[Boolean]] {
      def apply(value: Rep[Boolean]) = value
    }
  implicit val BooleanOptionColumnCanBeQueryCondition : CanBeQueryCondition[Rep[Option[Boolean]]] =
    new CanBeQueryCondition[Rep[Option[Boolean]]] {
      def apply(value: Rep[Option[Boolean]]) = value
    }
  // Plain Booleans are lifted to literal columns so they can appear in a WHERE clause.
  implicit val BooleanCanBeQueryCondition : CanBeQueryCondition[Boolean] =
    new CanBeQueryCondition[Boolean] {
      def apply(value: Boolean) = new LiteralColumn(value)
    }
}
class WrappingQuery[+E, U, C[_]](val toNode: Node, val shaped: ShapedValue[_ <: E, U]) extends Query[E, U, C]
/** A join without an explicit condition yet; created with a constant-true
  * predicate which `on` replaces with the user-supplied one. */
final class BaseJoinQuery[+E1, +E2, U1, U2, C[_], +B1, +B2](leftGen: TermSymbol, rightGen: TermSymbol, left: Node, right: Node, jt: JoinType, base: ShapedValue[_ <: (E1, E2), (U1, U2)], b1: B1, b2: B2)
    extends WrappingQuery[(E1, E2), (U1, U2), C](AJoin(leftGen, rightGen, left, right, jt, LiteralNode(true)), base) {
  /** Add a join condition to a join operation. */
  def on[T <: Rep[_]](pred: (B1, B2) => T)(implicit wt: CanBeQueryCondition[T]): Query[(E1, E2), (U1, U2), C] =
    new WrappingQuery[(E1, E2), (U1, U2), C](AJoin(leftGen, rightGen, left, right, jt, wt(pred(b1, b2)).toNode), base)
}
/** Represents a database table. Profiles add extension methods to TableQuery
* for operations that can be performed on tables but not on arbitrary
* queries, e.g. getting the table DDL. */
class TableQuery[E <: AbstractTable[_]](cons: Tag => E) extends Query[E, E#TableElementType, Seq] {
  lazy val shaped = {
    // Construct the base table row with a tag that can re-create the row for an
    // arbitrary path; this is what allows the table to be aliased in larger queries.
    val baseTable = cons(new BaseTag { base =>
      def taggedAs(path: Node): AbstractTable[_] = cons(new RefTag(path) {
        def taggedAs(path: Node) = base.taggedAs(path)
      })
    })
    ShapedValue(baseTable, RepShape[FlatShapeLevel, E, E#TableElementType])
  }
  lazy val toNode = shaped.toNode
  /** Get the "raw" table row that represents the table itself, as opposed to
    * a Path for a variable of the table's type. This method should generally
    * not be called from user code. */
  def baseTableRow: E = shaped.value
}
object TableQuery {
  /** Create a TableQuery for a table row class using an arbitrary constructor function. */
  def apply[E <: AbstractTable[_]](cons: Tag => E): TableQuery[E] =
    new TableQuery[E](cons)

  /** Create a TableQuery for a table row class which has a constructor of type (Tag).
    * The macro expands this to the equivalent of `TableQuery(tag => new E(tag))`. */
  def apply[E <: AbstractTable[_]]: TableQuery[E] =
    macro TableQueryMacroImpl.apply[E]
}
object TableQueryMacroImpl {

  /** Materializes `TableQuery(tag => new E(tag))` for a table row class `E`
    * whose constructor takes a single `Tag` parameter. */
  def apply[E <: AbstractTable[_]](c: Context)(implicit e: c.WeakTypeTag[E]): c.Expr[TableQuery[E]] = {
    import c.universe._
    // Build the AST of the function literal `(tag: Tag) => new E(tag)`.
    val cons = c.Expr[Tag => E](Function(
      List(ValDef(Modifiers(Flag.PARAM), TermName("tag"), Ident(typeOf[Tag].typeSymbol), EmptyTree)),
      Apply(
        // `new E(...)` — constructor selection on the requested table type
        Select(New(TypeTree(e.tpe)), termNames.CONSTRUCTOR),
        List(Ident(TermName("tag")))
      )
    ))
    reify { TableQuery.apply[E](cons.splice) }
  }
}
| Radsaggi/slick | slick/src/main/scala/slick/lifted/Query.scala | Scala | bsd-2-clause | 17,121 |
package com.sksamuel.elastic4s.analyzers
import com.sksamuel.elastic4s.anaylzers.TokenFilterDsl
import org.scalatest.{Matchers, WordSpec}
/** Verifies JSON serialization of the stemmer token filter builder. */
class StemmerTokenFilterTest extends WordSpec with TokenFilterDsl with Matchers {

  "StemmerTokenFilter builder" should {
    "set language" in {
      // Build the filter, then compare its serialized form with the expected JSON.
      val filter = stemmerTokenFilter("testy").lang("vulcan")
      val expectedJson = """{"type":"stemmer","name":"vulcan"}"""
      filter.json.string shouldBe expectedJson
    }
  }
}
| beni55/elastic4s | elastic4s-core-tests/src/test/scala/com/sksamuel/elastic4s/analyzers/StemmerTokenFilterTest.scala | Scala | apache-2.0 | 410 |
package actors
import akka.actor.{ ActorRef, Actor }
import models.{ SearchMatch, StopSearch, LogEntry, StartSearch }
import play.api.libs.ws._
import play.api.Play.current
import play.api.libs.json.{ JsArray, JsValue, Json }
import java.util.UUID
import scala.concurrent.ExecutionContext
import ExecutionContext.Implicits.global
import scala.concurrent.impl.Future
import scala.concurrent.Future
import play.api.libs.ws.ning.NingWSClient
import com.ning.http.client.AsyncHttpClientConfig
import scala.util.Success
import scala.util.Failure
import models.LogEntry
import play.api.libs.json.Format
/**
*/
/**
 * Bridges log entries and live searches via Elasticsearch's percolator API:
 * registered searches are stored as percolator documents, and each incoming
 * LogEntry is percolated against them to find the searches it matches.
 */
class ElasticsearchActor extends Actor {

  // Actor that fans matched log entries out to interested client channels.
  val channels = context.system.actorSelection("/user/channels")

  def receive = {
    case log: LogEntry => percolate(log, sender)
    case StartSearch(id, searchString) => registerQuery(id, searchString)
    case StopSearch(id) => unregisterQuery(id)
  }

  // Asks Elasticsearch which registered queries match this entry and notifies
  // the channels actor with the matching search ids.
  // NOTE(review): `requestor` is currently unused in the body.
  private def percolate(log: LogEntry, requestor: ActorRef) {
    // println(s"percolate called $logJson")
    implicit val logEntryFormat = Json.format[LogEntry]
    val logJson = Json.toJson(log)
    WS.url("http://localhost:9200/logentries/logentry/_percolate").post(Json.stringify(Json.obj("doc" -> logJson))).map {
      response =>
        val body = response.json
        // println(s"body = $body")
        val status = (body \\ "ok").as[Boolean]
        if (status) {
          // Collect the ids of all percolator queries that matched the document.
          val matchingIds = (body \\ "matches").asInstanceOf[JsArray].value.foldLeft(List[UUID]())((acc, v) => UUID.fromString(v.as[String]) :: acc)
          if (!matchingIds.isEmpty) {
            // println(s"MatchingIds = $matchingIds")
            channels ! SearchMatch(log, matchingIds)
          } else {
            // println("No Matching Ids")
          }
        }
    }
  }

  // Deletes the percolator document registered under the given search id.
  private def unregisterQuery(id: UUID) {
    // println("unregister query")
    WS.url("http://localhost:9200/_percolator/logentries/" + id.toString).delete
  }

  // Stores the search string as a query_string percolator document keyed by
  // the search id, logging success or failure of the PUT.
  private def registerQuery(id: UUID, searchString: String) {
    println(s"ElasticSearch - register query - $id")
    val query = Json.obj(
      "query" -> Json.obj(
        "query_string" -> Json.obj(
          "query" -> searchString
        )
      )
    )
    // NOTE(review): a fresh NingWSClient is created per registration and never
    // closed — confirm this is intended (each client holds its own thread pool).
    val client: WSClient = new NingWSClient(new AsyncHttpClientConfig.Builder().build())
    val future: Future[WSResponse] = client //
      .url("http://localhost:9200/_percolator/logentries/" + id.toString) //
      .put(Json.stringify(query))

    future onComplete {
      case Success(response) => {
        val status = response.status
        val body = response.body
        // println(s"Status $status - $body")
      }
      case Failure(t) => println("An error has occured: " + t.getMessage)
    }
  }
}
| dbuschman7/collection-of-things | playground/app/actors/ElasticSearchActor.scala | Scala | apache-2.0 | 2,759 |
package com.twitter.finagle.redis.util
import java.net.{InetSocketAddress, Socket}
import java.io.{BufferedWriter, File, FileWriter, PrintWriter}
import com.twitter.finagle.Redis
import com.twitter.finagle.redis.Client
import java.util.concurrent.TimeUnit
import scala.collection.JavaConverters._
import scala.util.Random
object RedisTestHelper {

  /** Matches "major.minor.patch" semantic version strings. */
  private val SemVerRegex = "(\\\\d+)\\\\.(\\\\d+)\\\\.(\\\\d+)".r

  /** True when a local `redis-server` binary is present and reports a parseable version. */
  lazy val redisServerExists: Boolean = redisServerVersion.isDefined

  /** Runs `redis-server --version` and parses the reported (major, minor, patch).
    * Returns None when the binary is missing, the process times out, exits
    * abnormally, or prints an unexpected format. */
  lazy val redisServerVersion: Option[(Int, Int, Int)] = {
    var process: Option[Process] = None
    try {
      val p = new ProcessBuilder("redis-server", "--version").start()
      process = Some(p)
      // waitFor(timeout) returns false on timeout; in that case calling
      // exitValue() on the still-running process would throw, so bail out here.
      if (!p.waitFor(10, TimeUnit.SECONDS)) None
      else {
        val exitValue = p.exitValue()
        if (exitValue != 0 && exitValue != 1) None
        else {
          // An example output from redis 5.0.1:
          // `Redis server v=5.0.1 sha=00000000:0 malloc=libc bits=64 build=c6d614f40d59bf0a`
          val source = scala.io.Source.fromInputStream(p.getInputStream())
          val stdout = try source.mkString finally source.close()
          val version = stdout.split(" ")(2).substring(2)
          version match {
            case SemVerRegex(a, b, c) => Some((a.toInt, b.toInt, c.toInt))
            case _ => None
          }
        }
      }
    } catch {
      case _: Exception => None
    } finally {
      // Make sure a hung or still-running process does not leak.
      process.foreach(_.destroy())
    }
  }
}
// Helper classes for spinning up a little redis cluster
object RedisCluster { self =>
import collection.mutable.{Stack => MutableStack}
val instanceStack = MutableStack[ExternalRedis]()
def address: Option[InetSocketAddress] = instanceStack.head.address
def address(i: Int) = instanceStack(i).address
def addresses: Seq[Option[InetSocketAddress]] = instanceStack.map { i =>
i.address
}
def hostAddresses(from: Int = 0, until: Int = instanceStack.size): String = {
require(instanceStack.nonEmpty)
addresses
.slice(from, until)
.map { address =>
val addy = address.get
"%s:%d".format("127.0.0.1", addy.getPort())
}
.sorted
.mkString(",")
}
def start(count: Int = 1, mode: RedisMode = RedisMode.Standalone): Seq[ExternalRedis] = {
(0 until count).map { i =>
start(new ExternalRedis(mode))
}
}
def start(instance: ExternalRedis): ExternalRedis = {
instance.start()
instanceStack.push(instance)
instance
}
def stop(): ExternalRedis = {
val instance = instanceStack.pop()
instance.stop()
instance
}
def stopAll(): Unit = {
instanceStack.foreach { i =>
i.stop()
}
instanceStack.clear
}
// Make sure the process is always killed eventually
Runtime
.getRuntime()
.addShutdownHook(new Thread {
override def run(): Unit = {
self.instanceStack.foreach { instance =>
instance.stop()
}
}
})
}
/** The mode a spawned redis-server instance should run in. */
sealed trait RedisMode
object RedisMode {
  /** Plain single-node server with a minimal config. */
  case object Standalone extends RedisMode
  /** Server started with the `--sentinel` flag. */
  case object Sentinel extends RedisMode
  /** Server started with `cluster-enabled yes` in its config file. */
  case object Cluster extends RedisMode
}
/**
 * Manages the lifecycle of a single locally spawned redis-server process.
 * Construction verifies the binary exists and reserves a free local port.
 */
class ExternalRedis(mode: RedisMode = RedisMode.Standalone) {
  private[this] val rand = new Random
  private[this] var process: Option[Process] = None
  // Candidate ports, taken from the ephemeral range.
  private[this] val possiblePorts = 49152.until(55535)
  var address: Option[InetSocketAddress] = None

  private[this] def assertRedisBinaryPresent(): Unit = {
    require(RedisTestHelper.redisServerExists, "redis-server binary must be present.")
  }

  // Probes candidate ports from the top of the range downwards until one can
  // be bound, recording it as this instance's address; fails hard if none is free.
  private[this] def findAddress(): Unit = {
    var tries = possiblePorts.size - 1
    while (address.isEmpty && tries >= 0) {
      val addr = new InetSocketAddress(possiblePorts(tries))
      val socket = new Socket
      try {
        socket.setReuseAddress(true)
        socket.bind(addr)
        address = Some(addr)
      } catch {
        case exc: Exception =>
          // Port taken — back off briefly and try the next candidate.
          address = None
          tries -= 1
          Thread.sleep(5)
      } finally {
        socket.close()
      }
    }
    address.getOrElse { sys.error("Couldn't get an address for the external redis instance") }
  }

  // Writes a throw-away redis config file for the given port (with extra
  // cluster settings in Cluster mode); all temp files are deleted on JVM exit.
  protected def createConfigFile(port: Int): File = {
    val confFile = File.createTempFile("redis-" + rand.nextInt(1000), ".tmp")
    val nodesFile = File.createTempFile("redis-nodes-" + rand.nextInt(1000), ".tmp")
    val appendFile = File.createTempFile("redis-append-" + rand.nextInt(1000), ".aof")
    val dbFile = File.createTempFile("redis-db-" + rand.nextInt(1000), ".db")
    confFile.deleteOnExit()
    nodesFile.deleteOnExit()
    appendFile.deleteOnExit()
    dbFile.deleteOnExit()

    val out = new PrintWriter(new BufferedWriter(new FileWriter(confFile)))
    var conf = "port %s".format(port)
    if (mode == RedisMode.Cluster) {
      conf += s"""
      cluster-enabled yes
      cluster-config-file ${nodesFile.getAbsolutePath}
      cluster-node-timeout 5000
      appendonly yes
      dir ${appendFile.getParent}
      appendfilename ${appendFile.getName}
      dbfilename ${dbFile.getName}
      """
    }
    out.write(conf)
    out.println()
    out.close()
    confFile
  }

  // Launches redis-server with the generated config (adding --sentinel in
  // Sentinel mode).
  // NOTE(review): the fixed 200ms sleep is the only startup grace period —
  // there is no readiness check; confirm it suffices on slow machines.
  def start(): Unit = {
    val port = address.get.getPort()
    val conf = createConfigFile(port).getAbsolutePath
    val cmd: Seq[String] = if (mode == RedisMode.Sentinel) {
      Seq("redis-server", conf, "--sentinel")
    } else {
      Seq("redis-server", conf)
    }
    val builder = new ProcessBuilder(cmd.asJava)
    process = Some(builder.start())
    Thread.sleep(200)
  }

  // Destroys the server process and waits for it to exit.
  def stop(): Unit = {
    process.foreach { p =>
      p.destroy()
      p.waitFor()
    }
  }

  def restart(): Unit = {
    stop()
    start()
  }

  // Builds a rich client bound to this instance's port.
  def newClient(): Client = Redis.newRichClient(s"127.0.0.1:${address.get.getPort}")

  // Runs f with a fresh client and always closes it afterwards.
  def withClient[T](f: Client => T): T = {
    val client = newClient
    try f(client)
    finally client.close()
  }

  // Constructor side effects: validate the binary and reserve a port.
  assertRedisBinaryPresent()
  findAddress()
}
| luciferous/finagle | finagle-redis/src/main/scala/com/twitter/finagle/redis/util/TestServer.scala | Scala | apache-2.0 | 5,632 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.functions.aggfunctions
import java.math.BigDecimal
import java.lang.{Iterable => JIterable}
import org.apache.flink.api.common.typeinfo.{BasicTypeInfo, TypeInformation}
import org.apache.flink.api.java.tuple.{Tuple2 => JTuple2}
import org.apache.flink.api.java.typeutils.TupleTypeInfo
import org.apache.flink.table.functions.AggregateFunction
/** The initial accumulator for Sum aggregate function */
// f0 holds the running sum; f1 records whether at least one value was added,
// so an empty sum can be distinguished from a sum of zero.
class SumAccumulator[T] extends JTuple2[T, Boolean]

/**
  * Base class for built-in Sum aggregate function
  *
  * @tparam T the type for the aggregation result
  */
abstract class SumAggFunction[T: Numeric] extends AggregateFunction[T, SumAccumulator[T]] {

  private val numeric = implicitly[Numeric[T]]

  override def createAccumulator(): SumAccumulator[T] = {
    val acc = new SumAccumulator[T]()
    acc.f0 = numeric.zero //sum
    acc.f1 = false
    acc
  }

  // Adds a non-null input to the running sum and marks the accumulator non-empty.
  def accumulate(accumulator: SumAccumulator[T], value: Any): Unit = {
    if (value != null) {
      val v = value.asInstanceOf[T]
      accumulator.f0 = numeric.plus(v, accumulator.f0)
      accumulator.f1 = true
    }
  }

  // Returns null when no value was ever accumulated.
  override def getValue(accumulator: SumAccumulator[T]): T = {
    if (accumulator.f1) {
      accumulator.f0
    } else {
      null.asInstanceOf[T]
    }
  }

  // Folds the partial sums of other accumulators into acc.
  def merge(acc: SumAccumulator[T], its: JIterable[SumAccumulator[T]]): Unit = {
    val iter = its.iterator()
    while (iter.hasNext) {
      val a = iter.next()
      if (a.f1) {
        acc.f0 = numeric.plus(acc.f0, a.f0)
        acc.f1 = true
      }
    }
  }

  // Resets the accumulator back to the empty state.
  def resetAccumulator(acc: SumAccumulator[T]): Unit = {
    acc.f0 = numeric.zero
    acc.f1 = false
  }

  // Tuple type info of (sum value, non-empty flag) for the runtime.
  def getAccumulatorType(): TypeInformation[_] = {
    new TupleTypeInfo(
      (new SumAccumulator).getClass,
      getValueTypeInfo,
      BasicTypeInfo.BOOLEAN_TYPE_INFO)
  }

  // Type info of the summed value, supplied by each concrete subclass.
  def getValueTypeInfo: TypeInformation[_]
}
/** Built-in sum aggregate function for `Byte` values. */
class ByteSumAggFunction extends SumAggFunction[Byte] {
  override def getValueTypeInfo: TypeInformation[_] = BasicTypeInfo.BYTE_TYPE_INFO
}

/** Built-in sum aggregate function for `Short` values. */
class ShortSumAggFunction extends SumAggFunction[Short] {
  override def getValueTypeInfo: TypeInformation[_] = BasicTypeInfo.SHORT_TYPE_INFO
}

/** Built-in sum aggregate function for `Int` values. */
class IntSumAggFunction extends SumAggFunction[Int] {
  override def getValueTypeInfo: TypeInformation[_] = BasicTypeInfo.INT_TYPE_INFO
}

/** Built-in sum aggregate function for `Long` values. */
class LongSumAggFunction extends SumAggFunction[Long] {
  override def getValueTypeInfo: TypeInformation[_] = BasicTypeInfo.LONG_TYPE_INFO
}

/** Built-in sum aggregate function for `Float` values. */
class FloatSumAggFunction extends SumAggFunction[Float] {
  override def getValueTypeInfo: TypeInformation[_] = BasicTypeInfo.FLOAT_TYPE_INFO
}

/** Built-in sum aggregate function for `Double` values. */
class DoubleSumAggFunction extends SumAggFunction[Double] {
  override def getValueTypeInfo: TypeInformation[_] = BasicTypeInfo.DOUBLE_TYPE_INFO
}
/** The initial accumulator for Big Decimal Sum aggregate function */
// f0 holds the running sum; f1 records whether any value was accumulated.
class DecimalSumAccumulator extends JTuple2[BigDecimal, Boolean] {
  f0 = BigDecimal.ZERO
  f1 = false
}

/**
  * Built-in Big Decimal Sum aggregate function
  */
class DecimalSumAggFunction extends AggregateFunction[BigDecimal, DecimalSumAccumulator] {

  override def createAccumulator(): DecimalSumAccumulator = {
    new DecimalSumAccumulator
  }

  // Adds a non-null input to the running sum and marks the accumulator non-empty.
  def accumulate(acc: DecimalSumAccumulator, value: Any): Unit = {
    if (value != null) {
      val v = value.asInstanceOf[BigDecimal]
      acc.f0 = acc.f0.add(v)
      acc.f1 = true
    }
  }

  // Returns null when no value was ever accumulated.
  override def getValue(acc: DecimalSumAccumulator): BigDecimal = {
    if (!acc.f1) {
      null.asInstanceOf[BigDecimal]
    } else {
      acc.f0
    }
  }

  // Folds the partial sums of other accumulators into acc.
  def merge(acc: DecimalSumAccumulator, its: JIterable[DecimalSumAccumulator]): Unit = {
    val iter = its.iterator()
    while (iter.hasNext) {
      val a = iter.next()
      if (a.f1) {
        acc.f0 = acc.f0.add(a.f0)
        acc.f1 = true
      }
    }
  }

  // Resets the accumulator back to the empty state.
  def resetAccumulator(acc: DecimalSumAccumulator): Unit = {
    acc.f0 = BigDecimal.ZERO
    acc.f1 = false
  }

  // Tuple type info of (sum value, non-empty flag) for the runtime.
  def getAccumulatorType(): TypeInformation[_] = {
    new TupleTypeInfo(
      (new DecimalSumAccumulator).getClass,
      BasicTypeInfo.BIG_DEC_TYPE_INFO,
      BasicTypeInfo.BOOLEAN_TYPE_INFO)
  }
}
| hwstreaming/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/functions/aggfunctions/SumAggFunction.scala | Scala | apache-2.0 | 5,099 |
package skuber.json
import org.specs2.mutable.Specification // for unit-style testing
import org.specs2.execute.Result
import org.specs2.execute.Failure
import org.specs2.execute.Success
import scala.math.BigInt
import java.util.Calendar
import skuber.EnvVar
import format._
import play.api.libs.json._
/** Unit specification for the skuber JSON formatter for env vars. */
class EnvVarSpec extends Specification {
  "This is a unit specification for the skuber formatter for env vars.\\n  ".txt

  // EnvVar reader and writer
  "An EnvVar can be read from json\\n" >> {
    "this can be done for an env var with a field ref with a field path" >> {
      // Parse the JSON representation and compare against the expected model value.
      val jsonText =
        """
          |{
          |  "name": "PODNAME",
          |  "valueFrom" : {
          |    "fieldRef": {
          |      "fieldPath": "metadata.name"
          |    }
          |  }
          |}
        """.stripMargin
      val parsed = Json.fromJson[EnvVar](Json.parse(jsonText)).get
      val expected = EnvVar("PODNAME", EnvVar.FieldRef("metadata.name"))
      parsed mustEqual expected
    }
  }
}
| doriordan/skuber | client/src/test/scala/skuber/json/EnvVarSpec.scala | Scala | apache-2.0 | 980 |
package org.scalaide.core.internal.jdt.model
import java.util.{ HashMap ⇒ JHashMap }
import scala.tools.eclipse.contribution.weaving.jdt.IScalaSourceFile
import scala.tools.nsc.interactive.Response
import scala.tools.nsc.io.AbstractFile
import scala.tools.nsc.io.VirtualFile
import scala.util.control.Exception
import org.eclipse.core.resources.IFile
import org.eclipse.core.runtime.CoreException
import org.eclipse.core.runtime.IPath
import org.eclipse.core.runtime.IProgressMonitor
import org.eclipse.jdt.core.ICompilationUnit
import org.eclipse.jdt.core.IJavaElement
import org.eclipse.jdt.core.IType
import org.eclipse.jdt.core.JavaModelException
import org.eclipse.jdt.core.WorkingCopyOwner
import org.eclipse.jdt.core.compiler.CharOperation
import org.eclipse.jdt.core.compiler.IProblem
import org.eclipse.jdt.core.dom.CompilationUnit
import org.eclipse.jdt.internal.core.{ CompilationUnit ⇒ JDTCompilationUnit }
import org.eclipse.jdt.internal.core.OpenableElementInfo
import org.eclipse.jdt.internal.core.PackageFragment
import org.eclipse.jdt.internal.core.util.HandleFactory
import org.scalaide.core.compiler.InteractiveCompilationUnit
import org.scalaide.core.compiler.ScalaCompilationProblem
import org.scalaide.core.extensions.SourceFileProvider
import org.scalaide.core.resources.EclipseFile
/** Adapts workspace paths into Scala compilation units for the SourceFileProvider extension point. */
class ScalaSourceFileProvider extends SourceFileProvider {
  override def createFrom(path: IPath): Option[InteractiveCompilationUnit] = {
    val workspacePath = path.toString
    ScalaSourceFile.createFromPath(workspacePath)
  }
}
object ScalaSourceFile {

  /**
   * [[org.eclipse.jdt.internal.core.util.HandleFactory]] is not thread-safe, and
   * `ScalaSourceFile#createFromPath` may be invoked concurrently from different
   * threads, so a `ThreadLocal` guarantees no factory instance is ever shared.
   */
  private val handleFactory: ThreadLocal[HandleFactory] = new ThreadLocal[HandleFactory] {
    override protected def initialValue(): HandleFactory = new HandleFactory
  }

  /**
   * Creates a Scala source file handle if the given workbench resource path
   * (e.g. /Proj/a/b/c/Foo.scala) points to a Scala source.
   *
   * @note This assumes that the resource path is the toString() of an `IPath`.
   *
   * @param path Is a path to a Scala source file in the workbench.
   */
  def createFromPath(path: String): Option[ScalaSourceFile] =
    if (!path.endsWith(".scala")) None
    else {
      // The scope argument is only relevant when searching; opening a source
      // file does not need one, hence null.
      handleFactory.get().createOpenable(path, null) match {
        case ssf: ScalaSourceFile => Some(ssf)
        case _ => None
      }
    }
}
/**
 * JDT compilation-unit implementation backing a Scala source file, wiring the
 * JDT model to the Scala presentation compiler.
 */
class ScalaSourceFile(fragment: PackageFragment, elementName: String, workingCopyOwner: WorkingCopyOwner)
  extends JDTCompilationUnit(fragment, elementName, workingCopyOwner) with ScalaCompilationUnit with IScalaSourceFile {

  // Element name minus the ".scala" suffix.
  override def getMainTypeName: Array[Char] =
    getElementName.substring(0, getElementName.length - ".scala".length).toCharArray()

  /** Schedule this source file for reconciliation. Add the file to
   *  the loaded files managed by the presentation compiler.
   */
  override def initialReconcile(): Response[Unit] = {
    val reloaded = super.initialReconcile()
    this.reconcile(
      ICompilationUnit.NO_AST,
      false /* don't force problem detection */,
      null /* use primary owner */,
      null /* no progress monitor */)
    reloaded
  }

  def reconcile(newContents: String): List[ScalaCompilationProblem] = {
    super.forceReconcile()
  }

  /** We cut short this call since reconciliation is performed through the usual mechanism in the
   *  editor. Calls arriving here come from the JDT, for instance from the breadcrumb view, and end
   *  up doing expensive computation on the UI thread.
   *
   *  @see #1002412
   */
  override def reconcile(
    astLevel: Int,
    reconcileFlags: Int,
    workingCopyOwner: WorkingCopyOwner,
    monitor: IProgressMonitor): CompilationUnit = {
    null
  }

  override def makeConsistent(
    astLevel: Int,
    resolveBindings: Boolean,
    reconcileFlags: Int,
    problems: JHashMap[_, _],
    monitor: IProgressMonitor): CompilationUnit = {
    // don't rerun this expensive operation unless necessary
    if (!isConsistent()) {
      if (astLevel != ICompilationUnit.NO_AST && resolveBindings) {
        val info = createElementInfo.asInstanceOf[OpenableElementInfo]
        openWhenClosed(info, true, monitor)
      } else
        logger.info(s"Skipped `makeConsistent` with resolveBindings: $resolveBindings and astLevel: $astLevel")
    }
    null
  }

  override def codeSelect(offset: Int, length: Int, workingCopyOwner: WorkingCopyOwner): Array[IJavaElement] =
    codeSelect(this, offset, length, workingCopyOwner)

  // Backing file: the workspace resource when available, otherwise a virtual file.
  override lazy val file: AbstractFile = {
    val res = try { getCorrespondingResource } catch { case _: JavaModelException ⇒ null }
    res match {
      case f: IFile ⇒ new EclipseFile(f)
      case _ ⇒ new VirtualFile(getElementName, getPath.toString)
    }
  }

  /** Implementing the weaving interface requires to return `null` for an empty array. */
  override def getProblems: Array[IProblem] = {
    val probs = currentProblems()
    if (probs.isEmpty) null else probs.toArray
  }

  override def getType(name: String): IType =
    new LazyToplevelClass(this, name)

  override def getContents(): Array[Char] = {
    // in the following case, super#getContents() logs an exception for no good reason
    if (getBufferManager().getBuffer(this) == null && getResource().getLocation() == null && getResource().getLocationURI() == null) {
      return CharOperation.NO_CHAR
    }
    Exception.failAsValue(classOf[CoreException])(CharOperation.NO_CHAR) { super.getContents() }
  }

  /** Makes sure {{{this}}} source is not in the ignore buffer of the compiler and ask the compiler to reload it. */
  final def forceReload(): Unit = scalaProject.presentationCompiler { compiler ⇒
    compiler.askToDoFirst(this)
    reload()
  }

  /** Ask the compiler to reload {{{this}}} source. */
  final def reload(): Unit = scalaProject.presentationCompiler { _.askReload(this, lastSourceMap().sourceFile) }

  /** Ask the compiler to discard {{{this}}} source. */
  final def discard(): Unit = scalaProject.presentationCompiler { _.discardCompilationUnit(this) }
}
| scala-ide/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/core/internal/jdt/model/ScalaSourceFile.scala | Scala | bsd-3-clause | 6,570 |
/**
* Copyright (C) 2016 Verizon. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.verizon.bda.trapezium.framework
import java.io.{File, FileInputStream}
import java.util.Properties
import com.typesafe.config.{Config, ConfigFactory}
import com.verizon.bda.trapezium.framework.utils.EmptyRegistrator
import org.slf4j.LoggerFactory
/**
* @author Pankaj on 10/30/15.
* debasish83 common getProperties to read Java/Typesafe properties
*/
trait ApplicationManagerStartup {

  /** Startup hook implemented by concrete application managers. */
  def init(envMgr: String,
           configDirMgr: String,
           persistSchemaMgr: String)

  val logger = LoggerFactory.getLogger(this.getClass)

  /**
   * Loads a java.util.Properties file either from the classpath (when
   * configDir is null) or from the given directory on disk.
   * Streams are closed after loading to avoid file-descriptor leaks.
   */
  def readProperties(configDir: String, propertyFile: String): Properties = {
    val properties = new Properties()
    if (configDir == null) {
      logger.info(s"Reading property file ${propertyFile} from jar")
      val stream = classOf[EmptyRegistrator].getClassLoader().getResourceAsStream(propertyFile)
      // getResourceAsStream returns null for a missing resource; load(null)
      // would then NPE just as before, but a found stream is always closed.
      try properties.load(stream)
      finally if (stream != null) stream.close()
    } else {
      logger.info(s"Reading property file ${propertyFile} from ${configDir}")
      val stream = new FileInputStream(s"${configDir}/$propertyFile")
      try properties.load(stream)
      finally stream.close()
    }
    properties
  }

  /**
   * Loads a Typesafe Config either from the classpath (when configDir is
   * null) or by parsing the file in the given directory.
   */
  def readConfigs(configDir: String, configFile: String): Config = {
    if (configDir == null) {
      logger.info(s"Reading config file ${configFile} from jar")
      ConfigFactory.load(configFile)
    } else {
      logger.info(s"Reading config file ${configFile} from ${configDir}")
      ConfigFactory.parseFile(new File(s"${configDir}/$configFile"))
    }
  }
}
| Verizon/trapezium | framework/src/main/scala/com/verizon/bda/trapezium/framework/ApplicationManagerStartup.scala | Scala | apache-2.0 | 2,037 |
/* Copyright 2009-2021 EPFL, Lausanne */
package stainless
package utils
import java.io.{ File, PrintWriter }
import java.util.Scanner
import scala.util.{ Left, Right }
import io.circe.Json
import io.circe.parser._
object JsonUtils {

  /** Parses a JSON string, throwing the underlying circe failure on invalid input. */
  def parseString(str: String): Json = parse(str) match {
    case Right(json) => json
    case Left(error) => throw error
  }

  /**
   * Reads the whole file line by line and parses it as JSON.
   * The Scanner is now closed in all cases (previously it leaked, even on success).
   */
  def parseFile(file: File): Json = {
    val sc = new Scanner(file)
    try {
      val sb = new StringBuilder
      while (sc.hasNextLine) { sb ++= sc.nextLine + "\\n" }
      parseString(sb.toString)
    } finally {
      sc.close()
    }
  }

  /** Writes the JSON to the file in compact (no-spaces) form, always closing the writer. */
  def writeFile(file: File, json: Json): Unit = {
    val string = json.noSpaces
    val pw = new PrintWriter(file)
    try pw.write(string) finally pw.close()
  }
}
| epfl-lara/stainless | core/src/main/scala/stainless/utils/JsonUtils.scala | Scala | apache-2.0 | 727 |
import org.apache.spark.sql.SparkSession
import org.jsoup.Jsoup
import scala.collection.JavaConverters._
/**
 * Scrapes the latest index page of a PTT board (board name in args(0)) and
 * shows the parsed article list as a Spark DataFrame.
 */
object LatestListCrawler {
  def main(args: Array[String]): Unit = {
    val sparkSession = SparkSession.builder
      .appName("PTTCrawler")
      .getOrCreate()
    val sc = sparkSession.sparkContext
    import sparkSession.implicits._
    // Broadcast shared constants so they are serialized once.
    val domain = sc.broadcast("https://www.ptt.cc")
    // Splits "Re: [Category] Title" into reply marker, category and title.
    val titleRegex = sc.broadcast("""(?<reply>Re:)?\s*\[(?<category>.+)\]\s*(?<title>.+)""".r)
    // Extracts the article id from links of the form /bbs/<board>/<id>.html
    val linkRegex = sc.broadcast("""/bbs\/\S+\/(?<id>\S+)\.html""".r)
    val board = args(0)
    val url = s"https://www.ptt.cc/bbs/$board/index.html"
    // The over18 cookie bypasses PTT's age-confirmation interstitial.
    val doc = Jsoup.connect(url).cookie("over18", "1").get()
    // One row per article entry on the index page.
    // NOTE(review): `Row` is not among this file's visible imports — confirm
    // it resolves (e.g. org.apache.spark.sql.Row) and that `.toDF` works on a
    // List[Row] without an explicit schema.
    val latest = doc.select("div.r-ent").iterator().asScala.map(x => {
      val niceCount = x.select("div.nrec > span.hl").text
      val titleLink = x.select("div.title > a")
      val titleText = titleLink.text
      val link = titleLink.attr("href")
      // (isReply flag, category, bare title); falls back to the raw text when
      // the title does not match the "[Category] Title" pattern.
      val title = titleRegex.value.findFirstMatchIn(titleText) match {
        case Some(m) => (if(m.group(1) != null) "1" else "0", m.group(2), m.group(3))
        case None => ("0", "", titleText)
      }
      val id = linkRegex.value.findFirstMatchIn(link) match {
        case Some(m) => m.group(1)
        case None => ""
      }
      val date = x.select("div.meta > div.date").text
      val author = x.select("div.meta > div.author").text
      Row(niceCount, title._1, date, id, author, title._2, title._3, if(link.isEmpty) "" else domain.value + link)
    }).toList.toDF
    latest.show()
  }
}
| cinic0101/spark-web-ptt-crawler | src/main/scala/LatestListCrawler.scala | Scala | mit | 1,572 |
package se.culvertsoft.mgen.visualdesigner.model
import scala.collection.JavaConversions.asScalaBuffer
import scala.collection.mutable.ArrayBuffer
import scala.language.implicitConversions
/** Pairs a visited entity with its parent; parent is null for the traversal root. */
case class ChildParent(child: Entity, parent: Entity)

/**
 * Base wrapper adding traversal and child-management operations to model
 * entities. The defaults throw; the concrete Rich* wrappers override the
 * operations that are valid for their entity type.
 */
class RichEntity(base: Entity) {

  def clear() { throw new RuntimeException(s"Cannot call clear() on $this") }
  def traverse(parent: Entity, f: ChildParent => Boolean) { throw new RuntimeException(s"Cannot call traverse(..) on $this") }
  def moveChildUp(child: Entity) { throw new RuntimeException(s"Cannot call moveChildUp(..) on $this") }
  // Fixed: the error message previously said moveChildUp for this method too.
  def moveChildDown(child: Entity) { throw new RuntimeException(s"Cannot call moveChildDown(..) on $this") }
  def canBeParentOf(e: Entity): Boolean = { throw new RuntimeException(s"Cannot call canBeParentOf(..,..) on $this") }

  def add(e: Project) { throw new RuntimeException(s"Cannot add $e to $this") }
  def add(e: Module) { throw new RuntimeException(s"Cannot add $e to $this") }
  def add(e: EnumEntry) { throw new RuntimeException(s"Cannot add $e to $this") }
  def add(e: EnumType) { throw new RuntimeException(s"Cannot add $e to $this") }
  def add(e: CustomType) { throw new RuntimeException(s"Cannot add $e to $this") }
  def add(e: CustomTypeField) { throw new RuntimeException(s"Cannot add $e to $this") }

  def remove(e: Project) { throw new RuntimeException(s"Cannot transferAway $e from $this") }
  def remove(e: Module) { throw new RuntimeException(s"Cannot transferAway $e from $this") }
  def remove(e: EnumEntry) { throw new RuntimeException(s"Cannot transferAway $e from $this") }
  def remove(e: EnumType) { throw new RuntimeException(s"Cannot transferAway $e from $this") }
  def remove(e: CustomType) { throw new RuntimeException(s"Cannot transferAway $e from $this") }
  def remove(e: CustomTypeField) { throw new RuntimeException(s"Cannot transferAway $e from $this") }

  /** Depth-first traversal starting at this entity (root has parent null). */
  final def traverse(f: ChildParent => Boolean) { traverse(null, f) }
  /** Visits every node, including this one, ignoring the visitor's result. */
  final def foreach[A](f: ChildParent => A) { traverse({ x: ChildParent => f(x); true }) }
  /** Visits only the direct children: descend exactly one level below the root. */
  final def foreachFirstLevelChild[A](f: ChildParent => A) { traverse { x: ChildParent => if (x.parent != null) f(x); x.parent == null } }
  final def firstLevelChildren(): Seq[Entity] = { val out = new ArrayBuffer[Entity]; foreachFirstLevelChild(out += _.child); out }
  final def numFirstLevelChildren(): Int = { var out = 0; foreachFirstLevelChild(_ => out += 1); out }
  /** All descendants, excluding this entity itself. */
  final def allChildren(): Seq[Entity] = { val out = new ArrayBuffer[Entity]; foreach(cp => if (cp.parent != null) out += cp.child); out }

  /** True when an entity with the same id exists anywhere in this subtree (including the root). */
  final def contains(e: Entity): Boolean = {
    findFirst(_.child.getId() == e.getId()).isDefined
  }

  /** True when e is (by reference) a direct child of this entity. */
  final def containsAtFirstLevel(e: Entity): Boolean = {
    foreachFirstLevelChild { cp =>
      if (cp.child eq e) {
        return true
      }
    }
    return false
  }

  // Note: uses a non-local return to abort the traversal at the first hit.
  final def findFirst(f: ChildParent => Boolean): Option[ChildParent] = {
    foreach { cp =>
      if (f(cp)) {
        return Some(cp)
      }
    }
    return None
  }

  final def findLast(f: ChildParent => Boolean): Option[ChildParent] = {
    var out: Option[ChildParent] = None
    foreach { cp =>
      if (f(cp)) {
        out = Some(cp)
      }
    }
    return out
  }

  /** Dispatches to the statically typed add overload for the entity's runtime type. */
  def add(e: Entity) {
    e match {
      case e: Project => add(e)
      case e: Module => add(e)
      case e: EnumEntry => add(e)
      case e: EnumType => add(e)
      case e: CustomType => add(e)
      case e: CustomTypeField => add(e)
    }
  }

  /** Dispatches to the statically typed remove overload for the entity's runtime type. */
  def remove(e: Entity) {
    e match {
      case e: Project => remove(e)
      case e: Module => remove(e)
      case e: EnumEntry => remove(e)
      case e: EnumType => remove(e)
      case e: CustomType => remove(e)
      case e: CustomTypeField => remove(e)
    }
  }
}
/** Rich wrapper for Project entities; children are dependency projects and modules. */
class RichProject(base: Project) extends RichEntity(base) {
  import ModelOps._

  // NOTE(review): only modules are cleared, dependencies are kept — confirm intended.
  override def clear() {
    base.getModules().clear()
  }

  // Visits this project, then its dependency projects, then its modules.
  override def traverse(parent: Entity, f: ChildParent => Boolean) {
    if (f(ChildParent(base, parent))) {
      for (d <- base.getDependencies()) {
        d.traverse(base, f)
      }
      for (m <- base.getModules()) {
        m.traverse(base, f)
      }
    }
  }

  // A project may contain anything except custom types directly.
  override def canBeParentOf(e: Entity): Boolean = {
    e match {
      case e: CustomType => false
      case _ => true
    }
  }

  override def add(e: Project) { base.getDependencies().add(e) }
  override def add(e: Module) { base.getModules().add(e) }

  override def remove(e: Project) { base.getDependencies().remove(e) }
  override def remove(e: Module) { base.getModules().remove(e) }

  // Moves a module one position earlier in the module list (no-op at the top).
  // NOTE(review): only Module children are handled — any other entity type
  // raises a MatchError here; confirm that is intended.
  override def moveChildUp(child: Entity) {
    child match {
      case module: Module =>
        val iprev = base.getModules().indexWhere(_ eq module)
        if (iprev >= 1) {
          base.getModules().remove(iprev)
          base.getModules().insert(iprev - 1, module)
        }
    }
  }

  // Moves a module one position later in the module list (no-op at the bottom).
  override def moveChildDown(child: Entity) {
    child match {
      case module: Module =>
        val iprev = base.getModules().indexWhere(_ eq module)
        if (iprev < (base.getModules().size() - 1)) {
          base.getModules().remove(iprev)
          base.getModules().insert(iprev + 1, module)
        }
    }
  }
}
/** Rich wrapper for Module entities; children are submodules, enums and custom types. */
class RichModule(base: Module) extends RichEntity(base) {
  import ModelOps._

  override def clear() {
    base.getSubmodules().clear()
    base.getEnums().clear()
    base.getTypes().clear()
  }

  // Visits this module, then submodules, enums and custom types in that order.
  override def traverse(parent: Entity, f: ChildParent => Boolean) {
    if (f(ChildParent(base, parent))) {
      for (m <- base.getSubmodules()) {
        m.traverse(base, f)
      }
      for (t <- base.getEnums()) {
        t.traverse(base, f)
      }
      for (t <- base.getTypes()) {
        t.traverse(base, f)
      }
    }
  }

  override def canBeParentOf(e: Entity): Boolean = {
    e match {
      case e: Module => true
      case e: CustomType => true
      case e: EnumType => true
      case _ => false
    }
  }

  override def add(e: Module) { base.getSubmodules().add(e) }
  override def add(e: EnumType) { base.getEnums().add(e) }
  override def add(e: CustomType) { base.getTypes().add(e) }

  override def remove(e: Module) { base.getSubmodules().remove(e) }
  override def remove(e: EnumType) { base.getEnums().remove(e) }
  override def remove(e: CustomType) { base.getTypes().remove(e) }

  // Moves a child one position earlier within its own sibling list (no-op at the top).
  // NOTE(review): entity types other than Module/EnumType/CustomType raise a
  // MatchError here; confirm that is intended.
  override def moveChildUp(child: Entity) {
    child match {
      case module: Module =>
        val iprev = base.getSubmodules().indexWhere(_ eq module)
        if (iprev >= 1) {
          base.getSubmodules().remove(iprev)
          base.getSubmodules().insert(iprev - 1, module)
        }
      case typ: EnumType =>
        val iprev = base.getEnums().indexWhere(_ eq typ)
        if (iprev >= 1) {
          base.getEnums().remove(iprev)
          base.getEnums().insert(iprev - 1, typ)
        }
      case typ: CustomType =>
        val iprev = base.getTypes().indexWhere(_ eq typ)
        if (iprev >= 1) {
          base.getTypes().remove(iprev)
          base.getTypes().insert(iprev - 1, typ)
        }
    }
  }

  // Moves a child one position later within its own sibling list (no-op at the bottom).
  override def moveChildDown(child: Entity) {
    child match {
      case module: Module =>
        val iprev = base.getSubmodules().indexWhere(_ eq module)
        if (iprev < (base.getSubmodules().size() - 1)) {
          base.getSubmodules().remove(iprev)
          base.getSubmodules().insert(iprev + 1, module)
        }
      case typ: EnumType =>
        val iprev = base.getEnums().indexWhere(_ eq typ)
        if (iprev < (base.getEnums().size() - 1)) {
          base.getEnums().remove(iprev)
          base.getEnums().insert(iprev + 1, typ)
        }
      case typ: CustomType =>
        val iprev = base.getTypes().indexWhere(_ eq typ)
        if (iprev < (base.getTypes().size() - 1)) {
          base.getTypes().remove(iprev)
          base.getTypes().insert(iprev + 1, typ)
        }
    }
  }
}
/** Leaf entity: an enum entry has no children, so all child operations are no-ops. */
class RichEnumEntry(base: EnumEntry) extends RichEntity(base) {
  override def clear(): Unit = ()
  override def traverse(parent: Entity, f: ChildParent => Boolean) {
    // Visit the entry itself; there is nothing below it to recurse into.
    f(ChildParent(base, parent))
    ()
  }
  override def canBeParentOf(e: Entity): Boolean = false
  override def moveChildUp(child: Entity): Unit = ()
  override def moveChildDown(child: Entity): Unit = ()
}
/** Rich wrapper around an EnumType; its children are the enum's entries. */
class RichEnumType(base: EnumType) extends RichEntity(base) {
  import ModelOps._
  override def clear() {
    base.getEntries().clear()
  }
  /** Visits this enum via `f`; when `f` accepts it, visits each entry. */
  override def traverse(parent: Entity, f: ChildParent => Boolean) {
    if (f(ChildParent(base, parent))) {
      for (field <- base.getEntries()) {
        field.traverse(base, f)
      }
    }
  }
  override def add(e: EnumEntry) {
    base.getEntries().add(e)
  }
  override def remove(e: EnumEntry) {
    base.getEntries().remove(e)
  }
  override def canBeParentOf(e: Entity): Boolean = {
    e match {
      case field: EnumEntry => true
      case _ => false
    }
  }
  /** Moves `child` one step towards the front; no-op when first or absent. */
  override def moveChildUp(child: Entity) {
    child match {
      case field: EnumEntry =>
        val iprev = base.getEntries().indexWhere(_ eq field)
        if (iprev >= 1) {
          base.getEntries().remove(iprev)
          base.getEntries().insert(iprev - 1, field)
        }
    }
  }
  /** Moves `child` one step towards the end; no-op when last or absent.
    * Fix: indexWhere yields -1 for an absent entry; without the `>= 0` guard
    * `remove(-1)` would throw IndexOutOfBoundsException. */
  override def moveChildDown(child: Entity) {
    child match {
      case field: EnumEntry =>
        val iprev = base.getEntries().indexWhere(_ eq field)
        if (iprev >= 0 && iprev < (base.getEntries().size() - 1)) {
          base.getEntries().remove(iprev)
          base.getEntries().insert(iprev + 1, field)
        }
    }
  }
}
/** Rich wrapper around a CustomType; its children are the type's fields. */
class RichCustomType(base: CustomType) extends RichEntity(base) {
  import ModelOps._
  override def clear() {
    base.getFields().clear()
  }
  /** Visits this type via `f`; when `f` accepts it, visits each field. */
  override def traverse(parent: Entity, f: ChildParent => Boolean) {
    if (f(ChildParent(base, parent))) {
      for (field <- base.getFields()) {
        field.traverse(base, f)
      }
    }
  }
  override def add(e: CustomTypeField) {
    base.getFields().add(e)
  }
  override def remove(e: CustomTypeField) {
    base.getFields().remove(e)
  }
  override def canBeParentOf(e: Entity): Boolean = {
    e match {
      case field: CustomTypeField => true
      case _ => false
    }
  }
  /** Moves `child` one step towards the front; no-op when first or absent. */
  override def moveChildUp(child: Entity) {
    child match {
      case field: CustomTypeField =>
        val iprev = base.getFields().indexWhere(_ eq field)
        if (iprev >= 1) {
          base.getFields().remove(iprev)
          base.getFields().insert(iprev - 1, field)
        }
    }
  }
  /** Moves `child` one step towards the end; no-op when last or absent.
    * Fix: indexWhere yields -1 for an absent field; without the `>= 0` guard
    * `remove(-1)` would throw IndexOutOfBoundsException. */
  override def moveChildDown(child: Entity) {
    child match {
      case field: CustomTypeField =>
        val iprev = base.getFields().indexWhere(_ eq field)
        if (iprev >= 0 && iprev < (base.getFields().size() - 1)) {
          base.getFields().remove(iprev)
          base.getFields().insert(iprev + 1, field)
        }
    }
  }
}
/** Leaf entity: a custom-type field has no children, so all child operations are no-ops. */
class RichCustomTypeField(base: CustomTypeField) extends RichEntity(base) {
  override def clear(): Unit = ()
  override def traverse(parent: Entity, f: ChildParent => Boolean) {
    // Visit the field itself; there is nothing below it to recurse into.
    f(ChildParent(base, parent))
    ()
  }
  override def canBeParentOf(e: Entity): Boolean = false
  override def moveChildUp(child: Entity): Unit = ()
  override def moveChildDown(child: Entity): Unit = ()
}
/** Implicit enrichment: wraps a concrete model Entity in its matching Rich* adapter.
  * NOTE(review): the name `toRichCustomType` is misleading — it converts every
  * Entity subtype, not just CustomType. Renaming would change the public API,
  * so it is only flagged here. Throws MatchError for unknown Entity subtypes.
  */
object ModelOps {
  implicit def toRichCustomType(base: Entity): RichEntity = {
    base match {
      case base: Project => new RichProject(base)
      case base: Module => new RichModule(base)
      case base: EnumEntry => new RichEnumEntry(base)
      case base: EnumType => new RichEnumType(base)
      case base: CustomType => new RichCustomType(base)
      case base: CustomTypeField => new RichCustomTypeField(base)
    }
  }
}
} | culvertsoft/mgen-visualdesigner | src/main/scala/se/culvertsoft/mgen/visualdesigner/model/ModelOps.scala | Scala | gpl-2.0 | 11,584 |
package org.littlewings.javaee7.cdi
import javax.enterprise.context.ApplicationScoped
import javax.inject.Inject
import org.apache.deltaspike.scheduler.api.Scheduled
import org.quartz.{Job, JobExecutionContext}
import org.slf4j.{Logger, LoggerFactory}
/** Quartz job fired at the top of every minute; logs through beans of four CDI scopes. */
@Scheduled(cronExpression = "0 0/1 * * * ?")
@ApplicationScoped
class QuartzBasedJob extends Job {
  // Injected CDI beans, one per scope under test.
  @Inject
  var applicationScopedMessageService: ApplicationScopedMessageService = _
  @Inject
  var sessionScopedMessageService: SessionScopedMessageService = _
  @Inject
  var requestScopedMessageService: RequestScopedMessageService = _
  @Inject
  var pseudoScopedMessageService: PseudoScopedMessageService = _
  val logger: Logger = LoggerFactory.getLogger(getClass)
  override def execute(context: JobExecutionContext): Unit = {
    val jobName = getClass.getSimpleName
    logger.info("[{}] startup job", jobName)
    applicationScopedMessageService.loggingMessage()
    sessionScopedMessageService.loggingMessage()
    requestScopedMessageService.loggingMessage()
    pseudoScopedMessageService.loggingMessage()
    logger.info("[{}] end job", jobName)
  }
}
/** Second Quartz job, offset by 10 seconds from QuartzBasedJob; same scope exercise. */
@Scheduled(cronExpression = "10 0/1 * * * ?")
@ApplicationScoped
class QuartzBasedJob2 extends Job {
  // Injected CDI beans, one per scope under test.
  @Inject
  var applicationScopedMessageService: ApplicationScopedMessageService = _
  @Inject
  var sessionScopedMessageService: SessionScopedMessageService = _
  @Inject
  var requestScopedMessageService: RequestScopedMessageService = _
  @Inject
  var pseudoScopedMessageService: PseudoScopedMessageService = _
  val logger: Logger = LoggerFactory.getLogger(getClass)
  override def execute(context: JobExecutionContext): Unit = {
    val jobName = getClass.getSimpleName
    logger.info("[{}] startup job", jobName)
    applicationScopedMessageService.loggingMessage()
    sessionScopedMessageService.loggingMessage()
    requestScopedMessageService.loggingMessage()
    pseudoScopedMessageService.loggingMessage()
    logger.info("[{}] end job", jobName)
  }
}
| kazuhira-r/javaee7-scala-examples | cdi-deltaspike-scheduler/src/main/scala/org/littlewings/javaee7/cdi/QuartzBasedJob.scala | Scala | mit | 1,980 |
// Copyright 2015,2016,2017,2018,2019,2020 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package commbank.grimlock.spark
import commbank.grimlock.framework.{ Persist => FwPersist, SaveStringsAsText => FwSaveStringsAsText }
import commbank.grimlock.framework.environment.tuner.{ Default, Tuner }
import commbank.grimlock.spark.environment.Context
import commbank.grimlock.spark.environment.tuner.SparkImplicits._
import org.apache.hadoop.io.Writable
import org.apache.spark.sql.{ Encoder => SparkEncoder, Row }
import scala.reflect.ClassTag
/** Trait for persisting a `RDD`. */
trait Persist[X] extends FwPersist[X, Context] {
  /** The underlying data. */
  val data: Context.U[X]

  /** Renders `data` line-by-line through `writer`, writes the lines to `file`,
    * and returns the (unmodified) underlying data. */
  protected def saveText[
    T <: Tuner
  ](
    context: Context,
    file: String,
    writer: FwPersist.TextWriter[X],
    tuner: T
  ): Context.U[X] = {
    val rendered = data.flatMap(writer(_))
    rendered.tunedSaveAsText(context, tuner, file)

    data
  }
}
/** Companion object to `Persist` with additional methods. */
object Persist {
  /** Spark parquet loader implementation using `DataFrameReader`; yields untyped Rows. */
  val parquetRowLoader = new FwPersist.Loader[Row, Context] {
    def load(context: Context, file: String): Context.U[Row] = context.session.sqlContext.read.parquet(file).rdd
  }

  /** Spark text file loader implementation (one element per line). */
  val textLoader = new FwPersist.Loader[String, Context] {
    def load(context: Context, file: String): Context.U[String] = context.session.sparkContext.textFile(file)
  }

  /** Function that provides spark parquet loader implementation. The method uses `DataFrameReader`
    * to read parquet and decodes each Row into `T` via the implicit Encoder. */
  def parquetLoader[T : ClassTag](implicit ev: SparkEncoder[T]) = new FwPersist.Loader[T, Context] {
    def load(
      context: Context,
      file: String
    ): Context.U[T] = context.session.sqlContext.read.parquet(file).as[T].rdd
  }

  /** Function that provides spark sequence file loader implementation (Hadoop Writable key/value pairs). */
  def sequenceLoader[
    K <: Writable : ClassTag,
    V <: Writable : ClassTag
  ] = new FwPersist.Loader[(K, V), Context] {
    def load(
      context: Context,
      file: String
    ): Context.U[(K, V)] = context.session.sparkContext.sequenceFile[K, V](file)
  }
}
}
/** Case class that enriches a `RDD` of strings with saveAsText functionality.
  * Each string is written verbatim; the tuner defaults to `Default()`. */
case class SaveStringsAsText(data: Context.U[String]) extends FwSaveStringsAsText[Context] with Persist[String] {
  def saveAsText[
    T <: Tuner
  ](
    context: Context,
    file: String,
    tuner: T = Default()
  )(implicit
    ev: FwPersist.SaveAsTextTuner[Context.U, T]
  // Identity writer: every string becomes exactly one output line.
  ): Context.U[String] = saveText(context, file, Option(_), tuner)
}
| CommBank/grimlock | grimlock-core/src/main/scala/commbank/grimlock/spark/Persist.scala | Scala | apache-2.0 | 3,184 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt.internal.util
package complete
/** Behavioural tests for FixedSetExamples prefix narrowing. */
class FixedSetExamplesTest extends UnitSpec {
  "adding a prefix" should "produce a smaller set of examples with the prefix removed" in {
    // `val _` discards the anonymous fixture; the assertions run in its constructor body.
    val _ = new Examples {
      fixedSetExamples.withAddedPrefix("f")() should contain theSameElementsAs
        (List("oo", "ool", "u"))
      fixedSetExamples.withAddedPrefix("fo")() should contain theSameElementsAs (List("o", "ol"))
      fixedSetExamples.withAddedPrefix("b")() should contain theSameElementsAs (List("ar"))
    }
  }

  "without a prefix" should "produce the original set" in {
    val _ = new Examples {
      fixedSetExamples() shouldBe exampleSet
    }
  }

  // Shared fixture: a small fixed vocabulary and the completion examples built over it.
  trait Examples {
    val exampleSet = List("foo", "bar", "fool", "fu")
    val fixedSetExamples = FixedSetExamples(exampleSet)
  }
}
| sbt/sbt | internal/util-complete/src/test/scala/sbt/complete/FixedSetExamplesTest.scala | Scala | apache-2.0 | 947 |
package org.jetbrains.plugins.scala.lang.macros.expansion
import java.io._
import com.intellij.codeInsight.daemon.DaemonCodeAnalyzer
import com.intellij.openapi.diagnostic.Logger
import com.intellij.openapi.fileEditor.FileEditorManager
import com.intellij.openapi.project.Project
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.psi.{PsiElement, PsiManager}
import org.jetbrains.plugins.scala.extensions
import scala.collection.mutable
import org.jetbrains.plugins.scala.extensions.invokeLater
import org.jetbrains.plugins.scala.lang.psi.api.base.ScAnnotation
import org.jetbrains.plugins.scala.util.{MacroExpansion, Place}
import scala.util.Using
/**
* @author Mikhail Mutcianko
* @since 20.09.16
*/
class ReflectExpansionsCollector(project: Project) {
  import ReflectExpansionsCollector._

  // Expansions gathered during the last compilation, keyed by (file path, offset).
  private val collectedExpansions: mutable.HashMap[Place, MacroExpansion] = mutable.HashMap.empty
  // Parser for compiler output; (re)created in compilationStarted.
  // NOTE(review): processCompilerMessage would NPE if invoked before
  // compilationStarted — confirm the caller guarantees the ordering.
  private var parser: ScalaReflectMacroExpansionParser = _

  private val LOG = Logger.getInstance(classOf[ReflectExpansionsCollector])

  // Restore any expansions cached by a previous IDE session.
  deserializeExpansions()

  /** Looks up a cached expansion for `elem`. Elements inside an annotation are
    * keyed by their start offset; all others by the end offset of their node. */
  def getExpansion(elem: PsiElement): Option[MacroExpansion] = {
    val offset = PsiTreeUtil.getParentOfType(elem, classOf[ScAnnotation]) match {
      case _: ScAnnotation => elem.getTextOffset
      case _ => elem.getNode.getTextRange.getEndOffset
    }
    val path = elem.getContainingFile.getVirtualFile.getPath
    val place = Place(path, offset)()
    collectedExpansions.get(place)
  }

  /** Feeds one line of compiler output to the expansion parser. */
  def processCompilerMessage(text: String): Unit = {
    parser.processMessage(text)
  }

  /** Resets collected state and creates a fresh parser for the new compilation. */
  def compilationStarted(): Unit = {
    collectedExpansions.clear()
    parser = new ScalaReflectMacroExpansionParser(project.getName)
  }

  /** Stores the parsed expansions, persists them, and re-highlights open editors. */
  def compilationFinished(): Unit = {
    parser.expansions.foreach { exp => collectedExpansions += exp.place -> exp }
    serializeExpansions()
    // Only restart highlighting when there is something new to display.
    if (collectedExpansions.nonEmpty)
      invokeLater(restartAnalyzer(project))
  }

  // Cache file lives in the shared system temp dir, one per project *name*.
  // NOTE(review): predictable path; two same-named projects would collide — confirm acceptable.
  private def deserializeExpansions(): Unit = {
    val file = new File(System.getProperty("java.io.tmpdir") + s"/expansion-${project.getName}")
    if (!file.exists())
      return
    try {
      Using.resource(new ObjectInputStream(new BufferedInputStream(new FileInputStream(file)))) { os =>
        // Unchecked cast: the file is assumed to contain the map we wrote in serializeExpansions.
        collectedExpansions ++= os.readObject().asInstanceOf[collectedExpansions.type]
      }
    } catch {
      case e: Throwable =>
        // A corrupt/incompatible cache is not fatal: log, drop the file, start empty.
        LOG.warn("Filed to deserialize macro expansions, removing cache", e)
        file.delete()
    }
  }

  /** Persists the collected expansions via Java serialization to the temp-dir cache file. */
  def serializeExpansions(): Unit = {
    val file = new File(System.getProperty("java.io.tmpdir") + s"/expansion-${project.getName}")
    try {
      Using.resource(new ObjectOutputStream(new BufferedOutputStream(new FileOutputStream(file)))) {
        _.writeObject(collectedExpansions)
      }
    } catch {
      case e: Throwable =>
        // On failure, clear the in-memory state too so memory and disk stay consistent.
        LOG.warn("Filed to serialize macro expansions, removing cache", e)
        collectedExpansions.clear()
        file.delete()
    }
  }
}
object ReflectExpansionsCollector {
  /** Restarts the daemon code analyzer for every valid selected editor so that
    * freshly collected macro expansions are re-highlighted. */
  def restartAnalyzer(project: Project): Unit = {
    if (project == null || project.isDisposed) return
    // Hoisted: both lookups are per-project, not per-editor.
    val analyzer = DaemonCodeAnalyzer.getInstance(project)
    val psiManager = PsiManager.getInstance(project)
    FileEditorManager.getInstance(project).getSelectedEditors.filter(_.isValid).foreach { editor =>
      // Fix: use foreach, not map — restart is called purely for its side effect
      // and the mapped Option was being discarded.
      Option(psiManager.findFile(editor.getFile)).foreach(analyzer.restart)
    }
  }

  /** Project-level service accessor. */
  def getInstance(project: Project): ReflectExpansionsCollector =
    project.getService(classOf[ReflectExpansionsCollector])
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/macros/expansion/ReflectExpansionsCollector.scala | Scala | apache-2.0 | 3,535 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.yggdrasil
package table
import quasar.blueeyes._, json._
import quasar.precog.{BitSet, MimeType, MimeTypes}
import quasar.precog.common._
import quasar.precog.common.ingest.FileContent
import quasar.precog.util.RawBitSet
import quasar.yggdrasil.bytecode._
import quasar.yggdrasil.util._
import quasar.yggdrasil.table.cf.util.{ Remap, Empty }
import TransSpecModule._
import org.slf4j.Logger
import org.slf4s.Logging
import quasar.precog.util.IOUtils
import scalaz._, Scalaz._, Ordering._
import java.io.File
import java.nio.CharBuffer
import java.time.ZonedDateTime
import scala.annotation.tailrec
import scala.collection.mutable
/** Type aliases binding the framework's abstract operation types to their columnar implementations. */
trait ColumnarTableTypes[M[+ _]] {
  type F1 = CF1
  type F2 = CF2
  type FN = CFN
  type Scanner = CScanner
  type Mapper = CMapper[M]
  type Reducer[α] = CReducer[α]
  type RowId = Int
}
/** Tuning knobs for the columnar table implementation. */
trait ColumnarTableModuleConfig {
  // Hard upper bound on the number of rows per slice.
  def maxSliceSize: Int

  // This is a slice size that we'd like our slices to be at least as large as.
  def minIdealSliceSize: Int = maxSliceSize / 4

  // This is what we consider a "small" slice. This may affect points where
  // we take proactive measures to prevent problems caused by small slices.
  def smallSliceSize: Int

  // Sanity cap on the estimated row count of a cross product.
  def maxSaneCrossSize: Long = 2400000000L // 2.4 billion
}
object ColumnarTableModule extends Logging {
  /** Streams `slices` rendered as JSON, inserting `delimiter` between non-empty
    * renderings and wrapping the whole stream in `prefix`/`suffix` when non-empty. */
  def renderJson[M[+ _]](slices: StreamT[M, Slice], prefix: String, delimiter: String, suffix: String)(implicit M: Monad[M]): StreamT[M, CharBuffer] = {
    def wrap(stream: StreamT[M, CharBuffer]) = {
      if (prefix == "" && suffix == "") stream
      else if (suffix == "") CharBuffer.wrap(prefix) :: stream
      else if (prefix == "") stream ++ (CharBuffer.wrap(suffix) :: StreamT.empty[M, CharBuffer])
      else CharBuffer.wrap(prefix) :: (stream ++ (CharBuffer.wrap(suffix) :: StreamT.empty[M, CharBuffer]))
    }

    // `rendered` records whether any output has been produced yet, so the
    // delimiter is only emitted between two non-empty slice renderings.
    def foldFlatMap(slices: StreamT[M, Slice], rendered: Boolean): StreamT[M, CharBuffer] = {
      StreamT[M, CharBuffer](slices.step map {
        case StreamT.Yield(slice, tail) =>
          val (stream, rendered2) = slice.renderJson[M](delimiter)
          val stream2 = if (rendered && rendered2) CharBuffer.wrap(delimiter) :: stream else stream
          StreamT.Skip(stream2 ++ foldFlatMap(tail(), rendered || rendered2))
        case StreamT.Skip(tail) =>
          StreamT.Skip(foldFlatMap(tail(), rendered))
        case StreamT.Done =>
          StreamT.Done
      })
    }

    wrap(foldFlatMap(slices, false))
  }

  /**
   * This method renders the entire table into a single string,
   * encoded as CSV.
   *
   * In the future we may want something Stream-based, but for now
   * the method seems to be "fast enough" for our purposes.
   *
   * The column headers are currently stringified CPaths. These are
   * introduced introduced slice-by-slice in alphabetical order. So
   * if there is one slice, the headers will be totally
   * alphabetical. If two slices, the alphabetized headers from the
   * first slice are first, and then the other headers (also
   * alphabetized). And so on.
   *
   * The escaping here should match Microsoft's:
   *
   * If a value contains commas, double-quotes, or CR/LF, it will be
   * escaped. To escape a value, it is wrapped in double quotes. Any
   * double-quotes in the value are themselves doubled. So:
   *
   * the fox said: "hello, my name is fred."
   *
   * becomes:
   *
   * "the fox said: ""hello, my name is fred."""
   */
  def renderCsv[M[+ _]](slices: StreamT[M, Slice])(implicit M: Monad[M]): StreamT[M, CharBuffer] = {
    import scala.collection.{ Map => GenMap }

    /**
     * Represents the column headers we have. We track three things:
     *
     * 1. n: the number of headers so far.
     * 2. m: a map from path strings to header position
     * 3. a: an array of path strings used.
     *
     * The class is immutable so as we find new headers we'll create
     * new instances. If this proves to be a problem we could easily
     * make a mutable version.
     */
    class Indices(n: Int, m: GenMap[String, Int], a: Array[String]) {
      def size = n
      def getPaths: Array[String] = a
      def columnForPath(path: String) = m(path)
      // Union of two header sets, preserving this instance's ordering first.
      def combine(that: Indices): Indices = {
        val buf = new mutable.ArrayBuffer[String](a.length)
        buf ++= a
        that.getPaths.foreach(p => if (!m.contains(p)) buf.append(p))
        Indices.fromPaths(buf.toArray)
      }
      override def equals(that: Any): Boolean = that match {
        case that: Indices =>
          val len = n
          if (len != that.size) return false
          var i = 0
          val paths = that.getPaths
          while (i < len) {
            if (a(i) != paths(i)) return false
            i += 1
          }
          true
        case _ =>
          false
      }
      // Emit the CSV header row (comma-separated paths, CRLF-terminated).
      def writeToBuilder(sb: StringBuilder): Unit = {
        if (n == 0) return ()
        sb.append(a(0))
        var i = 1
        val len = n
        while (i < len) { sb.append(','); sb.append(a(i)); i += 1 }
        sb.append("\\r\\n")
      }
    }

    object Indices {
      def empty: Indices = new Indices(0, Map.empty[String, Int], new Array[String](0))
      def fromPaths(ps: Array[String]): Indices = {
        val paths = ps.sorted
        val m = mutable.Map[String, Int]()
        var i = 0
        val len = paths.length
        while (i < len) { m(paths(i)) = i; i += 1 }
        new Indices(len, m, paths)
      }
    }

    // these methods will quote CSV values for us
    // they could probably be a bit faster but are OK so far.
    def quoteIfNeeded(s: String): String = if (needsQuoting(s)) quote(s) else s
    def quote(s: String): String = "\\"" + s.replace("\\"", "\\"\\"") + "\\""
    def needsQuoting(s: String): Boolean = {
      var i = 0
      while (i < s.length) {
        val c = s.charAt(i)
        if (c == ',' || c == '"' || c == '\\r' || c == '\\n') return true
        i += 1
      }
      false
    }

    /**
     * Render a particular column of a slice into an array of
     * Strings, handling any escaping that is needed.
     */
    def renderColumn(col: Column, rows: Int): Array[String] = {
      val arr = new Array[String](rows)
      var row = 0
      while (row < rows) {
        arr(row) =
          if (col.isDefinedAt(row))
            quoteIfNeeded(col.strValue(row))
          else
            ""
        row += 1
      }
      arr
    }

    /**
     * Generate indices for this slice.
     */
    def indicesForSlice(slice: Slice): Indices =
      Indices.fromPaths(slice.columns.keys.map(_.selector.toString).toArray)

    /**
     * Renders a slice into an array of lines, as well as updating
     * our Indices with any previous unseen paths.
     *
     * Since slice's underlying data is column-oriented, we evaluate
     * each column individually, building an array of values. Then
     * we stride across these arrays building our rows (Line
     * objects).
     *
     * Since we know in advance how many rows we have, we can return
     * an array of lines.
     */
    def renderSlice(pastIndices: Option[Indices], slice: Slice): (Indices, CharBuffer) = {
      val indices = indicesForSlice(slice)
      val height = slice.size
      val width = indices.size
      if (width == 0) return (indices, CharBuffer.allocate(0))
      val items = slice.columns.toArray
      val ncols = items.length
      // load each column into strings
      val columns = items.map {
        case (_, col) =>
          renderColumn(col, height)
      }
      // For each column, which output bucket (header position) it lands in.
      val positions = items.map {
        case (ColumnRef(path, _), _) =>
          indices.columnForPath(path.toString)
      }
      val sb = new StringBuilder()
      // Re-emit the header row whenever the header set changes between slices.
      pastIndices match {
        case None => indices.writeToBuilder(sb)
        case Some(ind) =>
          if (ind != indices) {
            sb.append("\\r\\n")
            indices.writeToBuilder(sb)
          }
      }
      var row = 0
      while (row < height) {
        // fill in all the buckets for this particular row
        val buckets = Array.fill(width)("")
        var i = 0
        while (i < ncols) {
          val s = columns(i)(row)
          if (s != "") buckets(positions(i)) = s
          i += 1
        }
        // having filled the buckets, add them to the string builder
        sb.append(buckets(0))
        i = 1
        while (i < width) {
          sb.append(',')
          sb.append(buckets(i))
          i += 1
        }
        sb.append("\\r\\n")
        row += 1
      }
      (indices, CharBuffer.wrap(sb))
    }

    // Thread the previous slice's header set through the stream so header rows
    // are only re-emitted when they change.
    StreamT.unfoldM(slices -> none[Indices]) {
      case (stream, pastIndices) =>
        stream.uncons.map {
          case Some((slice, tail)) =>
            val (indices, cb) = renderSlice(pastIndices, slice)
            some(cb -> (tail -> some(indices)))
          case None =>
            none
        }
    }
  }

  /** Dispatches on the requested MIME type to the matching renderer;
    * unknown types yield an empty stream (with a warning). */
  def toCharBuffers[N[+ _]: Monad](output: MimeType, slices: StreamT[N, Slice]): StreamT[N, CharBuffer] = {
    import FileContent._
    import MimeTypes._
    val AnyMimeType = anymaintype / anysubtype
    output match {
      case ApplicationJson | AnyMimeType => ColumnarTableModule.renderJson(slices, "[", ",", "]")
      case XJsonStream => ColumnarTableModule.renderJson(slices, "", "\\n", "")
      case TextCSV => ColumnarTableModule.renderCsv(slices)
      case other =>
        log.warn("Unrecognized output type requested for conversion of slice stream to char buffers: %s".format(output))
        StreamT.empty[N, CharBuffer]
    }
  }
}
trait ColumnarTableModule[M[+ _]]
extends TableModule[M]
with ColumnarTableTypes[M]
with IdSourceScannerModule
with SliceTransforms[M]
with SamplableColumnarTableModule[M]
with IndicesModule[M] {
import TableModule._
import trans._
type Table <: ColumnarTable
type TableCompanion <: ColumnarTableCompanion
case class TableMetrics(startCount: Int, sliceTraversedCount: Int)
implicit def M: Monad[M]
def newScratchDir(): File = IOUtils.createTmpDir("ctmscratch").unsafePerformIO
def jdbmCommitInterval: Long = 200000l
implicit def liftF1(f: F1) = new F1Like {
def compose(f1: F1) = f compose f1
def andThen(f1: F1) = f andThen f1
}
implicit def liftF2(f: F2) = new F2Like {
def applyl(cv: CValue) = CF1("builtin::liftF2::applyl") { f(Column.const(cv), _) }
def applyr(cv: CValue) = CF1("builtin::liftF2::applyl") { f(_, Column.const(cv)) }
def andThen(f1: F1) = CF2("builtin::liftF2::andThen") { (c1, c2) =>
f(c1, c2) flatMap f1.apply
}
}
  /** Companion operations for columnar tables: constructors for constant tables,
    * slice-stream transformation, grouping/merge, and join/cross entry points. */
  trait ColumnarTableCompanion extends TableCompanionLike {
    def apply(slices: StreamT[M, Slice], size: TableSize): Table

    def singleton(slice: Slice): Table

    implicit def groupIdShow: Show[GroupId] = Show.showFromToString[GroupId]

    def empty: Table = Table(StreamT.empty[M, Slice], ExactSize(0))

    // Infinite table of uniformly distributed doubles driven by the given PRNG.
    def uniformDistribution(init: MmixPrng): Table = {
      val gen: StreamT[M, Slice] = StreamT.unfoldM[M, Slice, MmixPrng](init) { prng =>
        val (column, nextGen) = Column.uniformDistribution(prng)
        Some((Slice(Map(ColumnRef(CPath.Identity, CDouble) -> column), yggConfig.maxSliceSize), nextGen)).point[M]
      }
      Table(gen, InfiniteSize)
    }

    // const* constructors: single-slice, single-column tables of literal values.
    def constBoolean(v: collection.Set[Boolean]): Table = {
      val column = ArrayBoolColumn(v.toArray)
      Table(Slice(Map(ColumnRef(CPath.Identity, CBoolean) -> column), v.size) :: StreamT.empty[M, Slice], ExactSize(v.size))
    }

    def constLong(v: collection.Set[Long]): Table = {
      val column = ArrayLongColumn(v.toArray)
      Table(Slice(Map(ColumnRef(CPath.Identity, CLong) -> column), v.size) :: StreamT.empty[M, Slice], ExactSize(v.size))
    }

    def constDouble(v: collection.Set[Double]): Table = {
      val column = ArrayDoubleColumn(v.toArray)
      Table(Slice(Map(ColumnRef(CPath.Identity, CDouble) -> column), v.size) :: StreamT.empty[M, Slice], ExactSize(v.size))
    }

    def constDecimal(v: collection.Set[BigDecimal]): Table = {
      val column = ArrayNumColumn(v.toArray)
      Table(Slice(Map(ColumnRef(CPath.Identity, CNum) -> column), v.size) :: StreamT.empty[M, Slice], ExactSize(v.size))
    }

    def constString(v: collection.Set[String]): Table = {
      val column = ArrayStrColumn(v.toArray)
      Table(Slice(Map(ColumnRef(CPath.Identity, CString) -> column), v.size) :: StreamT.empty[M, Slice], ExactSize(v.size))
    }

    def constDate(v: collection.Set[ZonedDateTime]): Table = {
      val column = ArrayDateColumn(v.toArray)
      Table(Slice(Map(ColumnRef(CPath.Identity, CDate) -> column), v.size) :: StreamT.empty[M, Slice], ExactSize(v.size))
    }

    def constNull: Table =
      Table(Slice(Map(ColumnRef(CPath.Identity, CNull) -> new InfiniteColumn with NullColumn), 1) :: StreamT.empty[M, Slice], ExactSize(1))

    def constEmptyObject: Table =
      Table(Slice(Map(ColumnRef(CPath.Identity, CEmptyObject) -> new InfiniteColumn with EmptyObjectColumn), 1) :: StreamT.empty[M, Slice], ExactSize(1))

    def constEmptyArray: Table =
      Table(Slice(Map(ColumnRef(CPath.Identity, CEmptyArray) -> new InfiniteColumn with EmptyArrayColumn), 1) :: StreamT.empty[M, Slice], ExactSize(1))

    // Applies a stateful slice transform across the stream, threading its state.
    def transformStream[A](sliceTransform: SliceTransform1[A], slices: StreamT[M, Slice]): StreamT[M, Slice] = {
      def stream(state: A, slices: StreamT[M, Slice]): StreamT[M, Slice] = StreamT(
        for {
          head <- slices.uncons
          back <- {
            head map {
              case (s, sx) => {
                sliceTransform.f(state, s) map {
                  case (nextState, s0) =>
                    StreamT.Yield(s0, stream(nextState, sx))
                }
              }
            } getOrElse {
              M.point(StreamT.Done)
            }
          }
        } yield back
      )
      stream(sliceTransform.initial, slices)
    }

    /**
     * Merge controls the iteration over the table of group key values.
     */
    def merge[N[+ _]](grouping: GroupingSpec)(body: (RValue, GroupId => M[Table]) => N[Table])(implicit nt: N ~> M): M[Table] = {
      import GroupKeySpec.{ dnf, toVector }
      type Key = Seq[RValue]
      type KeySchema = Seq[CPathField]

      // Flatten a conjunction of group key specs into its leaf sources.
      def sources(spec: GroupKeySpec): Seq[GroupKeySpecSource] = (spec: @unchecked) match {
        case GroupKeySpecAnd(left, right) => sources(left) ++ sources(right)
        case src: GroupKeySpecSource => Vector(src)
      }

      // DNF-normalize, then extract (key, spec) pairs per disjunct.
      def mkProjections(spec: GroupKeySpec) =
        toVector(dnf(spec)).map(sources(_).map { s =>
          (s.key, s.spec)
        })

      case class IndexedSource(groupId: GroupId, index: TableIndex, keySchema: KeySchema)

      // Build one TableIndex per (source, disjunct), keyed by its group key columns.
      (for {
        source <- grouping.sources
        groupKeyProjections <- mkProjections(source.groupKeySpec)
        disjunctGroupKeyTransSpecs = groupKeyProjections.map { case (key, spec) => spec }
      } yield {
        TableIndex.createFromTable(source.table, disjunctGroupKeyTransSpecs, source.targetTrans.getOrElse(TransSpec1.Id)).map { index =>
          IndexedSource(source.groupId, index, groupKeyProjections.map(_._1))
        }
      }).sequence.flatMap { sourceKeys =>
        val fullSchema = sourceKeys.flatMap(_.keySchema).distinct
        val indicesGroupedBySource = sourceKeys.groupBy(_.groupId).mapValues(_.map(y => (y.index, y.keySchema)).toSeq).values.toSeq

        // The set of group key values that are consistent across all sources:
        // union over source combinations of the intersection of their keys.
        def unionOfIntersections(indicesGroupedBySource: Seq[Seq[(TableIndex, KeySchema)]]): Set[Key] = {
          def allSourceDNF[T](l: Seq[Seq[T]]): Seq[Seq[T]] = {
            l match {
              case Seq(hd) => hd.map(Seq(_))
              case Seq(hd, tl @ _ *) => {
                for {
                  disjunctHd <- hd
                  disjunctTl <- allSourceDNF(tl)
                } yield disjunctHd +: disjunctTl
              }
              case empty => empty
            }
          }

          // Project each index's keys onto the full schema, padding missing
          // fields with CUndefined.
          def normalizedKeys(index: TableIndex, keySchema: KeySchema): collection.Set[Key] = {
            val schemaMap = for (k <- fullSchema) yield keySchema.indexOf(k)
            for (key <- index.getUniqueKeys)
              yield for (k <- schemaMap) yield if (k == -1) CUndefined else key(k)
          }

          def intersect(keys0: collection.Set[Key], keys1: collection.Set[Key]): collection.Set[Key] = {
            // Two keys are consistent when they agree everywhere both are defined.
            def consistent(key0: Key, key1: Key): Boolean =
              (key0 zip key1).forall {
                case (k0, k1) => k0 == k1 || k0 == CUndefined || k1 == CUndefined
              }

            def merge(key0: Key, key1: Key): Key =
              (key0 zip key1).map {
                case (k0, CUndefined) => k0
                case (_, k1) => k1
              }

            // TODO: This "mini-cross" is much better than the
            // previous mega-cross. However in many situations we
            // could do even less work. Consider further optimization
            // (e.g. when one key schema is a subset of the other).
            // Relatedly it might make sense to topologically sort the
            // Indices by their keyschemas so that we end up intersecting
            // key with their subset.
            keys0.flatMap { key0 =>
              keys1.flatMap(key1 => if (consistent(key0, key1)) Some(merge(key0, key1)) else None)
            }
          }

          allSourceDNF(indicesGroupedBySource).foldLeft(Set.empty[Key]) {
            case (acc, intersection) =>
              val hd = normalizedKeys(intersection.head._1, intersection.head._2)
              acc | intersection.tail.foldLeft(hd) {
                case (keys0, (index1, schema1)) =>
                  val keys1 = normalizedKeys(index1, schema1)
                  intersect(keys0, keys1)
              }
          }
        }

        def jValueFromGroupKey(key: Seq[RValue], cpaths: Seq[CPathField]): RValue = {
          val items = (cpaths zip key).map(t => (t._1.name, t._2))
          RObject(items.toMap)
        }

        val groupKeys: Set[Key] = unionOfIntersections(indicesGroupedBySource)

        // given a groupKey, return an M[Table] which represents running
        // the evaluator on that subgroup.
        def evaluateGroupKey(groupKey: Key): M[Table] = {
          val groupKeyTable = jValueFromGroupKey(groupKey, fullSchema)

          def map(gid: GroupId): M[Table] = {
            val subTableProjections = (sourceKeys
              .filter(_.groupId == gid)
              .map { indexedSource =>
                val keySchema = indexedSource.keySchema
                val projectedKeyIndices = for (k <- fullSchema) yield keySchema.indexOf(k)
                (indexedSource.index, projectedKeyIndices, groupKey)
              })
              .toList
            M.point(TableIndex.joinSubTables(subTableProjections).normalize) // TODO: normalize necessary?
          }

          nt(body(groupKeyTable, map))
        }

        // TODO: this can probably be done as one step, but for now
        // it's probably fine.
        val tables: StreamT[M, Table] = StreamT.unfoldM(groupKeys.toList) {
          case k :: ks =>
            evaluateGroupKey(k).map(t => Some((t, ks)))
          case Nil =>
            M.point(None)
        }

        val slices: StreamT[M, Slice] = tables.flatMap(_.slices)

        M.point(Table(slices, UnknownSize))
      }
    }

    /// Utility Methods ///

    /**
     * Reduce the specified table to obtain the in-memory set of strings representing the vfs paths
     * to be loaded.
     */
    protected def pathsM(table: Table) = {
      table reduce {
        new CReducer[Set[Path]] {
          def reduce(schema: CSchema, range: Range): Set[Path] = {
            schema.columns(JTextT) flatMap {
              case s: StrColumn => range.filter(s.isDefinedAt).map(i => Path(s(i)))
              case _ => Set()
            }
          }
        }
      }
    }

    // Chunk an in-memory stream of RValues into slices of at most `maxSliceSize`
    // (defaulting to the configured maximum).
    def fromRValues(values: Stream[RValue], maxSliceSize: Option[Int] = None): Table = {
      val sliceSize = maxSliceSize.getOrElse(yggConfig.maxSliceSize)

      def makeSlice(data: Stream[RValue]): (Slice, Stream[RValue]) = {
        val (prefix, suffix) = data.splitAt(sliceSize)
        (Slice.fromRValues(prefix), suffix)
      }

      Table(
        StreamT.unfoldM(values) { events =>
          M.point {
            (!events.isEmpty) option {
              makeSlice(events.toStream)
            }
          }
        },
        ExactSize(values.length)
      )
    }

    // Sort both sides on their keys, then cogroup; result is in key order.
    def join(left: Table, right: Table, orderHint: Option[JoinOrder] = None)(leftKeySpec: TransSpec1,
                                                                             rightKeySpec: TransSpec1,
                                                                             joinSpec: TransSpec2): M[(JoinOrder, Table)] = {
      val emptySpec = trans.ConstLiteral(CEmptyArray, Leaf(Source))
      for {
        left0 <- left.sort(leftKeySpec)
        right0 <- right.sort(rightKeySpec)
        cogrouped = left0.cogroup(leftKeySpec, rightKeySpec, right0)(emptySpec, emptySpec, trans.WrapArray(joinSpec))
      } yield {
        JoinOrder.KeyOrder -> cogrouped.transform(trans.DerefArrayStatic(Leaf(Source), CPathIndex(0)))
      }
    }

    // Cross product; honor a right-biased order hint by flipping the spec.
    def cross(left: Table, right: Table, orderHint: Option[CrossOrder] = None)(spec: TransSpec2): M[(CrossOrder, Table)] = {
      import CrossOrder._
      M.point(orderHint match {
        case Some(CrossRight | CrossRightLeft) =>
          CrossRight -> right.cross(left)(TransSpec2.flip(spec))
        case _ =>
          CrossLeft -> left.cross(right)(spec)
      })
    }
  }
abstract class ColumnarTable(slices0: StreamT[M, Slice], val size: TableSize) extends TableLike with SamplableColumnarTable { self: Table =>
import SliceTransform._
private final val readStarts = new java.util.concurrent.atomic.AtomicInteger
private final val blockReads = new java.util.concurrent.atomic.AtomicInteger
val slices = StreamT(
StreamT
.Skip({
readStarts.getAndIncrement
slices0.map(s => { blockReads.getAndIncrement; s })
})
.point[M]
)
/**
* Folds over the table to produce a single value (stored in a singleton table).
*/
    def reduce[A](reducer: Reducer[A])(implicit monoid: Monoid[A]): M[A] = {
      // Fold each per-slice partial result into the accumulator as the stream is consumed.
      def rec(stream: StreamT[M, A], acc: A): M[A] = {
        stream.uncons flatMap {
          case Some((head, tail)) => rec(tail, head |+| acc)
          case None => M.point(acc)
        }
      }
      rec(
        slices map { s =>
          // Expose the slice's columns as a CSchema so the reducer can select by JType.
          val schema = new CSchema {
            def columnRefs = s.columns.keySet
            def columnMap(jtpe: JType) =
              s.columns collect {
                case (ref @ ColumnRef(cpath, ctype), col) if Schema.includes(jtpe, cpath, ctype) =>
                  ref -> col
              }
          }
          reducer.reduce(schema, 0 until s.size)
        },
        monoid.zero
      )
    }
/**
 * Removes rows for which `spec` is undefined (according to `definedness`),
 * then normalizes away any empty slices produced by the filtering.
 */
def compact(spec: TransSpec1, definedness: Definedness = AnyDefined): Table = {
  val filterTransform   = SliceTransform.composeSliceTransform(spec)
  val identityTransform = SliceTransform.composeSliceTransform(Leaf(Source))
  // Pair each data slice with its filter slice and compact the former by the latter.
  val compactingTransform =
    identityTransform.zip(filterTransform) { (dataSlice, filterSlice) =>
      dataSlice.compact(filterSlice, definedness)
    }
  Table(Table.transformStream(compactingTransform, slices), size).normalize
}
/**
 * Applies a one-pass transformation to every row of the table. If the
 * transform is not the identity on sort keys, the result's sort order is
 * unknown. Size is carried through unchanged.
 */
def transform(spec: TransSpec1): Table = {
  val sliceTransform = composeSliceTransform(spec)
  val transformed    = Table.transformStream(sliceTransform, slices)
  Table(transformed, this.size)
}
/**
 * Fully evaluates the table: pulls every slice, materializes each nonempty
 * slice into memory, and drops empty slices. The result has a known exact
 * size and can be traversed repeatedly without re-running upstream effects.
 */
def force: M[Table] = {
  // Accumulate materialized slices in reverse order, counting total rows.
  def loop(slices: StreamT[M, Slice], acc: List[Slice], size: Long): M[(List[Slice], Long)] = slices.uncons flatMap {
    case Some((slice, tail)) if slice.size > 0 =>
      loop(tail, slice.materialized :: acc, size + slice.size)
    case Some((_, tail)) =>
      // Empty slice: skip it entirely.
      loop(tail, acc, size)
    case None =>
      M.point((acc.reverse, size))
  }
  // Natural transformation lifting the pure (Id-based) in-memory stream back into M.
  val former = new (Id.Id ~> M) { def apply[A](a: Id.Id[A]): M[A] = M.point(a) }
  loop(slices, Nil, 0L).map {
    case (stream, size) =>
      Table(StreamT.fromIterable(stream).trans(former), ExactSize(size))
  }
}
/**
 * Re-chunks the table so each input slice is cut into consecutive windows
 * of at most `limit` rows (the last window of a slice may be shorter).
 * The reported size is unchanged.
 */
def paged(limit: Int): Table = {
  val repartitioned = slices flatMap { slice =>
    // Unfold over the starting offset within the current slice.
    StreamT.unfoldM(0) { offset =>
      val next =
        if (offset < slice.size)
          Some((slice.takeRange(offset, limit), offset + limit))
        else
          None
      M.point(next)
    }
  }
  Table(repartitioned, size)
}
/** Appends `t2` after this table, concatenating both slice streams in order. */
def concat(t2: Table): Table = {
  val combinedSlices = slices ++ t2.slices
  val combinedSize   = TableSize(size.maxSize + t2.size.maxSize)
  Table(combinedSlices, combinedSize)
}
/**
 * Zips two tables together in their current sorted order.
 * If the tables are not normalized first and thus have different slices sizes,
 * then since the zipping is done per slice, this can produce a result that is
 * different than if the tables were normalized.
 */
def zip(t2: Table): M[Table] = {
  // Walk both slice streams in lockstep, pairing slices positionally; the
  // stream ends as soon as either side is exhausted.
  def rec(slices1: StreamT[M, Slice], slices2: StreamT[M, Slice]): StreamT[M, Slice] = {
    StreamT(slices1.uncons flatMap {
      case Some((head1, tail1)) =>
        slices2.uncons map {
          case Some((head2, tail2)) =>
            StreamT.Yield(head1 zip head2, rec(tail1, tail2))
          case None =>
            StreamT.Done
        }
      case None =>
        M point StreamT.Done
    })
  }
  // The result can be no larger than the smaller input.
  val resultSize = EstimateSize(0, size.maxSize min t2.size.maxSize)
  M point Table(rec(slices, t2.slices), resultSize)
  // todo investigate why the code below makes all of RandomLibSpecs explode
  // val resultSlices = Apply[({ type l[a] = StreamT[M, a] })#l].zip.zip(slices, t2.slices) map { case (s1, s2) => s1.zip(s2) }
  // Table(resultSlices, resultSize)
}
/** Converts each slice via `Slice.toArray[A]`, preserving the table size. */
def toArray[A](implicit tpe: CValueType[A]): Table = {
  val arraySlices: StreamT[M, Slice] = slices map (_.toArray[A])
  Table(arraySlices, size)
}
/**
 * Returns a table where each slice (except maybe the last) has slice size `length`.
 * Also removes slices of size zero. If an optional `maxLength0` size is provided,
 * then the slices need only land in the range between `length` and `maxLength0`.
 * For slices being loaded from ingest, it is often the case that we are missing a
 * few rows at the end, so we shouldn't be too strict.
 */
def canonicalize(length: Int, maxLength0: Option[Int] = None): Table = {
  val minLength = length
  val maxLength = maxLength0 getOrElse length

  require(maxLength > 0 && minLength >= 0 && maxLength >= minLength, "length bounds must be positive and ordered")

  // Concatenate accumulated fragments (held in reverse order) into a single
  // slice, materializing when there are many small pieces.
  def concat(rslices: List[Slice]): Slice = rslices.reverse match {
    case Nil => Slice(Map.empty, 0)
    case slice :: Nil => slice
    case slices =>
      val slice = Slice.concat(slices)
      if (slices.size > (slice.size / yggConfig.smallSliceSize)) {
        slice.materialized // Deal w/ lots of small slices by materializing them.
      } else {
        slice
      }
  }

  // sliceSize is the number of rows accumulated so far in `acc` (reversed).
  def step(sliceSize: Int, acc: List[Slice], stream: StreamT[M, Slice]): M[StreamT.Step[Slice, StreamT[M, Slice]]] = {
    stream.uncons flatMap {
      case Some((head, tail)) =>
        if (head.size == 0) {
          // Skip empty slices.
          step(sliceSize, acc, tail)
        } else if (sliceSize + head.size >= minLength) {
          // We emit a slice, but the last slice added may fall on a stream boundary.
          val splitAt = math.min(head.size, maxLength - sliceSize)
          if (splitAt < head.size) {
            // Emit up to maxLength rows; the remainder of `head` is pushed
            // back onto the stream for the next emission.
            val (prefix, suffix) = head.split(splitAt)
            val slice = concat(prefix :: acc)
            M.point(StreamT.Yield(slice, StreamT(step(0, Nil, suffix :: tail))))
          } else {
            val slice = concat(head :: acc)
            M.point(StreamT.Yield(slice, StreamT(step(0, Nil, tail))))
          }
        } else {
          // Just keep swimming (aka accumulating).
          step(sliceSize + head.size, head :: acc, tail)
        }

      case None =>
        if (sliceSize > 0) {
          // Flush the remainder even if it is below minLength.
          M.point(StreamT.Yield(concat(acc), StreamT.empty[M, Slice]))
        } else {
          M.point(StreamT.Done)
        }
    }
  }

  Table(StreamT(step(0, Nil, slices)), size)
}
/**
 * Cogroups this table with another table, using equality on the specified
 * transformation on rows of the table.
 *
 * Both inputs must already be sorted by their key transforms. A slice-level
 * state machine pairs equal-key spans (including the cartesian expansion
 * needed when a run of equal keys appears on both sides), routing
 * unmatched left rows through `leftResultTrans`, unmatched right rows
 * through `rightResultTrans`, and matched pairs through `bothResultTrans`.
 */
def cogroup(leftKey: TransSpec1, rightKey: TransSpec1, that: Table)(leftResultTrans: TransSpec1,
                                                                    rightResultTrans: TransSpec1,
                                                                    bothResultTrans: TransSpec2): Table = {

  // println("Cogrouping with respect to\nleftKey: " + leftKey + "\nrightKey: " + rightKey)

  // Accumulates row remappings for a left/right slice pair. Each output row
  // records either a left-only row (lbuf), a right-only row (rbuf), or a
  // matched pair (leqbuf/reqbuf); -1 marks "no row" in the unused buffers.
  class IndexBuffers(lInitialSize: Int, rInitialSize: Int) {
    val lbuf = new ArrayIntList(lInitialSize)
    val rbuf = new ArrayIntList(rInitialSize)
    val leqbuf = new ArrayIntList(lInitialSize max rInitialSize)
    val reqbuf = new ArrayIntList(lInitialSize max rInitialSize)

    @inline def advanceLeft(lpos: Int): Unit = {
      lbuf.add(lpos)
      rbuf.add(-1)
      leqbuf.add(-1)
      reqbuf.add(-1)
    }

    @inline def advanceRight(rpos: Int): Unit = {
      lbuf.add(-1)
      rbuf.add(rpos)
      leqbuf.add(-1)
      reqbuf.add(-1)
    }

    @inline def advanceBoth(lpos: Int, rpos: Int): Unit = {
      // println("advanceBoth: lpos = %d, rpos = %d" format (lpos, rpos))
      lbuf.add(-1)
      rbuf.add(-1)
      leqbuf.add(lpos)
      reqbuf.add(rpos)
    }

    // Applies the accumulated remappings to the slice pair, runs the three
    // result transforms, and zips their outputs into one result slice,
    // returning the updated transform states alongside.
    def cogrouped[LR, RR, BR](lslice: Slice,
                              rslice: Slice,
                              leftTransform: SliceTransform1[LR],
                              rightTransform: SliceTransform1[RR],
                              bothTransform: SliceTransform2[BR]): M[(Slice, LR, RR, BR)] = {

      val remappedLeft = lslice.remap(lbuf)
      val remappedRight = rslice.remap(rbuf)
      val remappedLeq = lslice.remap(leqbuf)
      val remappedReq = rslice.remap(reqbuf)

      for {
        pairL <- leftTransform(remappedLeft)
        (ls0, lx) = pairL
        pairR <- rightTransform(remappedRight)
        (rs0, rx) = pairR
        pairB <- bothTransform(remappedLeq, remappedReq)
        (bs0, bx) = pairB
      } yield {
        // All four buffers always grow in lockstep, so the three remapped
        // outputs must agree on size.
        assert(lx.size == rx.size && rx.size == bx.size)
        val resultSlice = lx zip rx zip bx
        (resultSlice, ls0, rs0, bs0)
      }
    }

    override def toString = {
      "left: " + lbuf.toArray.mkString("[", ",", "]") + "\n" +
        "right: " + rbuf.toArray.mkString("[", ",", "]") + "\n" +
        "both: " + (leqbuf.toArray zip reqbuf.toArray).mkString("[", ",", "]")
    }
  }

  // Monotonic identifier for slices pulled from either stream; used to tell
  // whether a saved cartesian position still refers to the current slice.
  final case class SliceId(id: Int) {
    def +(n: Int): SliceId = SliceId(id + n)
  }

  case class SlicePosition[K](sliceId: SliceId,
                              /** The position in the current slice. This will only be nonzero when the slice has been appended
                                * to as a result of a cartesian crossing the slice boundary */
                              pos: Int,
                              /** Present if not in a final right or left run. A pair of a key slice that is parallel to the
                                * current data slice, and the value that is needed as input to sltk or srtk to produce the next key. */
                              keyState: K,
                              key: Slice,
                              /** The current slice to be operated upon. */
                              data: Slice,
                              /** The remainder of the stream to be operated upon. */
                              tail: StreamT[M, Slice])

  // Instructions handed back from the tight loop (buildRemappings) to the
  // monadic driver (continue) whenever a slice boundary must be crossed.
  sealed trait NextStep[A, B]
  case class SplitLeft[A, B](lpos: Int) extends NextStep[A, B]
  case class SplitRight[A, B](rpos: Int) extends NextStep[A, B]

  case class NextCartesianLeft[A, B](left: SlicePosition[A],
                                     right: SlicePosition[B],
                                     rightStart: Option[SlicePosition[B]],
                                     rightEnd: Option[SlicePosition[B]])
      extends NextStep[A, B]

  case class NextCartesianRight[A, B](left: SlicePosition[A],
                                      right: SlicePosition[B],
                                      rightStart: Option[SlicePosition[B]],
                                      rightEnd: Option[SlicePosition[B]])
      extends NextStep[A, B]

  case class SkipRight[A, B](left: SlicePosition[A], rightEnd: SlicePosition[B]) extends NextStep[A, B]
  case class RestartRight[A, B](left: SlicePosition[A], rightStart: SlicePosition[B], rightEnd: SlicePosition[B]) extends NextStep[A, B]

  // stlk/strk compute the left/right sort keys; stlr/strr/stbr compute the
  // left-only, right-only, and matched-pair results respectively.
  def cogroup0[LK, RK, LR, RR, BR](stlk: SliceTransform1[LK],
                                   strk: SliceTransform1[RK],
                                   stlr: SliceTransform1[LR],
                                   strr: SliceTransform1[RR],
                                   stbr: SliceTransform2[BR]) = {

    sealed trait CogroupState
    case class EndLeft(lr: LR, lhead: Slice, ltail: StreamT[M, Slice]) extends CogroupState

    case class Cogroup(lr: LR,
                       rr: RR,
                       br: BR,
                       left: SlicePosition[LK],
                       right: SlicePosition[RK],
                       rightStart: Option[SlicePosition[RK]],
                       rightEnd: Option[SlicePosition[RK]])
        extends CogroupState

    case class EndRight(rr: RR, rhead: Slice, rtail: StreamT[M, Slice]) extends CogroupState
    case object CogroupDone extends CogroupState

    // step is the continuation function fed to uncons. It is called once for each emitted slice
    def step(state: CogroupState): M[Option[(Slice, CogroupState)]] = {

      // step0 is the inner monadic recursion needed to cross slice boundaries within the emission of a slice
      def step0(lr: LR,
                rr: RR,
                br: BR,
                leftPosition: SlicePosition[LK],
                rightPosition: SlicePosition[RK],
                rightStart0: Option[SlicePosition[RK]],
                rightEnd0: Option[SlicePosition[RK]])(
          ibufs: IndexBuffers = new IndexBuffers(leftPosition.key.size, rightPosition.key.size)): M[Option[(Slice, CogroupState)]] = {

        val SlicePosition(lSliceId, lpos0, lkstate, lkey, lhead, ltail) = leftPosition
        val SlicePosition(rSliceId, rpos0, rkstate, rkey, rhead, rtail) = rightPosition

        val comparator = Slice.rowComparatorFor(lkey, rkey) {
          // since we've used the key transforms, and since transforms are contracturally
          // forbidden from changing slice size, we can just use all
          _.columns.keys map (_.selector)
        }

        // the inner tight loop; this will recur while we're within the bounds of
        // a pair of slices. Any operation that must cross slice boundaries
        // must exit this inner loop and recur through the outer monadic loop
        // xrstart is an int with sentinel value for effieiency, but is Option at the slice level.
        @inline
        @tailrec
        def buildRemappings(lpos: Int,
                            rpos: Int,
                            rightStart: Option[SlicePosition[RK]],
                            rightEnd: Option[SlicePosition[RK]],
                            endRight: Boolean): NextStep[LK, RK] = {
          // println("lpos = %d, rpos = %d, rightStart = %s, rightEnd = %s, endRight = %s" format (lpos, rpos, rightStart, rightEnd, endRight))
          // println("Left key: " + lkey.toJson(lpos))
          // println("Right key: " + rkey.toJson(rpos))
          // println("Left data: " + lhead.toJson(lpos))
          // println("Right data: " + rhead.toJson(rpos))

          rightStart match {
            case Some(resetMarker @ SlicePosition(rightStartSliceId, rightStartPos, _, rightStartSlice, _, _)) =>
              // We're currently in a cartesian.
              if (lpos < lhead.size && rpos < rhead.size) {
                comparator.compare(lpos, rpos) match {
                  case LT if rightStartSliceId == rSliceId =>
                    // Left advanced past the span: rewind the right to the
                    // span start and pair the next left row against it.
                    buildRemappings(lpos + 1, rightStartPos, rightStart, Some(rightPosition.copy(pos = rpos)), endRight)
                  case LT =>
                    // Transition to emit the current slice and reset the right side, carry rightPosition through
                    RestartRight(leftPosition.copy(pos = lpos + 1), resetMarker, rightPosition.copy(pos = rpos))
                  case GT =>
                    // catch input-out-of-order errors early
                    rightEnd match {
                      case None =>
                        // println("lhead\n" + lkey.toJsonString())
                        // println("rhead\n" + rkey.toJsonString())
                        sys.error(
                          "Inputs are not sorted; value on the left exceeded value on the right at the end of equal span. lpos = %d, rpos = %d"
                            .format(lpos, rpos))

                      case Some(SlicePosition(endSliceId, endPos, _, endSlice, _, _)) if endSliceId == rSliceId =>
                        // Span finished within the current right slice:
                        // resume normal merging just past it.
                        buildRemappings(lpos, endPos, None, None, endRight)

                      case Some(rend @ SlicePosition(endSliceId, _, _, _, _, _)) =>
                        // Step out of buildRemappings so that we can restart with the current rightEnd
                        SkipRight(leftPosition.copy(pos = lpos), rend)
                    }
                  case EQ =>
                    ibufs.advanceBoth(lpos, rpos)
                    buildRemappings(lpos, rpos + 1, rightStart, rightEnd, endRight)
                }
              } else if (lpos < lhead.size) {
                if (endRight) {
                  // println(s"Restarting right: lpos = ${lpos + 1}; rpos = $rpos")
                  RestartRight(leftPosition.copy(pos = lpos + 1), resetMarker, rightPosition.copy(pos = rpos))
                } else {
                  // right slice is exhausted, so we need to emit that slice from the right tail
                  // then continue in the cartesian
                  NextCartesianRight(leftPosition.copy(pos = lpos), rightPosition.copy(pos = rpos), rightStart, rightEnd)
                }
              } else if (rpos < rhead.size) {
                // left slice is exhausted, so we need to emit that slice from the left tail
                // then continue in the cartesian
                NextCartesianLeft(leftPosition, rightPosition.copy(pos = rpos), rightStart, rightEnd)
              } else {
                sys.error("This state should be unreachable, since we only increment one side at a time.")
              }

            case None =>
              // not currently in a cartesian, hence we can simply proceed.
              if (lpos < lhead.size && rpos < rhead.size) {
                comparator.compare(lpos, rpos) match {
                  case LT =>
                    ibufs.advanceLeft(lpos)
                    buildRemappings(lpos + 1, rpos, None, None, endRight)
                  case GT =>
                    ibufs.advanceRight(rpos)
                    buildRemappings(lpos, rpos + 1, None, None, endRight)
                  case EQ =>
                    // First match of a potential equal-key span: remember the
                    // right position so the span can be replayed (cartesian).
                    ibufs.advanceBoth(lpos, rpos)
                    buildRemappings(lpos, rpos + 1, Some(rightPosition.copy(pos = rpos)), None, endRight)
                }
              } else if (lpos < lhead.size) {
                // right side is exhausted, so we should just split the left and emit
                SplitLeft(lpos)
              } else if (rpos < rhead.size) {
                // left side is exhausted, so we should just split the right and emit
                SplitRight(rpos)
              } else {
                sys.error("This state should be unreachable, since we only increment one side at a time.")
              }
          }
        }

        // Executes the boundary-crossing instruction produced by
        // buildRemappings, emitting a completed slice and the next state.
        def continue(nextStep: NextStep[LK, RK]): M[Option[(Slice, CogroupState)]] = nextStep match {
          case SplitLeft(lpos) =>
            val (lpref, lsuf) = lhead.split(lpos)
            val (_, lksuf) = lkey.split(lpos)
            ibufs.cogrouped(lpref, rhead, SliceTransform1[LR](lr, stlr.f), SliceTransform1[RR](rr, strr.f), SliceTransform2[BR](br, stbr.f)) flatMap {
              case (completeSlice, lr0, rr0, br0) => {
                rtail.uncons flatMap {
                  case Some((nextRightHead, nextRightTail)) =>
                    strk.f(rkstate, nextRightHead) map {
                      case (rkstate0, rkey0) => {
                        val nextState = Cogroup(
                          lr0,
                          rr0,
                          br0,
                          SlicePosition(lSliceId, 0, lkstate, lksuf, lsuf, ltail),
                          SlicePosition(rSliceId + 1, 0, rkstate0, rkey0, nextRightHead, nextRightTail),
                          None,
                          None)

                        Some(completeSlice -> nextState)
                      }
                    }

                  case None =>
                    val nextState = EndLeft(lr0, lsuf, ltail)
                    M.point(Some(completeSlice -> nextState))
                }
              }
            }

          case SplitRight(rpos) =>
            val (rpref, rsuf) = rhead.split(rpos)
            val (_, rksuf) = rkey.split(rpos)

            ibufs.cogrouped(lhead, rpref, SliceTransform1[LR](lr, stlr.f), SliceTransform1[RR](rr, strr.f), SliceTransform2[BR](br, stbr.f)) flatMap {
              case (completeSlice, lr0, rr0, br0) => {
                ltail.uncons flatMap {
                  case Some((nextLeftHead, nextLeftTail)) =>
                    stlk.f(lkstate, nextLeftHead) map {
                      case (lkstate0, lkey0) => {
                        val nextState = Cogroup(
                          lr0,
                          rr0,
                          br0,
                          SlicePosition(lSliceId + 1, 0, lkstate0, lkey0, nextLeftHead, nextLeftTail),
                          SlicePosition(rSliceId, 0, rkstate, rksuf, rsuf, rtail),
                          None,
                          None)

                        Some(completeSlice -> nextState)
                      }
                    }

                  case None =>
                    val nextState = EndRight(rr0, rsuf, rtail)
                    M.point(Some(completeSlice -> nextState))
                }
              }
            }

          case NextCartesianLeft(left, right, rightStart, rightEnd) =>
            left.tail.uncons flatMap {
              case Some((nextLeftHead, nextLeftTail)) =>
                ibufs
                  .cogrouped(left.data, right.data, SliceTransform1[LR](lr, stlr.f), SliceTransform1[RR](rr, strr.f), SliceTransform2[BR](br, stbr.f)) flatMap {
                  case (completeSlice, lr0, rr0, br0) => {
                    stlk.f(lkstate, nextLeftHead) map {
                      case (lkstate0, lkey0) => {
                        val nextState =
                          Cogroup(lr0, rr0, br0, SlicePosition(lSliceId + 1, 0, lkstate0, lkey0, nextLeftHead, nextLeftTail), right, rightStart, rightEnd)

                        Some(completeSlice -> nextState)
                      }
                    }
                  }
                }

              case None =>
                (rightStart, rightEnd) match {
                  case (Some(_), Some(end)) =>
                    // Left stream ended mid-cartesian: emit the pairs built so
                    // far, then drain the right from just past the span.
                    val (rpref, rsuf) = end.data.split(end.pos)

                    ibufs
                      .cogrouped(left.data, rpref, SliceTransform1[LR](lr, stlr.f), SliceTransform1[RR](rr, strr.f), SliceTransform2[BR](br, stbr.f)) map {
                      case (completeSlice, lr0, rr0, br0) => {
                        val nextState = EndRight(rr0, rsuf, end.tail)
                        Some(completeSlice -> nextState)
                      }
                    }

                  case _ =>
                    ibufs.cogrouped(
                      left.data,
                      right.data,
                      SliceTransform1[LR](lr, stlr.f),
                      SliceTransform1[RR](rr, strr.f),
                      SliceTransform2[BR](br, stbr.f)) map {
                      case (completeSlice, lr0, rr0, br0) =>
                        Some(completeSlice -> CogroupDone)
                    }
                }
            }

          case NextCartesianRight(left, right, rightStart, rightEnd) =>
            right.tail.uncons flatMap {
              case Some((nextRightHead, nextRightTail)) =>
                ibufs
                  .cogrouped(left.data, right.data, SliceTransform1[LR](lr, stlr.f), SliceTransform1[RR](rr, strr.f), SliceTransform2[BR](br, stbr.f)) flatMap {
                  case (completeSlice, lr0, rr0, br0) => {
                    strk.f(rkstate, nextRightHead) map {
                      case (rkstate0, rkey0) => {
                        val nextState =
                          Cogroup(lr0, rr0, br0, left, SlicePosition(rSliceId + 1, 0, rkstate0, rkey0, nextRightHead, nextRightTail), rightStart, rightEnd)

                        Some(completeSlice -> nextState)
                      }
                    }
                  }
                }

              case None =>
                // No more right slices: re-run the loop with endRight = true
                // so the remaining left rows restart against the saved span.
                continue(buildRemappings(left.pos, right.pos, rightStart, rightEnd, true))
            }

          case SkipRight(left, rightEnd) =>
            step0(lr, rr, br, left, rightEnd, None, None)()

          case RestartRight(left, rightStart, rightEnd) =>
            ibufs.cogrouped(
              left.data,
              rightPosition.data,
              SliceTransform1[LR](lr, stlr.f),
              SliceTransform1[RR](rr, strr.f),
              SliceTransform2[BR](br, stbr.f)) map {
              case (completeSlice, lr0, rr0, br0) => {
                val nextState = Cogroup(lr0, rr0, br0, left, rightStart, Some(rightStart), Some(rightEnd))
                // println(s"Computing restart state as $nextState")
                Some(completeSlice -> nextState)
              }
            }
        }

        continue(buildRemappings(lpos0, rpos0, rightStart0, rightEnd0, false))
      } // end of step0

      state match {
        case EndLeft(lr, data, tail) =>
          // Only the left remains: stream it through the left-result transform.
          stlr.f(lr, data) flatMap {
            case (lr0, leftResult) => {
              tail.uncons map { unconsed =>
                Some(leftResult -> (unconsed map { case (nhead, ntail) => EndLeft(lr0, nhead, ntail) } getOrElse CogroupDone))
              }
            }
          }

        case Cogroup(lr, rr, br, left, right, rightReset, rightEnd) =>
          step0(lr, rr, br, left, right, rightReset, rightEnd)()

        case EndRight(rr, data, tail) =>
          // Only the right remains: stream it through the right-result transform.
          strr.f(rr, data) flatMap {
            case (rr0, rightResult) => {
              tail.uncons map { unconsed =>
                Some(rightResult -> (unconsed map { case (nhead, ntail) => EndRight(rr0, nhead, ntail) } getOrElse CogroupDone))
              }
            }
          }

        case CogroupDone => M.point(None)
      }
    } // end of step

    val initialState = for {
      // We have to compact both sides to avoid any rows for which the key is completely undefined
      leftUnconsed <- self.compact(leftKey).slices.uncons
      rightUnconsed <- that.compact(rightKey).slices.uncons

      back <- {
        val cogroup = for {
          (leftHead, leftTail) <- leftUnconsed
          (rightHead, rightTail) <- rightUnconsed
        } yield {
          for {
            pairL <- stlk(leftHead)
            (lkstate, lkey) = pairL
            pairR <- strk(rightHead)
            (rkstate, rkey) = pairR
          } yield {
            Cogroup(
              stlr.initial,
              strr.initial,
              stbr.initial,
              SlicePosition(SliceId(0), 0, lkstate, lkey, leftHead, leftTail),
              SlicePosition(SliceId(0), 0, rkstate, rkey, rightHead, rightTail),
              None,
              None)
          }
        }

        // If either side is empty, fall back to streaming the nonempty side.
        val optM = cogroup orElse {
          leftUnconsed map {
            case (head, tail) => EndLeft(stlr.initial, head, tail)
          } map { M point _ }
        } orElse {
          rightUnconsed map {
            case (head, tail) => EndRight(strr.initial, head, tail)
          } map { M point _ }
        }

        optM map { m =>
          m map { Some(_) }
        } getOrElse {
          M.point(None)
        }
      }
    } yield back

    Table(StreamT.wrapEffect(initialState map { state =>
      StreamT.unfoldM[M, Slice, CogroupState](state getOrElse CogroupDone)(step)
    }), UnknownSize)
  }

  cogroup0(
    composeSliceTransform(leftKey),
    composeSliceTransform(rightKey),
    composeSliceTransform(leftResultTrans),
    composeSliceTransform(rightResultTrans),
    composeSliceTransform2(bothResultTrans))
}
/**
 * Performs a full cartesian cross on this table with the specified table,
 * applying the specified transformation to merge the two tables into
 * a single table.
 */
def cross(that: Table)(spec: TransSpec2): Table = {
  def cross0[A](transform: SliceTransform2[A]): M[StreamT[M, Slice]] = {
    case class CrossState(a: A, position: Int, tail: StreamT[M, Slice])

    // Both sides fit in a single slice each: build the full product eagerly,
    // packing several left rows into each output slice.
    def crossBothSingle(lhead: Slice, rhead: Slice)(a0: A): M[(A, StreamT[M, Slice])] = {

      // We try to fill out the slices as much as possible, so we work with
      // several rows from the left at a time.

      val lrowsPerSlice = math.max(1, yggConfig.maxSliceSize / rhead.size)
      val sliceSize = lrowsPerSlice * rhead.size

      // Note that this is still memory efficient, as the columns are re-used
      // between all slices.

      val results = (0 until lhead.size by lrowsPerSlice).foldLeft(M.point((a0, List.empty[Slice]))) {
        case (accM, offset) =>
          accM flatMap {
            case (a, acc) =>
              val rows = math.min(sliceSize, (lhead.size - offset) * rhead.size)

              // Each left row is repeated rhead.size times (integer division).
              val lslice = new Slice {
                val size = rows
                val columns = lhead.columns.lazyMapValues(Remap({ i =>
                  offset + (i / rhead.size)
                })(_).get)
              }

              // Right rows cycle via modulus; an empty right side yields
              // empty columns.
              val rslice = new Slice {
                val size = rows
                val columns =
                  if (rhead.size == 0)
                    rhead.columns.lazyMapValues(Empty(_).get)
                  else
                    rhead.columns.lazyMapValues(Remap(_ % rhead.size)(_).get)
              }

              transform.f(a, lslice, rslice) map {
                case (b, resultSlice) =>
                  (b, resultSlice :: acc)
              }
          }
      }

      results map {
        case (a1, slices) =>
          val sliceStream = slices.reverse.toStream
          (a1, StreamT.fromStream(M.point(sliceStream)))
      }
    }

    // Single small left slice against a large right stream: re-walk the
    // right stream once per left row.
    def crossLeftSingle(lhead: Slice, right: StreamT[M, Slice])(a0: A): StreamT[M, Slice] = {
      def step(state: CrossState): M[Option[(Slice, CrossState)]] = {
        if (state.position < lhead.size) {
          state.tail.uncons flatMap {
            case Some((rhead, rtail0)) =>
              // Replicate the current left row across the whole right slice.
              val lslice = new Slice {
                val size = rhead.size
                val columns = lhead.columns.lazyMapValues(Remap(i => state.position)(_).get)
              }

              transform.f(state.a, lslice, rhead) map {
                case (a0, resultSlice) =>
                  Some((resultSlice, CrossState(a0, state.position, rtail0)))
              }

            case None =>
              // Right stream exhausted: advance to the next left row and
              // restart the right stream from the beginning.
              step(CrossState(state.a, state.position + 1, right))
          }
        } else {
          M.point(None)
        }
      }

      StreamT.unfoldM(CrossState(a0, 0, right))(step _)
    }

    // Single small right slice against a large left stream: cross each left
    // slice in memory via crossBothSingle, threading the transform state.
    def crossRightSingle(left: StreamT[M, Slice], rhead: Slice)(a0: A): StreamT[M, Slice] = {
      StreamT(left.uncons flatMap {
        case Some((lhead, ltail0)) =>
          crossBothSingle(lhead, rhead)(a0) map {
            case (a1, prefix) =>
              StreamT.Skip(prefix ++ crossRightSingle(ltail0, rhead)(a1))
          }

        case None =>
          M.point(StreamT.Done)
      })
    }

    def crossBoth(ltail: StreamT[M, Slice], rtail: StreamT[M, Slice]): StreamT[M, Slice] = {
      // This doesn't carry the Transform's state around, so, I think it is broken.
      ltail.flatMap(crossLeftSingle(_, rtail)(transform.initial))
    }

    // We canonicalize the tables so that no slices are too small.
    val left = this.canonicalize(yggConfig.minIdealSliceSize, Some(yggConfig.maxSliceSize))
    val right = that.canonicalize(yggConfig.minIdealSliceSize, Some(yggConfig.maxSliceSize))

    left.slices.uncons flatMap {
      case Some((lhead, ltail)) =>
        right.slices.uncons flatMap {
          case Some((rhead, rtail)) =>
            for {
              lempty <- ltail.isEmpty //TODO: Scalaz result here is negated from what it should be!
              rempty <- rtail.isEmpty

              back <- {
                // println(s"checking cross: lempty = $lempty; rempty = $rempty")
                if (lempty && rempty) {
                  // both are small sets, so find the cross in memory
                  crossBothSingle(lhead, rhead)(transform.initial) map { _._2 }
                } else if (lempty) {
                  // left side is a small set, so restart it in memory
                  M.point(crossLeftSingle(lhead, rhead :: rtail)(transform.initial))
                } else if (rempty) {
                  // right side is a small set, so restart it in memory
                  M.point(crossRightSingle(lhead :: ltail, rhead)(transform.initial))
                } else {
                  // both large sets, so just walk the left restarting the right.
                  M.point(crossBoth(lhead :: ltail, rhead :: rtail))
                }
              }
            } yield back

          case None => M.point(StreamT.empty[M, Slice])
        }

      case None => M.point(StreamT.empty[M, Slice])
    }
  }

  // TODO: We should be able to fully compute the size of the result above.
  // NOTE(review): the minimum bound below uses `max` rather than `*`;
  // presumably a deliberately loose lower estimate — confirm.
  val newSize = (size, that.size) match {
    case (ExactSize(l), ExactSize(r)) => TableSize(l max r, l * r)
    case (EstimateSize(ln, lx), ExactSize(r)) => TableSize(ln max r, lx * r)
    case (ExactSize(l), EstimateSize(rn, rx)) => TableSize(l max rn, l * rx)
    case _ => UnknownSize // Bail on anything else for now (see above TODO)
  }

  Table(StreamT(cross0(composeSliceTransform2(spec)) map { tail =>
    StreamT.Skip(tail)
  }), newSize)
}
/**
 * Flattens the array/object value at `focus` so that each inner element
 * becomes its own row. At `focus` each emitted row carries a two-element
 * array: index 0 holds the field name (CString) or array index (CLong),
 * index 1 holds the element's value. Columns outside the focus are
 * replicated across the exploded rows and filtered to the rows where some
 * focused value is defined (inner-concat semantics).
 */
def leftShift(focus: CPath): Table = {
  // Splits a slice's columns into those under `focus` (with the prefix
  // stripped, scalar values at the focus itself discarded) and the rest.
  def lens(columns: Map[ColumnRef, Column]): (Map[ColumnRef, Column], Map[ColumnRef, Column]) = {
    val (focused, unfocused) = columns.partition(_._1.selector.hasPrefix(focus))

    // discard scalar values at focus
    val typed = focused filter {
      case (ColumnRef(`focus`, CArrayType(_)), _) => true
      case (ColumnRef(`focus`, _), _) => false
      case _ => true
    }

    // remap focused to be at "root"
    val remapped = typed map {
      case (ColumnRef(path, tpe), col) =>
        (ColumnRef(path.dropPrefix(focus).get, tpe), col)
    }

    (remapped, unfocused)
  }

  // eagerly force the slices, since we'll be backtracking within each
  val slices2 = slices.map(_.materialized) flatMap { slice =>
    val (focused, unfocused) = lens(slice.columns)

    // The distinct first path components under focus, in a stable order.
    val innerHeads = {
      val unsorted = focused.keys.toVector flatMap {
        case ColumnRef(path, _) => path.head.toVector
      }

      // sort the index lexicographically except in the case of indices
      unsorted sortWith {
        case (CPathIndex(i1), CPathIndex(i2)) => i1 < i2
        case (p1, p2) => p1.toString < p2.toString
      }
    }

    val innerIndex = Map(innerHeads.zipWithIndex: _*)

    // Maximum element count over homogeneous-array columns at the focus.
    val primitiveWaterMarks = focused.toList collect {
      case (ColumnRef(CPath.Identity, CArrayType(_)), col: HomogeneousArrayColumn[a]) =>
        (0 until slice.size).map(col(_).length).max
    }

    // .max doesn't work, because Scala doesn't understand monoids
    val primitiveMax = primitiveWaterMarks.fold(0)(math.max)

    // TODO doesn't handle the case where we have a sparse array with a missing column!
    // this value may be 0 if we're looking at CEmptyObject | CEmptyArray
    // Each input row expands into highWaterMark output rows.
    val highWaterMark = math.max(innerHeads.length, primitiveMax)

    val resplit = if (slice.size * highWaterMark > yggConfig.maxSliceSize) {
      val numSplits =
        math.ceil((slice.size * highWaterMark).toDouble / yggConfig.maxSliceSize).toInt

      val size = math.ceil(yggConfig.maxSliceSize.toDouble / highWaterMark).toInt

      // we repeatedly apply windowing to slice. this avoids linear delegation through Remap
      val acc = (0 until numSplits).foldLeft(Vector.empty[Slice]) {
        case (acc, split) =>
          acc :+ slice.takeRange(size * split, size)
      }

      acc.filterNot(_.isEmpty)
    } else {
      Vector(slice)
    }

    // shadow the outer `slice`
    StreamT.fromIterable(resplit).trans(λ[Id.Id ~> M](M.point(_))) map { slice =>
      // ...and the outer `focused` and `unfocused`
      val (focused, unfocused) = lens(slice.columns)

      // a CF1 for inflating column sizes to account for shifting
      val expansion = cf.util.Remap(_ / highWaterMark)

      // expand all of the unfocused columns, then mostly leave them alone
      val unfocusedExpanded = unfocused map {
        case (ref, col) => ref -> expansion(col).get
      }

      val remapped: List[(ColumnRef, Column)] = focused.toList map {
        // NOTE(review): homogeneous-array columns at the focus are counted in
        // highWaterMark above but unimplemented here — hitting this case throws.
        case (ColumnRef(CPath.Identity, CArrayType(tpe)), col: HomogeneousArrayColumn[a]) =>
          ???

        // because of how we have defined things, path is guaranteed NOT to be Identity now
        case (ColumnRef(path, tpe), col) =>
          val head = path.head.get
          val locus = innerIndex(head)

          // explode column and then sparsen by mod ring
          val expanded =
            expansion.andThen(cf.util.filterBy(_ % highWaterMark == locus))(col).get

          ColumnRef(path.dropPrefix(head).get, tpe) -> expanded
      }

      // put together all the same-ref columns which now are mapped to the same path
      val merged: Map[ColumnRef, Column] = remapped.groupBy(_._1).map({
        // the key here is the column ref; the value is the list of same-type pairs
        case (ref, toMerge) =>
          ref -> toMerge.map(_._2).reduce(cf.util.UnionRight(_, _).get)
      })(collection.breakOut)

      // figure out the definedness of the exploded, filtered result
      // this is necessary so we can implement inner-concat semantics
      val definedness: BitSet =
        merged.values.map(_.definedAt(0, slice.size * highWaterMark)).reduceOption(_ | _).getOrElse(new BitSet)

      // move all of our results into second index of an array
      val indexed = merged map {
        case (ColumnRef(path, tpe), col) =>
          ColumnRef(1 \: path, tpe) -> col
      }

      val refinedHeads = innerHeads collect {
        case CPathField(field) => -\/(field)
        case CPathIndex(idx) => \/-(idx)
      }

      val hasFields = refinedHeads.exists(_.isLeft)
      val hasIndices = refinedHeads.exists(_.isRight)

      // generate the field names column
      val fieldsCol = if (hasFields) {
        val loci = refinedHeads.zipWithIndex collect {
          case (-\/(_), i) => i
        } toSet

        val col = new StrColumn {
          def apply(row: Int) = {
            val -\/(back) = refinedHeads(row % refinedHeads.length)
            back
          }

          def isDefinedAt(row: Int) =
            loci(row % refinedHeads.length) && definedness(row)
        }

        Some(col)
      } else {
        None
      }

      // generate the array indices column
      val indicesCol = if (hasIndices) {
        val loci = refinedHeads.zipWithIndex collect {
          case (\/-(_), i) => i
        } toSet

        val col = new LongColumn {
          def apply(row: Int) = {
            val \/-(back) = refinedHeads(row % refinedHeads.length)
            back
          }

          def isDefinedAt(row: Int) =
            loci(row % refinedHeads.length) && definedness(row)
        }

        Some(col)
      } else {
        None
      }

      // put the fields and index columns into the same path, in the first index of the array
      val fassigned = fieldsCol.map(col => ColumnRef(CPathIndex(0), CString) -> col).toList
      val iassigned = indicesCol.map(col => ColumnRef(CPathIndex(0), CLong) -> col).toList

      // merge them together to produce the heterogeneous output
      val idCols = Map(fassigned ++ iassigned: _*)

      // put the focus prefix BACK on the results and ids (which are now in an array together)
      val focusedTransformed = (indexed ++ idCols) map {
        case (ColumnRef(path, tpe), col) =>
          ColumnRef(focus \ path, tpe) -> col
      }

      // we need to go back to our original columns and filter them by results
      // if we don't do this, the data will be highly sparse (like an outer join)
      val unfocusedTransformed = unfocusedExpanded map {
        case (ref, col) =>
          ref -> cf.util.filter(0, slice.size * highWaterMark, definedness)(col).get
      }

      // glue everything back together with the unfocused and compute the new size
      Slice(focusedTransformed ++ unfocusedTransformed, slice.size * highWaterMark)
    }
  }

  // without peaking into the effects, we don't know exactly what has happened
  // it's possible that there were no vector values, or those vectors were all
  // cardinality 0, in which case the table will have shrunk (perhaps to empty!)
  Table(slices2, UnknownSize)
}
/**
 * Yields a new table with distinct rows. Assumes this table is sorted.
 */
def distinct(spec: TransSpec1): Table = {
  // `id` threads the filter slice of the previously-emitted slice (None
  // before any rows survive); `filter` computes the comparison key for the
  // current slice via `spec`.
  def distinct0[T](id: SliceTransform1[Option[Slice]], filter: SliceTransform1[T]): Table = {
    def stream(state: (Option[Slice], T), slices: StreamT[M, Slice]): StreamT[M, Slice] = StreamT(
      for {
        head <- slices.uncons

        back <- {
          head map {
            case (s, sx) => {
              for {
                pairPrev <- id.f(state._1, s)
                (prevFilter, cur) = pairPrev

                // TODO use an Applicative
                pairNext <- filter.f(state._2, s)
                (nextT, curFilter) = pairNext
              } yield {
                val next = cur.distinct(prevFilter, curFilter)

                // Carry the current filter slice forward only if this slice
                // contributed rows; otherwise keep the previous one,
                // presumably so duplicates spanning empty output slices are
                // still detected.
                StreamT.Yield(next, stream((if (next.size > 0) Some(curFilter) else prevFilter, nextT), sx))
              }
            }
          } getOrElse {
            M.point(StreamT.Done)
          }
        }
      } yield back
    )

    // Sort by the spec first so equal rows are adjacent.
    val slices0 = StreamT.wrapEffect(this.sort(spec) map { sorted =>
      stream((id.initial, filter.initial), sorted.slices)
    })

    Table(slices0, EstimateSize(0L, size.maxSize))
  }

  distinct0(SliceTransform.identity(None: Option[Slice]), composeSliceTransform(spec))
}
/**
 * Drops the first `count` rows of the table, across slice boundaries.
 * NOTE(review): the result size is `size + ExactSize(-count)`; presumably
 * TableSize addition clamps at zero when `count` exceeds the actual row
 * count — confirm.
 */
def drop(count: Long): Table = {
  // Unfold state: Some((remaining slices, rows dropped so far)) while still
  // skipping rows; None once the drop point has been passed.
  val slices2 = StreamT.unfoldM[M, StreamT[M, Slice], Option[(StreamT[M, Slice], Long)]](Some((slices, 0L))) {
    case Some((slices, dropped)) =>
      slices.uncons map {
        case Some((slice, tail)) =>
          if (slice.size <= count - dropped)
            // Whole slice is dropped: emit an empty stream and keep skipping.
            Some((StreamT.empty[M, Slice], Some((tail, dropped + slice.size))))
          else
            // Drop point lies inside this slice: emit the remainder plus the
            // untouched tail, then terminate the unfold (state None).
            Some((slice.drop((count - dropped).toInt) :: tail, None))
        case None => None
      }
    case None => M.point(None)
  }
  Table(slices2.flatMap(x => x), size + ExactSize(-count))
}
/**
 * Takes the first `count` rows of the table, across slice boundaries.
 */
def take(count: Long): Table = {
  // Unfold state: Some((remaining slices, rows taken so far)) while still
  // emitting; None once `count` rows have been produced.
  val slices2 = StreamT.unfoldM[M, StreamT[M, Slice], Option[(StreamT[M, Slice], Long)]](Some((slices, 0L))) {
    case Some((slices, taken)) =>
      slices.uncons map {
        case Some((slice, tail)) =>
          if (slice.size <= count - taken)
            // Whole slice fits under the limit: emit it and continue.
            Some((slice :: StreamT.empty[M, Slice], Some((tail, taken + slice.size))))
          else
            // Limit falls inside this slice: emit the prefix and terminate.
            Some((slice.take((count - taken).toInt) :: StreamT.empty[M, Slice], None))
        case None => None
      }
    case None => M.point(None)
  }
  Table(slices2.flatMap(x => x), EstimateSize(0, count))
}
/**
 * Partitions the table into contiguous runs of rows sharing the same value
 * under `partitionBy`, applies `f` to each partition's sub-table, and streams
 * the resulting slices back out in order.
 *
 * In order to call partitionMerge, the table must be sorted according to
 * the values specified by the partitionBy transspec.
 *
 * @param partitionBy key spec; rows comparing EQ under it form one partition
 * @param keepKey     when false, each partition table is stripped back to the
 *                    original row (field "1" of the internal key/value pair
 *                    built by keyTrans below) before `f` sees it
 */
def partitionMerge(partitionBy: TransSpec1, keepKey: Boolean = false)(f: Table => M[Table]): M[Table] = {
  // Find the first element that compares LT.
  // Binary search over [imin, imax]; assumes `compare` is EQ for a prefix of the
  // range and LT for the remainder — anything else means the input was not sorted.
  @tailrec def findEnd(compare: Int => Ordering, imin: Int, imax: Int): Int = {
    val minOrd = compare(imin)
    if (minOrd eq EQ) {
      val maxOrd = compare(imax)
      if (maxOrd eq EQ) {
        imax + 1
      } else if (maxOrd eq LT) {
        val imid = imin + ((imax - imin) / 2)
        val midOrd = compare(imid)
        if (midOrd eq LT) {
          findEnd(compare, imin, imid - 1)
        } else if (midOrd eq EQ) {
          findEnd(compare, imid, imax - 1)
        } else {
          sys.error("Inputs to partitionMerge not sorted.")
        }
      } else {
        sys.error("Inputs to partitionMerge not sorted.")
      }
    } else if ((minOrd eq LT) && (compare(imax) eq LT)) {
      imin
    } else {
      sys.error("Inputs to partitionMerge not sorted.")
    }
  }
  // Accumulates slices into one partition's sub-table until findEnd stops short
  // of a slice's full size (i.e. until the partition boundary is found).
  def subTable(comparatorGen: Slice => (Int => Ordering), slices: StreamT[M, Slice]): M[Table] = {
    def subTable0(slices: StreamT[M, Slice], subSlices: StreamT[M, Slice], size: Int): M[Table] = {
      slices.uncons flatMap {
        case Some((head, tail)) =>
          val headComparator = comparatorGen(head)
          val spanEnd = findEnd(headComparator, 0, head.size - 1)
          if (spanEnd < head.size) {
            M.point(Table(subSlices ++ (head.take(spanEnd) :: StreamT.empty[M, Slice]), ExactSize(size + spanEnd)))
          } else {
            subTable0(tail, subSlices ++ (head :: StreamT.empty[M, Slice]), size + head.size)
          }
        case None =>
          M.point(Table(subSlices, ExactSize(size)))
      }
    }
    subTable0(slices, StreamT.empty[M, Slice], 0)
  }
  // Skips past the partition beginning at spanStart, then hands control to
  // stepPartition for the next partition (if any remain).
  def dropAndSplit(comparatorGen: Slice => (Int => Ordering), slices: StreamT[M, Slice], spanStart: Int): StreamT[M, Slice] = StreamT.wrapEffect {
    slices.uncons map {
      case Some((head, tail)) =>
        val headComparator = comparatorGen(head)
        val spanEnd = findEnd(headComparator, spanStart, head.size - 1)
        if (spanEnd < head.size) {
          stepPartition(head, spanEnd, tail)
        } else {
          dropAndSplit(comparatorGen, tail, 0)
        }
      case None =>
        StreamT.empty[M, Slice]
    }
  }
  // Processes the partition starting at (head, spanStart): builds its sub-table,
  // optionally strips the key wrapper, runs f, then recurses via dropAndSplit.
  def stepPartition(head: Slice, spanStart: Int, tail: StreamT[M, Slice]): StreamT[M, Slice] = {
    // Rows belong to the same partition iff they compare EQ on the "0" (key)
    // field of the key/value pairs produced by keyTrans.
    val comparatorGen = (s: Slice) => {
      val rowComparator = Slice.rowComparatorFor(head, s) { s0 =>
        s0.columns.keys collect {
          case ColumnRef(path @ CPath(CPathField("0"), _ @_ *), _) => path
        }
      }
      (i: Int) =>
        rowComparator.compare(spanStart, i)
    }
    val groupTable = subTable(comparatorGen, head.drop(spanStart) :: tail)
    val groupedM = (if (keepKey)
      groupTable
    else
      groupTable.map(_.transform(DerefObjectStatic(Leaf(Source), CPathField("1"))))).flatMap(f)
    val groupedStream: StreamT[M, Slice] = StreamT.wrapEffect(groupedM.map(_.slices))
    groupedStream ++ dropAndSplit(comparatorGen, head :: tail, spanStart)
  }
  // Pair every row as { "0": key, "1": original row } so the partition key and
  // the value travel together through the stream.
  val keyTrans = OuterObjectConcat(
    WrapObject(partitionBy, "0"),
    WrapObject(Leaf(Source), "1")
  )
  this.transform(keyTrans).compact(TransSpec1.Id).slices.uncons map {
    case Some((head, tail)) =>
      Table(stepPartition(head, 0, tail), UnknownSize)
    case None =>
      Table.empty
  }
}
def normalize: Table = Table(slices.filter(!_.isEmpty), size)
/**
 * Computes the set of JTypes (JSON schemas) present in this table: for every
 * slice, each distinct pattern of defined columns per row (a bitmask over the
 * slice's columns) is converted into one schema via mkSchema.
 */
def schemas: M[Set[JType]] = {
  // Returns true iff masks contains an array equivalent to mask.
  def contains(masks: List[Array[Int]], mask: Array[Int]): Boolean = {
    @tailrec
    def equal(x: Array[Int], y: Array[Int], i: Int): Boolean =
      if (i >= x.length) {
        true
      } else if (x(i) != y(i)) {
        false
      } else {
        equal(x, y, i + 1)
      }
    @tailrec
    def loop(xs: List[Array[Int]], y: Array[Int]): Boolean = xs match {
      case x :: xs if x.length == y.length && equal(x, y, 0) => true
      case _ :: xs => loop(xs, y)
      case Nil => false
    }
    loop(masks, mask)
  }
  // True iff no bit of the mask is set (a row with no defined columns).
  def isZero(x: Array[Int]): Boolean = {
    @tailrec def loop(i: Int): Boolean =
      if (i < 0) {
        true
      } else if (x(i) != 0) {
        false
      } else {
        loop(i - 1)
      }
    loop(x.length - 1)
  }
  // Constructs a schema from a set of defined ColumnRefs. Metadata is
  // ignored and there can be no unions. The set of ColumnRefs must all be
  // defined and hence must create a valid JSON object.
  def mkSchema(cols: List[ColumnRef]): Option[JType] = {
    // Leaf JType for a primitive CType; CUndefined never appears in a defined column.
    def leafType(ctype: CType): JType = ctype match {
      case CBoolean => JBooleanT
      case CLong | CDouble | CNum => JNumberT
      case CString => JTextT
      case CDate => JDateT
      case CPeriod => JPeriodT
      case CArrayType(elemType) => leafType(elemType)
      case CEmptyObject => JObjectFixedT(Map.empty)
      case CEmptyArray => JArrayFixedT(Map.empty)
      case CNull => JNullT
      case CUndefined => sys.error("not supported")
    }
    // Builds a brand-new nested type along a path not seen before.
    // Metadata path segments yield None and thereby drop the column.
    def fresh(paths: List[CPathNode], leaf: JType): Option[JType] = paths match {
      case CPathField(field) :: paths =>
        fresh(paths, leaf) map { tpe =>
          JObjectFixedT(Map(field -> tpe))
        }
      case CPathIndex(i) :: paths =>
        fresh(paths, leaf) map { tpe =>
          JArrayFixedT(Map(i -> tpe))
        }
      case CPathArray :: paths => fresh(paths, leaf) map (JArrayHomogeneousT(_))
      case CPathMeta(field) :: _ => None
      case Nil => Some(leaf)
    }
    // Threads one column's path into the partial schema accumulated so far.
    def merge(schema: Option[JType], paths: List[CPathNode], leaf: JType): Option[JType] = (schema, paths) match {
      case (Some(JObjectFixedT(fields)), CPathField(field) :: paths) =>
        merge(fields get field, paths, leaf) map { tpe =>
          JObjectFixedT(fields + (field -> tpe))
        } orElse schema
      case (Some(JArrayFixedT(indices)), CPathIndex(idx) :: paths) =>
        merge(indices get idx, paths, leaf) map { tpe =>
          JArrayFixedT(indices + (idx -> tpe))
        } orElse schema
      case (None, paths) =>
        fresh(paths, leaf)
      case (jtype, paths) =>
        sys.error("Invalid schema.") // This shouldn't happen for any real data.
    }
    cols.foldLeft(None: Option[JType]) {
      case (schema, ColumnRef(cpath, ctype)) =>
        merge(schema, cpath.nodes, leafType(ctype))
    }
  }
  // Collects all possible schemas from some slices.
  def collectSchemas(schemas: Set[JType], slices: StreamT[M, Slice]): M[Set[JType]] = {
    // One bitmask per distinct definedness pattern found in the slice;
    // the scratch mask is reused per row and copied only when it is new.
    // All-zero rows are skipped.
    def buildMasks(cols: Array[Column], sliceSize: Int): List[Array[Int]] = {
      import java.util.Arrays.copyOf
      val mask = RawBitSet.create(cols.length)
      @tailrec def build0(row: Int, masks: List[Array[Int]]): List[Array[Int]] = {
        if (row < sliceSize) {
          RawBitSet.clear(mask)
          var j = 0
          while (j < cols.length) {
            if (cols(j) isDefinedAt row) RawBitSet.set(mask, j)
            j += 1
          }
          build0(row + 1, if (!contains(masks, mask) && !isZero(mask)) copyOf(mask, mask.length) :: masks else masks)
        } else masks
      }
      build0(0, Nil)
    }
    slices.uncons flatMap {
      case Some((slice, slices)) =>
        val (refs0, cols0) = slice.columns.unzip
        val masks = buildMasks(cols0.toArray, slice.size)
        val refs: List[(ColumnRef, Int)] = refs0.zipWithIndex.toList
        val next = masks flatMap { schemaMask =>
          mkSchema(refs collect { case (ref, i) if RawBitSet.get(schemaMask, i) => ref })
        }
        collectSchemas(schemas ++ next, slices)
      case None =>
        M.point(schemas)
    }
  }
  collectSchemas(Set.empty, slices)
}
/**
 * Streams the table out as JSON text: `prefix`/`suffix` wrap the whole output
 * and `delimiter` separates rows. Delegates to ColumnarTableModule.renderJson.
 */
def renderJson(prefix: String = "", delimiter: String = "\\n", suffix: String = ""): StreamT[M, CharBuffer] =
  ColumnarTableModule.renderJson(slices, prefix, delimiter, suffix)
/** Streams the table out as CSV. Delegates to ColumnarTableModule.renderCsv. */
def renderCsv(): StreamT[M, CharBuffer] =
  ColumnarTableModule.renderCsv(slices)
/**
 * Debug aid: returns a table with the same slices and size, but whose stream
 * prints `prelude` when first pulled and prints f(slice) for every slice that
 * flows past. The slices themselves are unchanged.
 */
def slicePrinter(prelude: String)(f: Slice => String): Table = {
  val printingSlices = StreamT(
    StreamT.Skip {
      println(prelude)
      slices map { slice =>
        println(f(slice))
        slice
      }
    }.point[M]
  )
  Table(printingSlices, size)
}
/**
 * Wraps the slice stream with logging: one debug line (prelude) before the
 * first slice, one (appendix) after the last, and — only when trace is
 * enabled — one trace line per slice rendered via f. Slices pass through
 * unchanged and the size is preserved.
 */
def logged(logger: Logger, logPrefix: String = "", prelude: String = "", appendix: String = "")(f: Slice => String): Table = {
  val preludeEffect = StreamT(StreamT.Skip({ logger.debug(logPrefix + " " + prelude); StreamT.empty[M, Slice] }).point[M])
  val appendixEffect = StreamT(StreamT.Skip({ logger.debug(logPrefix + " " + appendix); StreamT.empty[M, Slice] }).point[M])
  val sliceEffect = if (logger.isTraceEnabled) slices map { s =>
    logger.trace(logPrefix + " " + f(s)); s
  } else slices
  Table(preludeEffect ++ sliceEffect ++ appendixEffect, size)
}
def printer(prelude: String = "", flag: String = ""): Table = slicePrinter(prelude)(s => s.toJsonString(flag))
/** Debug helper: renders every row as a String via toEvents (materializes the table). */
def toStrings: M[Iterable[String]] = {
  toEvents { (slice, row) =>
    slice.toString(row)
  }
}
/** Debug helper: renders every row as a JValue via toEvents (materializes the table). */
def toJson: M[Iterable[JValue]] = {
  toEvents { (slice, row) =>
    slice.toJson(row)
  }
}
// Compacts the table, materializes its slices, and applies f to every
// (slice, row) pair, keeping only the rows for which f yields Some.
// NOTE(review): `toStream` presumably pulls the whole table into memory, so
// this is only suitable for small/debug tables — confirm against StreamT.
private def toEvents[A](f: (Slice, RowId) => Option[A]): M[Iterable[A]] = {
  for (stream <- self.compact(Leaf(Source)).slices.toStream) yield {
    for (slice <- stream; i <- 0 until slice.size; a <- f(slice, i)) yield a
  }
}
def metrics = TableMetrics(readStarts.get, blockReads.get)
}
}
| drostron/quasar | yggdrasil/src/main/scala/quasar/yggdrasil/table/ColumnarTableModule.scala | Scala | apache-2.0 | 79,373 |
package mesosphere.marathon
package core.condition
import play.api.libs.json._
import org.apache.mesos.Protos.{TaskState => MesosTaskState}
/**
* To define the status of an Instance, this trait is used and stored for each Task in Task.Status.
* The existing case objects are:
* - marathon exclusive status
* - representations of the mesos.Protos.TaskStatus
* - mapping of existing (soon-to-be deprecated) mesos.Protos.TaskStatus.TASK_LOST to the new representations
*/
sealed trait Condition extends Product with Serializable {

  /**
    * @return whether this condition represents a task the master has lost
    *         track of.
    *
    * UnreachableInactive is deliberately excluded: whether it counts as lost
    * depends on the caller's context.
    */
  def isLost: Boolean =
    this match {
      case Condition.Gone | Condition.Unreachable | Condition.Unknown | Condition.Dropped => true
      case _ => false
    }

  /** @return whether this condition is a terminal state. */
  def isTerminal: Boolean =
    this match {
      case _: Condition.Terminal => true
      case _ => false
    }

  /** @return whether this condition is considered active. */
  def isActive: Boolean =
    this match {
      case _: Condition.Active => true
      case _ => false
    }
}
object Condition {
  sealed trait Terminal extends Condition
  // Terminal conditions that indicate the task did not end successfully.
  sealed trait Failure extends Terminal
  // Conditions under which the task is considered active.
  sealed trait Active extends Condition
  /** Scheduled: Task should be launched by matching offers. Mesos does not know anything about it. */
  case object Scheduled extends Condition
  /** Provisioned: An offer for task has been accepted but Mesos did not start the task yet. */
  case object Provisioned extends Active
  /** Error: indicates that a task launch attempt failed because of an error in the task specification */
  case object Error extends Failure
  /** Failed: task aborted with an error */
  case object Failed extends Failure
  /** Finished: task completes successfully */
  case object Finished extends Terminal
  /** Killed: task was killed */
  case object Killed extends Terminal
  /** Killing: the request to kill the task has been received, but the task has not yet been killed */
  case object Killing extends Active
  /** Running: the state after the task has begun running successfully */
  case object Running extends Active
  /**
   * Staging: the master has received the framework’s request to launch the task but the task has not yet started to
   * run
   */
  case object Staging extends Active
  /** Starting: task is currently starting */
  case object Starting extends Active
  /** Unreachable: the master has not heard from the agent running the task for a configurable period of time */
  case object Unreachable extends Active
  /**
   * The task has been unreachable for a configurable time. A replacement task is started but this one won't be killed
   * yet.
   */
  case object UnreachableInactive extends Condition
  /** Gone: the task was running on an agent that has been terminated */
  case object Gone extends Failure
  /** Dropped: the task failed to launch because of a transient error (e.g., spontaneously disconnected agent) */
  case object Dropped extends Failure
  /** Unknown: the master has no knowledge of the task */
  case object Unknown extends Failure
  // Closest Mesos TaskState per condition. Scheduled and Provisioned are
  // deliberately absent: toMesosTaskState returns None for them.
  private[this] val conditionToMesosTaskState = {
    Map(
      Error -> MesosTaskState.TASK_ERROR,
      Failed -> MesosTaskState.TASK_FAILED,
      Finished -> MesosTaskState.TASK_FINISHED,
      Killed -> MesosTaskState.TASK_KILLED,
      Killing -> MesosTaskState.TASK_KILLING,
      Running -> MesosTaskState.TASK_RUNNING,
      Staging -> MesosTaskState.TASK_STAGING,
      Starting -> MesosTaskState.TASK_STARTING,
      Unreachable -> MesosTaskState.TASK_UNREACHABLE,
      UnreachableInactive -> MesosTaskState.TASK_UNREACHABLE,
      Gone -> MesosTaskState.TASK_GONE,
      Dropped -> MesosTaskState.TASK_DROPPED,
      Unknown -> MesosTaskState.TASK_UNKNOWN
    )
  }
  // Every condition; also the source of the name-based lookup table below.
  val all = Seq(
    Error,
    Failed,
    Finished,
    Killed,
    Killing,
    Running,
    Staging,
    Starting,
    Unreachable,
    UnreachableInactive,
    Gone,
    Dropped,
    Unknown,
    Scheduled,
    Provisioned
  )
  // Lookup keyed by the lower-cased simple object name, e.g. "unreachableinactive".
  private val lowerCaseStringToCondition: Map[String, Condition] = all.iterator.map { c =>
    c.toString.toLowerCase -> c
  }.toMap
  /** Converts the Condition to a mesos task state where such a conversion is possible */
  def toMesosTaskState(condition: Condition): Option[MesosTaskState] =
    conditionToMesosTaskState.get(condition)
  /**
   * Converts the Condition to a mesos task state where such a conversion is possible; if not possible, return
   * TASK_STAGING.
   */
  def toMesosTaskStateOrStaging(condition: Condition): MesosTaskState =
    conditionToMesosTaskState.getOrElse(condition, MesosTaskState.TASK_STAGING)
  // Case-insensitive parse; unrecognized names silently map to Unknown.
  def apply(str: String): Condition =
    lowerCaseStringToCondition.getOrElse(str.toLowerCase, Unknown)
  def unapply(condition: Condition): Option[String] = Some(condition.toString.toLowerCase)
  // Accepts either a bare JSON string or an object with a "str" field
  // (presumably a legacy persisted shape — TODO confirm).
  val conditionReader = new Reads[Condition] {
    private def readString(j: JsReadable) = j.validate[String].map(Condition(_))
    override def reads(json: JsValue): JsResult[Condition] =
      readString(json).orElse {
        json.validate[JsObject].flatMap { obj => readString(obj \\ "str") }
      }
  }
  // Writes the mixed-case simple name; reads are case-insensitive (see above).
  implicit val conditionFormat = Format[Condition](conditionReader, Writes(condition => JsString(condition.toString)))
}
| mesosphere/marathon | src/main/scala/mesosphere/marathon/core/condition/Condition.scala | Scala | apache-2.0 | 5,447 |
package gitbucket.core.api
import gitbucket.core.util.RepositoryName
import gitbucket.core.model.CommitComment
import java.util.Date
/**
* https://developer.github.com/v3/activity/events/types/#pullrequestreviewcommentevent
*/
// GitHub-v3-compatible representation of one pull-request review comment.
// The inline comments show sample values from GitHub's own payloads; fields
// GitBucket does not track (diff_hunk, position, ...) are left commented out.
case class ApiPullRequestReviewComment(
  id: Int, // 29724692
  // "diff_hunk": "@@ -1 +1 @@\\n-# public-repo",
  path: String, // "README.md",
  // "position": 1,
  // "original_position": 1,
  commit_id: String, // "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c",
  // "original_commit_id": "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c",
  user: ApiUser,
  body: String, // "Maybe you should use more emoji on this line.",
  created_at: Date, // "2015-05-05T23:40:27Z",
  updated_at: Date // "2015-05-05T23:40:27Z",
)(repositoryName: RepositoryName, issueId: Int)
  extends FieldSerializable {
  // Derived URL fields mirroring GitHub's payload; second-parameter-list values
  // (repositoryName, issueId) are used only to build these paths and are
  // presumably excluded from serialization by FieldSerializable — confirm.
  // "url": "https://api.github.com/repos/baxterthehacker/public-repo/pulls/comments/29724692",
  val url = ApiPath(s"/api/v3/repos/${repositoryName.fullName}/pulls/comments/${id}")
  // "html_url": "https://github.com/baxterthehacker/public-repo/pull/1#discussion_r29724692",
  val html_url = ApiPath(s"/${repositoryName.fullName}/pull/${issueId}#discussion_r${id}")
  // "pull_request_url": "https://api.github.com/repos/baxterthehacker/public-repo/pulls/1",
  val pull_request_url = ApiPath(s"/api/v3/repos/${repositoryName.fullName}/pulls/${issueId}")
  /*
  "_links": {
  "self": {
  "href": "https://api.github.com/repos/baxterthehacker/public-repo/pulls/comments/29724692"
  },
  "html": {
  "href": "https://github.com/baxterthehacker/public-repo/pull/1#discussion_r29724692"
  },
  "pull_request": {
  "href": "https://api.github.com/repos/baxterthehacker/public-repo/pulls/1"
  }
  }
   */
  val _links = Map(
    "self" -> Map("href" -> url),
    "html" -> Map("href" -> html_url),
    "pull_request" -> Map("href" -> pull_request_url)
  )
}
object ApiPullRequestReviewComment {

  /**
   * Builds the API representation of a single review comment from the
   * persisted CommitComment plus the already-converted commenting user.
   */
  def apply(
    comment: CommitComment,
    commentedUser: ApiUser,
    repositoryName: RepositoryName,
    issueId: Int
  ): ApiPullRequestReviewComment = {
    // A review comment may not be attached to a file; fall back to "".
    val commentedPath = comment.fileName.getOrElse("")
    new ApiPullRequestReviewComment(
      id = comment.commentId,
      path = commentedPath,
      commit_id = comment.commitId,
      user = commentedUser,
      body = comment.content,
      created_at = comment.registeredDate,
      updated_at = comment.updatedDate
    )(repositoryName, issueId)
  }
}
| McFoggy/gitbucket | src/main/scala/gitbucket/core/api/ApiPullRequestReviewComment.scala | Scala | apache-2.0 | 2,438 |
package is.hail.types.physical.stypes.concrete
import is.hail.annotations.Region
import is.hail.asm4s.{BooleanInfo, Code, LongInfo, Settable, SettableBuilder, TypeInfo, Value}
import is.hail.expr.ir.orderings.CodeOrdering
import is.hail.expr.ir.{EmitCodeBuilder, IEmitCode}
import is.hail.types.physical.stypes._
import is.hail.types.physical.stypes.interfaces.{SInterval, SIntervalValue}
import is.hail.types.physical.{PInterval, PType}
import is.hail.types.virtual.Type
import is.hail.utils.FastIndexedSeq
// Interval SType backed by a PInterval pointer representation. A value is the
// triple (address: Long, includesStart: Boolean, includesEnd: Boolean); see
// settableTupleTypes below.
final case class SIntervalPointer(pType: PInterval) extends SInterval {
  // This SType always models the optional form of the physical type.
  require(!pType.required)
  override def _coerceOrCopy(cb: EmitCodeBuilder, region: Region, value: SValue, deepCopy: Boolean): SValue =
    value match {
      case value: SIntervalValue =>
        // Re-store the interval into `region` (copying if deepCopy) and keep
        // the closedness flags from the incoming value.
        new SIntervalPointerValue(this, pType.store(cb, region, value, deepCopy), value.includesStart(), value.includesEnd())
    }
  override def castRename(t: Type): SType = SIntervalPointer(pType.deepRename(t).asInstanceOf[PInterval])
  override lazy val virtualType: Type = pType.virtualType
  // Storage layout: address, includesStart, includesEnd.
  override def settableTupleTypes(): IndexedSeq[TypeInfo[_]] = FastIndexedSeq(LongInfo, BooleanInfo, BooleanInfo)
  override def fromSettables(settables: IndexedSeq[Settable[_]]): SIntervalPointerSettable = {
    val IndexedSeq(a: Settable[Long@unchecked], includesStart: Settable[Boolean@unchecked], includesEnd: Settable[Boolean@unchecked]) = settables
    assert(a.ti == LongInfo)
    assert(includesStart.ti == BooleanInfo)
    assert(includesEnd.ti == BooleanInfo)
    new SIntervalPointerSettable(this, a, includesStart, includesEnd)
  }
  override def fromValues(values: IndexedSeq[Value[_]]): SIntervalPointerValue = {
    val IndexedSeq(a: Value[Long@unchecked], includesStart: Value[Boolean@unchecked], includesEnd: Value[Boolean@unchecked]) = values
    assert(a.ti == LongInfo)
    assert(includesStart.ti == BooleanInfo)
    assert(includesEnd.ti == BooleanInfo)
    new SIntervalPointerValue(this, a, includesStart, includesEnd)
  }
  override def pointType: SType = pType.pointType.sType
  override def pointEmitType: EmitType = EmitType(pType.pointType.sType, pType.pointType.required)
  override def storageType(): PType = pType
  override def copiedType: SType = SIntervalPointer(pType.copiedType.asInstanceOf[PInterval])
  override def containsPointers: Boolean = pType.containsPointers
}
// Runtime value for SIntervalPointer: an address plus the two closedness flags.
class SIntervalPointerValue(
  val st: SIntervalPointer,
  val a: Value[Long],
  val includesStart: Value[Boolean],
  val includesEnd: Value[Boolean]
) extends SIntervalValue {
  override lazy val valueTuple: IndexedSeq[Value[_]] = FastIndexedSeq(a, includesStart, includesEnd)
  val pt: PInterval = st.pType
  // Start endpoint; missing (IEmitCode's missing branch) when undefined in storage.
  override def loadStart(cb: EmitCodeBuilder): IEmitCode =
    IEmitCode(cb,
      !pt.startDefined(cb, a),
      pt.pointType.loadCheapSCode(cb, pt.loadStart(a)))
  override def startDefined(cb: EmitCodeBuilder): Value[Boolean] =
    pt.startDefined(cb, a)
  // End endpoint; missing when undefined in storage.
  override def loadEnd(cb: EmitCodeBuilder): IEmitCode =
    IEmitCode(cb,
      !pt.endDefined(cb, a),
      pt.pointType.loadCheapSCode(cb, pt.loadEnd(a)))
  override def endDefined(cb: EmitCodeBuilder): Value[Boolean] =
    pt.endDefined(cb, a)
  // Emptiness test: a closed-closed interval is empty iff start > end; any
  // half-open or open interval is empty iff start >= end.
  override def isEmpty(cb: EmitCodeBuilder): Value[Boolean] = {
    val gt = cb.emb.ecb.getOrderingFunction(st.pointType, CodeOrdering.Gt())
    val gteq = cb.emb.ecb.getOrderingFunction(st.pointType, CodeOrdering.Gteq())
    val start = cb.memoize(loadStart(cb), "start")
    val end = cb.memoize(loadEnd(cb), "end")
    val empty = cb.newLocal[Boolean]("is_empty")
    cb.ifx(includesStart && includesEnd,
      cb.assign(empty, gt(cb, start, end)),
      cb.assign(empty, gteq(cb, start, end)))
    empty
  }
}
object SIntervalPointerSettable {

  /**
   * Allocates the three settables (address, includesStart, includesEnd) that
   * back a mutable interval value, named after `name` for readable generated code.
   */
  def apply(sb: SettableBuilder, st: SIntervalPointer, name: String): SIntervalPointerSettable = {
    val address = sb.newSettable[Long](name + "_a")
    val startClosed = sb.newSettable[Boolean](name + "_includes_start")
    val endClosed = sb.newSettable[Boolean](name + "_includes_end")
    new SIntervalPointerSettable(st, address, startClosed, endClosed)
  }
}
// Mutable variant of SIntervalPointerValue: the three backing fields are
// Settables so the value can be reassigned in generated code.
final class SIntervalPointerSettable(
  st: SIntervalPointer,
  override val a: Settable[Long],
  override val includesStart: Settable[Boolean],
  override val includesEnd: Settable[Boolean]
) extends SIntervalPointerValue(st, a, includesStart, includesEnd) with SSettable {
  override def settableTuple(): IndexedSeq[Settable[_]] = FastIndexedSeq(a, includesStart, includesEnd)
  // Copies address and closedness flags from another interval value of the same SType.
  override def store(cb: EmitCodeBuilder, v: SValue): Unit = v match {
    case v: SIntervalPointerValue =>
      cb.assign(a, v.a)
      cb.assign(includesStart, v.includesStart)
      cb.assign(includesEnd, v.includesEnd)
  }
}
| hail-is/hail | hail/src/main/scala/is/hail/types/physical/stypes/concrete/SIntervalPointer.scala | Scala | mit | 4,683 |
import sbt._
/** sbt 0.7 project definition for a Lift web application. */
class LiftProject(info: ProjectInfo) extends DefaultWebProject(info) {
  // Lift version string, read from the project's properties file.
  val liftVersion = property[Version]

  // uncomment the following if you want to use the snapshot repo
  // val scalatoolsSnapshot = ScalaToolsSnapshots

  // If you're using JRebel for Lift development, uncomment
  // this line
  // override def scanDirectories = Nil

  lazy val JavaNet = "Java.net Maven2 Repository" at "http://download.java.net/maven/2/"

  override def libraryDependencies = {
    // Read the version property once rather than once per Lift artifact.
    val lift = liftVersion.value.toString
    Set(
      "net.liftweb" %% "lift-webkit" % lift % "compile",
      "net.liftweb" %% "lift-mapper" % lift % "compile",
      "org.mortbay.jetty" % "jetty" % "6.1.26" % "test",
      "junit" % "junit" % "4.7" % "test",
      "ch.qos.logback" % "logback-classic" % "0.9.26",
      "org.scala-tools.testing" %% "specs" % "1.6.8" % "test",
      "com.h2database" % "h2" % "1.2.147",
      "mysql" % "mysql-connector-java" % "5.1.12" % "compile->default",
      "org.apache.sanselan" % "sanselan" % "0.97-incubator",
      "org.slf4j" % "slf4j-log4j12" % "1.6.1",
      "org.jsoup" % "jsoup" % "1.5.1"
    ) ++ super.libraryDependencies
  }
}
| TopicQuests/IBISLift | project/build/LiftProject.scala | Scala | apache-2.0 | 1,139 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster
import org.apache.hadoop.yarn.util.RackResolver
import org.apache.log4j.{Level, Logger}
import org.apache.spark._
import org.apache.spark.scheduler.TaskSchedulerImpl
import org.apache.spark.util.Utils
private[spark] class YarnScheduler(sc: SparkContext) extends TaskSchedulerImpl(sc) {
  // RackResolver logs an INFO message whenever it resolves a rack, which is way too often.
  // Raise its level to WARN unless the user has configured one explicitly (level == null).
  if (Logger.getLogger(classOf[RackResolver]).getLevel == null) {
    Logger.getLogger(classOf[RackResolver]).setLevel(Level.WARN)
  }
  // By default, rack is unknown; on YARN, resolve the host's rack through
  // Hadoop's RackResolver using the cluster topology configuration.
  override def getRackForHost(hostPort: String): Option[String] = {
    val host = Utils.parseHostPort(hostPort)._1
    Option(RackResolver.resolve(sc.hadoopConfiguration, host).getNetworkLocation)
  }
}
| tophua/spark1.52 | yarn/src/main/scala/org/apache/spark/scheduler/cluster/YarnScheduler.scala | Scala | apache-2.0 | 1,713 |
package org.finra.datagenerator.scaffolding.dependency
/**
* Created by dkopel on 9/19/16.
*/
/**
* Represents a void that can be filled by
* the appropriate value
*
*/
trait Dependency extends Serializable with Ordered[Dependency] {
  /**
   * Determines whether or not this dependency is required.
   * A dependency that is not required still has its resolution attempted,
   * but it is not considered erroneous if no resolution can be found.
   */
  def required: Boolean
  /**
   * Relative priority of this dependency. The original comment was truncated
   * ("Used to comp"); presumably this drives the Ordered[Dependency]
   * comparison defined elsewhere — TODO confirm.
   */
  def priority: Long
  /** Dependencies that must themselves be resolved for this one. */
  def dependencies: Seq[Dependency]
  /** Whether this dependency has already been resolved. */
  def resolved: Boolean
}
| FINRAOS/DataGenerator | rubber-scaffolding/rubber-dependency/src/main/scala/org/finra/datagenerator/scaffolding/dependency/Dependency.scala | Scala | apache-2.0 | 628 |
package advanced
import scala.language/*->scalaShadowing::language.*/.higherKinds/*->scalaShadowing::language.higherKinds.*/
import scala.language/*->scalaShadowing::language.*/.reflectiveCalls/*->scalaShadowing::language.reflectiveCalls.*/
import scala.reflect.Selectable/*->scala::reflect::Selectable.*/.reflectiveSelectable/*->scala::reflect::Selectable.reflectiveSelectable().*/
class C/*<-advanced::C#*/[T/*<-advanced::C#[T]*/] {
def t/*<-advanced::C#t().*/: T/*->advanced::C#[T]*/ = ???/*->scala::Predef.`???`().*/
}
class Structural/*<-advanced::Structural#*/ {
def s1/*<-advanced::Structural#s1().*/: { val x/*<-local0*/: Int/*->scala::Int#*/ } = ???/*->scala::Predef.`???`().*/
def s2/*<-advanced::Structural#s2().*/: { val x/*<-local1*/: Int/*->scala::Int#*/ } = new { val x/*<-local3*/: Int/*->scala::Int#*/ = ???/*->scala::Predef.`???`().*/ }
def s3/*<-advanced::Structural#s3().*/: { def m/*<-local4*/(x/*<-local5*/: Int/*->scala::Int#*/): Int/*->scala::Int#*/ } = new { def m/*<-local7*/(x/*<-local8*/: Int/*->scala::Int#*/): Int/*->scala::Int#*/ = ???/*->scala::Predef.`???`().*/ }
}
class Wildcards/*<-advanced::Wildcards#*/ {
def e1/*<-advanced::Wildcards#e1().*/: List/*->scala::package.List#*/[_] = ???/*->scala::Predef.`???`().*/
}
object Test/*<-advanced::Test.*/ {
val s/*<-advanced::Test.s.*/ = new Structural/*->advanced::Structural#*/
val s1/*<-advanced::Test.s1.*/ = s/*->advanced::Test.s.*/.s1/*->advanced::Structural#s1().*/
val s1x/*<-advanced::Test.s1x.*/ = /*->scala::reflect::Selectable.reflectiveSelectable().*/s/*->advanced::Test.s.*/.s1/*->advanced::Structural#s1().*//*->scala::Selectable#selectDynamic().*/.x
val s2/*<-advanced::Test.s2.*/ = s/*->advanced::Test.s.*/.s2/*->advanced::Structural#s2().*/
val s2x/*<-advanced::Test.s2x.*/ = /*->scala::reflect::Selectable.reflectiveSelectable().*/s/*->advanced::Test.s.*/.s2/*->advanced::Structural#s2().*//*->scala::Selectable#selectDynamic().*/.x
val s3/*<-advanced::Test.s3.*/ = s/*->advanced::Test.s.*/.s3/*->advanced::Structural#s3().*/
val s3x/*<-advanced::Test.s3x.*/ = /*->scala::reflect::Selectable.reflectiveSelectable().*/s/*->advanced::Test.s.*/.s3/*->advanced::Structural#s3().*//*->scala::Selectable#applyDynamic().*/.m/*->scala::reflect::ClassTag.apply().*//*->java::lang::Integer#TYPE.*/(???/*->scala::Predef.`???`().*/)
val e/*<-advanced::Test.e.*/ = new Wildcards/*->advanced::Wildcards#*/
val e1/*<-advanced::Test.e1.*/ = e/*->advanced::Test.e.*/.e1/*->advanced::Wildcards#e1().*/
val e1x/*<-advanced::Test.e1x.*/ = e/*->advanced::Test.e.*/.e1/*->advanced::Wildcards#e1().*/.head/*->scala::collection::IterableOps#head().*/
{
(???/*->scala::Predef.`???`().*/ : Any/*->scala::Any#*/) match {
case e3/*<-local9*/: List/*->scala::package.List#*/[_] =>
val e3x/*<-local10*/ = e3/*->local9*/.head/*->scala::collection::IterableOps#head().*/
()
}
}
}
| som-snytt/dotty | tests/semanticdb/expect/Advanced.expect.scala | Scala | apache-2.0 | 2,919 |
package org.scaladebugger.api.lowlevel.requests.filters.processors
import com.sun.jdi.ObjectReference
import com.sun.jdi.request._
import org.scaladebugger.api.lowlevel.requests.filters.{JDIRequestFilter, JDIRequestFilterProcessor, InstanceFilter}
/**
* Represents a processor for the instance filter.
*
* @param instanceFilter The instance filter to use when processing
*/
class InstanceFilterProcessor(
  val instanceFilter: InstanceFilter
) extends JDIRequestFilterProcessor {
  private val objectReference = instanceFilter.objectReference

  /**
   * Processes the provided event request with the filter logic: the instance
   * filter is applied when the request type supports it, otherwise the request
   * is left untouched. A null request is returned as-is.
   *
   * @param eventRequest The request to process
   *
   * @return The updated request
   */
  override def process(eventRequest: EventRequest): EventRequest = {
    // null falls through every typed pattern to the default case, so it is
    // simply returned unchanged.
    eventRequest match {
      case r: AccessWatchpointRequest        => r.addInstanceFilter(objectReference)
      case r: BreakpointRequest              => r.addInstanceFilter(objectReference)
      case r: ExceptionRequest               => r.addInstanceFilter(objectReference)
      case r: MethodEntryRequest             => r.addInstanceFilter(objectReference)
      case r: MethodExitRequest              => r.addInstanceFilter(objectReference)
      case r: ModificationWatchpointRequest  => r.addInstanceFilter(objectReference)
      case r: MonitorContendedEnteredRequest => r.addInstanceFilter(objectReference)
      case r: MonitorContendedEnterRequest   => r.addInstanceFilter(objectReference)
      case r: MonitorWaitedRequest           => r.addInstanceFilter(objectReference)
      case r: MonitorWaitRequest             => r.addInstanceFilter(objectReference)
      case r: StepRequest                    => r.addInstanceFilter(objectReference)
      case _                                 => // request type does not support instance filters
    }
    eventRequest
  }

  override val argument: JDIRequestFilter = instanceFilter
}
| chipsenkbeil/scala-debugger | scala-debugger-api/src/main/scala/org/scaladebugger/api/lowlevel/requests/filters/processors/InstanceFilterProcessor.scala | Scala | apache-2.0 | 1,873 |
package com.flurdy.socialcrowd.model
import org.specs2.mutable._
import org.specs2.specification._
import org.joda.time.DateTime
import org.joda.time.Duration
import com.flurdy.socialcrowd.model._
import util.Random
/**
 * Specification for SocialMember: posting, following, post ordering, and the
 * combined wall view (own posts merged with followed members' posts).
 *
 * NOTE(review): several expectations use the form `output must be contain(...)`;
 * the conventional specs2 form is `must contain(...)` — confirm these matchers
 * compile and assert as intended under the specs2 version in use.
 */
class SocialMemberSpec extends Specification {
  "Social Member" should {
    "post action are stored in posts" in {
      val member = new SocialMember("Alice")
      member.getPosts must be empty

      member.post("Hello world")

      member.getPosts must not be empty
    }
    "follow action adds friend to friends" in {
      val member = new SocialMember("Alice")
      val friend = new SocialMember("Peter")
      member.friends must be empty

      member.follows(friend)

      member.friends must not be empty
    }
    // Self-follow must be a no-op.
    "follow action ignore adding yourself to friends" in {
      val member = new SocialMember("Alice")
      member.friends must be empty

      member.follows(member)

      member.friends must be empty
    }
    "read posts action responds with members post" in {
      val member = new SocialMember("Alice")
      member.post("Hello World")

      val output = member.getPosts.map(_.messagePost)

      output must be contain("Hello World (now)")
    }
    "return all posts" in {
      val member = new SocialMember("Alice")
      member.post("Hello World")
      member.post("Hello Another World")
      member.post("Hello Third World")

      val output = member.getPosts.map(_.messagePost)

      output must have length(3)
      output must be contain("Hello World (now)")
      output must be contain("Hello Another World (now)")
      output must be contain("Hello Third World (now)")
    }
    // Posts are expected newest-first regardless of insertion order.
    "return ordered posts" in {
      val member = new SocialMember("Alice")
      member.postMessage(new SocialMessage("Alice","Hello World", DateTime.now.minusSeconds(10)))
      member.postMessage(new SocialMessage("Alice","Hello Third World", DateTime.now.minusMinutes(10)))
      member.postMessage(new SocialMessage("Alice","Hello Another World", DateTime.now.minusSeconds(50)))

      val output = member.getPosts.map(_.messagePost)

      output(0) must be equalTo("Hello World (10 seconds ago)")
      output(1) must be equalTo("Hello Another World (50 seconds ago)")
      output(2) must be equalTo("Hello Third World (10 minutes ago)")
    }
    "wall action responds with members and friends posts" in {
      val member = new SocialMember("Alice")
      val friend = new SocialMember("Peter")
      member.follows(friend)
      member.post("Hello World")
      friend.post("Hello Sun")

      val output = member.showWall.map(_.wallPost)

      output must have length(2)
      output must be contain("Alice - Hello World (now)")
      output must be contain("Peter - Hello Sun (now)")
    }
    // Wall must interleave own and friends' posts in reverse-chronological order.
    "wall responds with ordered members and friends posts" in {
      val member = new SocialMember("Alice")
      val friend = new SocialMember("Peter")
      member.follows(friend)
      member.postMessage(new SocialMessage("Alice","Hello World", DateTime.now.minusSeconds(10)))
      member.postMessage(new SocialMessage("Alice","Hello Another World", DateTime.now.minusSeconds(50)))
      member.postMessage(new SocialMessage("Alice","Hello Third World", DateTime.now.minusMinutes(10)))
      friend.postMessage(new SocialMessage("Peter","Friend World", DateTime.now.minusSeconds(15)))
      friend.postMessage(new SocialMessage("Peter","Friend Third World", DateTime.now.minusMinutes(9)))

      val output = member.showWall.map(_.wallPost)

      output must have length(5)
      output(0) must be equalTo("Alice - Hello World (10 seconds ago)")
      output(1) must be equalTo("Peter - Friend World (15 seconds ago)")
      output(4) must be equalTo("Alice - Hello Third World (10 minutes ago)")
    }
    // Smoke/performance check: 50k posts at random ages must render under 3s.
    "return a lot of ordered wall posts" in {
      val member = new SocialMember("Alice")
      for (minute <- Seq.fill(50000)(Random.nextInt(50000))){
        member.postMessage(
          new SocialMessage(
            "Alice",s"Hello World $minute",
            DateTime.now.minusSeconds(minute)))
      }
      val before = DateTime.now

      val output = member.showWall.map(_.wallPost)

      val after = DateTime.now
      val duration = new Duration(before,after)
      output must not be empty
      duration.getStandardSeconds() must beLessThan(3L)
    }
  }
}
| flurdy/socialcrowd | src/test/scala/model/SocialMemberSpec.scala | Scala | mit | 4,688 |
package spire.time.joda
import org.joda.time._
import spire.algebra._
import spire.math.Rational
import spire.std.any._
// One package object per instance group so users can import only what they
// need, e.g. `import spire.time.joda.days._`.
package object datetime
    extends DateTimeInstances
package object days
    extends DaysInstances
package object duration
    extends DurationInstances
package object hours
    extends HoursInstances
package object instant
    extends InstantInstances
package object localdate
    extends LocalDateInstances
package object localtime
    extends LocalTimeInstances
package object minutes
    extends MinutesInstances
package object months
    extends MonthsInstances
package object seconds
    extends SecondsInstances
package object weeks
    extends WeeksInstances
package object years
    extends YearsInstances
// Catch-all import target bundling the instance traits above.
// NOTE(review): InstantInstances and LocalTimeInstances are not mixed in here —
// possibly deliberate (InstantInstances' `dateTimeOrder` name clashes with
// DateTimeInstances'), but verify it is not an oversight.
package object any
    extends DateTimeInstances
    with DaysInstances
    with DurationInstances
    with HoursInstances
    with LocalDateInstances
    with MinutesInstances
    with MonthsInstances
    with SecondsInstances
    with WeeksInstances
    with YearsInstances
/**
 * Spire type-class instances for Joda-Time `DateTime`: a total order, a metric
 * space measuring exact millisecond distance, and a torsor acting on
 * `DateTime` by `Duration`.
 */
trait DateTimeInstances {
  implicit val dateTimeOrder =
    Auto.order[DateTime]
  implicit val dateTimeMetricSpace =
    new MetricSpace[DateTime, Duration] {
      def distance(t1: DateTime, t2: DateTime): Duration =
        new Duration(t1, t2)
    }
  implicit val dateTimeTorsor =
    new Torsor[DateTime, Duration] {
      def actl(d: Duration, dt: DateTime): DateTime =
        dt.plus(d)
      def actr(dt: DateTime, d: Duration): DateTime =
        dt.plus(d)
      def diff(dt1: DateTime, dt2: DateTime): Duration =
        // Fixed: previously `new Period(dt1, dt2).toStandardDuration`, which
        // throws UnsupportedOperationException whenever the interval contains
        // months or years (Period's standard type is not convertible to an
        // exact duration). Exact millisecond difference also agrees with
        // dateTimeMetricSpace.distance above.
        new Duration(dt1, dt2)
    }
}
/** Order, abelian group and Int-module instances for Joda-Time `Days`. */
trait DaysInstances {
  implicit val daysOrder = Auto.order[Days]
  implicit val daysAbGroup = Auto.abGroup[Days](Days.ZERO)
  // Consistency fix: every sibling instance trait declares its module as a
  // strict `val`; the `lazy` here was gratuitous (Days.ZERO is a static
  // constant, so there is no initialization-order hazard) and only added
  // synchronization overhead.
  implicit val daysModuleInt = Auto.module[Days](Days.ZERO)
}
/**
 * Order, abelian group and a Long-scalar module for Joda-Time `Duration`
 * (exact milliseconds). The Rational vector-space instance lives in
 * LowPriorityDurationInstances so the Long module wins implicit priority.
 */
trait DurationInstances extends LowPriorityDurationInstances {
  implicit val durationOrder = Auto.order[Duration]
  implicit val durationAbGroup = Auto.abGroup[Duration](Duration.ZERO)
  implicit val durationModuleLong = new Module[Duration, Long] {
    implicit val scalar: Rng[Long] = Rng[Long]
    def zero: Duration = Duration.ZERO
    // NOTE(review): negating Long.MinValue millis overflows; acceptable for
    // realistic durations.
    def negate(v: Duration): Duration = new Duration(-v.getMillis)
    def plus(v: Duration, w: Duration): Duration = v plus w
    override def minus(v: Duration, w: Duration): Duration = v minus w
    def timesl(r: Long, v: Duration): Duration = new Duration(v.getMillis * r)
  }
}
/**
 * Rational-scalar inner-product space over `Duration`. Scalar operations are
 * computed in exact Rational arithmetic and truncated to whole milliseconds
 * only at the end.
 */
trait LowPriorityDurationInstances {
  val field = Field[Rational]
  implicit val durationInnerProductSpaceRational = new InnerProductSpace[Duration, Rational] {
    implicit val scalar: Field[Rational] = field
    def zero: Duration = Duration.ZERO
    def negate(v: Duration): Duration = new Duration(-v.getMillis)
    def plus(v: Duration, w: Duration): Duration = v plus w
    override def minus(v: Duration, w: Duration): Duration = v minus w
    // Fixed: the old body `new Duration(v.getMillis * r.toLong)` truncated the
    // *scalar* to a Long first, so e.g. scaling by 1/2 produced Duration.ZERO.
    // Multiply in Rational arithmetic and truncate the result, mirroring divr.
    def timesl(r: Rational, v: Duration): Duration = new Duration((Rational(v.getMillis) * r).toLong)
    override def divr(v: Duration, r: Rational): Duration = new Duration((Rational(v.getMillis) / r).toLong)
    def dot(v: Duration, w: Duration): Rational = Rational(v.getMillis) * Rational(w.getMillis)
  }
}
/** Order, abelian group and Int-module instances for Joda-Time `Hours`. */
trait HoursInstances {
  implicit val hoursOrder = Auto.order[Hours]
  implicit val hoursAbGroup = Auto.abGroup[Hours](Hours.ZERO)
  implicit val hoursModuleInt = Auto.module[Hours](Hours.ZERO)
}
/** Total order for Joda-Time `Instant`. */
trait InstantInstances {
  // Renamed from `dateTimeOrder` (copy-paste from DateTimeInstances): the old
  // name made this trait impossible to mix together with DateTimeInstances,
  // which declares a `dateTimeOrder` of a different type. Implicit resolution
  // is by type, so the rename is transparent to users of the implicit.
  implicit val instantOrder = Auto.order[Instant]
}
/** Total order for Joda-Time `LocalDate`. */
trait LocalDateInstances {
  implicit val localDateOrder = Auto.order[LocalDate]
}
/** Total order for Joda-Time `LocalTime`. */
trait LocalTimeInstances {
  implicit val localTimeOrder = Auto.order[LocalTime]
}
/** Order, abelian group and Int-module instances for Joda-Time `Minutes`. */
trait MinutesInstances {
  implicit val minutesOrder = Auto.order[Minutes]
  implicit val minutesAbGroup = Auto.abGroup[Minutes](Minutes.ZERO)
  implicit val minutesModuleInt = Auto.module[Minutes](Minutes.ZERO)
}
/** Order, abelian group and Int-module instances for Joda-Time `Months`. */
trait MonthsInstances {
  implicit val monthsOrder = Auto.order[Months]
  implicit val monthsAbGroup = Auto.abGroup[Months](Months.ZERO)
  implicit val monthsModuleInt = Auto.module[Months](Months.ZERO)
}
/** Order, abelian group and Int-module instances for Joda-Time `Seconds`. */
trait SecondsInstances {
  implicit val secondsOrder = Auto.order[Seconds]
  implicit val secondsAbGroup = Auto.abGroup[Seconds](Seconds.ZERO)
  implicit val secondsModuleInt = Auto.module[Seconds](Seconds.ZERO)
}
/** Order, abelian group and Int-module instances for Joda-Time `Weeks`. */
trait WeeksInstances {
  implicit val weeksOrder = Auto.order[Weeks]
  implicit val weeksAbGroup = Auto.abGroup[Weeks](Weeks.ZERO)
  implicit val weeksModuleInt = Auto.module[Weeks](Weeks.ZERO)
}
/** Order, abelian group and Int-module instances for Joda-Time `Years`. */
trait YearsInstances {
  implicit val yearsOrder = Auto.order[Years]
  implicit val yearsAbGroup = Auto.abGroup[Years](Years.ZERO)
  implicit val yearsModuleInt = Auto.module[Years](Years.ZERO)
}
| non/spire-time | core/src/main/scala/spire/time/joda/package.scala | Scala | mit | 4,653 |
package lmxml
package markdown
package test
import org.scalatest.FlatSpec
import org.scalatest.matchers.ShouldMatchers
/**
 * Verifies that the lmxml parser mixed with MarkdownParsing turns `md`-fenced
 * markdown blocks into raw-HTML text nodes, both at the parse-tree level and
 * through the full convert pipeline.
 */
class MarkdownTest extends FlatSpec with ShouldMatchers {
  // Parser with 2-space indentation increments, extended with markdown support.
  val parser = new PlainLmxmlParser(2) with MarkdownParsing
  // Lmxml source embedding a markdown block delimited by ``` fences.
  val source = """html
  head title "test"
  body
    md ```
I'm talking with _emphasis_!
Did you __hear__ me!?
- Work
- Fun
- Play
I want:
code
talk
```
"""
  // HTML expected from rendering the markdown block above.
  val expectedXml = """<p>I'm talking with <em>emphasis</em>!
</p><p>Did you <strong>hear</strong> me!?
</p><ul><li>Work
</li><li>Fun
</li><li>Play
</li></ul><p>I want:
</p><pre><code>code
talk
</code></pre>"""
  // The md block must become a single pre-rendered TextNode (unescaped = true).
  "Markdown mixin" should "be able to parse mardown nodes" in {
    val expected = Seq(
      LmxmlNode("html", children = Seq(
        LmxmlNode("head", children = Seq(
          LmxmlNode("title", children = Seq(TextNode("test")))
        )),
        LmxmlNode("body", children = Seq(
          TextNode(expectedXml.toString, true, Nil)
        ))
      ))
    )
    parser.parseNodes(source) should be === expected
  }
  // End-to-end: MarkdownConvert then XmlConvert yields the final HTML string.
  "Markdown convert" should "convert markdown upon conversion" in {
    val format = MarkdownConvert andThen XmlConvert andThen (_.toString)
    val header = "<head><title>test</title></head>"
    val expected = """<html>%s<body>%s</body></html>""".format(header, expectedXml)
    DefaultLmxmlParser.fullParse(source)(format) should be === expected
  }
}
| philcali/lmxml | markdown/src/test/scala/markdown.scala | Scala | mit | 1,412 |
/*
* Copyright 2015 Webtrends (http://www.webtrends.com)
*
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.webtrends.harness.component.zookeeper
import java.util.UUID
import akka.actor._
import akka.pattern.ask
import akka.testkit.TestKit
import akka.util.Timeout
import com.typesafe.config.{Config, ConfigFactory}
import com.webtrends.harness.component.zookeeper.ZookeeperActor.GetSetWeightInterval
import com.webtrends.harness.component.zookeeper.discoverable.DiscoverableService.{MakeDiscoverable, QueryForInstances, UpdateWeight}
import com.webtrends.harness.component.zookeeper.mock.MockZookeeper
import org.apache.curator.test.TestingServer
import org.apache.curator.x.discovery.{ServiceInstance, UriSpec}
import org.specs2.mutable.SpecificationWithJUnit
import scala.concurrent.Await
import scala.concurrent.duration._
/**
 * Integration spec for the wookiee-zookeeper service against an in-process
 * Curator TestingServer. Tests run sequentially because they share one
 * ZooKeeper namespace and one actor system.
 */
class ZookeeperServiceSpec
  extends SpecificationWithJUnit {

  // In-process ZooKeeper server; shut down in the final `step` below.
  val zkServer = new TestingServer()
  implicit val system = ActorSystem("test", loadConfig)

  val service = MockZookeeper(zkServer.getConnectString)
  val zkActor = ZookeeperService.getZkActor.get
  implicit val to = Timeout(5 seconds)
  val awaitResultTimeout = 5000 milliseconds

  sequential

  "The zookeeper service" should {

    "allow callers to create a node for a valid path" in {
      val res = Await.result(service.createNode("/test", ephemeral = false, Some("data".getBytes)), awaitResultTimeout)
      res shouldEqual "/test"
    }

    "allow callers to create a node for a valid namespace and path" in {
      val res = Await.result(service.createNode("/namespacetest", ephemeral = false, Some("namespacedata".getBytes), Some("space")), awaitResultTimeout)
      res shouldEqual "/namespacetest"
    }

    "allow callers to delete a node for a valid path" in {
      val res = Await.result(service.createNode("/deleteTest", ephemeral = false, Some("data".getBytes)), awaitResultTimeout)
      res shouldEqual "/deleteTest"
      val res2 = Await.result(service.deleteNode("/deleteTest"), awaitResultTimeout)
      res2 shouldEqual "/deleteTest"
    }

    "allow callers to delete a node for a valid namespace and path " in {
      val res = Await.result(service.createNode("/deleteTest", ephemeral = false, Some("data".getBytes), Some("space")), awaitResultTimeout)
      res shouldEqual "/deleteTest"
      val res2 = Await.result(service.deleteNode("/deleteTest", Some("space")), awaitResultTimeout)
      res2 shouldEqual "/deleteTest"
    }

    // Relies on the nodes created by the create-node tests above (sequential).
    "allow callers to get data for a valid path " in {
      val res = Await.result(service.getData("/test"), awaitResultTimeout)
      new String(res) shouldEqual "data"
    }

    "allow callers to get data for a valid namespace and path " in {
      val res = Await.result(service.getData("/namespacetest", Some("space")), awaitResultTimeout)
      new String(res) shouldEqual "namespacedata"
    }

    " allow callers to get data for a valid path with a namespace" in {
      val res = Await.result(service.getData("/namespacetest", Some("space")), awaitResultTimeout)
      new String(res) shouldEqual "namespacedata"
    }

    " return an error when getting data for an invalid path " in {
      Await.result(service.getData("/testbad"), awaitResultTimeout) must throwA[Exception]
    }

    " allow callers to get children with no data for a valid path " in {
      Await.result(service.createNode("/test/child", ephemeral = false, None), awaitResultTimeout)
      val res2 = Await.result(service.getChildren("/test"), awaitResultTimeout)
      res2.head._1 shouldEqual "child"
      res2.head._2 shouldEqual None
    }

    " allow callers to get children with data for a valid path " in {
      Await.result(service.setData("/test/child", "data".getBytes), awaitResultTimeout)
      val res2 = Await.result(service.getChildren("/test", includeData = true), awaitResultTimeout)
      res2.head._1 shouldEqual "child"
      res2.head._2.get shouldEqual "data".getBytes
    }

    " return an error when getting children for an invalid path " in {
      Await.result(service.getChildren("/testbad"), awaitResultTimeout) must throwA[Exception]
    }

    "allow callers to discover commands " in {
      val res = Await.result(zkActor ? MakeDiscoverable("base/path", "id", "testname", None, 8080, new UriSpec("file://foo")), awaitResultTimeout)
      res.asInstanceOf[Boolean] mustEqual true
    }

    "have default weight set to 0" in {
      val basePath = "base/path"
      // Fresh id/name per test to avoid cross-test collisions in the registry.
      val id = UUID.randomUUID().toString
      val name = UUID.randomUUID().toString

      Await.result(zkActor ? MakeDiscoverable(basePath, id, name, None, 8080, new UriSpec("file://foo")), awaitResultTimeout)
      val res2 = Await.result(zkActor ? QueryForInstances(basePath, name, Some(id)), awaitResultTimeout)
      res2.asInstanceOf[ServiceInstance[WookieeServiceDetails]].getPayload.getWeight mustEqual 0
    }

    // With forceSet = false the weight is flushed on the configured interval
    // (2s here), hence the `eventually` retry below.
    "update weight " in {
      val basePath = "base/path"
      val id = UUID.randomUUID().toString
      val name = UUID.randomUUID().toString

      Await.result(zkActor ? MakeDiscoverable(basePath, id, name, None, 8080, new UriSpec("file://foo")), awaitResultTimeout)
      Await.result(zkActor ? UpdateWeight(100, basePath, name, id, forceSet = false), awaitResultTimeout)

      def result = {
        val r = Await.result(zkActor ? QueryForInstances(basePath, name, Some(id)), awaitResultTimeout)
        r.asInstanceOf[ServiceInstance[WookieeServiceDetails]]
      }

      result.getPayload.getWeight must be_==(100).eventually(2, 6 seconds)
    }

    "update weight in zookeeper right away if forceSet is true" in {
      val basePath = "base/path"
      val id = UUID.randomUUID().toString
      val name = UUID.randomUUID().toString

      Await.result(zkActor ? MakeDiscoverable(basePath, id, name, None, 8080, new UriSpec("file://foo")), awaitResultTimeout)
      Await.result(zkActor ? UpdateWeight(100, basePath, name, id, forceSet = true), awaitResultTimeout)
      val res = Await.result(zkActor ? QueryForInstances(basePath, name, Some(id)), awaitResultTimeout).asInstanceOf[ServiceInstance[WookieeServiceDetails]]

      res.getPayload.getWeight mustEqual 100
    }

    "not update weight in zookeeper right away if forceSet is false" in {
      val basePath = "base/path"
      val id = UUID.randomUUID().toString
      val name = UUID.randomUUID().toString

      Await.result(zkActor ? MakeDiscoverable(basePath, id, name, None, 8080, new UriSpec("file://foo")), awaitResultTimeout)
      Await.result(zkActor ? UpdateWeight(100, basePath, name, id, forceSet = false), awaitResultTimeout)
      val res = Await.result(zkActor ? QueryForInstances(basePath, name, Some(id)), awaitResultTimeout).asInstanceOf[ServiceInstance[WookieeServiceDetails]]

      res.getPayload.getWeight mustEqual 0
    }

    // Sleeps past one set-weight-interval (2s) so the async flush has run.
    "update weight on a set interval " in {
      val basePath = "base/path"
      val id = UUID.randomUUID().toString
      val name = UUID.randomUUID().toString

      Await.result(zkActor ? MakeDiscoverable(basePath, id, name, None, 8080, new UriSpec("file://foo")), awaitResultTimeout)
      Await.result(zkActor ? UpdateWeight(100, basePath, name, id, forceSet = false), awaitResultTimeout)
      Thread.sleep(3000)
      val res = Await.result(zkActor ? QueryForInstances(basePath, name, Some(id)), awaitResultTimeout).asInstanceOf[ServiceInstance[WookieeServiceDetails]]
      res.getPayload.getWeight mustEqual 100
    }

    "use set weight interval defined in config" in {
      Await.result(zkActor ? GetSetWeightInterval(), 3 second).asInstanceOf[Long] mustEqual 2
    }
  }

  // Tear down the actor system and the embedded ZooKeeper after all tests.
  step {
    TestKit.shutdownActorSystem(system)
    zkServer.close()
  }

  // Test config pointing the component at the embedded server, with the
  // discoverability flush interval shortened to 2 seconds.
  def loadConfig: Config = {
    ConfigFactory.parseString("""
      discoverability {
        set-weight-interval = 2s
      }
      wookiee-zookeeper {
        quorum = "%s"
      }
      """.format(zkServer.getConnectString)
    ).withFallback(ConfigFactory.load()).resolve
  }

}
| Webtrends/wookiee-zookeeper | src/test/scala/com/webtrends/harness/component/zookeeper/ZookeeperServiceSpec.scala | Scala | apache-2.0 | 8,604 |
/*
Copyright 2013 Ilya Lakhin (Илья Александрович Лахин)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package name.lakhin.eliah.projects
package papacarlo.test.utils
import scala.io.Source
import java.io.{FileWriter, File}
import net.liftweb.json.{NoTypeHints, Serialization}
/**
 * Reads test fixture files and (when running from a plain file-system
 * classpath) writes expected-output files back.
 *
 * @param inputBase  classpath-relative base directory fixtures are read from
 * @param outputBase classpath-relative base directory updates are written to
 */
final class Resources(
  inputBase: String = Resources.DefaultResourceBase,
  outputBase: String = Resources.DefaultResourceBase
) {
  private implicit val formats = Serialization.formats(NoTypeHints)

  /** True when the named fixture exists on the classpath under `inputBase`. */
  def exist(category: String, name: String) =
    getClass.getResource(inputBase + fileName(category, name)) != null

  /**
   * Reads the whole fixture as a string, or "" when the file is missing.
   * Fixed: no `return` inside try, and the source is now closed even when
   * reading fails half-way (previously it leaked on a mid-read exception).
   */
  def input(category: String, name: String): String = {
    val filePath = "src/test/resources" + inputBase + fileName(category, name)
    try {
      val textSource = scala.io.Source.fromFile(filePath)
      try textSource.mkString
      finally textSource.close()
    } catch {
      case _: java.io.FileNotFoundException => "" // missing fixture means empty input
    }
  }

  /**
   * Writes `content` to the output resource, creating parent directories and
   * the file as needed. Best-effort by design: silently skipped when the
   * resources are not on a writable file system (e.g. packaged in a jar).
   */
  def update(category: String, name: String, content: String) {
    try {
      val resource = getClass.getResource(outputBase)

      if (resource.getProtocol == "file") {
        val file = new File(resource.getPath + fileName(category, name))

        if (!file.exists()) {
          val parent = file.getParentFile
          if (!parent.exists()) parent.mkdirs()
          file.createNewFile()
        }

        val writer = new FileWriter(file, false)
        // Close even if write fails, so the handle never leaks.
        try writer.write(content)
        finally writer.close()
      }
    } catch {
      case _: RuntimeException => // best-effort: keep tests running on read-only setups
    }
  }

  /** Deserializes a JSON fixture into `A` using lift-json. */
  def json[A](category: String, name: String)
             (implicit mf : scala.reflect.Manifest[A]) =
    Serialization.read[A](input(category, name))

  private def fileName(category: String, name: String) = category + "/" + name
}
object Resources {
  // Default classpath directory for fixtures and expected outputs.
  val DefaultResourceBase = "/fixtures/"
}
| Eliah-Lakhin/papa-carlo | src/test/scala/name.lakhin.eliah.projects/papacarlo/test/utils/Resources.scala | Scala | apache-2.0 | 2,370 |
/**
* _____ _ _ _____ _
* | __| |_ ___|_|___ ___| __|___| |_ _ ___ ___
* |__ | _| _| | | . |__ | . | | | | -_| _|
* |_____|_| |_| |_|_|_|_ |_____|___|_|\\_/|___|_|
* |___|
*
* File: ProgramSet.scala
* Author: Mikaël Mayer
* Date: 27.11.2013
* Purpose:Represent multiple programs in a single structure.
*/
package ch.epfl.lara.synthesis.stringsolver
import scala.collection.GenTraversableOnce
import scala.collection.mutable.PriorityQueue
import scala.collection.immutable.BitSet
import scala.collection.mutable.Queue
import scala.collection.mutable.ListBuffer
object ProgramSet {
import Program._
import scala.language._
import SubStrFlag._
import Weights._
/**
* Returns true if the identifier is used in this program.
*/
def uses(s: Any, w: Identifier): Boolean = s match {
case i: Identifier => i == w
case p: Product => p.productIterator.toList exists { case arg => uses(arg, w) }
case s: Set[_] => s exists { case arg => uses(arg, w) }
case _ => false
}
/**
* Implicit helpers.
*/
  // Cartesian product of two sets: `s x t` yields every (a, b) pair.
  implicit class addCrossProduct[N](s: Set[N]) {
    def x[M](t: Set[M]): Set[(N, M)] = for { x <- s; y <- t } yield (x, y)
  }
  // Maps each element to w(element), dropping SEmpty members from each value
  // set and dropping entries whose value set ends up empty.
  implicit class addMappingTo[T](t: Set[T]) {
    def ==>[A](w: T => Set[A]): Map[T, Set[A]] = (t.toList map { case el => el -> w(el).filterNot(_ == SEmpty)}).filterNot{case (key, value) => value.isEmpty}.toMap
  }
  // Lazily enumerates every way of picking one concrete program from each
  // program set in the list (cartesian product, preserving order).
  implicit def combinations[T <: Program](s: List[ProgramSet[T]]): Stream[List[T]] = {
    def rec(l: List[ProgramSet[T]], res: List[T]): Stream[List[T]] = l match {
      case Nil => Stream(res.reverse)
      case a::q => a flatMap { (prog: T) => rec(q, prog::res) }
    }
    rec(s, Nil)
  }
  // Tuple projections usable as function values (e.g. `sortBy(_1)`).
  def _2[A, B](t: Tuple2[A, B]) = t._2
  def _1[A, B](t: Tuple2[A, B]) = t._1
/**
* Set of programs described in Programs.scala
*/
sealed trait ProgramSet[+A <: Program] extends Traversable[A] { self: Product =>
def foreach[T](f: A => T): Unit
def map[T](f: A => T): Stream[T]
def flatMap[T](f: A => GenTraversableOnce[T]) = map(f).flatten
private var cacheBest: Option[Any] = None
def takeBest: A = { if(cacheBest.isEmpty) cacheBest = Some(takeBestRaw); cacheBest.get.asInstanceOf[A]}
private var cacheNBest: Map[Int, Seq[(Int, Any)]] = Map()
def takeNBest(n: Int): Seq[(Int, A)] = { if(cacheNBest.isEmpty) cacheNBest += n-> takeNBestRaw(n: Int); cacheNBest(n).asInstanceOf[Seq[(Int, A)]]}
//def takeBestUsing(w: Identifier): A = takeBest
def takeBestRaw: A
def takeNBestRaw(n: Int): Seq[(Int, A)]
override def isEmpty: Boolean = this == SEmpty || ProgramSet.sizePrograms(this) == 0
def sizePrograms = ProgramSet.sizePrograms(this)
override def toIterable: Iterable[A] = map((i: A) =>i)
override def toString = this.getClass().getName().replaceAll(".*\\\\$","")+"("+self.productIterator.mkString(",")+")"
var weightMalus = 0
def examplePosition = 0 // Set only in SDag
}
def weighted[A <: Program](p: A): (Int, A) = (weight(p), p)
/**
* Set of switch expressions described in Programs.scala
*/
  // Set of Switch programs: each guard keeps its condition, the body is drawn
  // from the corresponding trace-expression set.
  case class SSwitch(s: List[(Bool, STraceExpr)]) extends ProgramSet[Switch] {
    def map[T](f: Switch => T): Stream[T] = {
      for(t <- combinations(s map _2)) yield f(Switch(s map _1 zip t))
    }
    def foreach[T](f: Switch =>T): Unit = {
      for(t <- combinations(s map _2)) f(Switch(s map _1 zip t))
    }
    def takeBestRaw = Switch((s map _1) zip ((s map _2) map (_.takeBest)))
    // n-best: cartesian product of each branch's n-best, ranked by summed score.
    def takeNBestRaw(n: Int) = StreamUtils.cartesianProduct(s.map(_2).map(_.takeNBestRaw(n).toStream)).map{ x=>
      val (scores, progs) = x.unzip
      (scores.sum, Switch(s map _1 zip progs))
    }.sortBy(_1).take(n)
    //override def takeBestUsing(w: Identifier) = Switch((s map _1) zip ((s map _2) map (_.takeBestUsing(w))))
  }
/**
* Set of concatenate expressions described in Programs.scala
*/
  type STraceExpr = ProgramSet[TraceExpr]
  /**
   * DAG-shaped set of Concatenate programs: nodes are string positions, each
   * edge carries the atomic-expression sets that produce the substring between
   * its endpoints. A path from ns to nt is one Concatenate program.
   * `takeBest`/`takeNBest` run a Dijkstra-style search maximizing weight.
   */
  case class SDag[Node](ñ: Set[Node], ns: Node, nt: Node, ξ: Set[(Node, Node)], W: Map[(Node, Node), Set[SAtomicExpr]]) extends STraceExpr {
    def foreach[T](f: TraceExpr => T): Unit = {
      // Depth-first enumeration of every ns→nt path, one program per path.
      def rec(from: Node, path: List[AtomicExpr]): Unit = {
        if(from == nt) f(Concatenate(path.reverse)) else
        for(edge <- ξ; (n1, n2) = edge; if n1 == from; sa <- W(edge); a <- sa) {
          rec(n2, a::path)
        }
      }
      rec(ns, Nil)
    }
    def map[T](f: TraceExpr => T): Stream[T] = {
      // Lazy variant of foreach: paths are produced on demand via Stream.
      def rec(from: Node, path: List[AtomicExpr]): Stream[List[AtomicExpr]] = {
        if(from == nt) Stream(path.reverse) else
        for(edge <- ξ.toStream; (n1, n2) = edge; if n1 == from; sa <- W(edge); a: AtomicExpr <- sa; p <- rec(n2, a::path)) yield p
      }
      rec(ns, Nil).map(e => f(Concatenate(e)))
    }
    // For each outgoing edge of n: (accumulated weight, best atomic expr, target).
    def neighbors(n: Node, n_weight: Int): Set[(Int, AtomicExpr, Node)] = {
      for(e <- ξ if e._1 == n;
        versions = W.getOrElse(e, Set.empty);
        atomic <- versions.map(_.takeBest).toList.sortBy(w => weight(w)).headOption) yield {
        (-weight(atomic) + n_weight, atomic, e._2)
      }
    }
    // Best-first search from ns to nt (weights negated so the priority queue
    // surfaces the lightest program first).
    def takeBestRaw = {
      var minProg = Map[Node, List[AtomicExpr]]()
      var nodesToVisit = new PriorityQueue[(Int, List[AtomicExpr], Node)]()(Ordering.by[(Int, List[AtomicExpr], Node), Int](e => e._1))
      nodesToVisit.enqueue((0, Nil, ns))
      while(!(minProg contains nt) && !nodesToVisit.isEmpty) {
        val (weight, path, node) = nodesToVisit.dequeue() // Takes the first node with the minimal path.
        minProg += node -> path
        for(e@(newweight, newAtomic, newNode) <- neighbors(node, weight)) {
          nodesToVisit.find{ case (w, p, n) => n == newNode } match {
            case Some((w, p, n)) => // New node already in nodes to visit.
              if(newweight > w && !(minProg contains newNode)) {
                nodesToVisit = nodesToVisit.filterNot{case (w, p, n) => n == newNode}
                nodesToVisit.enqueue((newweight, path.asInstanceOf[List[AtomicExpr]] ++ List(newAtomic).asInstanceOf[List[AtomicExpr]], newNode))
              } // Else we do nothing.
            case None =>
              nodesToVisit.enqueue((newweight, path.asInstanceOf[List[AtomicExpr]] ++ List(newAtomic).asInstanceOf[List[AtomicExpr]], newNode))
          }
        }
      }
      Concatenate(minProg(nt))//TODO : alternative.
    }
    // n-best variant of `neighbors`: up to `quantity` ranked candidates for the
    // first outgoing edge of n (NOTE(review): collectFirst inspects only one
    // edge — confirm whether multiple out-edges should be considered here).
    def Nneighbors(quantity: Int, n: Node, prev_weight: Int): Option[(Seq[(Int, AtomicExpr)], Node)] = {
      ξ.collectFirst[(Node, Node)]{ case e@(start, end) if start == n => e } map { e =>
        val versions = W.getOrElse(e, Set.empty)
        val possibilities = for(atomic <- versions.flatMap(_.takeNBest(quantity)).toList.sortBy(_1).take(quantity)) yield {
          (-atomic._1 + prev_weight, atomic._2)
        }
        (possibilities, e._2)
      }
    }
    // n-best search: like takeBestRaw but keeps up to `quantity` paths per node.
    def takeNBestRaw(quantity: Int): Seq[(Int, TraceExpr)] = {
      var minProg = Map[Node, Seq[(Int, List[AtomicExpr])]]()
      var nodesToVisit = new PriorityQueue[((Int, List[AtomicExpr]), Node)]()(Ordering.by[((Int, List[AtomicExpr]), Node), Int](e => e._1._1))
      nodesToVisit.enqueue(((0, Nil), ns))
      while(!(minProg.getOrElse(nt, Nil).length >= quantity) && !nodesToVisit.isEmpty) {
        val ((weight, path), node) = nodesToVisit.dequeue() // Takes the first node with the minimal path.
        minProg += node -> (((weight, path)) +: minProg.getOrElse(node, Nil))
        for(e@(newWeightsAtomics, newNode) <- Nneighbors(quantity, node, weight)) {
          // (newweight, newAtomic
          for((newweight, newAtomic) <- newWeightsAtomics)
          {
            val alreadyLookingFor = nodesToVisit.toStream.filter{ case ((w, p), n) => n == newNode }
            //val shouldBeAdded = alreadyLookingFor.lengthCompare(quantity) < 0 || newweight > alreadyLookingFor(quantity - 1)._1._1
            //if(shouldBeAdded) { // We keep only the best quantity.
              nodesToVisit.enqueue(((newweight, path ++ List[AtomicExpr](newAtomic)), newNode))
              var i = 0
              nodesToVisit = nodesToVisit.filterNot {
                i += 1
                _._2 == newNode && i >= quantity // We still keep the first quantity best, we remove the rest.
              }
            //}
          }
        }
      }
      minProg(nt).map(x => (x._1, Concatenate(x._2)))
    }
    /*def neighborsUsing(n: Node, n_weight: Int, w: Identifier): Set[(Int, AtomicExpr, Node)] = {
      for(e <- ξ if e._1 == n;
        versions = W.getOrElse(e, Set.empty);
        atomic <- versions.map(_.takeBest).toList.sortBy(w => weight(w)).headOption) yield {
        (-weight(atomic) + n_weight, atomic, e._2)
      }
    }
    override def takeBestUsing(w: Identifier) = {
      var minProg = Map[Node, List[AtomicExpr]]()
      var weights = Map[Node, Int]()
      var nodesToVisit = new PriorityQueue[(Int, List[AtomicExpr], Node)]()(Ordering.by[(Int, List[AtomicExpr], Node), Int](e => e._1))
      nodesToVisit.enqueue((0, Nil, ns))
      while(!(minProg contains nt) && !nodesToVisit.isEmpty) {
        val (weight, path, node) = nodesToVisit.dequeue() // Takes the first node with the minimal path.
        minProg += node -> path
        for(e@(newweight, newAtomic, newNode) <- neighbors(node, weight)) {
          nodesToVisit.find{ case (w, p, n) => n == newNode } match {
            case Some((w, p, n)) => // New node already in nodes to visit.
              if(newweight > w && !(minProg contains newNode)) {
                nodesToVisit = nodesToVisit.filterNot{case (w, p, n) => n == newNode}
                nodesToVisit.enqueue((newweight, path.asInstanceOf[List[AtomicExpr]] ++ List(newAtomic).asInstanceOf[List[AtomicExpr]], newNode))
              } // Else we do nothing.
            case None =>
              nodesToVisit.enqueue((newweight, path.asInstanceOf[List[AtomicExpr]] ++ List(newAtomic).asInstanceOf[List[AtomicExpr]], newNode))
          }
        }
      }
      Concatenate(minProg(nt))//TODO : alternative.
    }*/
    // Renumbers nodes to Ints and prunes nodes unreachable from ns or that
    // cannot reach nt, shrinking the DAG without changing its language.
    def reduce: SDag[Int] = {
      val nodeMapping = ñ.toList.sortBy({ case (a: Int,b: Int) => a+b case _ => 1 }).zipWithIndex.toMap
      var ñ2 = nodeMapping.values.toSet
      val ns2 = nodeMapping(ns)
      val nt2 = nodeMapping(nt)
      var ξ2 = ξ map { case (n, m) => (nodeMapping.getOrElse(n, -1), nodeMapping.getOrElse(m, -1))} filterNot {
        case (e1, e2) => e1 == -1 || e2 == -1
      }
      var finished = false
      while(!finished) { // Remove non reachable nodes
        finished = true
        val uselessNodes = ñ2 filter { n =>
          n != ns2 && !(ξ2 exists { case (n1, n2) => n2 == n}) ||
          n != nt2 && !(ξ2 exists { case (n1, n2) => n1 == n})
        }
        if(!uselessNodes.isEmpty) {
          ñ2 = ñ2 -- uselessNodes
          ξ2 = ξ2 filterNot { case (n1, n2) => (uselessNodes contains n1) || (uselessNodes contains n2) }
          finished = false
        }
      }
      val W2 = for(((e1, e2), v) <- W;
          edge = (nodeMapping.getOrElse(e1, -1), nodeMapping.getOrElse(e2, -1));
          if ξ2 contains edge)
        yield (edge -> v)
      SDag(ñ2, ns2, nt2, ξ2, W2)
    }
    // Index of the input/output example this DAG was built from.
    var mIndex = 0
    override def examplePosition = mIndex
    def examplePosition_=(i: Int) = mIndex = i
    def setIndex(i: Int): this.type = { examplePosition = i; this }
  }
  type SAtomicExpr = ProgramSet[AtomicExpr]
  /**
   * Set of Loop expressions described in Programs.scala
   * Every program loops identifier `i` over a body drawn from `e`, optionally
   * joining iterations with `separator`.
   */
  case class SLoop(i: Identifier, e: STraceExpr, separator: Option[ConstStr]) extends SAtomicExpr {
    def map[T](f: AtomicExpr => T): Stream[T] = {
      for(prog: TraceExpr <- e.toStream) yield f(Loop(i, prog, separator))
    }
    def foreach[T](f: AtomicExpr => T): Unit = {
      for(prog <- e) f(Loop(i, prog, separator))
    }
    def takeBestRaw = Loop(i, e.takeBest, separator)//.withAlternative(this.toIterable)
    override def takeNBestRaw(n: Int): Seq[(Int, AtomicExpr)] = {
      e.takeNBest(n).map(x => weighted(Loop(i, x._2, separator)))
    }
  }
/**
* Set of SubStr expressions described in Programs.scala
*/
  // Set of SubStr programs: a start-position set, an end-position set, and the
  // allowed extraction flags for string variable `vi`.
  case class SSubStr(vi: StringVariable, p1: Set[SPosition], p2: Set[SPosition], methods: SSubStrFlag) extends SAtomicExpr {
    def map[T](f: AtomicExpr => T): Stream[T] = {
      for(pp1 <- p1.toStream; ppp1: Position <- pp1; pp2 <- p2; ppp2: Position <- pp2; method: SubStrFlag <- methods) yield f(SubStr(vi, ppp1, ppp2, method))
    }
    def foreach[T](f: AtomicExpr => T): Unit = {
      for(pp1 <- p1; ppp1 <- pp1; pp2 <- p2; ppp2 <- pp2; method <- methods) f(SubStr(vi, ppp1, ppp2, method))
    }
    // Start positions are weighted as left anchors (true), end positions as
    // right anchors (false); the set's weightMalus penalizes the end position.
    def takeBestRaw = SubStr(vi, p1.toList.map(_.takeBest).sortBy(weight(_)(true)).head, p2.toList.map(_.takeBest).sortBy(weight(_)(false)).head.withWeightMalus(this.weightMalus), methods.takeBest)//.withAlternative(this.toIterable)
    // Book-keeping only: remembers which concrete substring produced this set.
    private var corresponding_string: (String, String, Int, Int) = ("", "", 0, -1)
    def setPos(from: String, s: String, start: Int, end: Int) = corresponding_string = (from, s, start, end)
    override def takeNBestRaw(n: Int): Seq[(Int, AtomicExpr)] = {
      val left = p1.flatMap(_.takeNBest(n)).toSeq.sortBy(_._1).take(n)
      val right = p2.flatMap(_.takeNBest(n)).toSeq.sortBy(_._1).take(n)
      val method = methods.takeNBest(n)
      StreamUtils.cartesianProduct(Seq(left.toStream, right.toStream, method.toStream)).take(n).map{
        case Seq((leftScore, l), (rightScore, r), (mScore, m)) => weighted(SubStr(vi, l.asInstanceOf[Position], r.asInstanceOf[Position], m.asInstanceOf[SubStrFlag]))
      } take n
    }
  }
  /**
   * True when `s` is one of the hard-coded separator strings commonly found
   * between fields (comma, slash, dash, dot, tab, pipe, underscore, etc.).
   * NOTE(review): some alternatives look like duplicates (", ", "; ", " ");
   * they may actually differ in whitespace (e.g. non-breaking space) — verify
   * against the original bytes before deduplicating.
   */
  def isCommonSeparator(s: String) = s match {
    case "," | "/" | " " | "-"| "#" | ";" | ", " | "; " | "\t" | " " | ". " | "." | ":" | "|" | "_" | ", " | "; " => true
    case _ => false
  }
/**
* Applies special conversion to a string
*/
  // Set of SpecialConversion programs: a substring-set post-processed by one
  // of the candidate converters.
  case class SSpecialConversion(s: SSubStr, converters: Set[SpecialConverter]) extends SAtomicExpr {
    def map[T](f: AtomicExpr => T): Stream[T] = {
      for(ss <- s.toStream; converter <- converters) yield f(SpecialConversion(ss.asInstanceOf[SubStr], converter))
    }
    def foreach[T](f: AtomicExpr => T): Unit = {
      for(ss <- s.toStream; converter <- converters) f(SpecialConversion(ss.asInstanceOf[SubStr], converter))
    }
    def takeBestRaw = SpecialConversion(s.takeBest.asInstanceOf[SubStr], converters.toList.sortBy(weight(_)(true)).head)
    // Book-keeping only: remembers which concrete substring produced this set.
    private var corresponding_string: (String, String, Int, Int) = ("", "", 0, -1)
    def setPos(from: String, s: String, start: Int, end: Int) = corresponding_string = (from, s, start, end)
    override def takeNBestRaw(n: Int): Seq[(Int, AtomicExpr)] = {
      val sbest = s.takeNBest(n).toStream
      val converterBest = converters.toList.map(x => (-weight(x)(true), x)).sortBy(_1).take(n).toStream
      StreamUtils.cartesianProduct(Seq(sbest, converterBest)).take(n).map {
        case Seq((sScore, s), (convScore, converter)) => weighted(SpecialConversion(s.asInstanceOf[SubStr], converter.asInstanceOf[SpecialConverter]))
      }
    }
  }
/**
* Sets of integers for number decomposition.
* The best one is the greatest one in this implementation
*/
  type SInt = ProgramSet[IntLiteral]
  // Semi-linear set of integers {start, start+step, ..., <= max}; with step 0
  // it represents the singleton {start} (empty when start > max).
  case class SIntSemiLinearSet(start: Int, step: Int, max: Int) extends SInt {
    def map[T](f: IntLiteral => T): Stream[T] = {
      if(step > 0)
        for(i <- start to max by step toStream) yield f(i)
      else if(start <= max)
        Stream(f(start))
      else Stream.empty
    }
    def foreach[T](f: IntLiteral => T): Unit = {
      // NOTE(review): the non-positive-step branches build a Stream purely for
      // the side effect of evaluating f(start) — works, but only by accident
      // of Stream.apply being strict in its argument.
      if(step > 0)
        for(i <- start to max by step toStream) f(i)
      else if(start <= max)
        Stream(f(start))
      else Stream.empty
    }
    // Best element is the largest representable one.
    def takeBestRaw = if(step == 0) IntLiteral(start) else IntLiteral(start+step*((max-start)/step))
    // Membership test for the semi-linear set.
    def apply(elem: Int): Boolean = elem >= start && elem <= max && (step == 0 && start == elem || step != 0 && (elem-start)%step == 0)
    override def takeNBestRaw(n: Int): Seq[(Int, IntLiteral)] = {
      if(step == 0) Seq((0, IntLiteral(start))) else {
        (start to max by step).reverse.zipWithIndex.map{ x => weighted(IntLiteral(x._1))} take n
      }
    }
  }
/*case class SAnyInt(default: Int) extends SInt {
def map[T](f: IntLiteral => T): Stream[T] = {
Stream(f(default))
}
def foreach[T](f: IntLiteral => T): Unit = {f(default)}
def takeBestRaw = IntLiteral(default)
}*/
/**
* Used to match any program on this string variable
* Useful to intersect with working sub-expressions.
*/
/*case class SAny(vi: PrevStringNumber) extends SAtomicExpr {
def map[T](f: AtomicExpr => T): Stream[T] = {
Stream(f(SubStr2(vi, NumTok, 1)))
}
def foreach[T](f: AtomicExpr => T): Unit = {
f(SubStr2(vi, NumTok, 1))
}
def takeBestRaw = SubStr2(vi, NumTok, 1)
}*/
/**
* Set of SubStr expressions described in Programs.scala
*/
  // Set of NumberMap programs: formats the number extracted by `a` with one of
  // the candidate padding lengths, shifted by `offset`.
  case class SNumber(a: SAtomicExpr, length: SInt, offset: Int) extends SAtomicExpr {
    def map[T](f: AtomicExpr => T): Stream[T] = {
      for(pp1: AtomicExpr <- a.toStream; l: IntLiteral <- length) yield f(NumberMap(pp1.asInstanceOf[SubStr], l.k, offset))
    }
    def foreach[T](f: AtomicExpr => T): Unit = {
      for(pp1: AtomicExpr <- a; l <- length) f(NumberMap(pp1.asInstanceOf[SubStr], l.k, offset))
    }
    def takeBestRaw = NumberMap(a.takeBest.asInstanceOf[SubStr], length.takeBest.k, offset)//.withAlternative(this.toIterable)
    override def takeNBestRaw(n: Int): Seq[(Int, AtomicExpr)] = {
      StreamUtils.cartesianProduct(Seq(a.takeNBest(n).toStream, length.takeNBest(n).toStream)).take(n) map {
        case Seq((aScore, a), (lengthScore, length)) => weighted(NumberMap(a.asInstanceOf[SubStr], length.asInstanceOf[IntLiteral].k, offset))
      }
    }
  }
/**
* Creates a counter set from a number and its position
*/
  object SCounter {
    // Builds the counter set consistent with observing string `number` as the
    // counter value at example index `position`.
    def fromExample(number: String, position: Int): SCounter = {
      val numberValue = number.toInt
      val possibleLengths = (if(number(0) != '0' && position != 0) {// It means that the generated length might be lower.
        // Except if the position is the first one, because counters are increasing.
        SIntSemiLinearSet(1, 1, number.length)
      } else SIntSemiLinearSet(number.length, 1, number.length))
      // Candidate start values: any start s with s + position*step == numberValue
      // for some non-negative step, i.e. s ≡ numberValue (mod position).
      val possibleStarts = if(position == 0) {
        SIntSemiLinearSet(numberValue, 1, numberValue)
      } else {
        SIntSemiLinearSet(numberValue % position, position, numberValue)
      }
      SCounter(possibleLengths, possibleStarts, numberValue, position)
    }
  }
  /**
   * Set of Counter programs. A concrete Counter(length, start, step) satisfies
   * index == start + step * count, i.e. step = (index - start) / count when
   * that division is exact. If count == 0, any step from 1 to infinity works,
   * hence the infinite enumeration below.
   */
  case class SCounter(length: SInt, starts: SInt, index: Int, count: Int) extends SAtomicExpr {
    assert(length != SEmpty)
    assert(starts != SEmpty)
    def map[T](f: AtomicExpr => T): Stream[T] = {
      for(l <- length.toStream; s: IntLiteral <- starts; step <- if(count == 0) Stream.from(1) else List((index - s.k)/count)) yield f(Counter(l.k, s.k, step))
    }
    def foreach[T](f: AtomicExpr => T): Unit = {
      for(l <- length.toStream; s: IntLiteral <- starts; step <- if(count == 0) Stream.from(1) else List((index - s.k)/count)) f(Counter(l.k, s.k, step))
    }
    // Best program: best length and start; step defaults to 1 when count == 0.
    def takeBestRaw = Counter(length.takeBest.k, starts.takeBest.k, if(count == 0) 1 else (index - starts.takeBest.k)/count)//.withAlternative(this.toIterable)
    override def takeNBestRaw(n: Int): Seq[(Int, AtomicExpr)] = {
      StreamUtils.cartesianProduct(Seq(length.takeNBest(n).toStream, starts.takeNBest(n).toStream)).take(n) map {
        case Seq((lengthScore, length), (startsScore, start)) =>
          weighted(Counter(length.k, start.k, if(count == 0) 1 else (index - start.k)/count))
      }
    }
  }
/**
* Set of Constant string expressions described in Programs.scala
*/
case class SConstStr(s: String) extends SAtomicExpr {
def map[T](f: AtomicExpr => T): Stream[T] = {
Stream(f(ConstStr(s)))
}
def foreach[T](f: AtomicExpr => T): Unit = {
f(ConstStr(s))
}
def takeBestRaw = ConstStr(s)
override def takeNBestRaw(n: Int): Seq[(Int, AtomicExpr)] = Seq(weighted(takeBest))
}
type SPosition = ProgramSet[Position]
/**
* Set of Constant positions described in Programs.scala
*/
case class SCPos(k: Int) extends SPosition {
def map[T](f: Position => T): Stream[T] = {
Stream(f(CPos(k)))
}
def foreach[T](f: Position => T): Unit = {
f(CPos(k))
}
def takeBestRaw = CPos(k)
override def takeNBestRaw(n: Int): Seq[(Int, Position)] = Seq(weighted(takeBest))
}
/**
* Set of regexp positions described in Programs.scala
*/
case class SPos(r1: SRegExp, r2: SRegExp, c: SIntegerExpr) extends SPosition {
def map[T](f: Position => T): Stream[T] = {
for(rr1: RegExp <- r1.toStream; rr2: RegExp <- r2; cc <- c) yield f(Pos(rr1, rr2, cc))
}
def foreach[T](f: Position => T): Unit = {
for(rr1 <- r1; rr2 <- r2; cc <- c) f(Pos(rr1, rr2, cc))
}
def takeBestRaw = Pos(r1.takeBest, r2.takeBest, c.toList.sortBy(weight).head)
//var index = 0 // Index at which this position was computed
override def takeNBestRaw(n: Int): Seq[(Int, Position)] = {
StreamUtils.cartesianProduct(Seq(
r1.takeNBest(n).toStream,
r2.takeNBest(n).toStream,
c.toList.sortBy(weight).toStream)) map {
case Seq((_, rr1), (_, rr2), cc: IntLiteral) => weighted(Pos(rr1.asInstanceOf[RegExp], rr2.asInstanceOf[RegExp], cc.k))
}
}
}
  type SRegExp = ProgramSet[RegExp]
  /**
   * Set of token-sequence regexes: position i of a concrete TokenSeq is drawn
   * from the token set s(i). The set is empty iff any component set is empty
   * (guarded by the assertion below).
   */
  case class STokenSeq(s: List[SToken]) extends SRegExp {
    assert(s forall (_.sizePrograms != 0))
    def map[T](f: RegExp => T): Stream[T] = {
      for(t <- combinations(s)) yield f(TokenSeq(t))
    }
    def foreach[T](f: RegExp =>T): Unit = {
      for(t <- combinations(s)) f(TokenSeq(t))
    }
    // Best sequence: the best token of each component set, positionwise.
    def takeBestRaw = TokenSeq(s map (_.takeBest))
    override def takeNBestRaw(n: Int): Seq[(Int, RegExp)] = {
      StreamUtils.cartesianProduct(s.map(_.takeNBest(n).toStream)).take(n) map {
        x => weighted(TokenSeq(x.map(_2)))
      }
    }
  }
/**
* Empty set for everything
*/
case object SEmpty extends ProgramSet[Nothing] with Iterable[Nothing] {
def map[T](f: Nothing => T): Stream[T] = ???
override def foreach[T](f: Nothing => T): Unit = ???
def takeBestRaw = throw new Error("No program found")
def iterator = Nil.toIterator
override def toIterable = Nil
override def isEmpty = true
override def takeNBestRaw(n: Int): Seq[(Int, Nothing)] = Seq()
}
type SIntegerExpr = Set[IntegerExpr]
object SToken {
def apply(t: Token*)(l: List[Token]): SToken = SToken(t.toList)(l)
def apply(s: Traversable[Token])(l: List[Token]): SToken = {
assert(s forall (i => l.indexOf(i) >= 0))
if(s.size == 1) {
val i = l.indexOf(s.head)
SToken(1L << i)(l)
} else if(s.size == 2) {
val mask = s.toList.map(i => 1L << l.indexOf(i)).reduce(_ | _)
SToken(mask)(l)
} else {
val (newMask, _) = ((0L, 1L) /: l) { case ((res, inc), token) => if(s exists (_ == token)) (res + inc, inc << 1) else (res, inc << 1)}
SToken(newMask)(l)
}
}
}
/**
* Set of tokens represented as a 1 at position i in binary format if the token is in the set, 0 otherwise
* // Works if there are less than 64 tokens.
*/
case class SToken(mask: Long)(val l: List[Token]) extends ProgramSet[Token] {
def intersect(other: SToken): SToken = {
if(other.l eq l) {
val intersection_mask = mask & other.mask
SToken(intersection_mask)(l)
} else {
val intersection_list = (l ++ other.l).distinct
val tokens1 = toIterable.toSet
val tokens2 = other.toIterable.toSet
val tokens = tokens1 intersect tokens2
val (newMask, _) = ((0L, 1L) /: intersection_list) { case ((res, inc), token) => if(tokens(token)) (res + inc, inc << 1) else (res, inc << 1)}
SToken(newMask)(intersection_list)
}
}
override def sizePrograms = java.lang.Long.bitCount(mask)
def map[T](f: Token => T): Stream[T] = {
def rec(m: Long, l: List[Token]): Stream[T] = l match {
case Nil => Stream.empty
case a::b if (m & 1) != 0 => f(a) #:: rec(m >> 1, b)
case a::b => rec(m >> 1, b)
}
rec(mask, l)
}
override def foreach[T](f: Token =>T): Unit = {
def rec(m: Long, l: List[Token]): Unit = l match {
case Nil =>
case a::b if (m & 1) != 0 => f(a); rec(m >> 1, b)
case a::b => rec(m >> 1, b)
}
rec(mask, l)
}
override def isEmpty = size == 0
def takeBestRaw = map((i: Token) => i).toList.sortBy(weight).head
def contains(t: Token): Boolean = ((1L << l.indexOf(t)) & mask) != 0
override def toString = "SToken("+this.toList.mkString(",")+")"
override def takeNBestRaw(n: Int): Seq[(Int, Token)] = {
map((i: Token) => i).toList.sortBy(weight).take(n).map(weighted)
}
}
/**
* Constructor for set of flags for SSubStr
*/
object SSubStrFlag {
def apply(s: Traversable[SubStrFlag]): SSubStrFlag = {
//assert(s forall (i => s.indexOf(i) >= 0))
val l = SubStrFlag.registered
if(s.size == 1) {
val i = l.indexOf(s.head)
SSubStrFlag(1L << i)
} else if(s.size == 2) {
val mask = s.toList.map(i => 1L << l.indexOf(i)).reduce(_ | _)
SSubStrFlag(mask)
} else {
val (newMask, _) = ((0L, 1L) /: l) { case ((res, inc), token) => if(s exists (_ == token)) (res + inc, inc << 1) else (res, inc << 1)}
SSubStrFlag(newMask)
}
}
}
case class SSubStrFlag(mask: Long) extends ProgramSet[SubStrFlag] with Traversable[SubStrFlag] {
def intersect(other: SSubStrFlag): SSubStrFlag = if(other.mask == mask) this else SSubStrFlag(other.mask & mask)
override def sizePrograms = java.lang.Long.bitCount(mask)
def map[T](f: SubStrFlag => T): Stream[T] = {
def rec(m: Long, id: Int = 0): Stream[T] = if(m == 0) Stream.empty else if((m & 1) == 1) f(SubStrFlag(id)) #:: rec(m >> 1, id + 1) else rec(m >> 1, id + 1)
rec(mask)
}
override def foreach[T](f: SubStrFlag =>T): Unit = {
def rec(m: Long, id: Int = 0): Unit = if(m == 0) Stream.empty else if((m & 1) == 1) { f(SubStrFlag(id)) ; rec(m >> 1, id + 1)} else rec(m >> 1, id + 1)
rec(mask)
}
override def isEmpty = mask == 0
def takeBestRaw = map((i: SubStrFlag) => i).toList.sortBy(weight).head
override def toString = "SSubStrFlag("+this.toList.mkString(",")+")"
override def takeNBestRaw(n: Int): Seq[(Int, SubStrFlag)] = {
map((i: SubStrFlag) => i).toList.sortBy(weight).take(n).map(weighted)
}
}
  /**
   * Parameters threaded (implicitly) through the intersection routines.
   *
   * @param unify    when defined, the loop identifier used to unify differing
   *                 constants into Linear expressions
   * @param index1   loop index associated with the first program set
   * @param index2   loop index associated with the second program set
   * @param iterateInput whether adjacent input strings may be unified
   * @param useIndexForPosition whether positions may depend on the loop index
   */
  case class IntersectParam(unify: Option[Identifier], index1: Int, index2: Int, iterateInput: Boolean = true, useIndexForPosition: Boolean = false) {
    // Cooperative abort flag: set when the time budget is exceeded and
    // checked inside intersectDag's main loop.
    var timeout = false
  }
  /**
   * Pairwise intersection of two sets of atomic-expression sets: keeps every
   * non-empty intersection of an element of ss with an element of tt.
   */
  def intersect(ss: Set[SAtomicExpr], tt: Set[SAtomicExpr])(implicit unify: IntersectParam): Set[SAtomicExpr] = {
    for(s <- ss; t <- tt; r <- result(intersectAtomicExpr(s, t))) yield r
  }
  // Wraps a program set in Some unless it denotes zero programs.
  def result[T <: Program](a: ProgramSet[T]): Option[ProgramSet[T]] = if(sizePrograms(a)==0) None else Some(a)
  // Top-level intersection of trace-expression sets: only DAG/DAG pairs are
  // supported; any other combination yields the empty set.
  def intersect(p1: STraceExpr, p2: STraceExpr)(implicit unify: IntersectParam = IntersectParam(None, 0, 0)): STraceExpr = (p1, p2) match {
    case (p1: SDag[_], p2: SDag[_]) =>
      intersectDag(p1, p2)
    case _ => SEmpty
  }
  /**
   * Intersects two DAGs of program sets. Nodes of the result are pairs of
   * nodes; an edge ((n1,n2),(np1,np2)) exists when both source DAGs have the
   * corresponding edges and the intersection of their atomic-expression sets
   * is non-empty. The traversal alternates a forward frontier (from the start
   * pair) and a backward frontier (from the end pair) so unreachable regions
   * are never expanded, and aborts cooperatively when unify.timeout is set.
   */
  def intersectDag[Node1, Node2, Node3](p1: SDag[Node1], p2: SDag[Node2])(implicit unify: IntersectParam): STraceExpr = (p1, p2) match {
    case (s1@SDag(ñ1, n1s, n1t, ξ1, w1),
          s2@SDag(ñ2, n2s, n2t, ξ2, w2)) =>
      //println(s"Intersecting two dags of size: ${s1.ñ.size} and ${s2.ñ.size}")
      //println("computing edges...")
      // Lazily computes the set of intersected atomic-expression sets
      // labelling a candidate product edge.
      val W12f = { (arg : ((Node1, Node2), (Node1, Node2))) => arg match { case ((n1, n2), (np1, np2)) =>
        for(f1 <- w1(n1, np1) if !unify.timeout; f2 <- w2(n2, np2)) yield {
          intersectAtomicExpr(f1, f2)
        }
      }}
      var W12 = Map[((Node1, Node2), (Node1, Node2)), Set[SAtomicExpr]]()
      var edges = Set[((Node1, Node2), (Node1, Node2))]()
      var nodesVisited = Set[(Node1, Node2)]()
      var nodesToVisit = Queue[(Node1, Node2)]((n1s, n2s))
      var nodesToVisitEnd = Queue[(Node1, Node2)]((n1t, n2t))
      //println("Grouping edges...")
      // Adjacency indices of each source DAG, by edge source and edge target.
      val edgeMap11 = ξ1.groupBy(_1)
      val edgeMap12 = ξ1.groupBy(_2)
      val edgeMap21 = ξ2.groupBy(_1)
      val edgeMap22 = ξ2.groupBy(_2)
      //println("Gathering edges...")
      // Successor edges of a product node (forward direction).
      val edgeMap = new {
        def getOrElse(n1n2: (Node1, Node2), orElse: Iterable[((Node1, Node2), (Node1, Node2))]) = {
          for((_, n12) <- edgeMap11.getOrElse(n1n2._1, Set.empty).iterator; (_, n22) <- edgeMap21.getOrElse(n1n2._2, Set.empty))
            yield (n1n2, (n12, n22))
        }
      }
      // Predecessor edges of a product node (backward direction).
      val edgeMapEnd = new {
        def getOrElse(n1n2: (Node1, Node2), orElse: Iterable[((Node1, Node2), (Node1, Node2))]) = {
          for((n12, _) <- edgeMap12.getOrElse(n1n2._1, Set.empty).iterator; (n22, _) <- edgeMap22.getOrElse(n1n2._2, Set.empty))
            yield ((n12, n22), n1n2)
        }
      }
      var i = 1;
      // Edges already found to intersect to nothing, to avoid recomputation.
      var emptyEdges = Set[((Node1, Node2), (Node1, Node2))]()
      // Alternate between nodes to visit on the end and on the start.
      while(!(nodesToVisitEnd.isEmpty || nodesToVisit.isEmpty) && !unify.timeout) {
        //println(s"Level $i - starting edges (${nodesToVisit.length} to visit)")
        //println(s"Nodes to visit start: ${nodesToVisit.size}", s"Nodes to visit end: ${nodesToVisitEnd.size}")
        val nFirst = nodesToVisit.dequeue()
        nodesVisited += nFirst
        for(newEdge <- edgeMap.getOrElse(nFirst, Set.empty) if !(W12 contains newEdge) && !(emptyEdges(newEdge));
            e = newEdge._2) {
          val res = W12f(newEdge).filterNot(_ == SEmpty)
          if(!res.isEmpty) {
            //println(s"Found expression for edge $newEdge")
            edges += newEdge
            W12 += newEdge -> res
            if(!(nodesVisited contains e) && !(nodesToVisit contains e))
              nodesToVisit.enqueue(e)
          } else {
            emptyEdges += newEdge
          }
        }
        //println(s"Level $i - ending edges (${nodesToVisitEnd.length} to visit)")
        val nLast = nodesToVisitEnd.dequeue()
        nodesVisited += nLast
        for(newEdge <- edgeMapEnd.getOrElse(nLast, Set.empty) if !(W12 contains newEdge) && !(emptyEdges(newEdge));
            e = newEdge._1) {
          val res = W12f(newEdge).filterNot(_ == SEmpty)
          if(!res.isEmpty) {
            //println(s"Found expression for edge $newEdge")
            edges += newEdge
            W12 += newEdge -> res
            if(!(nodesVisited contains e) && !(nodesToVisitEnd contains e))
              nodesToVisitEnd.enqueue(e)
          } else {
            emptyEdges += newEdge
          }
        }
        i += 1
      }
      val ñ = nodesVisited ++ (nodesToVisitEnd intersect nodesToVisit)
      val ξ12final = edges
      // The result is empty unless the end pair was actually reached and at
      // least one product edge survived; otherwise reduce and re-check size.
      if(!nodesVisited((n1t, n2t))) SEmpty else
      if(ξ12final.size != 0) {
        val res = SDag[(Node1, Node2)](ñ, (n1s, n2s), (n1t, n2t), ξ12final, W12).reduce
        if(sizeDag(res) == 0) SEmpty else res
      } else SEmpty
  }
  // Wraps a program set in Some unless it is the empty-set singleton.
  def notEmpty[T <: Program](a: ProgramSet[T]): Option[ProgramSet[T]] = if(a == SEmpty) None else Some(a)
  /**
   * Intersects two atomic-expression sets. Identical references short-circuit;
   * otherwise intersection is defined case-by-case per constructor pair, with
   * loop unification (via unify.unify) allowing adjacent input indices or
   * differing constants to be merged into Linear expressions.
   */
  def intersectAtomicExpr(a: SAtomicExpr, b: SAtomicExpr)(implicit unify: IntersectParam = IntersectParam(None, 0, 0)): SAtomicExpr = if(a eq b) a else ((a, b) match {
    case (SSpecialConversion(a, b), SSpecialConversion(c, d)) =>
      val ss = intersectAtomicExpr(a, c)
      if(sizePrograms(ss) > 0) {
        val i = b intersect d
        if(i.size > 0) {
          SSpecialConversion(ss.asInstanceOf[SSubStr], i)
        } else SEmpty
      } else SEmpty
    // Loops with identical separators intersect bodywise, after renaming the
    // second loop's identifier to the first one's.
    case (SLoop(i1, e1, sep1), SLoop(i2, e2, sep2)) if sep1 == sep2 =>
      val be2 = replaceSTraceExpr(e2){ case l@Linear(a, i, b) => if(i == i2) Linear(a, i1, b) else l }
      val intersectBody = intersect(e1, be2)
      if(!intersectBody.isEmpty) {
        SLoop(i1, intersectBody, sep1)
      } else SEmpty
    case (SConstStr(aa), SConstStr(bb)) if aa == bb => a
    //case (SConstStr(aa), SConstStr(bb)) if aa.isNumber == bb.isNumber => a
    // Substrings of literal input indices: equal indices intersect directly;
    // adjacent indices may be unified into a Linear index over the loop id.
    case (SSubStr(InputString(vi@IntLiteral(i)), pj, pk, m1), SSubStr(InputString(vj@IntLiteral(j)), pl, pm, m2)) =>
      if(i == j || (unify.unify.isDefined && ((i == j + 1) || (i == j - 1)) && unify.iterateInput)) {
        val mm = m1 intersect m2
        if(mm.isEmpty) SEmpty else {
          val pp1 = (for(p1 <- pj; p2 <- pl; res <- notEmpty(intersectPos(p1, p2))) yield res)
          if(pp1.isEmpty) SEmpty else {
            val pp2 = (for(p1 <- pk; p2 <- pm; res <- notEmpty(intersectPos(p1, p2))) yield res)
            if(pp2.isEmpty) SEmpty else {
              if(i == j) SSubStr(InputString(vi), pp1, pp2, mm)
              else if(i == j - 1 && unify.unify.isDefined) SSubStr(InputString(Linear(1, unify.unify.get, i)), pp1, pp2, mm)
              else if(i == j + 1 && unify.unify.isDefined) SSubStr(InputString(Linear(1, unify.unify.get, j)), pp1, pp2, mm)
              else SEmpty
            }
          }
        }
      } else SEmpty
    // Substrings over already-linear input indices require identical indices.
    case (SSubStr(InputString(vi: Linear), pj, pk, m1), SSubStr(InputString(vj: Linear), pl, pm, m2)) =>
      if(vi == vj) {
        val mm = m1 intersect m2
        val pp1 = (for(p1 <- pj; p2 <- pl; res <- notEmpty(intersectPos(p1, p2))) yield res)
        val pp2 = (for(p1 <- pk; p2 <- pm; res <- notEmpty(intersectPos(p1, p2))) yield res)
        if(pp1.isEmpty || pp2.isEmpty || mm.isEmpty) SEmpty else {
          SSubStr(InputString(vi), pp1, pp2, mm)
        }
      } else SEmpty
    case (SNumber(ss1, l1, o1), SNumber(ss2, l2, o2)) if(o1 == o2)=>
      //val s = intersectIntSet(s1, s2)
      val l = intersectIntSet(l1, l2)
      if(sizePrograms(l) > 0) {
        val ss = intersectAtomicExpr(ss1, ss2)
        if(sizePrograms(ss)>0)
          SNumber(ss.asInstanceOf[SSubStr], l, o1)
        else SEmpty
      } else SEmpty
    // Counters: two observations (index@count) determine the step; differing
    // counts let us solve for step and start, otherwise indices must agree.
    case (SCounter(l1, s1, i1, c1), SCounter(l2, s2, i2, c2)) =>
      val s = intersectIntSet(s1, s2)
      val l = intersectIntSet(l1, l2)
      if(sizePrograms(l) > 0 && sizePrograms(s) > 0) {
        if(c1 == c2) {
          if(i1 == i2)
            SCounter(l, s, i1, c1)
          else
            SEmpty
        } else if(i1 == i2) {
          SEmpty
        } else {
          if((i2 - i1) % (c2 - c1) != 0) SEmpty else {
            val newStep = Math.abs((i2 - i1)/(c1-c2))
            val newStart = i1 - c1 * newStep
            val s2 = intersectIntSet(s, SIntSemiLinearSet(newStart, 0, newStart))
            s2 match {
              case si : SIntSemiLinearSet =>
                if(c2 != 0) {
                  SCounter(l, s2, i2, c2)
                } else if(c1 != 0)
                  SCounter(l, s2, i1, c1)
                else SEmpty
              case _ =>
                SEmpty
            }
          }
        }
      } else SEmpty
    case _ => SEmpty
  })
  // Reserved identifier used when positions are generalized over the loop index.
  final val INDEX_IDENTIFIER = Identifier("index")
  /**
   * Intersects two position sets. Regex-delimited positions intersect their
   * left/right regex sets and their occurrence counts; when unification is
   * active, pairs of integer literals may be generalized into Linear
   * expressions over the loop identifier or over INDEX_IDENTIFIER.
   */
  def intersectPos(p1: SPosition, p2: SPosition)(implicit unify: IntersectParam): SPosition = (p1, p2) match {
    case (SCPos(k1), SCPos(k2)) if k1 == k2 => p1
    case (SCPos(0), p2@SPos(r21,r22,c2)) if unify.useIndexForPosition => // Possible unification with an index.
      val newCC: SIntegerExpr = if(unify.unify.isEmpty) {
        c2 flatMap {
          case IntLiteral(k2) if k2 > 0 =>
            val i1 = unify.index1
            val i2 = unify.index2
            if(i1 == 0 && i2 != 0 && k2%i2 == 0) {
              val increment = k2/i2
              val first = 0
              List(Linear(increment, INDEX_IDENTIFIER, first))
            } else Nil
          case _ => Nil
        }
      } else {
        c2 flatMap {
          case IntLiteral(k2) if k2 > 0 =>
            val increment = k2
            val first = 0
            List(Linear(increment, unify.unify.get, first))
          case _ => Nil
        }
      }
      if(newCC.isEmpty) SEmpty else {
        SPos(r21, r22, newCC)
      }
    case (p1@SPos(r11, r12, c1), p2@SPos(r21, r22, c2)) =>
      val r2 = intersectRegex(r12,r22)
      if(r2 == SEmpty) return SEmpty
      val r1 = intersectRegex(r11,r21)
      if(r1 == SEmpty) return SEmpty
      val c: SIntegerExpr = if(unify.unify.isEmpty) {
        //c1 intersect c2
        // TODO : Better intersection (currently n²)
        val res = ((c1 x c2) flatMap {
          case (a, b) if a == b => List(a)
          case (a@Linear(k1, INDEX_IDENTIFIER, k), IntLiteral(k2)) if k1 * unify.index2 + k == k2 => List(a)
          case (IntLiteral(k2), a@Linear(k1, INDEX_IDENTIFIER, k)) if k1 * unify.index1 + k == k2 => List(a)
          case (IntLiteral(k1), IntLiteral(k2)) if unify.useIndexForPosition =>
            val i1 = unify.index1
            val i2 = unify.index2
            if(i2 != i1 && (k2 - k1)%(i2 - i1) == 0) {
              val increment = (k2-k1)/(i2-i1)
              val start = k1 - i1*increment
              if(start >= 0 && increment > 0 || start < 0 && increment < 0) {
                List(Linear(increment, INDEX_IDENTIFIER, start))
              } else Nil
            } else Nil
          case _ => Nil
        }).toSet
        res: SIntegerExpr
      } else {
        val res = ((c1 x c2) flatMap {
          case (a, b) if a == b => List(a)
          case (IntLiteral(k1), IntLiteral(k2)) =>
            if(k1 < k2 && k1 >= 0) List(Linear((k2-k1), unify.unify.get, k1):IntegerExpr)
            else if(k2 < k1 && k1 < 0) List(Linear((k2-k1), unify.unify.get, k1):IntegerExpr)
            //else if(k2 < k1 && k2 >= 0) List(Linear((k1-k2), unify.get, k2):IntegerExpr)
            else Nil
          case _ => Nil
        }).toSet
        res: SIntegerExpr
      }
      if(r1 == SEmpty || r2 == SEmpty || c.isEmpty) SEmpty else {
        SPos(r1, r2, c)
      }
    case _ => SEmpty
  }
def gcd(a: Int, b: Int): Int = if (a == 0) b else gcd(b%a, a)
def extendGcd(a: Int, b: Int, s: Int = 0, t: Int= 1, old_s: Int = 1, old_t: Int = 0)(r: Int = b, old_r: Int = a): (Int, Int) = {
if(r == 0) { (old_s, old_t)
} else { val quotient = (old_r / r)
extendGcd(a, b, old_s - quotient*s, old_t - quotient*t, s, t)(old_r - quotient*r, r)
}
}
  /**
   * Intersects two semi-linear integer sets {start, start+step, ..., <= max}.
   * Singleton sets (step == 0) reduce to membership tests; equal steps reduce
   * to a congruence check; otherwise the intersection is computed via the
   * Chinese-remainder construction: the new step is lcm(step1, step2) and the
   * new start is the smallest common element >= max(start1, start2), found
   * from a Bezout solution of a*step1 - b*step2 == start2 - start1.
   */
  def intersectIntSet(p1: SInt, p2: SInt)(implicit unify: IntersectParam = IntersectParam(None, 0, 0)): SInt = (p1, p2) match {
    case (p1@SIntSemiLinearSet(start1, step1, max1), p2@SIntSemiLinearSet(start2, step2, max2)) =>
      // Multiple cases.
      val newMax = Math.min(max1, max2)
      if(step1 == 0 || step2 == 0) {
        if(step1 == 0) {
          if(p2(start1)) p1 else SEmpty
        } else { // step2 == 0
          if(p1(start2)) p2 else SEmpty
        }
      } else if(step1 == step2) {
        if(start1 == start2) {
          if(max1 <= max2) p1 else p2
        } else if((start2 - start1) % step1 == 0){
          val newStart = Math.max(start2, start1)
          if(newStart <= newMax) {
            SIntSemiLinearSet(newStart, step1, newMax)
          } else SEmpty
        } else SEmpty
      } else { // both steps are different. Will find the first one greater than the two starts.
        // Find a, b such that start1 + a*step1 == start2 + b*step2
        // It means that a*step1-b*step2=start2-start1
        val gcd2 = gcd(step1, step2)
        if((start2 - start1) % gcd2 != 0) SEmpty else {
          val c1 = step1/gcd2
          val c2 = step2/gcd2
          val i = (start2 - start1)/gcd2
          // Solve a*c1+b*c2 == 1 with bezout.
          val (a_wo_i, b_wo_i) = extendGcd(c1, c2)()
          val a = a_wo_i * i
          val b = b_wo_i * i
          // Now start1 + a * step1 == start2 + b * step2
          val newStep = step1 * step2 / gcd2 // The LCM is the new step.
          val possibleStart = start1 + a*step1
          val maxStart = Math.max(start1, start2)
          val base = maxStart - possibleStart
          // Round possibleStart up to the first common element >= maxStart.
          val startI = (base + ((((newStep - base) % newStep) + newStep)%newStep))/newStep
          val newStart = possibleStart + newStep*startI
          if(newStart <= newMax)
            SIntSemiLinearSet(newStart, newStep, newMax)
          else SEmpty
        }
      }
    /*case (SAnyInt(default), b) => b
    case (a, SAnyInt(default)) => a*/
    case (SEmpty, _) => SEmpty
    case (_, SEmpty) => SEmpty
  }
def intersectRegex(r1: SRegExp, r2: SRegExp): SRegExp = (r1, r2) match {
case (STokenSeq(s1), STokenSeq(s2)) if s1.length == s2.length =>
var i1 = s1
var i2 = s2
var res = ListBuffer[SToken]()
while(i1 != Nil && i2 != Nil) {
val tmp = i1.head intersect i2.head
if(tmp.sizePrograms == 0) return SEmpty
res += tmp
i1 = i1.tail
i2 = i2.tail
}
if(i1 != Nil || i2 != Nil) return SEmpty // should not happen
STokenSeq(res.toList)
case _ => SEmpty
}
  // Intersects s1 and s2 while allowing differing constants to be unified
  // into Linear expressions over the loop identifier w.
  def unify(s1: STraceExpr, s2: STraceExpr, w: Identifier, index1: Int, index2: Int, iterateInput: Boolean) = intersect(s1, s2)(unify=IntersectParam(Some(w), index1, index2, iterateInput))
  /**
   * Number of concrete programs denoted by a program set, computed
   * constructor-by-constructor (products over components, sums over DAG paths).
   */
  def sizePrograms(p: ProgramSet[T forSome { type T <: Program} ]): Long = p match {
    case SNumber(s, digits, offset) => sizePrograms(s)*digits.size
    // A counter with count == 0 admits infinitely many steps; 100 is used as
    // a stand-in size for that unbounded case.
    case SCounter(length, start, index, count) => if(count == 0) 100 else sizePrograms(start)*sizePrograms(length)
    case SSwitch(conds) => (1L /: (conds map _2 map sizePrograms)) (_ * _)
    case dag@SDag(ñ1, n1s, n1t, ξ1, w1) => sizeDag(dag)
    case SSubStr(vi, pj, pk, mm) => (pj.toList map sizePrograms).sum * (pk.toList map sizePrograms).sum * mm.sizePrograms
    case SLoop(w, e, _) => sizePrograms(e)
    case SConstStr(s) => 1
    case SCPos(k) => 1
    case SPos(r1, r2, c) => sizePrograms(r1) * sizePrograms(r2) * c.size
    case STokenSeq(tseq) => (1L /: (tseq map { (t:SToken) => t.size})) (_ * _)
    case s@ SToken(_) => s.size
    case SEmpty => 0
    case SSpecialConversion(a, b) => sizePrograms(a) * b.size
    /*case SAny(_) => 1
    case SAnyInt(i) => 1*/
    case SIntSemiLinearSet(start, offset, max) => if(offset == 0) 1 else (max - start)/offset + 1
    case s @ SSubStrFlag(mask) => s.size
  }
  /**
   * Number of programs denoted by a DAG: the sum over all paths from source to
   * target of the product of the edge-label sizes, memoized per node and
   * saturated at Integer.MAX_VALUE to avoid overflow.
   */
  def sizeDag[Node](p1: SDag[Node]): Long = {
    var sizeNode = Map[Node, Long](p1.ns -> 1)
    def rec(fromN: Node): Long = {
      if(sizeNode contains fromN) sizeNode(fromN) else {
        val res = (for(np <- p1.ñ.toList) yield {
          val pre_sum = (for(f <- p1.W.getOrElse((np, fromN),Set.empty)) yield sizePrograms(f))
          val sum = if(pre_sum exists { i => i >= Integer.MAX_VALUE }) Integer.MAX_VALUE else {
            Math.min(Integer.MAX_VALUE, pre_sum.sum)
          }
          if(sum != 0) {
            Math.min(sum * rec(np), Integer.MAX_VALUE)
          } else 0
        }).sum
        sizeNode += fromN -> res
        res
      }
    }
    rec(p1.nt)
  }
  /**
   * Replace routines: rewrite every Linear integer expression inside a program
   * set through the implicit function w (used when intersecting loops to
   * rename one loop's identifier into the other's).
   */
  def replaceSTraceExpr(e: STraceExpr)(implicit w: Linear => Linear): STraceExpr = e match {
    case SDag(n, ns, nt, e, ww) => SDag(n, ns, nt, e, ww.mapValues(_.map(v => replaceSAtomicExpr(v)(w))))
    case e => e
  }
  def replaceSAtomicExpr(e: SAtomicExpr)(implicit w: Linear => Linear): SAtomicExpr = e match {
    case SSubStr(vi, p1, p2, m) => SSubStr(replaceStringVariable(vi)(w), p1.map(t=>replaceSPosition(t)(w)), p2.map(t=>replaceSPosition(t)(w)), m)
    case SConstStr(s) => e
    // NOTE(review): `w2 == w` compares an Identifier with the implicit
    // Linear => Linear function, which can never be equal — this guard looks
    // like a leftover from an earlier signature; confirm the intent.
    case SLoop(w2, _, separator) if w2 == w => e
    case SLoop(w2, e, separator) => SLoop(w2, replaceSTraceExpr(e)(w), separator)
    case SNumber(s, l, o) => SNumber(replaceSAtomicExpr(s)(w), l, o)
    case e => e
  }
  def replaceSPosition(e: SPosition)(implicit w: Linear => Linear): SPosition = e match {
    case SPos(p1, p2, t) => SPos(p1, p2, replaceSIntegerExpr(t)(w))
    case _ => e
  }
  def replaceStringVariable(e: StringVariable)(implicit w: Linear => Linear): StringVariable = e match {
    case InputString(i) => InputString(replaceIntegerExpr(i)(w))
    //case PrevStringNumber(i) => PrevStringNumber(replaceIntegerExpr(i)(w))
    case e => e
  }
  def replaceSIntegerExpr(e: SIntegerExpr)(implicit w: Linear => Linear): SIntegerExpr = e.map(t => replaceIntegerExpr(t)(w))
  def replaceIntegerExpr(e: IntegerExpr)(implicit w: Linear => Linear): IntegerExpr = e match {
    case e @ Linear(i, v, j) => w(e)
    case e => e
  }
// addToEveryOccurence
}
| MikaelMayer/StringSolver | src/main/scala/ch/epfl/lara/synthesis/stringsolver/ProgramSet.scala | Scala | gpl-2.0 | 45,219 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt
package scriptedtest
import java.util.concurrent.{ Executors, TimeUnit, TimeoutException }
import scala.collection.mutable
import scala.concurrent.duration._
import sbt.internal.scripted._
private[sbt] object BatchScriptRunner {
  // Mutable map from each handler to its current state. The value type is a
  // type projection because every StatementHandler declares its own State.
  type States = mutable.HashMap[StatementHandler, StatementHandler#State]
}
/** Alternative script runner that executes scripted statements in batch,
 * carrying per-handler state between statements and timing out each one.
 */
private[sbt] class BatchScriptRunner extends ScriptRunner with AutoCloseable {
  import BatchScriptRunner.States

  // Pool on which each statement runs so that it can be timed out.
  private[this] val executor = Executors.newCachedThreadPool()

  /** Runs batched execution when `states` is non-empty; otherwise falls back
   * to the inherited sequential runner.
   *
   * @param statements handler/statement pairs to execute in order
   * @param states     mutable per-handler state
   */
  def apply(statements: List[(StatementHandler, Statement)], states: States): Unit =
    if (states.isEmpty) super.apply(statements)
    else for ((handler, statement) <- statements) processStatement(handler, statement, states)

  /** Seeds `states` with each handler's initial state. */
  def initStates(states: States, handlers: Seq[StatementHandler]): Unit =
    for (handler <- handlers) states(handler) = handler.initialState

  /** Lets every handler release its resources; cleanup failures are ignored. */
  def cleanUpHandlers(handlers: Seq[StatementHandler], states: States): Unit =
    for (handler <- handlers; state <- states.get(handler)) {
      try handler.finish(state.asInstanceOf[handler.State])
      catch { case _: Exception => () }
    }

  // Upper bound on the wall-clock time of a single statement.
  private val timeout = 5.minutes

  /** Executes one statement on the pool, enforcing the timeout and the
   * statement's success/failure expectation, and records the new handler
   * state on expected success.
   */
  def processStatement(handler: StatementHandler, statement: Statement, states: States): Unit = {
    val currentState = states(handler).asInstanceOf[handler.State]
    val pending = executor.submit(
      () =>
        try Right(handler(statement.command, statement.arguments, currentState))
        catch { case e: Exception => Left(e) }
    )
    val outcome =
      try pending.get(timeout.toMillis, TimeUnit.MILLISECONDS)
      catch { case e: TimeoutException => throw new TestException(statement, "Command timed out", e) }
    outcome match {
      case Right(nextState) =>
        if (statement.successExpected) states(handler) = nextState
        else throw new TestException(statement, "Command succeeded but failure was expected", null)
      case Left(_) if !statement.successExpected => ()
      case Left(t: TestFailed) =>
        throw new TestException(statement, "Command failed: " + t.getMessage, null)
      case Left(err) =>
        throw new TestException(statement, "Command failed", err)
    }
  }

  override def close(): Unit = executor.shutdown()
}
| sbt/sbt | scripted-sbt-redux/src/main/scala/sbt/scriptedtest/BatchScriptRunner.scala | Scala | apache-2.0 | 2,740 |
package fly.play.s3
import scala.concurrent.{Await, Awaitable}
import scala.concurrent.duration.DurationInt
import org.specs2.execute.AsResult
import org.specs2.mutable.Specification
import org.specs2.specification.core.Fragment
import play.api.test.Helpers.running
import play.api.Application
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.ws.WSClient
trait S3SpecSetup extends Specification {

  /** The bucket used by the integration tests, read from the configuration;
   * fails loudly when the key is missing.
   */
  def testBucketName(implicit app: Application): String =
    app.configuration
      .getOptional[String]("testBucketName")
      .getOrElse(sys.error("Could not find testBucketName in configuration"))

  /** Builds a Guice application carrying the given extra configuration. */
  def fakeApplication(additionalConfiguration: Map[String, _ <: Any] = Map.empty): Application =
    new GuiceApplicationBuilder().configure(additionalConfiguration).build()

  /** Lets an example run inside a running fake Play application. */
  implicit class InAppExample(s: String) {
    def inApp[T: AsResult](r: => T): Fragment =
      s in running(fakeApplication()) {
        r
      }
  }

  /** Blocks for at most two minutes waiting for the awaitable to complete. */
  def await[T](a: Awaitable[T]): T = Await.result(a, 120.seconds)

  /** Asserts that awaiting `a` completes without throwing. */
  def noException[T](a: Awaitable[T]) = await(a) must not(throwA[Throwable])
}
| Rhinofly/play-s3 | src/test/scala/fly/play/s3/S3SpecSetup.scala | Scala | mit | 1,076 |
package org.scalafmt.rewrite
import scala.meta._
import scala.meta.tokens.Token
import org.scalafmt.config.ScalafmtConfig
import org.scalafmt.internal.FormatToken
import org.scalafmt.internal.FormatTokens
import org.scalafmt.internal.FormatWriter
object RemoveEmptyDocstrings
    extends FormatTokensRewrite.Rule
    with FormatTokensRewrite.RuleFactory {

  import FormatTokensRewrite._

  /** Active only when the configuration asks for empty docstrings to be dropped. */
  override def enabled(implicit style: ScalafmtConfig): Boolean =
    style.docstrings.removeEmpty

  /** The rule is stateless, so the factory hands out this singleton. */
  override def create(ftoks: FormatTokens): FormatTokensRewrite.Rule = this

  /** Removes the right-hand token when it is a comment holding an empty docstring. */
  override def onToken(implicit
      ft: FormatToken,
      style: ScalafmtConfig
  ): Option[Replacement] = {
    val rightIsEmptyDocstring =
      ft.right.is[Token.Comment] &&
        FormatWriter.isEmptyDocstring(ft.meta.right.text)
    if (rightIsEmptyDocstring) Some(removeToken) else None
  }

  /** This rule never pairs replacements on the right-hand side. */
  override def onRight(lt: Replacement, hasFormatOff: Boolean)(implicit
      ft: FormatToken,
      style: ScalafmtConfig
  ): Option[(Replacement, Replacement)] = None
}
| scalameta/scalafmt | scalafmt-core/shared/src/main/scala/org/scalafmt/rewrite/RemoveEmptyDocstrings.scala | Scala | apache-2.0 | 1,002 |
package uk.co.mattthomson.coursera.ggp.gresley.gdl
import uk.co.mattthomson.coursera.ggp.gresley.gdl.FactTag._
import com.twitter.util.Memoize
// In-memory model of a GDL game: derives roles, facts, rules and the initial
// state from the parsed statements. Constant facts and relation facts are
// computed by iterating rule application to a fixpoint (the iteration stops
// when the total number of derived facts no longer grows).
case class GameDescription(statements: Seq[Statement]) {
  // Facts derivable without reference to game state: plain facts plus rules
  // whose conditions are all FactConditions, propagated to a fixpoint.
  lazy val constantFacts = {
    val simpleFacts = statements
      .collect { case f: Fact => f }
      .toSet
      .groupBy { f: Fact => f.tag }
      .toMap
    val rules: Set[Rule] = statements
      .collect { case c: Rule => c }
      .filter { c => c.conditions.forall(_.isInstanceOf[FactCondition]) }
      .toSet
    propagateRules(simpleFacts, rules)
  }
  lazy val roles = statements.collect { case Role(LiteralTerm(role)) => role }
  // State built from the `init` facts.
  lazy val initialState = new GameState(this, constantFacts.getOrElse(classOf[Init], Set()).map { case Init(fact) => fact })
  lazy val baseFacts = constantFacts.getOrElse(classOf[Base], Set()).map { case Base(fact) => fact }
  // Legal actions per role, derived from the `input` facts.
  lazy val actions = constantFacts.getOrElse(classOf[Input], Set())
    .map { case Input(Role(LiteralTerm(role)), action) => (role, action) }
    .groupBy { case (role, _) => role }
    .map { case (role, as) => (role, as.map { case (_, a) => a}.toList) }
    .toMap
  private lazy val allRules = statements
    .collect { case c: Rule => c }
    .groupBy(_.conclusion.tag)
  // Memoized lookup of the rules that can produce a given conclusion.
  val rules = Memoize(rulesUnmemoized)
  private def rulesUnmemoized(conclusion: Fact) = allRules
    .getOrElse(conclusion.tag, Seq())
    .flatMap(_.bind(conclusion))
  // All facts (constant plus relation-derived), computed to a fixpoint.
  lazy val allFacts: Map[FactTag, Set[Fact]] = {
    val relationRules = statements
      .collect { case c: Rule => c }
      .filter(_.conclusion.isInstanceOf[Relation])
      .filterNot(r => constantFacts.contains(r.conclusion.tag))
    findAllFacts(constantFacts, relationRules)
  }
  // TODO better way of doing this
  def possibleValues(role: String) = (0 to 100).map(_.toString)
  // Applies the rules to the fact map until no new facts appear.
  private def propagateRules(soFar: Map[FactTag, Set[Fact]], rules: Set[Rule]): Map[FactTag, Set[Fact]] = {
    val updatedFacts = rules.foldLeft(soFar) { case (facts, rule) =>
      val rules = rule.bind(facts)
      if (rules.isEmpty) facts else {
        val tag = rules.head.conclusion.tag
        facts + (tag -> (facts.getOrElse(tag, Set()) ++ rules.map(_.conclusion)))
      }
    }
    def totalSize(m: Map[_, Set[_]]) = m.map { case (_, v) => v.size}.sum
    if (totalSize(soFar) == totalSize(updatedFacts)) soFar else propagateRules(updatedFacts, rules)
  }
  // Same fixpoint scheme, but folding each rule's conclusions into the map.
  private def findAllFacts(soFar: Map[FactTag, Set[Fact]], rules: Seq[Rule]): Map[FactTag, Set[Fact]] = {
    val updated = rules.foldLeft(soFar) { case (oldFacts, rule) => rule.updateWithConclusions(oldFacts) }
    def totalSize(m: Map[_, Set[_]]) = m.map { case (_, v) => v.size }.sum
    if (totalSize(soFar) == totalSize(updated)) soFar else findAllFacts(updated, rules)
  }
}
object GameDescription {
  /** Parses a GDL source string into a GameDescription; throws
   * IllegalArgumentException with the parser diagnostics on failure.
   */
  def apply(gdl: String): GameDescription = {
    val parser = new GdlParser
    val result = parser.parseAll(parser.game, gdl)
    if (!result.successful) throw new IllegalArgumentException(result.toString)
    GameDescription(result.get)
  }
}
| matt-thomson/gresley | src/main/scala/uk/co/mattthomson/coursera/ggp/gresley/gdl/GameDescription.scala | Scala | mit | 3,061 |
package com.sksamuel.elastic4s.search
import com.sksamuel.elastic4s.ElasticsearchClientUri
import com.sksamuel.elastic4s.http.{ElasticDsl, HttpClient}
import com.sksamuel.elastic4s.testkit.SharedElasticSugar
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy
import org.scalatest.{FlatSpec, Matchers}
// Integration spec: verifies that a multi-search request returns one response
// per sub-query, in order, against a shared embedded Elasticsearch node.
class MultiSearchHttpTest
  extends FlatSpec
    with SharedElasticSugar
    with Matchers
    with ElasticDsl {

  import com.sksamuel.elastic4s.jackson.ElasticJackson.Implicits._

  // HTTP client pointed at the shared test node.
  val http = HttpClient(ElasticsearchClientUri("elasticsearch://" + node.ipAndPort))

  "a multi search request" should "find matching documents for all queries" in {

    http.execute {
      createIndex("jtull")
    }.await

    // Index two documents and refresh immediately so the searches see them.
    http.execute {
      bulk(
        indexInto("jtull/albums") fields ("name" -> "aqualung") id 14,
        indexInto("jtull/albums") fields ("name" -> "passion play") id 51
      ).refresh(RefreshPolicy.IMMEDIATE)
    }.await

    val resp = http.execute {
      multi(
        search("jtull") query matchQuery("name", "aqualung"),
        search("jtull") query "passion",
        search("jtull" / "albums") query matchAllQuery()
      )
    }.await
    resp.responses.size shouldBe 3
    resp.size shouldBe 3
    // Responses come back in the same order as the sub-queries.
    resp.responses.head.hits.hits.head.id shouldBe "14"
    resp.responses.tail.head.hits.hits.head.id shouldBe "51"
    resp.responses.head.totalHits shouldBe 1
    resp.responses.tail.head.totalHits shouldBe 1
    resp.responses.last.totalHits shouldBe 2
  }
}
| FabienPennequin/elastic4s | elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/search/MultiSearchHttpTest.scala | Scala | apache-2.0 | 1,507 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.tools.export.formats
import java.io.OutputStream
import org.geotools.data.simple.SimpleFeatureCollection
import org.locationtech.geomesa.features.avro.AvroDataFileWriter
import org.opengis.feature.simple.SimpleFeatureType
/** Exports simple features as an Avro data file written to the given stream.
  *
  * @param sft the feature type of the features being exported
  * @param os stream the Avro data file is written to; closed by `close()`
  * @param compression compression setting passed through to the Avro writer
  */
class AvroExporter(sft: SimpleFeatureType, os: OutputStream, compression: Int) extends FeatureExporter {

  val writer = new AvroDataFileWriter(os, sft, compression)

  // Appends the whole collection; the exported count is not tracked, hence None
  override def export(fc: SimpleFeatureCollection): Option[Long] = {
    writer.append(fc)
    None
  }

  override def flush(): Unit = {
    writer.flush()
    os.flush()
  }

  // Closes the writer first, then the underlying stream
  override def close(): Unit  = {
    writer.close()
    os.close()
  }
}
| nagavallia/geomesa | geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/export/formats/AvroExporter.scala | Scala | apache-2.0 | 1,136 |
package com.twitter.finagle.stats
import com.twitter.app.GlobalFlag
import scala.collection.{Map, mutable}
import scala.util.matching.Regex
/** Selects the output format style for metric names; the recognized values
  * below correspond to the formatters defined in [[StatsFormatter]].
  */
object format extends GlobalFlag[String](
  "commonsmetrics",
  "Format style for metric names (ostrich|commonsmetrics|commonsstats)"
) {
  // Recognized values for this flag
  private[stats] val Ostrich = "ostrich"
  private[stats] val CommonsMetrics = "commonsmetrics"
  private[stats] val CommonsStats = "commonsstats"
}

/**
 * If a histogram has no data collected (its count is 0), it can be
 * beneficial to not export all the histogram details as there will
 * be no interesting data there. When this flag is set to `false`,
 * only the `count=0` is exported. When `true`, all of the details
 * will be exported.
 */
object includeEmptyHistograms extends GlobalFlag[Boolean](
  false,
  "Include full histogram details when there are no data points")
/**
 * Allows for customization of how stat names get formatted.
 */
private[stats] sealed trait StatsFormatter {

  /** Flattens sampled gauges, counters, and histograms into a single
    * name -> value map, using this formatter's naming scheme for the
    * histogram components.
    */
  def apply(values: SampledValues): Map[String, Number] = {
    val flattened = mutable.HashMap.empty[String, Number]
    flattened ++= values.gauges
    flattened ++= values.counters

    val exportEmpty = includeEmptyHistograms()
    for ((name, snapshot) <- values.histograms) {
      val count = snapshot.count
      flattened.put(histoName(name, "count"), count)
      // details of an empty histogram are only exported when explicitly enabled
      if (exportEmpty || count > 0) {
        flattened.put(histoName(name, "sum"), snapshot.sum)
        flattened.put(histoName(name, labelAverage), snapshot.avg)
        flattened.put(histoName(name, labelMin), snapshot.min)
        flattened.put(histoName(name, labelMax), snapshot.max)
        snapshot.percentiles.foreach { p =>
          flattened.put(histoName(name, labelPercentile(p.getQuantile)), p.getValue)
        }
      }
    }
    flattened
  }

  /**
   * Full formatted name for one component of a histogram.
   *
   * @param name the "name" of the histogram
   * @param component a single part of this histogram, for example the average,
   *                  count, or a percentile.
   */
  protected def histoName(name: String, component: String): String

  /** Label applied for a given percentile, `p`, of a histogram */
  protected def labelPercentile(p: Double): String

  /** Label applied for the minimum of a histogram */
  protected def labelMin: String

  /** Label applied for the maximum of a histogram */
  protected def labelMax: String

  /** Label applied for the average of a histogram */
  protected def labelAverage: String
}
private[stats] object StatsFormatter {

  /**
   * Uses the global flag, [[format]], to select the formatter used.
   */
  def default: StatsFormatter =
    format() match {
      case format.Ostrich => Ostrich
      case format.CommonsMetrics => CommonsMetrics
      case format.CommonsStats => CommonsStats
    }

  /**
   * The default behavior for formatting as done by Commons Metrics.
   *
   * See Commons Metrics' `Metrics.sample()`.
   */
  object CommonsMetrics extends StatsFormatter {
    protected def histoName(name: String, component: String): String =
      s"$name.$component"

    protected def labelPercentile(p: Double): String = {
      // this has a strange quirk that p999 gets formatted as p9990
      // (trailing "00" is stripped, e.g. "p5000" -> "p50", but "p9990" keeps
      // its trailing "90" — preserved for output compatibility)
      val gname: String = "p" + (p * 10000).toInt
      if (3 < gname.length && ("00" == gname.substring(3))) {
        gname.substring(0, 3)
      } else {
        gname
      }
    }

    protected def labelMin: String = "min"

    protected def labelMax: String = "max"

    protected def labelAverage: String = "avg"
  }

  /**
   * Replicates the behavior for formatting Ostrich stats.
   *
   * See Ostrich's `Distribution.toMap`.
   */
  object Ostrich extends StatsFormatter {
    protected def histoName(name: String, component: String): String =
      s"$name.$component"

    protected def labelPercentile(p: Double): String = {
      // common quantiles get fixed labels; anything else is scaled by 10000
      p match {
        case 0.5d => "p50"
        case 0.9d => "p90"
        case 0.95d => "p95"
        case 0.99d => "p99"
        case 0.999d => "p999"
        case 0.9999d => "p9999"
        case _ =>
          val padded = (p * 10000).toInt
          s"p$padded"
      }
    }

    protected def labelMin: String = "minimum"

    protected def labelMax: String = "maximum"

    protected def labelAverage: String = "average"
  }

  /**
   * Replicates the behavior for formatting Commons Stats stats.
   *
   * See Commons Stats' `Stats.getVariables()`.
   */
  object CommonsStats extends StatsFormatter {
    // Commons Stats reports memory in MB and uptime in seconds
    private[this] def inMegabytes(l: Number): Number = l.longValue() / 1048576L
    private[this] def inSeconds(l: Number): Number = l.longValue() / 1000L

    // Per-collector GC metric names, e.g. jvm_gc_ParNew_cycles
    private[this] val gcCycles: Regex = "^jvm_gc_(.*)_cycles$".r
    private[this] val gcMsec: Regex = "^jvm_gc_(.*)_msec$".r

    // Renames the standard JVM metric keys to the Commons Stats vocabulary
    // (and converts units where the two schemes differ)
    override def apply(values: SampledValues): Map[String, Number] = {
      val original = super.apply(values)
      original.map {
        case ("jvm_num_cpus", n) => "jvm_available_processors" -> n
        case ("jvm_classes_current_loaded", n) => "jvm_class_loaded_count" -> n
        case ("jvm_classes_total_loaded", n) => "jvm_class_total_loaded_count" -> n
        case ("jvm_classes_total_unloaded", n) => "jvm_class_unloaded_count" -> n
        case ("jvm_gc_msec", n) => "jvm_gc_collection_time_ms" -> n
        case ("jvm_gc_cycles", n) => "jvm_gc_collection_count" -> n
        case ("jvm_heap_committed", n) => "jvm_memory_heap_mb_committed" -> inMegabytes(n)
        case ("jvm_heap_max", n) => "jvm_memory_heap_mb_max" -> inMegabytes(n)
        case ("jvm_heap_used", n) => "jvm_memory_heap_mb_used" -> inMegabytes(n)
        case ("jvm_nonheap_committed", n) => "jvm_memory_non_heap_mb_committed" -> inMegabytes(n)
        case ("jvm_nonheap_max", n) => "jvm_memory_non_heap_mb_max" -> inMegabytes(n)
        case ("jvm_nonheap_used", n) => "jvm_memory_non_heap_mb_used" -> inMegabytes(n)
        case ("jvm_thread_count", n) => "jvm_threads_active" -> n
        case ("jvm_thread_daemon_count", n) => "jvm_threads_daemon" -> n
        case ("jvm_thread_peak_count", n) => "jvm_threads_peak" -> n
        case ("jvm_start_time", n) => "jvm_time_ms" -> n
        case ("jvm_uptime", n) => "jvm_uptime_secs" -> inSeconds(n)
        case (gcCycles(gc), n) => s"jvm_gc_${gc}_collection_count" -> n
        case (gcMsec(gc), n) => s"jvm_gc_${gc}_collection_time_ms" -> n
        case kv => kv
      }
    }

    protected def histoName(name: String, component: String): String =
      s"${name}_$component"

    protected def labelPercentile(p: Double): String =
      s"${p * 100}_percentile".replace(".", "_")

    protected def labelMin: String = "min"

    protected def labelMax: String = "max"

    protected def labelAverage: String = "avg"
  }
}
| koshelev/finagle | finagle-stats/src/main/scala/com/twitter/finagle/stats/StatsFormatter.scala | Scala | apache-2.0 | 6,767 |
// Negative compilation test (neg/t6667b): inside main, both the implicit
// `a` defined in object Test and the local implicit `b` are candidate
// Test.Box instances, so `new Test()` — which requires an implicit
// Test.Box — should be rejected as ambiguous.
// NOTE(review): expected-error line positions may matter to the test
// harness — confirm before reformatting this file.
object Test {
  abstract class Box {
    val value: Int
  }

  implicit val a: Box = new Box {
    val value= 1
  }

  def main(args: Array[String]): Unit = {
    implicit val b: Box= new Box {
      val value= 2
    }

    new Object {
      new Test()
    }
    // compare with:
    new Test()
  }
}

class Test()(implicit x: Test.Box) {
  println(x.value)
}
| som-snytt/dotty | tests/untried/neg/t6667b.scala | Scala | apache-2.0 | 361 |
package me.jairam.csv
import java.io.File
import me.jairam.schema.{
BOOLEAN,
DECIMAL,
DOUBLE,
DataSchema,
INT,
LONG,
STRING
}
import org.scalatest.{FlatSpec, Matchers}
/**
 * Tests for CsvReader: schema inference with default options, custom
 * separator/quote characters, custom escape characters, and rejection of
 * files that are not actually CSV/text.
 */
class CsvReaderSpec extends FlatSpec with Matchers {

  val validCsvDir = "schema_infer/valid_csv"

  // Plain comma-separated file exercising every inferred data type
  val validInput =
    getClass.getClassLoader.getResource(s"$validCsvDir/input.csv").toURI

  // Pipe-separated file quoted with backticks
  val validCustomOptsInput =
    getClass.getClassLoader.getResource(s"$validCsvDir/custom_opts.csv").toURI

  // File using '$' as the escape character
  val validCustomEscapeInput = getClass.getClassLoader
    .getResource(s"$validCsvDir/custom_escape_char.csv")
    .toURI

  // A PNG masquerading as .csv, used to exercise the error path
  val invalidCsv = getClass.getClassLoader
    .getResource("schema_infer/invalid_csv/png_in_disguise.csv")
    .toURI

  "CsvReader" should "infer schema correctly with default options" in {
    val reader = new CsvReader(new File(validInput))
    val schema = reader.inferSchema()
    val expected =
      Seq(
        DataSchema(columnName = "id",
                   dataType = DECIMAL,
                   precision = 32,
                   nullable = 1), // Check Decimal inference
        DataSchema(columnName = "firstname", dataType = STRING, nullable = 0), // Check String inference
        DataSchema(columnName = "lastname", dataType = STRING, nullable = 0),
        DataSchema(columnName = "account", dataType = STRING, nullable = 0),
        DataSchema(columnName = "balance", dataType = LONG, nullable = 0), // Check Long inference
        DataSchema(columnName = "active", dataType = BOOLEAN, nullable = 0), // Check Boolean inference
        DataSchema(columnName = "credit", dataType = DOUBLE, nullable = 1), // Check Float inference
        DataSchema(columnName = "branch", dataType = INT, nullable = 0), // Check Int inference
        DataSchema(columnName = "notes", dataType = STRING, nullable = 1) // Completely empty fields fall back to String
      )
    schema match {
      case Right(s) =>
        s should contain theSameElementsInOrderAs expected
      case Left(err) =>
        fail(err.msg)
    }
  }

  it should "infer schema correctly for delimiter as pipe character" in {
    val reader = new CsvReader(new File(validCustomOptsInput),
                               separator = '|',
                               quoteChar = '`')
    val schema = reader.inferSchema()
    val expected =
      Seq(
        DataSchema(columnName = "id", dataType = INT, nullable = 0),
        DataSchema(columnName = "name", dataType = STRING, nullable = 0),
        DataSchema(columnName = "age", dataType = INT, nullable = 0),
        DataSchema(columnName = "quote", dataType = STRING, nullable = 0)
      )
    schema match {
      case Right(s) =>
        s should contain theSameElementsInOrderAs expected
      case Left(err) =>
        fail(err.msg)
    }
  }

  it should "read csv with custom escape character" in {
    val reader =
      new CsvReader(new File(validCustomEscapeInput), escapeChar = '$')
    val expected =
      "id\nname\nage\nquote\n1\nhulk\n45\nHULK IS NOT AFRAID,...\"HULK IS STRONGEST ONE THERE IS!!!"
    reader.rows() match {
      case Right(rows) =>
        rows.flatten.mkString("\n") shouldBe expected
      case Left(err) =>
        fail(err.msg)
    }
  }

  it should "infer schema correctly for custom escape character" in {
    val reader =
      new CsvReader(new File(validCustomEscapeInput), escapeChar = '$')
    val schema = reader.inferSchema()
    val expected =
      Seq(
        DataSchema(columnName = "id", dataType = INT, nullable = 0),
        DataSchema(columnName = "name", dataType = STRING, nullable = 0),
        DataSchema(columnName = "age", dataType = INT, nullable = 0),
        DataSchema(columnName = "quote", dataType = STRING, nullable = 0)
      )
    schema match {
      case Right(s) =>
        s should contain theSameElementsInOrderAs expected
      case Left(err) =>
        fail(err.msg)
    }
  }

  "CsvReader" should "throw InvalidFileException if file is not csv or text file" in {
    val reader = new CsvReader(new File(invalidCsv))
    reader.inferSchema() match {
      case Left(err) =>
        err shouldBe a[CsvError]
      case Right(_) =>
        // Use ScalaTest's fail (consistent with the other tests) instead of
        // throwing a bare Exception
        fail("This should not work")
    }
  }
}
| jairamc/csv-avro-converter | lib/src/test/scala/me/jairam/csv/CsvReaderSpec.scala | Scala | mit | 4,244 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.physical.rdbms.fs
import slamdata.Predef._
import quasar.physical.rdbms.common._
import quasar.physical.rdbms.model.{AlterColumn, TableModel}
import doobie.free.connection.ConnectionIO
/** DDL operations for maintaining schemas and tables in the backing RDBMS.
  * Each operation is returned as a doobie `ConnectionIO` action; nothing is
  * executed until the action is run against a connection.
  */
trait RdbmsCreate {

  /** Creates any missing parent schemas of `schema`. */
  def ensureSchemaParents(schema: Schema): ConnectionIO[Unit]

  /** Creates the given schema. */
  def createSchema(schema: Schema): ConnectionIO[Unit]

  /** Creates a table at `tablePath` following the given model. */
  def createTable(tablePath: TablePath, model: TableModel): ConnectionIO[Unit]

  /** Applies the given column alterations to an existing table. */
  def alterTable(tablePath: TablePath, cols: Set[AlterColumn]): ConnectionIO[Unit]
}
| jedesah/Quasar | rdbms/src/main/scala/quasar/physical/rdbms/fs/RdbmsCreate.scala | Scala | apache-2.0 | 1,110 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.variable
import java.util.Arrays
/** A Domain for sequences of DiscreteValues.
    The 'elementDomain' is abstract.
    Typical usage for DiscreteValues with domain size of 10: object MyDomain extends DiscreteSeqDomain { val elementDomain = new DiscreteDomain(10) }
    These are used, for example, for the 'z' indicator variables in Latent Dirichlet Allocation.
    @author Andrew McCallum */
abstract class DiscreteSeqDomain extends Domain {
  type Value <: Seq[DiscreteValue]
  // The domain shared by every element of the sequence
  def elementDomain: DiscreteDomain
}
/** An abstract variable whose values are sequences of DiscreteValues.
    The method 'domain' is abstract.
    @author Andrew McCallum */
trait DiscreteSeqVar extends IndexedSeqVar[DiscreteValue] {
  type Value <: IndexedSeq[DiscreteValue]
  def domain: DiscreteSeqDomain
  // Integer index (within elementDomain) of the element at seqIndex
  def intValue(seqIndex:Int): Int
  // All element indices, in sequence order
  def intValues: Array[Int]
  // Distinct element indices (sorted in the mutable implementation below)
  def uniqueIntValues: Array[Int]
  // The sequence viewed as DiscreteValues rather than raw ints
  def discreteValues: IndexedSeq[DiscreteValue]
  def length: Int
  def apply(index:Int): DiscreteValue
}
/** An abstract variable whose values are sequences of DiscreteValues, and this sequence can be changed.
    Backed by the protected int buffer provided by ProtectedIntArrayBuffer.
    The method 'domain' is abstract.
    @author Andrew McCallum */
trait MutableDiscreteSeqVar[A<:DiscreteValue] extends MutableVar with cc.factorie.util.ProtectedIntArrayBuffer with DiscreteSeqVar {
  type Value <: IndexedSeq[A]
  override def length = _length
  override def apply(index: Int): A = domain.elementDomain.apply(_apply(index)).asInstanceOf[A]
  def domain: DiscreteSeqDomain
  // Live view over the underlying int buffer; reflects later mutations
  def discreteValues: IndexedSeq[DiscreteValue] = new IndexedSeq[A] {
    def length = _length
    def apply(index:Int) = domain.elementDomain.apply(_apply(index)).asInstanceOf[A]
  }
  // Snapshot of the current contents, copied into a fresh array
  def value: Value = new IndexedSeq[A] {
    private val arr = new Array[Any](_length)
    _mapToArray(arr, (i:Int) => domain.elementDomain.apply(i)) // Do this so that it stays constant even if _array changes later
    def length = arr.length
    def apply(i:Int) = arr(i).asInstanceOf[A]
    //_toSeq.map(i => domain.elementDomain.getValue(i)) // TODO make this more efficient
  }.asInstanceOf[Value]
  def set(newValue:Value)(implicit d:DiffList): Unit = _set(Array.tabulate(newValue.length)(i => newValue(i).intValue))
  def trimCapacity(): Unit = _trimCapacity
  def clear(): Unit = _clear()
  // Overwrites every position of the backing array with the same int value
  def fill(newValue:Int): Unit = Arrays.fill(_array, newValue)
  def appendInt(i:Int): Unit = _append(i)
  def +=(e:A): Unit = appendInt(e.intValue)
  def ++=(es:Iterable[A]): Unit = _appendAll(es.map(_.intValue))
  def appendInts(xs:Iterable[Int]) = _appendAll(xs)
  def appendInts(xs:Array[Int]) = _appendAll(xs)
  def intValue(seqIndex:Int): Int = _apply(seqIndex)
  // NOTE: exposes the raw backing array, not a copy
  def intValues: Array[Int] = _array
  def uniqueIntValues: Array[Int] = _array.distinct.sorted
  def set(seqIndex:Int, newValue:Int)(implicit d:DiffList): Unit = {
    require(d eq null) // undoable diffs are not supported for positional updates
    _update(seqIndex, newValue)
  }
}
/** An variable whose values are sequences of DiscreteValues, and this sequence can be changed.
    The method 'domain' is abstract.
    These are used, for example, to store the 'z' indicator variables in Latent Dirichlet Allocation.
    @author Andrew McCallum */
abstract class DiscreteSeqVariable extends MutableDiscreteSeqVar[DiscreteValue] {
  type Value = IndexedSeq[DiscreteValue]
  // Initialize from an existing sequence of element indices
  def this(initialValue:Seq[Int]) = { this(); /*_setCapacity(if (initialValue.length > 0) initialValue.length else 1);*/ if (initialValue.length > 0) _appendAll(initialValue.toArray) }
  def this(initialValue:Array[Int]) = { this(); if (initialValue.length > 0) _appendAll(initialValue) }
  // Initialize with `len` elements, all set to index 0
  def this(len:Int) = { this(); _setCapacity(len); _appendAll(Array.fill(len)(0)) }
}
/** Mix this trait into a DiscreteSeqVariable in order to be able to mark positions in the sequence as "breaks".
    For example, this is used to mark which words in a Latent Dirichlet Allocation document had stopwords removed immediately before them;
    this in turn is used in various phrase processing logic.
    @author Andrew McCallum */
trait SeqBreaks {
  /** Contains indices of the sequence positions which immediately follow breaks (e.g. removed stopwords) */
  val breaks = new scala.collection.mutable.BitSet
}
| patverga/factorie | src/main/scala/cc/factorie/variable/DiscreteSeqVariable.scala | Scala | apache-2.0 | 4,928 |
package models.daos
import javax.inject.Inject
import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.api.util.PasswordInfo
import com.mohiva.play.silhouette.persistence.daos.DelegableAuthInfoDAO
import play.api.db.slick.DatabaseConfigProvider
import scala.concurrent.{ExecutionContext, Future}
import scala.reflect.ClassTag
/**
 * The DAO to store the password information, backed by Slick.
 */
class PasswordInfoDAO @Inject() (protected val dbConfigProvider: DatabaseConfigProvider)
                                (implicit val ex: ExecutionContext,
                                 val classTag: ClassTag[PasswordInfo]
                                )
  extends DelegableAuthInfoDAO[PasswordInfo] with DAOSlick {

  import profile.api._

  // Joins login-info rows matching `loginInfo` to their password-info rows
  protected def passwordInfoQuery(loginInfo: LoginInfo) = for {
    dbLoginInfo <- loginInfoQuery(loginInfo)
    dbPasswordInfo <- slickPasswordInfos if dbPasswordInfo.loginInfoId === dbLoginInfo.id
  } yield dbPasswordInfo

  // Use subquery workaround instead of join to get authinfo because slick only supports selecting
  // from a single table for update/delete queries (https://github.com/slick/slick/issues/684).
  protected def passwordInfoSubQuery(loginInfo: LoginInfo) =
    slickPasswordInfos.filter(_.loginInfoId in loginInfoQuery(loginInfo).map(_.id))

  // Inserts a new password row linked to the login info's row id;
  // runs transactionally because it first reads the login-info id
  protected def addAction(loginInfo: LoginInfo, authInfo: PasswordInfo) =
    loginInfoQuery(loginInfo).result.head.flatMap { dbLoginInfo =>
      slickPasswordInfos +=
        DBPasswordInfo(authInfo.hasher, authInfo.password, authInfo.salt, dbLoginInfo.id.get)
    }.transactionally

  // Updates hasher/password/salt of the existing row for this login info
  protected def updateAction(loginInfo: LoginInfo, authInfo: PasswordInfo) =
    passwordInfoSubQuery(loginInfo).
      map(dbPasswordInfo => (dbPasswordInfo.hasher, dbPasswordInfo.password, dbPasswordInfo.salt)).
      update((authInfo.hasher, authInfo.password, authInfo.salt))

  /**
   * Finds the auth info which is linked with the specified login info.
   *
   * @param loginInfo The linked login info.
   * @return The retrieved auth info or None if no auth info could be retrieved for the given login info.
   */
  def find(loginInfo: LoginInfo): Future[Option[PasswordInfo]] = {
    db.run(passwordInfoQuery(loginInfo).result.headOption).map { dbPasswordInfoOption =>
      dbPasswordInfoOption.map(dbPasswordInfo =>
        PasswordInfo(dbPasswordInfo.hasher, dbPasswordInfo.password, dbPasswordInfo.salt))
    }
  }

  /**
   * Adds new auth info for the given login info.
   *
   * @param loginInfo The login info for which the auth info should be added.
   * @param authInfo The auth info to add.
   * @return The added auth info.
   */
  def add(loginInfo: LoginInfo, authInfo: PasswordInfo): Future[PasswordInfo] =
    db.run(addAction(loginInfo, authInfo)).map(_ => authInfo)

  /**
   * Updates the auth info for the given login info.
   *
   * @param loginInfo The login info for which the auth info should be updated.
   * @param authInfo The auth info to update.
   * @return The updated auth info.
   */
  def update(loginInfo: LoginInfo, authInfo: PasswordInfo): Future[PasswordInfo] =
    db.run(updateAction(loginInfo, authInfo)).map(_ => authInfo)

  /**
   * Saves the auth info for the given login info.
   *
   * This method either adds the auth info if it doesn't exists or it updates the auth info
   * if it already exists.
   *
   * @param loginInfo The login info for which the auth info should be saved.
   * @param authInfo The auth info to save.
   * @return The saved auth info.
   */
  def save(loginInfo: LoginInfo, authInfo: PasswordInfo): Future[PasswordInfo] = {
    // Left join tells us whether a password row already exists for this login
    val query = loginInfoQuery(loginInfo).joinLeft(slickPasswordInfos).on(_.id === _.loginInfoId)
    val action = query.result.head.flatMap {
      case (dbLoginInfo, Some(dbPasswordInfo)) => updateAction(loginInfo, authInfo)
      case (dbLoginInfo, None) => addAction(loginInfo, authInfo)
    }
    db.run(action).map(_ => authInfo)
  }

  /**
   * Removes the auth info for the given login info.
   *
   * @param loginInfo The login info for which the auth info should be removed.
   * @return A future to wait for the process to be completed.
   */
  def remove(loginInfo: LoginInfo): Future[Unit] =
    db.run(passwordInfoSubQuery(loginInfo).delete).map(_ => ())
}
| epot/Gifter | app/models/daos/PasswordInfoDAO.scala | Scala | mit | 4,401 |
package services
import java.time._
import akka.actor.ActorSystem
import generated.Tables._
import generated.enums.Status
import javax.inject.{Inject, Singleton}
import models._
import models.request.{Create, Filter, JobResult}
import org.jooq.impl.DSL
import org.jooq.{Update => _, _}
import play.api.db.Database
import play.api.libs.ws.WSClient
import play.api.{Configuration, Logger}
import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
/**
  * A database backed queue.
  *
  * @param db database for queue state
  * @param logStore conversion log store
  * @param ws HTTP client (used by the currently disabled worker liveness check)
  * @param configuration application configuration (provides `queue.timeout`)
  * @param clock clock used for all timestamps, injectable for testing
  * @param actorSystem scheduler for the periodic stale-job sweep
  */
@Singleton
class DBQueue @Inject()(db: Database,
                        logStore: LogStore,
                        ws: WSClient,
                        configuration: Configuration,
                        clock: Clock,
                        actorSystem: ActorSystem)(implicit executionContext: ExecutionContext) extends Queue with Dispatcher {

  private val logger = Logger(this.getClass)

  private val timeout = java.time.Duration.ofMillis(configuration.getMillis("queue.timeout"))

  // Periodic sweep for jobs stuck in processing state
  actorSystem.scheduler.schedule(initialDelay = 10.seconds, interval = 1.minutes)(checkQueue)

  // NOTE(review): the sweep body is currently commented out, so this only
  // logs; re-enabling requires the Status/worker columns shown below
  private def checkQueue(): Unit = {
    logger.debug("Check stale jobs")
    db.withConnection { connection =>
      //      val sql = DSL.using(connection, SQLDialect.POSTGRES_9_4)
      //      val nowMinusTimeout = getOffsetDateTime(LocalDateTime.now(clock).minus(timeout))
      //      val nullTimestamp: OffsetDateTime = null
      //      val nullString: String = null
      //      selectJob(sql)
      //        .where(JOB.FINISHED.isNull
      //          .and(JOB.PROCESSING.isNotNull)
      //          .and(JOB.PROCESSING.lt(nowMinusTimeout))
      //        )
      //        .fetch(Mappers.JobMapper)
      //        .asScala
      //        .filter { job => !pingWorker(job) }
      //        .foreach { job =>
      //          logger.info(s"Return ${job.id} back to queue")
      //          try {
      //            sql
      //              .update(JOB)
      //              .set(JOB.STATUS, Status.queue)
      //              .set(JOB.PROCESSING, nullTimestamp)
      //              .set(JOB.WORKER, nullString)
      //              .where(JOB.UUID.eq(job.id))
      //              .execute()
      //          } catch {
      //            case e: DataAccessException =>
      //              logger.error(s"Failed to return stale job to queue: ${e.getMessage}", e)
      //          }
      //        }
    }
  }

  // Attaches the zone offset of `clock` to a local timestamp
  private def getOffsetDateTime(dateTime: LocalDateTime): OffsetDateTime = {
    val offset = clock.getZone.getRules.getOffset(dateTime)
    OffsetDateTime.of(dateTime, offset)
  }

  // Worker liveness check; the HTTP ping is commented out, so every worker
  // is currently reported as unreachable
  private def pingWorker(job: Job): Boolean = {
    //    WorkerStore.workers.get(job.worker.get).map { worker =>
    //      val workerUri = worker.uri.resolve("api/v1/status")
    ////      logger.debug(s"Check worker status: ${workerUri}")
    //      val req: Future[Boolean] = ws.url(workerUri.toString)
    //        .addHttpHeaders(AUTH_TOKEN_HEADER -> worker.token)
    //        .withRequestTimeout(10000.millis)
    //        .get()
    //        .map(_.status == http.Status.OK)
    //      val res: Boolean = Await.result(req, 10000.millis)
    //      return res
    //    }
    return false
  }

  // Shared projection over the JOB table used by the (disabled) stale sweep
  private def selectJob(sql: DSLContext): SelectJoinStep[Record6[String, String, String, /*String, Status,*/ Integer, OffsetDateTime, /* OffsetDateTime, String, */ OffsetDateTime]] = {
    sql
      .select(JOB.UUID, JOB.INPUT, JOB.OUTPUT,
        //        JOB.TRANSTYPE, JOB.STATUS,
        JOB.PRIORITY,
        JOB.CREATED,
        //        JOB.PROCESSING, JOB.WORKER,
        JOB.FINISHED)
      .from(JOB)
  }

  // Lists all jobs; the aggregated task states are folded into a single
  // job-level status (error > queue > done > process)
  override def contents(filter: Filter): Seq[Job] = {
    db.withConnection { connection =>
      val sql = DSL.using(connection, SQLDialect.POSTGRES_9_4)
      //[string, string, string,int, date, date, status]
      val query =
        """
  SELECT
    job.uuid,
    job.input,
    job.output,
    job.priority,
    job.created,
    job.finished,
    -- task.uuid,
    -- task.transtype,
    -- task.input,
    -- task.output,
    CASE
    WHEN task.error
      THEN 'error'
    WHEN task.queue
      THEN 'queue'
    WHEN task.done
      THEN 'done'
    ELSE 'process'
    END AS STATUS
    -- task.id,
    -- task.processing,
    -- task.finished,
    -- task.worker,
    -- task.job,
    -- task.position
  FROM job
    INNER JOIN (
                 SELECT
                   job,
                   bool_or('error' IN (status)) AS error,
                   bool_and('queue' IN (status)) AS queue,
                   bool_and('done' IN (status)) AS done
                 FROM task
                 GROUP BY job
               ) AS task
      ON job.id = task.job
  ORDER BY job.created;""";
      val res = sql
        .fetch(query)
        .asInstanceOf[Result[Mappers.JobRecord]]
        .map(Mappers.JobMapper)
        .asScala
      res
    }
  }

  // Lookup by job UUID; the query is commented out, so this always returns None
  override def get(id: String): Option[Job] =
    db.withConnection { connection =>
      //      val sql = DSL.using(connection, SQLDialect.POSTGRES_9_4)
      //      val query = sql
      //        .select(JOB.UUID, JOB.INPUT, JOB.OUTPUT,
      //          //        JOB.TRANSTYPE, JOB.STATUS,
      //          JOB.PRIORITY,
      //          JOB.CREATED,
      //          //        JOB.PROCESSING, JOB.WORKER,
      //          JOB.FINISHED)
      //        .from(JOB)
      //        .leftJoin(TASK).on(TASK.JOB.eq(JOB.ID))
      //        .where(JOB.UUID.eq(id))
      //      val res = query
      //        .fetchOne(Mappers.JobMapper)
      //      Option(res)
      None
    }

  // Conversion log lookup; `offset` is currently ignored
  override def log(id: String, offset: Int): Option[Seq[String]] =
    logStore.get(id)

  // Inserts the new job row plus one task row per transtype (position 1..n)
  override def add(create: Create): Job = {
    val job = create.toJob
    db.withConnection { connection =>
      val created = LocalDateTime.now(clock)
      val sql = DSL.using(connection, SQLDialect.POSTGRES_9_4)
      // job
      val query = sql
        .insertInto(JOB,
          JOB.UUID, JOB.CREATED, JOB.INPUT, JOB.OUTPUT, JOB.PRIORITY
          //, JOB.TRANSTYPE
        )
        .values(job.id, OffsetDateTime.of(created, ZoneOffset.UTC),
          job.input, job.output, job.priority
          //, job.transtype
        )
      val res = query
        .returning(JOB.UUID, JOB.ID
          //, JOB.STATUS
        )
        .fetchOne()
      val jobID = res.getId
      // tasks
      val taskQuery = job.transtype.zipWithIndex.map {
        case (task, i) =>
          sql
            .insertInto(TASK,
              TASK.UUID, TASK.TRANSTYPE, TASK.STATUS, TASK.JOB, TASK.POSITION
              //, JOB.TRANSTYPE
            )
            .values(
              task.id, task.transtype, Status.valueOf(task.status.toString), jobID, i + 1
            )
      }.asJavaCollection
      val taskRes = sql
        .batch(taskQuery)
        .execute()
      job.copy(
        id = res.getUuid
      )
    }
  }

  //  override def update(update: Update): Option[Task] = {
  //    db.withConnection { connection =>
  //      //      logger.info("update")
  //      //      val sql = DSL.using(connection, SQLDialect.POSTGRES_9_4)
  //      //      val now = OffsetDateTime.now(clock)
  //      //      val query = sql
  //      //        .update(TASK)
  //      //        .set(TASK.STATUS, Status.valueOf(update.status.get.toString))
  //      //        .set(update.status.get match {
  //      //          case StatusString.Queue => TASK.CREATED
  //      //          case StatusString.Process => TASK.PROCESSING
  //      //          case StatusString.Done => TASK.FINISHED
  //      //          case StatusString.Error => TASK.FINISHED
  //      //        }, now)
  //      //        .where(TASK.UUID.eq(update.id))
  //      //      val res = query
  //      //        .returning(TASK.UUID, TASK.STATUS)
  //      //        .execute()
  //      None
  //    }
  //  }

  // Claims the next eligible task for a worker: a queued task of a supported
  // transtype that is either first in its job or whose predecessor is done.
  // The claimed task is marked as processing and its input/output resolved
  // from the job or the preceding task.
  override def request(transtypes: List[String], worker: Worker): Option[Task] =
    db.withConnection { connection =>
      logger.debug("Request work for transtypes " + transtypes.mkString(", "))
      val sql = DSL.using(connection, SQLDialect.POSTGRES_9_4)
      val now = OffsetDateTime.now(clock)
      val prev = TASK.as("prev")
      val first = TASK.as("first")
      val query: Select[_ <: Record1[Integer]] = sql.select(first.ID)
        .from(first)
        .leftJoin(JOB).on(JOB.ID.eq(first.JOB))
        .where(first.STATUS.eq(Status.queue)
          .and(first.TRANSTYPE.in(transtypes.asJava))
          .and(first.POSITION.eq(1)
            .or(
              first.JOB.in(
                //                sql.fetchExists(
                //                exists(
                sql.select(prev.JOB)
                  .from(prev)
                  .where(prev.JOB.eq(first.JOB)
                    .and(prev.STATUS.eq(Status.done))
                    .and(prev.POSITION.eq(first.POSITION.sub(1)))
                  )
              )
            )
          )
        )
        .orderBy(JOB.PRIORITY.desc(), JOB.CREATED.asc(), first.POSITION.asc())
        .limit(1)
      val update = sql
        .update(TASK)
        .set(TASK.STATUS, Status.process)
        .set(TASK.PROCESSING, now)
        .set(TASK.WORKER, worker.id)
        .where(TASK.ID.in(query))
      val res = update
        .returning(TASK.UUID, TASK.ID, TASK.JOB, TASK.POSITION, TASK.STATUS, TASK.PROCESSING, TASK.TRANSTYPE)
        .fetchOne()
      if (res == null) {
        return None
      }
      val lastPosition: Int = sql
        .select(DSL.max(TASK.POSITION))
        .from(TASK)
        .where(TASK.JOB.eq(res.getJob))
        .fetchOne()
        .value1()
      // First task reads the job input; later tasks read the previous task's
      // output. Only the last task writes to the job's final output.
      val (input: String, output: Option[String], jobId: String) = res.getPosition.intValue() match {
        case 1 => {
          val job = sql
            .select(JOB.OUTPUT, JOB.INPUT, JOB.UUID)
            .from(JOB)
            .where(JOB.ID.eq(res.getJob))
            .fetchOne()
          (
            job.get(JOB.INPUT),
            if (lastPosition == 1) Some(job.get(JOB.OUTPUT)) else None,
            job.get(JOB.UUID)
          )
        }
        case position => {
          val prevTask = sql
            .select(TASK.OUTPUT, JOB.OUTPUT, JOB.UUID)
            .from(TASK)
            .join(JOB).on(TASK.JOB.eq(JOB.ID))
            .where(TASK.JOB.eq(res.getJob)
              .and(TASK.POSITION.eq(res.getPosition - 1)))
            .fetchOne()
          (
            prevTask.get(TASK.OUTPUT),
            if (lastPosition == position) Some(prevTask.get(JOB.OUTPUT)) else None,
            prevTask.get(JOB.UUID)
          )
        }
      }
      val resourceUpdate = sql
        .update(TASK)
        .set(TASK.INPUT, input)
        .set(TASK.OUTPUT, output.getOrElse(null))
        .where(TASK.ID.eq(res.getId))
        .execute()
      logger.debug(s"Request got back ${update}")
      resourceUpdate match {
        case 0 => None
        case _ =>
          val task = Task(
            res.getUuid,
            jobId,
            Some(input),
            output,
            res.getTranstype,
            Map.empty,
            StatusString.parse(res.getStatus),
            //            res.getPriority.intValue(),
            //            res.getCreated.toLocalDateTime,
            Some(res.getProcessing.toLocalDateTime),
            Some(worker.id),
            None //Some(res.getFinished.toLocalDateTime)
          )
          Some(task)
      }
    }

  // FIXME this should return a Try or Option
  override def submit(res: JobResult): Task =
    db.withConnection { connection =>
      logger.info(s"Submit $res")
      val sql = DSL.using(connection, SQLDialect.POSTGRES_9_4)
      val now = OffsetDateTime.now(clock)
      sql
        .update(TASK)
        .set(TASK.STATUS, Status.valueOf(res.task.status.toString))
        .set(TASK.OUTPUT, res.task.output.orNull)
        .set(TASK.FINISHED, now)
        .where(TASK.UUID.eq(res.task.id))
        .execute()
      logStore.add(res.task.id, res.log)
      res.task
    }
}
// jOOQ record-to-model mappers shared by DBQueue queries
private object Mappers {

  type ReviewStatus = String
  type CommentError = String

  // Row shape produced by DBQueue.contents: uuid, input, output, priority,
  // created, finished, aggregated status
  type JobRecord = Record7[
    // UUId
    String,
    // Input
    String,
    // Output
    String,
    // Priority
    Integer,
    // Created
    OffsetDateTime,
    // Finished
    OffsetDateTime,
    // Status
    String]

  object JobMapper extends RecordMapper[JobRecord, Job] {
    @Override
    def map(c: JobRecord): Job = {
      Job(
        id = c.value1,
        input = c.value2,
        output = c.value3,
        transtype = List.empty, // task list is not carried by this projection
        priority = c.value4.intValue(),
        created = c.value5.toLocalDateTime,
        finished = Option(c.value6).map(_.toLocalDateTime), // null when still running
        status = StatusString(c.value7)
      )
    }
  }

  //  object JobStatusMapper extends RecordMapper[Record3[String, String, Status], JobStatus] {
  //    @Override
  //    def map(c: Record3[String, String, Status]): JobStatus = {
  //      JobStatus(
  //        c.value1,
  //        Option(c.value2),
  //        StatusString.parse(c.value3.toString)
  //      )
  //    }
  //  }
}
| kuhnuri/kuhnuri-queue | app/services/DBQueue.scala | Scala | apache-2.0 | 13,426 |
package org.jetbrains.plugins.scala.lang.psi.stubs.impl
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs.{IStubElementType, StubBase, StubElement}
import com.intellij.util.io.StringRef
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScPackaging
import org.jetbrains.plugins.scala.lang.psi.stubs.ScPackagingStub
/**
  * Stub implementation for [[ScPackaging]] elements; stores the parent
  * package name, this packaging's own name, and the `isExplicit` flag
  * declared by [[ScPackagingStub]].
  *
  * @author ilyas
  */
class ScPackagingStubImpl(parent: StubElement[_ <: PsiElement],
                          elementType: IStubElementType[_ <: StubElement[_ <: PsiElement], _ <: PsiElement],
                          private val parentPackageNameRef: StringRef,
                          private val packageNameRef: StringRef,
                          val isExplicit: Boolean)
  extends StubBase[ScPackaging](parent, elementType) with ScPackagingStub {

  // Resolve the interned StringRef to a plain String on each access
  def parentPackageName: String = StringRef.toString(parentPackageNameRef)

  def packageName: String = StringRef.toString(packageNameRef)
}
} | gtache/intellij-lsp | intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/psi/stubs/impl/ScPackagingStubImpl.scala | Scala | apache-2.0 | 941 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets that meet specific criteria, giving a basic overview of the dataset's contents without surfacing deeper insights.