code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import javax.annotation.concurrent.GuardedBy
import scala.collection.mutable.{HashMap, HashSet}
import scala.concurrent.Future
import org.apache.hadoop.security.UserGroupInformation
import org.apache.spark.{ExecutorAllocationClient, SparkEnv, SparkException, TaskState}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.deploy.security.HadoopDelegationTokenManager
import org.apache.spark.executor.ExecutorLogUrlHandler
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Network._
import org.apache.spark.resource.ResourceProfile
import org.apache.spark.rpc._
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages._
import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend.ENDPOINT_NAME
import org.apache.spark.util.{RpcUtils, SerializableBuffer, ThreadUtils, Utils}
/**
* A scheduler backend that waits for coarse-grained executors to connect.
* This backend holds onto each executor for the duration of the Spark job rather than relinquishing
* executors whenever a task is done and asking the scheduler to launch a new executor for
* each new task. Executors may be launched in a variety of ways, such as Mesos tasks for the
* coarse-grained Mesos mode or standalone processes for Spark's standalone deploy mode
* (spark.deploy.*).
*/
private[spark]
class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: RpcEnv)
  extends ExecutorAllocationClient with SchedulerBackend with Logging {

  // Use an atomic variable to track total number of cores in the cluster for simplicity and speed
  protected val totalCoreCount = new AtomicInteger(0)

  // Total number of executors that are currently registered
  protected val totalRegisteredExecutors = new AtomicInteger(0)

  protected val conf = scheduler.sc.conf
  private val maxRpcMessageSize = RpcUtils.maxMessageSizeBytes(conf)
  private val defaultAskTimeout = RpcUtils.askRpcTimeout(conf)

  // Submit tasks only after (registered resources / total expected resources)
  // is equal to at least this value, that is double between 0 and 1.
  private val _minRegisteredRatio =
    math.min(1, conf.get(SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO).getOrElse(0.0))

  // Submit tasks after maxRegisteredWaitingTime milliseconds
  // if minRegisteredRatio has not yet been reached
  private val maxRegisteredWaitingTimeNs = TimeUnit.MILLISECONDS.toNanos(
    conf.get(SCHEDULER_MAX_REGISTERED_RESOURCE_WAITING_TIME))

  // Baseline timestamp for the maxRegisteredWaitingTimeNs deadline used by isReady().
  private val createTimeNs = System.nanoTime()

  // Accessing `executorDataMap` in the inherited methods from ThreadSafeRpcEndpoint doesn't need
  // any protection. But accessing `executorDataMap` out of the inherited methods must be
  // protected by `CoarseGrainedSchedulerBackend.this`. Besides, `executorDataMap` should only
  // be modified in the inherited methods from ThreadSafeRpcEndpoint with protection by
  // `CoarseGrainedSchedulerBackend.this`.
  private val executorDataMap = new HashMap[String, ExecutorData]

  // Number of executors for each ResourceProfile requested by the cluster
  // manager, [[ExecutorAllocationManager]]
  @GuardedBy("CoarseGrainedSchedulerBackend.this")
  private val requestedTotalExecutorsPerResourceProfile = new HashMap[ResourceProfile, Int]

  private val listenerBus = scheduler.sc.listenerBus

  // Executors we have requested the cluster manager to kill that have not died yet; maps
  // the executor ID to whether it was explicitly killed by the driver (and thus shouldn't
  // be considered an app-related failure). Visible for testing only.
  @GuardedBy("CoarseGrainedSchedulerBackend.this")
  private[scheduler] val executorsPendingToRemove = new HashMap[String, Boolean]

  // Executors that have been lost, but for which we don't yet know the real exit reason.
  private val executorsPendingLossReason = new HashSet[String]

  // Executors which are being decommissioned
  protected val executorsPendingDecommission = new HashSet[String]

  // A map of ResourceProfile id to map of hostname with its possible task number running on it
  @GuardedBy("CoarseGrainedSchedulerBackend.this")
  protected var rpHostToLocalTaskCount: Map[Int, Map[String, Int]] = Map.empty

  // The number of pending tasks per ResourceProfile id which is locality required
  @GuardedBy("CoarseGrainedSchedulerBackend.this")
  protected var numLocalityAwareTasksPerResourceProfileId = Map.empty[Int, Int]

  // The num of current max ExecutorId used to re-register appMaster
  @volatile protected var currentExecutorIdCounter = 0

  // Current set of delegation tokens to send to executors.
  private val delegationTokens = new AtomicReference[Array[Byte]]()

  // The token manager used to create security tokens.
  private var delegationTokenManager: Option[HadoopDelegationTokenManager] = None

  // Single daemon thread that periodically sends ReviveOffers to the driver endpoint
  // (scheduled in DriverEndpoint.onStart).
  private val reviveThread =
    ThreadUtils.newDaemonSingleThreadScheduledExecutor("driver-revive-thread")
class DriverEndpoint extends IsolatedRpcEndpoint with Logging {

  override val rpcEnv: RpcEnv = CoarseGrainedSchedulerBackend.this.rpcEnv

  // Maps each connected executor's RPC address to its executor ID, so onDisconnected can
  // resolve which executor a dropped connection belonged to.
  protected val addressToExecutorId = new HashMap[RpcAddress, String]

  // Spark configuration sent to executors. This is a lazy val so that subclasses of the
  // scheduler can modify the SparkConf object before this view is created.
  private lazy val sparkProperties = scheduler.sc.conf.getAll
    .filter { case (k, _) => k.startsWith("spark.") }
    .toSeq

  // Applies the user-provided custom log URL pattern (if any) to executor log URLs.
  private val logUrlHandler: ExecutorLogUrlHandler = new ExecutorLogUrlHandler(
    conf.get(UI.CUSTOM_EXECUTOR_LOG_URL))

  override def onStart(): Unit = {
    // Periodically revive offers to allow delay scheduling to work
    val reviveIntervalMs = conf.get(SCHEDULER_REVIVE_INTERVAL).getOrElse(1000L)
    reviveThread.scheduleAtFixedRate(() => Utils.tryLogNonFatalError {
      Option(self).foreach(_.send(ReviveOffers))
    }, 0, reviveIntervalMs, TimeUnit.MILLISECONDS)
  }
override def receive: PartialFunction[Any, Unit] = {
  // An executor reports a task state change; when the task is finished, return its CPUs
  // and custom resources to the executor and try to schedule more work on it.
  case StatusUpdate(executorId, taskId, state, data, resources) =>
    scheduler.statusUpdate(taskId, state, data.value)
    if (TaskState.isFinished(state)) {
      executorDataMap.get(executorId) match {
        case Some(executorInfo) =>
          val rpId = executorInfo.resourceProfileId
          val prof = scheduler.sc.resourceProfileManager.resourceProfileFromId(rpId)
          val taskCpus = ResourceProfile.getTaskCpusOrDefaultForProfile(prof, conf)
          executorInfo.freeCores += taskCpus
          resources.foreach { case (k, v) =>
            executorInfo.resourcesInfo.get(k).foreach { r =>
              r.release(v.addresses)
            }
          }
          makeOffers(executorId)
        case None =>
          // Ignoring the update since we don't know about the executor.
          logWarning(s"Ignored task status update ($taskId state $state) " +
            s"from unknown executor with ID $executorId")
      }
    }

  case ReviveOffers =>
    makeOffers()

  // Relay a task-kill request to the executor currently hosting the task, if registered.
  case KillTask(taskId, executorId, interruptThread, reason) =>
    executorDataMap.get(executorId) match {
      case Some(executorInfo) =>
        executorInfo.executorEndpoint.send(
          KillTask(taskId, executorId, interruptThread, reason))
      case None =>
        // Ignoring the task kill since the executor is not registered.
        logWarning(s"Attempted to kill task $taskId for unknown executor $executorId.")
    }

  case KillExecutorsOnHost(host) =>
    scheduler.getExecutorsAliveOnHost(host).foreach { exec =>
      killExecutors(exec.toSeq, adjustTargetNumExecutors = false, countFailures = false,
        force = true)
    }

  case UpdateDelegationTokens(newDelegationTokens) =>
    updateDelegationTokens(newDelegationTokens)

  case RemoveExecutor(executorId, reason) =>
    // We will remove the executor's state and cannot restore it. However, the connection
    // between the driver and the executor may be still alive so that the executor won't exit
    // automatically, so try to tell the executor to stop itself. See SPARK-13519.
    executorDataMap.get(executorId).foreach(_.executorEndpoint.send(StopExecutor))
    removeExecutor(executorId, reason)

  case DecommissionExecutor(executorId) =>
    // NOTE(review): logged at ERROR although decommissioning is a routine event — consider
    // lowering to INFO.
    logError(s"Received decommission executor message ${executorId}.")
    decommissionExecutor(executorId)

  case RemoveWorker(workerId, host, message) =>
    removeWorker(workerId, host, message)

  // The executor process reports it has fully launched: make all its cores available.
  case LaunchedExecutor(executorId) =>
    executorDataMap.get(executorId).foreach { data =>
      data.freeCores = data.totalCores
    }
    makeOffers(executorId)

  case e =>
    logError(s"Received unexpected message. ${e}")
}
override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
  // A new executor attempts to register with the driver.
  case RegisterExecutor(executorId, executorRef, hostname, cores, logUrls,
      attributes, resources, resourceProfileId) =>
    if (executorDataMap.contains(executorId)) {
      context.sendFailure(new IllegalStateException(s"Duplicate executor ID: $executorId"))
    } else if (scheduler.nodeBlacklist.contains(hostname) ||
        isBlacklisted(executorId, hostname)) {
      // If the cluster manager gives us an executor on a blacklisted node (because it
      // already started allocating those resources before we informed it of our blacklist,
      // or if it ignored our blacklist), then we reject that executor immediately.
      logInfo(s"Rejecting $executorId as it has been blacklisted.")
      context.sendFailure(new IllegalStateException(s"Executor is blacklisted: $executorId"))
    } else {
      // If the executor's rpc env is not listening for incoming connections, `hostPort`
      // will be null, and the client connection should be used to contact the executor.
      val executorAddress = if (executorRef.address != null) {
        executorRef.address
      } else {
        context.senderAddress
      }
      logInfo(s"Registered executor $executorRef ($executorAddress) with ID $executorId, " +
        s" ResourceProfileId $resourceProfileId")
      addressToExecutorId(executorAddress) = executorId
      totalCoreCount.addAndGet(cores)
      totalRegisteredExecutors.addAndGet(1)
      val resourcesInfo = resources.map { case (rName, info) =>
        // tell the executor it can schedule resources up to numSlotsPerAddress times,
        // as configured by the user, or set to 1 as that is the default (1 task/resource)
        val numParts = scheduler.sc.resourceProfileManager
          .resourceProfileFromId(resourceProfileId).getNumSlotsPerAddress(rName, conf)
        (info.name, new ExecutorResourceInfo(info.name, info.addresses, numParts))
      }
      val data = new ExecutorData(executorRef, executorAddress, hostname,
        0, cores, logUrlHandler.applyPattern(logUrls, attributes), attributes,
        resourcesInfo, resourceProfileId)
      // This must be synchronized because variables mutated
      // in this block are read when requesting executors
      CoarseGrainedSchedulerBackend.this.synchronized {
        executorDataMap.put(executorId, data)
        // Track the largest numeric executor ID seen, used when the appMaster re-registers.
        if (currentExecutorIdCounter < executorId.toInt) {
          currentExecutorIdCounter = executorId.toInt
        }
      }
      listenerBus.post(
        SparkListenerExecutorAdded(System.currentTimeMillis(), executorId, data))
      // Note: some tests expect the reply to come after we put the executor in the map
      context.reply(true)
    }

  case StopDriver =>
    // Reply before stopping so the caller's ask is answered before the endpoint goes away.
    context.reply(true)
    stop()

  case StopExecutors =>
    logInfo("Asking each executor to shut down")
    for ((_, executorData) <- executorDataMap) {
      executorData.executorEndpoint.send(StopExecutor)
    }
    context.reply(true)

  case RemoveWorker(workerId, host, message) =>
    removeWorker(workerId, host, message)
    context.reply(true)

  case DecommissionExecutor(executorId) =>
    // NOTE(review): logged at ERROR although decommissioning is a routine event — consider
    // lowering to INFO.
    logError(s"Received decommission executor message ${executorId}.")
    decommissionExecutor(executorId)
    context.reply(true)

  // An executor asks for its application configuration (spark.* properties, I/O encryption
  // key, current delegation tokens, and its resource profile).
  case RetrieveSparkAppConfig(resourceProfileId) =>
    val rp = scheduler.sc.resourceProfileManager.resourceProfileFromId(resourceProfileId)
    val reply = SparkAppConfig(
      sparkProperties,
      SparkEnv.get.securityManager.getIOEncryptionKey(),
      Option(delegationTokens.get()),
      rp)
    context.reply(reply)

  case e =>
    logError(s"Received unexpected ask ${e}")
}
// Make fake resource offers on all executors
private def makeOffers(): Unit = {
  // Make sure no executor is killed while some task is launching on it
  val taskDescs = withLock {
    // Filter out executors under killing
    val activeExecutors = executorDataMap.filterKeys(isExecutorActive)
    val workOffers = activeExecutors.map {
      case (id, executorData) =>
        new WorkerOffer(id, executorData.executorHost, executorData.freeCores,
          Some(executorData.executorAddress.hostPort),
          executorData.resourcesInfo.map { case (rName, rInfo) =>
            (rName, rInfo.availableAddrs.toBuffer)
          }, executorData.resourceProfileId)
    }.toIndexedSeq
    scheduler.resourceOffers(workOffers, true)
  }
  // Launch outside the lock; the task descriptions were computed atomically above.
  if (taskDescs.nonEmpty) {
    launchTasks(taskDescs)
  }
}

// An executor's RPC connection dropped: resolve its ID and remove it as lost.
override def onDisconnected(remoteAddress: RpcAddress): Unit = {
  addressToExecutorId
    .get(remoteAddress)
    .foreach(removeExecutor(_, SlaveLost("Remote RPC client disassociated. Likely due to " +
      "containers exceeding thresholds, or network issues. Check driver logs for WARN " +
      "messages.")))
}
// Make fake resource offers on just one executor
private def makeOffers(executorId: String): Unit = {
  // Make sure no executor is killed while some task is launching on it
  val taskDescs = withLock {
    // Filter out executors under killing
    if (isExecutorActive(executorId)) {
      val executorData = executorDataMap(executorId)
      val workOffers = IndexedSeq(
        new WorkerOffer(executorId, executorData.executorHost, executorData.freeCores,
          Some(executorData.executorAddress.hostPort),
          executorData.resourcesInfo.map { case (rName, rInfo) =>
            (rName, rInfo.availableAddrs.toBuffer)
          }, executorData.resourceProfileId))
      scheduler.resourceOffers(workOffers, false)
    } else {
      Seq.empty
    }
  }
  // Launch outside the lock; the task descriptions were computed atomically above.
  if (taskDescs.nonEmpty) {
    launchTasks(taskDescs)
  }
}
// Launch tasks returned by a set of resource offers
private def launchTasks(tasks: Seq[Seq[TaskDescription]]): Unit = {
  for (task <- tasks.flatten) {
    val serializedTask = TaskDescription.encode(task)
    if (serializedTask.limit() >= maxRpcMessageSize) {
      // The serialized task cannot fit in one RPC message: abort the whole task set with an
      // actionable message instead of attempting (and failing) the send.
      Option(scheduler.taskIdToTaskSetManager.get(task.taskId)).foreach { taskSetMgr =>
        try {
          var msg = "Serialized task %s:%d was %d bytes, which exceeds max allowed: " +
            s"${RPC_MESSAGE_MAX_SIZE.key} (%d bytes). Consider increasing " +
            s"${RPC_MESSAGE_MAX_SIZE.key} or using broadcast variables for large values."
          msg = msg.format(task.taskId, task.index, serializedTask.limit(), maxRpcMessageSize)
          taskSetMgr.abort(msg)
        } catch {
          case e: Exception => logError("Exception in error callback", e)
        }
      }
    }
    else {
      val executorData = executorDataMap(task.executorId)
      // Do resources allocation here. The allocated resources will get released after the task
      // finishes.
      val rpId = executorData.resourceProfileId
      val prof = scheduler.sc.resourceProfileManager.resourceProfileFromId(rpId)
      val taskCpus = ResourceProfile.getTaskCpusOrDefaultForProfile(prof, conf)
      executorData.freeCores -= taskCpus
      task.resources.foreach { case (rName, rInfo) =>
        assert(executorData.resourcesInfo.contains(rName))
        executorData.resourcesInfo(rName).acquire(rInfo.addresses)
      }
      logDebug(s"Launching task ${task.taskId} on executor id: ${task.executorId} hostname: " +
        s"${executorData.executorHost}.")
      executorData.executorEndpoint.send(LaunchTask(new SerializableBuffer(serializedTask)))
    }
  }
}
// Remove a disconnected slave from the cluster
private def removeExecutor(executorId: String, reason: ExecutorLossReason): Unit = {
  logDebug(s"Asked to remove executor $executorId with reason $reason")
  executorDataMap.get(executorId) match {
    case Some(executorInfo) =>
      // This must be synchronized because variables mutated
      // in this block are read when requesting executors
      val killed = CoarseGrainedSchedulerBackend.this.synchronized {
        addressToExecutorId -= executorInfo.executorAddress
        executorDataMap -= executorId
        executorsPendingLossReason -= executorId
        executorsPendingDecommission -= executorId
        // True only if the driver explicitly requested this kill (see executorsPendingToRemove).
        executorsPendingToRemove.remove(executorId).getOrElse(false)
      }
      totalCoreCount.addAndGet(-executorInfo.totalCores)
      totalRegisteredExecutors.addAndGet(-1)
      scheduler.executorLost(executorId, if (killed) ExecutorKilled else reason)
      listenerBus.post(
        SparkListenerExecutorRemoved(System.currentTimeMillis(), executorId, reason.toString))
    case None =>
      // SPARK-15262: If an executor is still alive even after the scheduler has removed
      // its metadata, we may receive a heartbeat from that executor and tell its block
      // manager to reregister itself. If that happens, the block manager master will know
      // about the executor, but the scheduler will not. Therefore, we should remove the
      // executor from the block manager when we hit this case.
      scheduler.sc.env.blockManager.master.removeExecutorAsync(executorId)
      logInfo(s"Asked to remove non-existent executor $executorId")
  }
}

// Remove a lost worker from the cluster
private def removeWorker(workerId: String, host: String, message: String): Unit = {
  logDebug(s"Asked to remove worker $workerId with reason $message")
  scheduler.workerRemoved(workerId, host, message)
}
/**
 * Mark a given executor as decommissioned and stop making resource offers for it.
 *
 * @return whether the executor was newly marked for decommissioning (false if it was
 *         already inactive or unknown).
 */
private def decommissionExecutor(executorId: String): Boolean = {
  val shouldDisable = CoarseGrainedSchedulerBackend.this.synchronized {
    // Only bother decommissioning executors which are alive.
    if (isExecutorActive(executorId)) {
      executorsPendingDecommission += executorId
      true
    } else {
      false
    }
  }
  if (shouldDisable) {
    logInfo(s"Starting decommissioning executor $executorId.")
    try {
      scheduler.executorDecommission(executorId)
    } catch {
      case e: Exception =>
        logError(s"Unexpected error during decommissioning ${e.toString}", e)
    }
    logInfo(s"Finished decommissioning executor $executorId.")
    if (conf.get(STORAGE_DECOMMISSION_ENABLED)) {
      // Also ask the block manager master to decommission this executor's block manager.
      try {
        logInfo("Starting decommissioning block manager corresponding to " +
          s"executor $executorId.")
        scheduler.sc.env.blockManager.master.decommissionBlockManagers(Seq(executorId))
      } catch {
        case e: Exception =>
          logError("Unexpected error during block manager " +
            s"decommissioning for executor $executorId: ${e.toString}", e)
      }
      logInfo(s"Acknowledged decommissioning block manager corresponding to $executorId.")
    }
  } else {
    logInfo(s"Skipping decommissioning of executor $executorId.")
  }
  shouldDisable
}
/**
 * Stop making resource offers for the given executor. The executor is marked as lost with
 * the loss reason still pending.
 *
 * @return Whether executor should be disabled
 */
protected def disableExecutor(executorId: String): Boolean = {
  val shouldDisable = CoarseGrainedSchedulerBackend.this.synchronized {
    if (isExecutorActive(executorId)) {
      executorsPendingLossReason += executorId
      true
    } else {
      // Returns true for explicitly killed executors, we also need to get pending loss reasons;
      // For others return false.
      executorsPendingToRemove.contains(executorId)
    }
  }
  if (shouldDisable) {
    logInfo(s"Disabling executor $executorId.")
    scheduler.executorLost(executorId, LossReasonPending)
  }
  shouldDisable
}
}
// The RPC endpoint handling all scheduling traffic; created eagerly at construction via the
// overridable factory below and registered under ENDPOINT_NAME.
val driverEndpoint = rpcEnv.setupEndpoint(ENDPOINT_NAME, createDriverEndpoint())

// Minimum fraction of expected resources that must register before scheduling may begin.
protected def minRegisteredRatio: Double = _minRegisteredRatio

// Sets up Hadoop delegation tokens when security is enabled; otherwise a no-op.
override def start(): Unit = {
  if (UserGroupInformation.isSecurityEnabled()) {
    delegationTokenManager = createTokenManager()
    delegationTokenManager.foreach { dtm =>
      val ugi = UserGroupInformation.getCurrentUser()
      // When renewal is enabled the manager produces the initial tokens itself (and
      // presumably keeps renewing them — see HadoopDelegationTokenManager); otherwise we
      // obtain a one-shot token set against the current user's credentials.
      val tokens = if (dtm.renewalEnabled) {
        dtm.start()
      } else {
        val creds = ugi.getCredentials()
        dtm.obtainDelegationTokens(creds)
        if (creds.numberOfTokens() > 0 || creds.numberOfSecretKeys() > 0) {
          SparkHadoopUtil.get.serialize(creds)
        } else {
          null
        }
      }
      if (tokens != null) {
        updateDelegationTokens(tokens)
      }
    }
  }
}

// Factory for the driver endpoint; subclasses may return a specialized DriverEndpoint.
protected def createDriverEndpoint(): DriverEndpoint = new DriverEndpoint()
/**
 * Ask every registered executor to shut itself down. Any failure while asking is wrapped in
 * a SparkException.
 */
def stopExecutors(): Unit = {
  if (driverEndpoint != null) {
    logInfo("Shutting down all executors")
    try {
      driverEndpoint.askSync[Boolean](StopExecutors)
    } catch {
      case e: Exception =>
        throw new SparkException("Error asking standalone scheduler to shut down executors", e)
    }
  }
}

/**
 * Stop this backend: halt the periodic revive thread, shut down the executors, stop the
 * delegation token manager if present, and finally stop the driver endpoint itself.
 */
override def stop(): Unit = {
  reviveThread.shutdownNow()
  stopExecutors()
  delegationTokenManager.foreach(_.stop())
  if (driverEndpoint != null) {
    try {
      driverEndpoint.askSync[Boolean](StopDriver)
    } catch {
      case e: Exception =>
        throw new SparkException("Error stopping standalone scheduler's driver endpoint", e)
    }
  }
}
/**
 * Reset the state of CoarseGrainedSchedulerBackend to the initial state. Currently it will only
 * be called in the yarn-client mode when AM re-registers after a failure.
 * Visible for testing only.
 */
protected[scheduler] def reset(): Unit = {
  // Snapshot the registered executor IDs and clear the requested totals under the lock.
  val staleExecutorIds: Set[String] = synchronized {
    requestedTotalExecutorsPerResourceProfile.clear()
    executorDataMap.keys.toSet
  }
  // Remove all the lingering executors that should be removed but not yet. The reason might be
  // because (1) disconnected event is not yet received; (2) executors die silently.
  for (staleId <- staleExecutorIds) {
    removeExecutor(staleId, SlaveLost("Stale executor after cluster manager re-registered."))
  }
}

/** Ask the driver endpoint to make fresh resource offers to the scheduler. */
override def reviveOffers(): Unit = {
  driverEndpoint.send(ReviveOffers)
}
/** Forward a task-kill request to the driver endpoint, which relays it to the executor. */
override def killTask(
    taskId: Long, executorId: String, interruptThread: Boolean, reason: String): Unit = {
  driverEndpoint.send(KillTask(taskId, executorId, interruptThread, reason))
}

/**
 * Default parallelism: the user-configured `spark.default.parallelism`, falling back to the
 * total registered core count with a floor of 2.
 */
override def defaultParallelism(): Int = {
  val coreBasedFallback = math.max(totalCoreCount.get(), 2)
  conf.getInt("spark.default.parallelism", coreBasedFallback)
}

/**
 * Called by subclasses when notified of a lost worker. It just fires the message and returns
 * at once.
 */
protected def removeExecutor(executorId: String, reason: ExecutorLossReason): Unit = {
  driverEndpoint.send(RemoveExecutor(executorId, reason))
}

/** Fire-and-forget notification to the driver endpoint that a worker was lost. */
protected def removeWorker(workerId: String, host: String, message: String): Unit = {
  driverEndpoint.send(RemoveWorker(workerId, host, message))
}
/**
 * Called by subclasses when notified of a decommissioning executor.
 */
private[spark] def decommissionExecutor(executorId: String): Unit = {
  if (driverEndpoint != null) {
    logInfo("Propagating executor decommission to driver.")
    driverEndpoint.send(DecommissionExecutor(executorId))
  }
}

/** Whether enough resources have registered to start scheduling; subclasses may override. */
def sufficientResourcesRegistered(): Boolean = true

/**
 * The backend is ready once sufficient resources have registered, or once the configured
 * maximum registration wait time has elapsed since construction.
 */
override def isReady(): Boolean = {
  if (sufficientResourcesRegistered) {
    logInfo("SchedulerBackend is ready for scheduling beginning after " +
      s"reached minRegisteredResourcesRatio: $minRegisteredRatio")
    true
  } else if ((System.nanoTime() - createTimeNs) >= maxRegisteredWaitingTimeNs) {
    logInfo("SchedulerBackend is ready for scheduling beginning after waiting " +
      s"maxRegisteredResourcesWaitingTime: $maxRegisteredWaitingTimeNs(ns)")
    true
  } else {
    false
  }
}
/**
 * Return the number of executors currently registered with this backend.
 */
private def numExistingExecutors: Int = synchronized { executorDataMap.size }

/** IDs of all executors currently known to this backend. */
override def getExecutorIds(): Seq[String] = synchronized {
  executorDataMap.keySet.toSeq
}

/**
 * An executor is active when it is registered and not pending removal, loss-reason
 * resolution, or decommissioning.
 */
override def isExecutorActive(id: String): Boolean = synchronized {
  val pendingInactive = executorsPendingToRemove.contains(id) ||
    executorsPendingLossReason.contains(id) ||
    executorsPendingDecommission.contains(id)
  executorDataMap.contains(id) && !pendingInactive
}

/**
 * Maximum number of tasks that can run concurrently under the given resource profile,
 * counting only executors registered with that profile.
 */
override def maxNumConcurrentTasks(rp: ResourceProfile): Int = synchronized {
  val cpusPerTask = ResourceProfile.getTaskCpusOrDefaultForProfile(rp, conf)
  executorDataMap.values
    .filter(_.resourceProfileId == rp.id)
    .map(_.totalCores / cpusPerTask)
    .sum
}

// this function is for testing only
def getExecutorAvailableResources(
    executorId: String): Map[String, ExecutorResourceInfo] = synchronized {
  executorDataMap.get(executorId).map(_.resourcesInfo).getOrElse(Map.empty)
}

// this function is for testing only
def getExecutorResourceProfileId(executorId: String): Int = synchronized {
  executorDataMap.get(executorId)
    .map(_.resourceProfileId)
    .getOrElse(ResourceProfile.UNKNOWN_RESOURCE_PROFILE_ID)
}
/**
 * Request an additional number of executors from the cluster manager. This is
 * requesting against the default ResourceProfile, we will need an API change to
 * allow against other profiles.
 * @return whether the request is acknowledged.
 */
final override def requestExecutors(numAdditionalExecutors: Int): Boolean = {
  if (numAdditionalExecutors < 0) {
    throw new IllegalArgumentException(
      "Attempted to request a negative number of additional executor(s) " +
      s"$numAdditionalExecutors from the cluster manager. Please specify a positive number!")
  }
  logInfo(s"Requesting $numAdditionalExecutors additional executor(s) from the cluster manager")
  val response = synchronized {
    val defaultProf = scheduler.sc.resourceProfileManager.defaultResourceProfile
    val numExisting = requestedTotalExecutorsPerResourceProfile.getOrElse(defaultProf, 0)
    requestedTotalExecutorsPerResourceProfile(defaultProf) = numExisting + numAdditionalExecutors
    // Account for executors pending to be added or removed
    doRequestTotalExecutors(requestedTotalExecutorsPerResourceProfile.toMap)
  }
  // Block the caller until the cluster manager acknowledges (or the ask times out).
  defaultAskTimeout.awaitResult(response)
}
/**
 * Update the cluster manager on our scheduling needs. Three bits of information are included
 * to help it make decisions.
 * @param resourceProfileIdToNumExecutors The total number of executors we'd like to have per
 *                                        ResourceProfile. The cluster manager shouldn't kill any
 *                                        running executor to reach this number, but, if all
 *                                        existing executors were to die, this is the number
 *                                        of executors we'd want to be allocated.
 * @param numLocalityAwareTasksPerResourceProfileId The number of tasks in all active stages that
 *                                                  have a locality preferences per
 *                                                  ResourceProfile. This includes running,
 *                                                  pending, and completed tasks.
 * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages
 *                             that would like to run on that host.
 *                             This includes running, pending, and completed tasks.
 * @return whether the request is acknowledged by the cluster manager.
 */
final override def requestTotalExecutors(
    resourceProfileIdToNumExecutors: Map[Int, Int],
    numLocalityAwareTasksPerResourceProfileId: Map[Int, Int],
    hostToLocalTaskCount: Map[Int, Map[String, Int]]
): Boolean = {
  val totalExecs = resourceProfileIdToNumExecutors.values.sum
  if (totalExecs < 0) {
    throw new IllegalArgumentException(
      "Attempted to request a negative number of executor(s) " +
      s"$totalExecs from the cluster manager. Please specify a positive number!")
  }
  val resourceProfileToNumExecutors = resourceProfileIdToNumExecutors.map { case (rpid, num) =>
    (scheduler.sc.resourceProfileManager.resourceProfileFromId(rpid), num)
  }
  val response = synchronized {
    // Replace (not merge with) the previously requested totals and locality hints.
    this.requestedTotalExecutorsPerResourceProfile.clear()
    this.requestedTotalExecutorsPerResourceProfile ++= resourceProfileToNumExecutors
    this.numLocalityAwareTasksPerResourceProfileId = numLocalityAwareTasksPerResourceProfileId
    this.rpHostToLocalTaskCount = hostToLocalTaskCount
    doRequestTotalExecutors(requestedTotalExecutorsPerResourceProfile.toMap)
  }
  defaultAskTimeout.awaitResult(response)
}
/**
 * Request executors from the cluster manager by specifying the total number desired,
 * including existing pending and running executors.
 *
 * The semantics here guarantee that we do not over-allocate executors for this application,
 * since a later request overrides the value of any prior request. The alternative interface
 * of requesting a delta of executors risks double counting new executors when there are
 * insufficient resources to satisfy the first request. We make the assumption here that the
 * cluster manager will eventually fulfill all requests when resources free up.
 *
 * @return a future whose evaluation indicates whether the request is acknowledged.
 */
protected def doRequestTotalExecutors(
    resourceProfileToTotalExecs: Map[ResourceProfile, Int]): Future[Boolean] =
  // Default: dynamic allocation unsupported; cluster-manager backends override this.
  Future.successful(false)
/**
 * Request that the cluster manager kill the specified executors.
 *
 * @param executorIds identifiers of executors to kill
 * @param adjustTargetNumExecutors whether the target number of executors be adjusted down
 *                                 after these executors have been killed
 * @param countFailures if there are tasks running on the executors when they are killed, whether
 *                      those failures be counted to task failure limits?
 * @param force whether to force kill busy executors, default false
 * @return the ids of the executors acknowledged by the cluster manager to be removed.
 */
final override def killExecutors(
    executorIds: Seq[String],
    adjustTargetNumExecutors: Boolean,
    countFailures: Boolean,
    force: Boolean): Seq[String] = {
  logInfo(s"Requesting to kill executor(s) ${executorIds.mkString(", ")}")

  val response = withLock {
    val (knownExecutors, unknownExecutors) = executorIds.partition(executorDataMap.contains)
    unknownExecutors.foreach { id =>
      logWarning(s"Executor to kill $id does not exist!")
    }

    // If an executor is already pending to be removed, do not kill it again (SPARK-9795)
    // If this executor is busy, do not kill it unless we are told to force kill it (SPARK-9552)
    val executorsToKill = knownExecutors
      .filter { id => !executorsPendingToRemove.contains(id) }
      .filter { id => force || !scheduler.isExecutorBusy(id) }
    executorsToKill.foreach { id => executorsPendingToRemove(id) = !countFailures }

    logInfo(s"Actual list of executor(s) to be killed is ${executorsToKill.mkString(", ")}")

    // If we do not wish to replace the executors we kill, sync the target number of executors
    // with the cluster manager to avoid allocating new ones. When computing the new target,
    // take into account executors that are pending to be added or removed.
    val adjustTotalExecutors =
      if (adjustTargetNumExecutors) {
        executorsToKill.foreach { exec =>
          val rpId = executorDataMap(exec).resourceProfileId
          val rp = scheduler.sc.resourceProfileManager.resourceProfileFromId(rpId)
          if (requestedTotalExecutorsPerResourceProfile.isEmpty) {
            // Assume that we are killing an executor that was started by default and
            // not through the request api
            requestedTotalExecutorsPerResourceProfile(rp) = 0
          } else {
            val requestedTotalForRp = requestedTotalExecutorsPerResourceProfile(rp)
            requestedTotalExecutorsPerResourceProfile(rp) = math.max(requestedTotalForRp - 1, 0)
          }
        }
        doRequestTotalExecutors(requestedTotalExecutorsPerResourceProfile.toMap)
      } else {
        Future.successful(true)
      }

    // Issue the actual kill only after the target adjustment future completes.
    val killExecutors: Boolean => Future[Boolean] =
      if (executorsToKill.nonEmpty) {
        _ => doKillExecutors(executorsToKill)
      } else {
        _ => Future.successful(false)
      }

    val killResponse = adjustTotalExecutors.flatMap(killExecutors)(ThreadUtils.sameThread)

    killResponse.flatMap(killSuccessful =>
      Future.successful (if (killSuccessful) executorsToKill else Seq.empty[String])
    )(ThreadUtils.sameThread)
  }

  defaultAskTimeout.awaitResult(response)
}
/**
 * Kill the given list of executors through the cluster manager.
 * @return whether the kill request is acknowledged.
 */
protected def doKillExecutors(executorIds: Seq[String]): Future[Boolean] =
  // Default: not supported; cluster-manager-specific backends override this.
  Future.successful(false)

/**
 * Request that the cluster manager kill all executors on a given host.
 * @return whether the kill request is acknowledged.
 */
final override def killExecutorsOnHost(host: String): Boolean = {
  logInfo(s"Requesting to kill any and all executors on host ${host}")
  // A potential race exists if a new executor attempts to register on a host
  // that is on the blacklist and is no longer valid. To avoid this race,
  // all executor registration and killing happens in the event loop. This way, either
  // an executor will fail to register, or will be killed when all executors on a host
  // are killed.
  // Kill all the executors on this host in an event loop to ensure serialization.
  driverEndpoint.send(KillExecutorsOnHost(host))
  true
}
/**
 * Create the delegation token manager to be used for the application. This method is called
 * once during the start of the scheduler backend (so after the object has already been
 * fully constructed), only if security is enabled in the Hadoop configuration.
 *
 * @return the token manager, or [[None]] (the default) if the backend does not support
 *         delegation tokens.
 */
protected def createTokenManager(): Option[HadoopDelegationTokenManager] = None
/**
 * Called when a new set of delegation tokens is sent to the driver. Child classes can override
 * this method but should always call this implementation, which handles token distribution to
 * executors.
 *
 * @param tokens the serialized delegation tokens.
 */
protected def updateDelegationTokens(tokens: Array[Byte]): Unit = {
  // Install the tokens on the driver first, then remember them for executors that
  // register later, and finally push them to every currently registered executor.
  SparkHadoopUtil.get.addDelegationTokens(tokens, conf)
  delegationTokens.set(tokens)
  for (executorData <- executorDataMap.values) {
    executorData.executorEndpoint.send(UpdateDelegationTokens(tokens))
  }
}
protected def currentDelegationTokens: Array[Byte] = delegationTokens.get()
/**
 * Checks whether the executor is blacklisted. This is called when the executor tries to
 * register with the scheduler, and will deny registration if this method returns true.
 *
 * This is in addition to the blacklist kept by the task scheduler, so custom implementations
 * don't need to check there.
 *
 * @param executorId the ID of the executor attempting to register.
 * @param hostname   the host the executor is registering from.
 * @return `true` to deny registration; this default implementation never denies.
 */
protected def isBlacklisted(executorId: String, hostname: String): Boolean = false
// SPARK-27112: We need to ensure that there is ordering of lock acquisition
// between TaskSchedulerImpl and CoarseGrainedSchedulerBackend objects in order to fix
// the deadlock issue exposed in SPARK-27112.
// Invariant: always take the scheduler's monitor first, then this backend's monitor;
// routing every dual-lock code path through this helper enforces a single global order.
private def withLock[T](fn: => T): T = scheduler.synchronized {
  CoarseGrainedSchedulerBackend.this.synchronized { fn }
}
}
private[spark] object CoarseGrainedSchedulerBackend {
  // Name under which the driver-side RPC endpoint is registered.
  val ENDPOINT_NAME = "CoarseGrainedScheduler"
}
| ConeyLiu/spark | core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala | Scala | apache-2.0 | 38,640 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.examples.recommendation
import org.apache.predictionio.controller.PDataSource
import org.apache.predictionio.controller.EmptyEvaluationInfo
import org.apache.predictionio.controller.EmptyActualResult
import org.apache.predictionio.controller.Params
import org.apache.predictionio.data.storage.Event
import org.apache.predictionio.data.store.PEventStore
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import grizzled.slf4j.Logger
case class DataSourceEvalParams(kFold: Int, queryNum: Int)
// appName identifies the app whose events are read from the event store;
// evalParams is only present when running an evaluation (see readEval).
case class DataSourceParams(
  appName: String,
  evalParams: Option[DataSourceEvalParams]) extends Params
/**
 * Reads "rate" and "buy" events from the event store and converts them into
 * [[Rating]]s for training and k-fold evaluation.
 */
class DataSource(val dsp: DataSourceParams)
  extends PDataSource[TrainingData,
      EmptyEvaluationInfo, Query, ActualResult] {

  @transient lazy val logger = Logger[this.type]

  /**
   * Reads all "rate" and "buy" events of the configured app and maps each one to a
   * [[Rating]]. A "buy" is treated as an implicit rating of 4.0. Events that cannot
   * be converted are logged and the exception is rethrown, aborting the job.
   */
  def getRatings(sc: SparkContext): RDD[Rating] = {
    val eventsRDD: RDD[Event] = PEventStore.find(
      appName = dsp.appName,
      entityType = Some("user"),
      eventNames = Some(List("rate", "buy")), // read "rate" and "buy" event
      // targetEntityType is optional field of an event.
      targetEntityType = Some(Some("item")))(sc)

    val ratingsRDD: RDD[Rating] = eventsRDD.map { event =>
      try {
        val ratingValue: Double = event.event match {
          case "rate" => event.properties.get[Double]("rating")
          case "buy" => 4.0 // map buy event to rating value of 4
          case _ => throw new Exception(s"Unexpected event ${event} is read.")
        }
        // entityId and targetEntityId is String
        Rating(event.entityId, event.targetEntityId.get, ratingValue)
      } catch {
        case e: Exception =>
          logger.error(s"Cannot convert ${event} to Rating. Exception: ${e}.")
          throw e
      }
    }.cache()
    ratingsRDD
  }

  override
  def readTraining(sc: SparkContext): TrainingData = {
    new TrainingData(getRatings(sc))
  }

  /**
   * Splits the ratings into `kFold` train/test folds by unique ID. The test ratings of
   * each fold are grouped by user so one (Query, ActualResult) pair is produced per user.
   */
  override
  def readEval(sc: SparkContext)
    : Seq[(TrainingData, EmptyEvaluationInfo, RDD[(Query, ActualResult)])] = {
    // Use isDefined rather than !isEmpty for the Option check.
    require(dsp.evalParams.isDefined, "Must specify evalParams")
    val evalParams = dsp.evalParams.get

    val kFold = evalParams.kFold
    // Pair each rating with a unique ID so folds can be assigned deterministically.
    val indexedRatings: RDD[(Rating, Long)] = getRatings(sc).zipWithUniqueId()
    indexedRatings.cache()

    (0 until kFold).map { idx =>
      val trainingRatings = indexedRatings.filter(_._2 % kFold != idx).map(_._1)
      val testingRatings = indexedRatings.filter(_._2 % kFold == idx).map(_._1)
      val testingUsers: RDD[(String, Iterable[Rating])] = testingRatings.groupBy(_.user)
      (new TrainingData(trainingRatings),
        new EmptyEvaluationInfo(),
        // Renamed from `ratings` to avoid shadowing the outer RDD.
        testingUsers.map {
          case (user, userRatings) =>
            (Query(user, evalParams.queryNum), ActualResult(userRatings.toArray))
        })
    }
  }
}
/**
 * A single user-item rating.
 *
 * @param user   the entity ID of the user.
 * @param item   the entity ID of the rated item.
 * @param rating the rating value ("buy" events are mapped to 4.0 by [[DataSource]]).
 */
case class Rating(
  user: String,
  item: String,
  rating: Double
)
/** The training data: the complete set of user-item ratings. */
class TrainingData(
  val ratings: RDD[Rating]
) extends Serializable {
  // Debug-friendly rendering: total count plus a two-element sample.
  override def toString = {
    val total = ratings.count()
    val sample = ratings.take(2).toList
    s"ratings: [$total] ($sample...)"
  }
}
| PredictionIO/PredictionIO | examples/scala-parallel-recommendation/customize-serving/src/main/scala/DataSource.scala | Scala | apache-2.0 | 3,942 |
package org.puma.analyzer.filter
import scala.collection.mutable.ListBuffer
import org.puma.analyzer.NgramExtractor
/**
* Project: puma
* Package: org.puma.analyzer
*
* Author: Sergio Álvarez
* Date: 01/2014
*/
/**
 * Decorator that appends bigram (2-gram) extraction to the results of the
 * wrapped [[ExtractorFilter]].
 */
class BigramsFilter(filter: ExtractorFilter) extends ExtractorFilterDecorator(filter) {

  // Default to wrapping the simple single-term extractor.
  def this() = this(new SimpleTermExtractorFilter())

  /**
   * Returns the wrapped filter's extractions first, followed by all bigrams of the tweet.
   */
  def extract(tweet: String): List[List[String]] = {
    // No mutable buffer is needed: plain list concatenation preserves the original
    // order (previous extraction first, then the bigrams).
    filter.extract(tweet) ++ NgramExtractor.extract(tweet, 2)
  }

  def field: String = "text"
}
| sergio-alvarez/puma | src/main/scala/org/puma/analyzer/filter/BigramsFilter.scala | Scala | apache-2.0 | 638 |
package mesosphere.marathon.storage.migration.legacy.legacy
import akka.stream.scaladsl.Sink
import com.codahale.metrics.MetricRegistry
import mesosphere.marathon.Protos.MarathonTask
import mesosphere.marathon.core.task.Task
import mesosphere.marathon.core.task.bus.TaskStatusUpdateTestHelper
import mesosphere.marathon.core.task.state.MarathonTaskStatus
import mesosphere.marathon.core.task.tracker.impl.{ MarathonTaskStatusSerializer, TaskSerializer }
import mesosphere.marathon.metrics.Metrics
import mesosphere.marathon.state.MarathonTaskState
import mesosphere.marathon.storage.LegacyInMemConfig
import mesosphere.marathon.storage.repository.TaskRepository
import mesosphere.marathon.test.{ MarathonActorSupport, MarathonSpec }
import org.apache.mesos
import org.apache.mesos.Protos.TaskStatus
import org.scalatest.{ GivenWhenThen, Matchers }
import scala.concurrent.ExecutionContext
/**
 * Tests the 1.2 storage migration: removal of deployment-version nodes and the addition
 * of a calculated MarathonTaskStatus to stored tasks.
 */
class MigrationTo1_2Test extends MarathonSpec with GivenWhenThen with Matchers with MarathonActorSupport {
  import mesosphere.marathon.state.PathId._

  implicit val ctx = ExecutionContext.global

  class Fixture {
    implicit lazy val metrics = new Metrics(new MetricRegistry)
    lazy val config = LegacyInMemConfig(25)
    lazy val store = config.store
    lazy val taskRepo = TaskRepository.legacyRepository(config.entityStore[MarathonTaskState])
    lazy val migration = new MigrationTo1_2(Some(config))

    // Creates a raw entry directly in the underlying store.
    def create(key: String, bytes: IndexedSeq[Byte]): Unit = {
      store.create(key, bytes).futureValue
    }

    // Stores a task state through the task repository's store.
    def store(key: String, state: MarathonTaskState): Unit = {
      taskRepo.store.store(key, state).futureValue
    }
  }

  test("should remove deployment version nodes, but keep deployment nodes") {
    Given("some deployment version nodes, a proper deployment node and an unrelated node")
    val f = new Fixture

    f.create("deployment:265fe17c-2979-4ab6-b906-9c2b34f9c429:2016-06-23T22:16:03.880Z", IndexedSeq.empty)
    f.create("deployment:42c6b840-5a4b-4110-a7d9-d4835f7499b9:2016-06-13T18:47:15.862Z", IndexedSeq.empty)
    f.create("deployment:fcabfa75-7756-4bc8-94b3-c9d5b2abd38c", IndexedSeq.empty)
    f.create("foo:bar", IndexedSeq.empty)

    When("migrating")
    f.migration.migrate().futureValue

    Then("the deployment version nodes are removed, all other nodes are kept")
    val nodeNames: Seq[String] = f.store.allIds().futureValue
    nodeNames should contain theSameElementsAs Seq("deployment:fcabfa75-7756-4bc8-94b3-c9d5b2abd38c", "foo:bar")
  }

  test("should migrate tasks and add calculated MarathonTaskStatus to stored tasks") {
    Given("some tasks without MarathonTaskStatus")
    val f = new Fixture

    f.store("/running1", makeMarathonTaskState("/running1", mesos.Protos.TaskState.TASK_RUNNING))
    f.store("/running2", makeMarathonTaskState("/running2", mesos.Protos.TaskState.TASK_RUNNING))
    f.store("/running3", makeMarathonTaskState("/running3", mesos.Protos.TaskState.TASK_RUNNING, marathonTaskStatus = Some(MarathonTaskStatus.Running)))
    f.store("/unreachable1", makeMarathonTaskState("/unreachable1", mesos.Protos.TaskState.TASK_LOST, Some(TaskStatus.Reason.REASON_RECONCILIATION)))
    f.store("/gone1", makeMarathonTaskState("/gone1", mesos.Protos.TaskState.TASK_LOST, Some(TaskStatus.Reason.REASON_CONTAINER_LAUNCH_FAILED)))

    When("migrating")
    f.migration.migrate().futureValue

    Then("the tasks should all have a MarathonTaskStatus according their initial mesos task status")
    val storedTasks = f.taskRepo.all().map(TaskSerializer.toProto).runWith(Sink.seq)

    storedTasks.futureValue.foreach { task =>
      task.getMarathonTaskStatus should not be null
      val serializedTask = TaskSerializer.fromProto(task)
      val expectedStatus = MarathonTaskStatus(serializedTask.mesosStatus.getOrElse(fail("Task has no mesos task status")))
      val currentStatus = MarathonTaskStatusSerializer.fromProto(task.getMarathonTaskStatus)
      // `x should be equals y` does not actually compare the two values in ScalaTest
      // (it always passes); use shouldEqual so a mismatch fails the test.
      currentStatus shouldEqual expectedStatus
    }
  }

  // Builds a MarathonTask proto with the given mesos state, optional lost-reason,
  // and (optionally) an already-set MarathonTaskStatus.
  private def makeMarathonTaskState(taskId: String, taskState: mesos.Protos.TaskState, maybeReason: Option[TaskStatus.Reason] = None, marathonTaskStatus: Option[MarathonTaskStatus] = None): MarathonTaskState = {
    val mesosStatus = TaskStatusUpdateTestHelper.makeMesosTaskStatus(Task.Id.forRunSpec(taskId.toPath), taskState, maybeReason = maybeReason)
    val builder = MarathonTask.newBuilder()
      .setId(taskId)
      .setStatus(mesosStatus)
      .setHost("abc")
      .setStagedAt(1)
    if (marathonTaskStatus.isDefined) {
      builder.setMarathonTaskStatus(MarathonTaskStatusSerializer.toProto(marathonTaskStatus.get))
    }
    MarathonTaskState(builder.build())
  }
}
| timcharper/marathon | src/test/scala/mesosphere/marathon/storage/migration/legacy/legacy/MigrationTo1_2Test.scala | Scala | apache-2.0 | 4,687 |
package io.youi.image
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{Future, Promise}
/**
 * An [[Image]] backed by a browser `html.Image` element. Dimensions are captured
 * from the element at construction time.
 */
class HTMLImage(private[image] val img: html.Image) extends Image {
  override val width: Double = img.width
  override val height: Double = img.height

  override def draw(context: Context, x: Double, y: Double, width: Double, height: Double): Unit =
    context.drawImage(img)(x, y, width, height)

  // TODO: make this top-level to Image
  def source: String = img.src

  override def resize(width: Double, height: Double): Future[Image] =
    resize(width, height, ImageResizer.Smooth)

  def resize(width: Double, height: Double, resizer: ImageResizer): Future[Image] = {
    // Resizing to the current dimensions is a no-op; reuse this instance.
    val unchanged = this.width == width && this.height == height
    if (unchanged) {
      Future.successful(this)
    } else {
      ResizedHTMLImage(this, width, height, resizer)
    }
  }

  override def resizeTo(canvas: html.Canvas, width: Double, height: Double, resizer: ImageResizer): Future[html.Canvas] =
    ResizedHTMLImage.resizeTo(this, canvas, width, height, resizer)

  override def isVector: Boolean = false

  override def toDataURL: Future[String] = ImageUtility.toDataURL(img)

  // Nothing to release for a plain DOM-backed image.
  override def dispose(): Unit = {}

  override def toString: String = s"HTMLImage($width x $height)"
}
object HTMLImage {
  /** Creates an `img` element for `url` and waits for it to load. */
  def apply(url: URL): Future[HTMLImage] = {
    val img = dom.create[html.Image]("img")
    img.src = url.toString
    apply(img)
  }

  /**
   * Wraps an `html.Image`, waiting for it to finish loading if necessary.
   *
   * The returned future now fails when the browser reports a load error; previously a
   * failed load left the future pending forever.
   */
  def apply(img: html.Image): Future[HTMLImage] = if (img.width > 0 && img.height > 0) {
    // Non-zero dimensions mean the image is already loaded.
    Future.successful(new HTMLImage(img))
  } else {
    val promise = Promise[HTMLImage]
    val loadListener: js.Function1[Event, _] = (_: Event) => {
      promise.success(new HTMLImage(img))
    }
    val errorListener: js.Function1[Event, _] = (_: Event) => {
      promise.failure(new RuntimeException(s"Failed to load image: ${img.src}"))
    }
    img.addEventListener("load", loadListener)
    img.addEventListener("error", errorListener)
    val future = promise.future
    // Clean up both listeners once the future resolves either way.
    future.onComplete { _ =>
      img.removeEventListener("load", loadListener)
      img.removeEventListener("error", errorListener)
    }
    future
  }
}
/*
* Copyright © 2015 Lukas Rosenthaler, Benjamin Geer, Ivan Subotic,
* Tobias Schweizer, André Kilchenmann, and Sepideh Alassi.
*
* This file is part of Knora.
*
* Knora is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Knora is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public
* License along with Knora. If not, see <http://www.gnu.org/licenses/>.
*/
package org.knora.webapi.messages.v1.responder.ontologymessages
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import org.knora.webapi._
import org.knora.webapi.messages.v1.responder.standoffmessages.StandoffDataTypeClasses
import org.knora.webapi.messages.v1.responder.usermessages.UserProfileV1
import org.knora.webapi.messages.v1.responder.{KnoraRequestV1, KnoraResponseV1}
import spray.json._
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Messages
/**
 * An abstract trait representing a message that can be sent to `OntologyResponderV1`.
 */
sealed trait OntologyResponderRequestV1 extends KnoraRequestV1

/**
 * Requests that all ontologies in the repository are loaded. This message must be sent only once, when the application
 * starts, before it accepts any API requests. A successful response will be a [[LoadOntologiesResponse]].
 *
 * @param userProfile the profile of the user making the request.
 */
case class LoadOntologiesRequest(userProfile: UserProfileV1) extends OntologyResponderRequestV1

/**
 * Indicates that all ontologies were loaded.
 */
case class LoadOntologiesResponse() extends KnoraResponseV1 {
  // Fixed confirmation message returned to the client as JSON.
  def toJsValue = JsObject(Map("result" -> JsString("Ontologies loaded.")))
}
/**
 * Requests all available information about a list of ontology entities (resource classes and/or properties). A successful response will be an
 * [[EntityInfoGetResponseV1]].
 *
 * @param resourceClassIris the IRIs of the resource entities to be queried.
 * @param propertyIris      the IRIs of the property entities to be queried.
 * @param userProfile       the profile of the user making the request.
 */
case class EntityInfoGetRequestV1(resourceClassIris: Set[IRI] = Set.empty[IRI], propertyIris: Set[IRI] = Set.empty[IRI], userProfile: UserProfileV1) extends OntologyResponderRequestV1

/**
 * Represents assertions about one or more ontology entities (resource classes and/or properties).
 *
 * @param resourceEntityInfoMap a [[Map]] of resource entity IRIs to [[ResourceEntityInfoV1]] objects.
 * @param propertyEntityInfoMap a [[Map]] of property entity IRIs to [[PropertyEntityInfoV1]] objects.
 */
case class EntityInfoGetResponseV1(resourceEntityInfoMap: Map[IRI, ResourceEntityInfoV1],
                                   propertyEntityInfoMap: Map[IRI, PropertyEntityInfoV1])

/**
 * Requests all available information about a list of ontology entities (standoff classes and/or properties). A successful response will be an
 * [[StandoffEntityInfoGetResponseV1]].
 *
 * @param standoffClassIris    the IRIs of the standoff class entities to be queried.
 * @param standoffPropertyIris the IRIs of the standoff property entities to be queried.
 * @param userProfile          the profile of the user making the request.
 */
case class StandoffEntityInfoGetRequestV1(standoffClassIris: Set[IRI] = Set.empty[IRI], standoffPropertyIris: Set[IRI] = Set.empty[IRI], userProfile: UserProfileV1) extends OntologyResponderRequestV1

/**
 * Represents assertions about one or more ontology entities (standoff classes and/or properties).
 *
 * @param standoffClassEntityInfoMap    a [[Map]] of standoff class IRIs to [[StandoffClassEntityInfoV1]] objects.
 * @param standoffPropertyEntityInfoMap a [[Map]] of standoff property IRIs to [[StandoffPropertyEntityInfoV1]] objects.
 */
case class StandoffEntityInfoGetResponseV1(standoffClassEntityInfoMap: Map[IRI, StandoffClassEntityInfoV1],
                                           standoffPropertyEntityInfoMap: Map[IRI, StandoffPropertyEntityInfoV1])
/**
 * Requests information about all standoff classes that are a subclass of a data type standoff class. A successful response will be an
 * [[StandoffClassesWithDataTypeGetResponseV1]].
 *
 * @param userProfile the profile of the user making the request.
 */
case class StandoffClassesWithDataTypeGetRequestV1(userProfile: UserProfileV1) extends OntologyResponderRequestV1

/**
 * Represents assertions about all standoff classes that are a subclass of a data type standoff class.
 *
 * @param standoffClassEntityInfoMap a [[Map]] of standoff class IRIs to [[StandoffClassEntityInfoV1]] objects.
 */
case class StandoffClassesWithDataTypeGetResponseV1(standoffClassEntityInfoMap: Map[IRI, StandoffClassEntityInfoV1])

/**
 * Requests information about all standoff property entities. A successful response will be an
 * [[StandoffAllPropertyEntitiesGetResponseV1]].
 *
 * @param userProfile the profile of the user making the request.
 */
case class StandoffAllPropertyEntitiesGetRequestV1(userProfile: UserProfileV1) extends OntologyResponderRequestV1

/**
 * Represents assertions about all standoff property entities.
 *
 * @param standoffAllPropertiesEntityInfoMap a [[Map]] of standoff property IRIs to [[StandoffPropertyEntityInfoV1]] objects.
 */
case class StandoffAllPropertyEntitiesGetResponseV1(standoffAllPropertiesEntityInfoMap: Map[IRI, StandoffPropertyEntityInfoV1])
/**
 * Requests information about a resource type and its possible properties. A successful response will be a
 * [[ResourceTypeResponseV1]].
 *
 * @param resourceTypeIri the IRI of the resource type to be queried.
 * @param userProfile     the profile of the user making the request.
 */
case class ResourceTypeGetRequestV1(resourceTypeIri: IRI, userProfile: UserProfileV1) extends OntologyResponderRequestV1

/**
 * Represents the Knora API v1 JSON response to a request for information about a resource type.
 *
 * @param restype_info basic information about the resource type.
 */
case class ResourceTypeResponseV1(restype_info: ResTypeInfoV1) extends KnoraResponseV1 {
  def toJsValue = ResourceTypeV1JsonProtocol.resourceTypeResponseV1Format.write(this)
}

/**
 * Checks whether a Knora resource or value class is a subclass of (or identical to) another class. This message is used
 * internally by Knora, and is not part of Knora API v1. A successful response will be a [[CheckSubClassResponseV1]].
 *
 * @param subClassIri   the IRI of the subclass.
 * @param superClassIri the IRI of the superclass.
 */
case class CheckSubClassRequestV1(subClassIri: IRI, superClassIri: IRI) extends OntologyResponderRequestV1

/**
 * Represents a response to a [[CheckSubClassRequestV1]].
 *
 * @param isSubClass `true` if the requested inheritance relationship exists.
 */
case class CheckSubClassResponseV1(isSubClass: Boolean)
/**
 * Requests all existing named graphs.
 * This corresponds to the concept of vocabularies in the SALSAH prototype.
 *
 * @param userProfile the profile of the user making the request.
 */
case class NamedGraphsGetRequestV1(userProfile: UserProfileV1) extends OntologyResponderRequestV1

/**
 * Represents the Knora API V1 response to a [[NamedGraphsGetRequestV1]].
 * It contains all the existing named graphs.
 *
 * @param vocabularies all the existing named graphs.
 */
case class NamedGraphsResponseV1(vocabularies: Seq[NamedGraphV1]) extends KnoraResponseV1 {
  def toJsValue = ResourceTypeV1JsonProtocol.namedGraphsResponseV1Format.write(this)
}

/**
 * Requests all resource classes that are defined in the given named graph.
 *
 * @param namedGraph  the named graph for which the resource classes shall be returned.
 * @param userProfile the profile of the user making the request.
 */
case class ResourceTypesForNamedGraphGetRequestV1(namedGraph: Option[IRI], userProfile: UserProfileV1) extends OntologyResponderRequestV1

/**
 * Represents the Knora API V1 response to a [[ResourceTypesForNamedGraphGetRequestV1]].
 * It contains all the resource classes for a named graph.
 *
 * @param resourcetypes the resource classes for the queried named graph.
 */
case class ResourceTypesForNamedGraphResponseV1(resourcetypes: Seq[ResourceTypeV1]) extends KnoraResponseV1 {
  def toJsValue = ResourceTypeV1JsonProtocol.resourceTypesForNamedGraphResponseV1Format.write(this)
}
/**
 * Requests all property types that are defined in the given named graph.
 * If the named graph is not set, the property types of all named graphs are requested.
 *
 * @param namedGraph  the named graph to query for, or [[None]] if all the named graphs should be queried.
 * @param userProfile the profile of the user making the request.
 */
case class PropertyTypesForNamedGraphGetRequestV1(namedGraph: Option[IRI], userProfile: UserProfileV1) extends OntologyResponderRequestV1

/**
 * Represents the Knora API V1 response to a [[PropertyTypesForNamedGraphGetRequestV1]].
 * It contains all property types for the requested named graph.
 *
 * @param properties the property types for the requested named graph.
 */
case class PropertyTypesForNamedGraphResponseV1(properties: Seq[PropertyDefinitionInNamedGraphV1]) extends KnoraResponseV1 {
  def toJsValue = ResourceTypeV1JsonProtocol.propertyTypesForNamedGraphResponseV1Format.write(this)
}

/**
 * Requests all property types that are defined for the given resource class.
 *
 * @param resourceClassIri the IRI of the resource class to query for.
 * @param userProfile      the profile of the user making the request.
 */
case class PropertyTypesForResourceTypeGetRequestV1(resourceClassIri: IRI, userProfile: UserProfileV1) extends OntologyResponderRequestV1

/**
 * Represents the Knora API V1 response to a [[PropertyTypesForResourceTypeGetRequestV1]].
 * It contains all the property types for the requested resource class.
 *
 * @param properties the property types for the requested resource class.
 */
case class PropertyTypesForResourceTypeResponseV1(properties: Vector[PropertyDefinitionV1]) extends KnoraResponseV1 {
  def toJsValue = ResourceTypeV1JsonProtocol.propertyTypesForResourceTypeResponseV1Format.write(this)
}
/**
 * Requests information about the subclasses of a Knora resource class. A successful response will be
 * a [[SubClassesGetResponseV1]].
 *
 * @param resourceClassIri the IRI of the Knora resource class.
 * @param userProfile      the profile of the user making the request.
 */
case class SubClassesGetRequestV1(resourceClassIri: IRI, userProfile: UserProfileV1) extends OntologyResponderRequestV1

/**
 * Provides information about the subclasses of a Knora resource class.
 *
 * @param subClasses a list of [[SubClassInfoV1]] representing the subclasses of the specified class.
 */
case class SubClassesGetResponseV1(subClasses: Seq[SubClassInfoV1]) extends KnoraResponseV1 {
  def toJsValue = ResourceTypeV1JsonProtocol.subClassesGetResponseV1Format.write(this)
}

/**
 * Requests information about the ontology entities in the specified named graph. A successful response will be a
 * [[NamedGraphEntityInfoV1]].
 *
 * @param namedGraphIri the IRI of the named graph.
 * @param userProfile   the profile of the user making the request.
 */
case class NamedGraphEntityInfoRequestV1(namedGraphIri: IRI, userProfile: UserProfileV1) extends OntologyResponderRequestV1
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Components of messages
/**
 * Represents information about a subclass of a resource class.
 *
 * @param id    the IRI of the subclass.
 * @param label the `rdfs:label` of the subclass.
 */
case class SubClassInfoV1(id: IRI, label: String)

/**
 * Represents a predicate that is asserted about a given ontology entity, and the objects of that predicate.
 *
 * @param predicateIri    the IRI of the predicate.
 * @param ontologyIri     the IRI of the ontology in which the assertions occur.
 * @param objects         the objects of the predicate that have no language codes.
 * @param objectsWithLang the objects of the predicate that have language codes: a Map of language codes to literals.
 */
case class PredicateInfoV1(predicateIri: IRI, ontologyIri: IRI, objects: Set[String], objectsWithLang: Map[String, String])
/**
 * Enumerates the cardinalities that a property can have on a class, in Knora's compact
 * notation ("0-1", "0-n", "1", "1-n"), and converts OWL cardinality restrictions to them.
 */
object Cardinality extends Enumeration {

  type Cardinality = Value

  val MayHaveOne = Value(0, "0-1")
  val MayHaveMany = Value(1, "0-n")
  val MustHaveOne = Value(2, "1")
  val MustHaveSome = Value(3, "1-n")

  // Lookup table from the compact string form to the enumeration value.
  val valueMap: Map[String, Value] = values.map(v => (v.toString, v)).toMap

  /**
   * Given the name of a value in this enumeration, returns the value. If the value is not found, throws an
   * [[InconsistentTriplestoreDataException]].
   *
   * @param name the name of the value.
   * @return the requested value.
   */
  def lookup(name: String): Value = {
    valueMap.getOrElse(name, throw InconsistentTriplestoreDataException(s"Cardinality not found: $name"))
  }

  /**
   * Converts information about an OWL cardinality restriction to a [[Value]] of this enumeration.
   *
   * @param propertyIri         the IRI of the property that the OWL cardinality applies to.
   * @param owlCardinalityIri   the IRI of the OWL cardinality, which must be a member of the set
   *                            [[OntologyConstants.Owl.cardinalityOWLRestrictions]]. Qualified and unqualified
   *                            cardinalities are treated as equivalent.
   * @param owlCardinalityValue the integer value associated with the cardinality.
   * @return a [[Value]].
   */
  def owlCardinality2KnoraCardinality(propertyIri: IRI, owlCardinalityIri: IRI, owlCardinalityValue: Int): Value = {
    owlCardinalityIri match {
      case OntologyConstants.Owl.MinCardinality =>
        if (owlCardinalityValue == 0) {
          // owl:minCardinality 0 means optional and repeatable.
          Cardinality.MayHaveMany
        } else if (owlCardinalityValue == 1) {
          // owl:minCardinality 1 means at least one.
          Cardinality.MustHaveSome
        } else {
          // Consistent with lookup(): use the case-class apply, not `new`.
          throw InconsistentTriplestoreDataException(s"Invalid cardinality restriction $owlCardinalityIri $owlCardinalityValue for $propertyIri")
        }

      case OntologyConstants.Owl.Cardinality if owlCardinalityValue == 1 =>
        Cardinality.MustHaveOne

      case OntologyConstants.Owl.MaxCardinality if owlCardinalityValue == 1 =>
        Cardinality.MayHaveOne

      case _ =>
        // if none of the cases above match, the data is inconsistent
        throw InconsistentTriplestoreDataException(s"Invalid cardinality restriction $owlCardinalityIri $owlCardinalityValue for $propertyIri")
    }
  }
}
/**
 * Represents information about either a resource or a property entity.
 * It is extended by [[ResourceEntityInfoV1]] and [[PropertyEntityInfoV1]].
 */
sealed trait EntityInfoV1 {
  val predicates: Map[IRI, PredicateInfoV1]

  /**
   * Returns an object for a given predicate. If requested, attempts to return the object in the user's preferred
   * language, in the system's default language, or in any language, in that order.
   *
   * @param predicateIri   the IRI of the predicate.
   * @param preferredLangs the user's preferred language and the system's default language.
   * @return an object for the predicate, or [[None]] if this entity doesn't have the specified predicate, or
   *         if the predicate has no objects.
   */
  def getPredicateObject(predicateIri: IRI, preferredLangs: Option[(String, String)] = None): Option[String] = {
    predicates.get(predicateIri).flatMap { predicateInfo =>
      preferredLangs match {
        case Some((userLang, defaultLang)) =>
          // Fall back from the user's language to the system default language, then to
          // an object without a language tag, then to an object in any other language.
          predicateInfo.objectsWithLang.get(userLang).
            orElse(predicateInfo.objectsWithLang.get(defaultLang)).
            orElse(predicateInfo.objects.headOption).
            orElse(predicateInfo.objectsWithLang.values.headOption)

        case None =>
          // Preferred languages were not specified. Take the first object without a language tag.
          predicateInfo.objects.headOption
      }
    }
  }

  /**
   * Returns all the objects specified for a given predicate.
   *
   * @param predicateIri the IRI of the predicate.
   * @return the predicate's objects, or an empty set if this entity doesn't have the specified predicate.
   */
  def getPredicateObjects(predicateIri: IRI): Set[String] = {
    predicates.get(predicateIri).map(_.objects).getOrElse(Set.empty[String])
  }
}
/**
 * Represents the assertions about a given resource class.
 *
 * @param resourceClassIri    the IRI of the resource class.
 * @param ontologyIri         the IRI of the ontology in which the resource class is defined.
 * @param predicates          a [[Map]] of predicate IRIs to [[PredicateInfoV1]] objects.
 * @param cardinalities       a [[Map]] of properties to [[Cardinality.Value]] objects representing the resource class's
 *                            cardinalities on those properties.
 * @param linkProperties      a [[Set]] of IRIs of properties of the resource class that point to other resources.
 * @param linkValueProperties a [[Set]] of IRIs of properties of the resource class
 *                            that point to `LinkValue` objects.
 * @param fileValueProperties a [[Set]] of IRIs of properties of the resource class
 *                            that point to `FileValue` objects.
 */
case class ResourceEntityInfoV1(resourceClassIri: IRI,
                                ontologyIri: IRI,
                                predicates: Map[IRI, PredicateInfoV1],
                                cardinalities: Map[IRI, Cardinality.Value],
                                linkProperties: Set[IRI],
                                linkValueProperties: Set[IRI],
                                fileValueProperties: Set[IRI]) extends EntityInfoV1

/**
 * Represents the assertions about a given standoff class.
 *
 * @param standoffClassIri the IRI of the standoff class.
 * @param ontologyIri      the IRI of the ontology in which the standoff class is defined.
 * @param predicates       a [[Map]] of predicate IRIs to [[PredicateInfoV1]] objects.
 * @param cardinalities    a [[Map]] of property IRIs to [[Cardinality.Value]] objects.
 * @param dataType         the standoff data type class of this standoff class, if it has one.
 */
case class StandoffClassEntityInfoV1(standoffClassIri: IRI,
                                     ontologyIri: IRI,
                                     predicates: Map[IRI, PredicateInfoV1],
                                     cardinalities: Map[IRI, Cardinality.Value],
                                     dataType: Option[StandoffDataTypeClasses.Value] = None) extends EntityInfoV1

/**
 * Represents the assertions about a given property.
 *
 * @param propertyIri     the IRI of the queried property.
 * @param ontologyIri     the IRI of the ontology in which the property is defined.
 * @param isLinkProp      `true` if the property is a subproperty of `knora-base:hasLinkTo`.
 * @param isLinkValueProp `true` if the property is a subproperty of `knora-base:hasLinkToValue`.
 * @param isFileValueProp `true` if the property is a subproperty of `knora-base:hasFileValue`.
 * @param predicates      a [[Map]] of predicate IRIs to [[PredicateInfoV1]] objects.
 */
case class PropertyEntityInfoV1(propertyIri: IRI,
                                ontologyIri: IRI,
                                isLinkProp: Boolean,
                                isLinkValueProp: Boolean,
                                isFileValueProp: Boolean,
                                predicates: Map[IRI, PredicateInfoV1]) extends EntityInfoV1

/**
 * Represents the assertions about a given standoff property.
 *
 * @param standoffPropertyIri the IRI of the queried standoff property.
 * @param ontologyIri         the IRI of the ontology in which the standoff property is defined.
 * @param predicates          a [[Map]] of predicate IRIs to [[PredicateInfoV1]] objects.
 * @param isSubPropertyOf     a [[Set]] of IRIs representing this standoff property's super properties.
 */
case class StandoffPropertyEntityInfoV1(standoffPropertyIri: IRI,
                                        ontologyIri: IRI,
                                        predicates: Map[IRI, PredicateInfoV1],
                                        isSubPropertyOf: Set[IRI]) extends EntityInfoV1

/**
 * Represents the assertions about a given named graph entity.
 *
 * @param namedGraphIri   the IRI of the named graph.
 * @param resourceClasses the resource classes defined in the named graph.
 * @param propertyIris    the properties defined in the named graph.
 */
case class NamedGraphEntityInfoV1(namedGraphIri: IRI,
                                  resourceClasses: Set[IRI],
                                  propertyIris: Set[IRI])
/**
* Represents information about a resource type.
*
* @param name the IRI of the resource type.
* @param label the label of the resource type.
* @param description a description of the resource type.
* @param iconsrc an icon representing the resource type.
* @param properties a list of definitions of properties that resources of this type can have.
*/
case class ResTypeInfoV1(name: IRI,
label: Option[String],
description: Option[String],
iconsrc: Option[String],
properties: Seq[PropertyDefinitionV1])
/**
 * Represents information about a property type. It is extended by [[PropertyDefinitionV1]]
 * and [[PropertyDefinitionInNamedGraphV1]].
 *
 * The snake_case member names (e.g. `valuetype_id`, `gui_name`) mirror the JSON member
 * names produced by the formats in [[ResourceTypeV1JsonProtocol]].
 */
trait PropertyDefinitionBaseV1 {
  val id: IRI                      // IRI of the property definition
  val name: IRI                    // IRI of the property definition (duplicates id in V1 API)
  val label: Option[String]        // optional human-readable label
  val description: Option[String]  // optional description
  val vocabulary: IRI              // IRI of the vocabulary (named graph) it belongs to
  val valuetype_id: IRI            // IRI of the knora-base:Value subclass of this property's values
  val attributes: Option[String]   // HTML attributes for the property's GUI element
  val gui_name: Option[String]     // IRI of the salsah-gui:Guielement to use
}
/**
 * Describes a property type that resources of some particular type can have.
 *
 * @param id the IRI of the property definition.
 * @param name the IRI of the property definition.
 * @param label the label of the property definition.
 * @param description a description of the property definition.
 * @param vocabulary the IRI of the vocabulary (i.e. the named graph) that the property definition belongs to.
 * @param occurrence the cardinality of this property: 1, 1-n, 0-1, or 0-n.
 * @param valuetype_id the IRI of a subclass of `knora-base:Value`, representing the type of value that this property contains.
 * @param attributes HTML attributes to be used with the property's GUI element.
 * @param gui_name the IRI of a named individual of type `salsah-gui:Guielement`, representing the type of GUI element
 *                 that should be used for inputting values for this property.
 * @param guiorder the optional position of this property in the GUI's ordering of properties
 *                 (defaults to `None`).
 */
case class PropertyDefinitionV1(id: IRI,
                                name: IRI,
                                label: Option[String],
                                description: Option[String],
                                vocabulary: IRI,
                                occurrence: String,
                                valuetype_id: IRI,
                                attributes: Option[String],
                                gui_name: Option[String],
                                guiorder: Option[Int] = None) extends PropertyDefinitionBaseV1
/**
 * Describes a property type that a named graph contains.
 *
 * Compared to [[PropertyDefinitionV1]], this class omits `occurrence` and `guiorder`,
 * presumably because cardinality and ordering are specific to a property's use in a
 * particular resource class rather than to the named graph — TODO confirm.
 *
 * @param id the IRI of the property definition.
 * @param name the IRI of the property definition.
 * @param label the label of the property definition.
 * @param description a description of the property definition.
 * @param vocabulary the IRI of the vocabulary (i.e. the named graph) that the property definition belongs to.
 * @param valuetype_id the IRI of a subclass of `knora-base:Value`, representing the type of value that this property contains.
 * @param attributes HTML attributes to be used with the property's GUI element.
 * @param gui_name the IRI of a named individual of type `salsah-gui:Guielement`, representing the type of GUI element
 *                 that should be used for inputting values for this property.
 */
case class PropertyDefinitionInNamedGraphV1(id: IRI,
                                            name: IRI,
                                            label: Option[String],
                                            description: Option[String],
                                            vocabulary: IRI,
                                            valuetype_id: IRI,
                                            attributes: Option[String],
                                            gui_name: Option[String]) extends PropertyDefinitionBaseV1
/**
 * Represents a named graph (corresponds to a vocabulary in the SALSAH prototype).
 *
 * @param id the id of the named graph.
 * @param shortname the short name of the named graph.
 * @param longname the full name of the named graph.
 * @param description a description of the named graph.
 * @param project_id the project belonging to the named graph.
 * @param uri the IRI of the named graph.
 * @param active indicates whether this named graph belongs to the user's project
 *               (original wording was garbled — confirm exact semantics with callers).
 */
case class NamedGraphV1(id: IRI,
                        shortname: String,
                        longname: String,
                        description: String,
                        project_id: IRI,
                        uri: IRI,
                        active: Boolean) {
  // Serialises this object using the spray-json format defined in ResourceTypeV1JsonProtocol.
  def toJsValue = ResourceTypeV1JsonProtocol.namedGraphV1Format.write(this)
}
/**
 * Represents a resource class and its properties.
 *
 * @param id the IRI of the resource class.
 * @param label the label of the resource class.
 * @param properties the properties of the resource class.
 */
case class ResourceTypeV1(id: IRI, label: String, properties: Vector[PropertyTypeV1]) {
  // Serialises this object using the spray-json format defined in ResourceTypeV1JsonProtocol.
  def toJsValue = ResourceTypeV1JsonProtocol.resourceTypeV1Format.write(this)
}
/**
 * Represents a property type.
 *
 * @param id the IRI of the property type.
 * @param label the label of the property type.
 */
case class PropertyTypeV1(id: IRI, label: String) {
  // Serialises this object using the spray-json format defined in ResourceTypeV1JsonProtocol.
  def toJsValue = ResourceTypeV1JsonProtocol.propertyTypeV1Format.write(this)
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// JSON formatting

/**
 * A spray-json protocol for generating Knora API v1 JSON providing data about resources and their properties.
 *
 * Mixing in `NullOptions` makes `None` members render as JSON `null` instead of being omitted.
 *
 * Ordering matters: these implicit vals are initialised top to bottom, so the format for a
 * nested type (e.g. [[PropertyTypeV1]]) must be declared before any format that depends on it
 * (e.g. [[ResourceTypeV1]]). Preserve this order when adding new formats.
 */
object ResourceTypeV1JsonProtocol extends SprayJsonSupport with DefaultJsonProtocol with NullOptions {
  implicit val propertyDefinitionV1Format: JsonFormat[PropertyDefinitionV1] = jsonFormat10(PropertyDefinitionV1)
  implicit val propertyDefinitionInNamedGraphV1Format: JsonFormat[PropertyDefinitionInNamedGraphV1] = jsonFormat8(PropertyDefinitionInNamedGraphV1)
  implicit val resTypeInfoV1Format: JsonFormat[ResTypeInfoV1] = jsonFormat5(ResTypeInfoV1)
  implicit val resourceTypeResponseV1Format: RootJsonFormat[ResourceTypeResponseV1] = jsonFormat1(ResourceTypeResponseV1)
  implicit val namedGraphV1Format: RootJsonFormat[NamedGraphV1] = jsonFormat7(NamedGraphV1)
  implicit val namedGraphsResponseV1Format: RootJsonFormat[NamedGraphsResponseV1] = jsonFormat1(NamedGraphsResponseV1)
  implicit val propertyTypeV1Format: RootJsonFormat[PropertyTypeV1] = jsonFormat2(PropertyTypeV1)
  implicit val resourceTypeV1Format: RootJsonFormat[ResourceTypeV1] = jsonFormat3(ResourceTypeV1)
  implicit val resourceTypesForNamedGraphResponseV1Format: RootJsonFormat[ResourceTypesForNamedGraphResponseV1] = jsonFormat1(ResourceTypesForNamedGraphResponseV1)
  implicit val propertyTypesForNamedGraphResponseV1Format: RootJsonFormat[PropertyTypesForNamedGraphResponseV1] = jsonFormat1(PropertyTypesForNamedGraphResponseV1)
  implicit val propertyTypesForResourceTypeResponseV1Format: RootJsonFormat[PropertyTypesForResourceTypeResponseV1] = jsonFormat1(PropertyTypesForResourceTypeResponseV1)
  implicit val subClassInfoV1Format: JsonFormat[SubClassInfoV1] = jsonFormat2(SubClassInfoV1)
  implicit val subClassesGetResponseV1Format: RootJsonFormat[SubClassesGetResponseV1] = jsonFormat1(SubClassesGetResponseV1)
}
| nie-ine/Knora | webapi/src/main/scala/org/knora/webapi/messages/v1/responder/ontologymessages/OntologyMessagesV1.scala | Scala | agpl-3.0 | 30,820 |
package skinny.engine
import javax.servlet._
import javax.servlet.http.{ HttpServletRequest, HttpServletResponse }
import skinny.engine.base.MainThreadLocalEverywhere
import skinny.engine.context.SkinnyEngineContext
import skinny.engine.routing.RoutingDsl
import skinny.engine.util.UriDecoder
import scala.util.DynamicVariable
/**
* An implementation of the SkinnyEngine DSL in a filter. You may prefer a filter
* to a SkinnyEngineServlet if:
*
* $ - you are sharing a URL space with another servlet or filter and want to
* delegate unmatched requests. This is very useful when migrating
* legacy applications one page or resource at a time.
*
*
* Unlike a SkinnyEngineServlet, does not send 404 or 405 errors on non-matching
* routes. Instead, it delegates to the filter chain.
*
* If in doubt, extend SkinnyEngineServlet instead.
*
* @see SkinnyEngineServlet
*/
trait SkinnyEngineFilter
  extends Filter
  with SkinnyEngineFilterBase
  with ThreadLocalFeatures {
  // Intentionally empty: all behaviour is provided by the mixed-in traits.
}
| holycattle/skinny-framework | engine/src/main/scala/skinny/engine/SkinnyEngineFilter.scala | Scala | mit | 1,004 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import org.scalatest.{ Matchers, WordSpec }
import cascading.pipe.joiner._
import scala.collection.mutable.Buffer
/**
 * Computes a matrix-product-style aggregation over two (x, y, s) inputs using
 * `blockJoinWithSmaller`; exercised by BlockJoinPipeTest.
 *
 * Args:
 *  - "left"/"right": block-replication factors for the join (default "1").
 *  - "joiner": "i" (inner), "l" (left), "r" (right) or "o" (outer); default "i".
 */
class InnerProductJob(args: Args) extends Job(args) {
  val l = args.getOrElse("left", "1").toInt
  val r = args.getOrElse("right", "1").toInt
  // Map the one-letter flag to a cascading Joiner implementation.
  // A MatchError on any other flag value is the (implicit) failure mode.
  val j = args.getOrElse("joiner", "i") match {
    case "i" => new InnerJoin
    case "l" => new LeftJoin
    case "r" => new RightJoin
    case "o" => new OuterJoin
  }
  // Read both inputs, renaming columns so the join keys ('y1 vs 'y2) are distinct.
  val in0 = Tsv("input0").read.mapTo((0, 1, 2) -> ('x1, 'y1, 's1)) { input: (Int, Int, Int) => input }
  val in1 = Tsv("input1").read.mapTo((0, 1, 2) -> ('x2, 'y2, 's2)) { input: (Int, Int, Int) => input }
  in0
    .blockJoinWithSmaller('y1 -> 'y2, in1, leftReplication = l, rightReplication = r, joiner = j)
    .map(('s1, 's2) -> 'score) { v: (Int, Int) =>
      v._1 * v._2
    }
    .groupBy('x1, 'x2) { _.sum[Double]('score) } // sum of s1*s2 per (x1, x2) pair
    .write(Tsv("output"))
}
/**
 * Verifies block-join behaviour of [[InnerProductJob]]: all valid joiner /
 * replication combinations must produce the same output, and invalid
 * combinations must raise [[InvalidJoinModeException]].
 */
class BlockJoinPipeTest extends WordSpec with Matchers {
  "An InnerProductJob" should {
    // (x, y, s) triples for the two inputs.
    val in1 = List(("0", "0", "1"), ("0", "1", "1"), ("1", "0", "2"), ("2", "0", "4"))
    val in2 = List(("0", "1", "1"), ("1", "0", "2"), ("2", "4", "5"))
    // Expected (x1, x2, score) rows; identical for every valid join mode.
    val correctOutput = Set((0, 1, 2.0), (0, 0, 1.0), (1, 1, 4.0), (2, 1, 8.0))

    /**
     * Runs [[InnerProductJob]] with the given replication factors and joiner flag,
     * passing the sink contents to `callback`.
     *
     * (Explicit `: Unit =` replaces the deprecated procedure syntax.)
     */
    def runJobWithArguments(left: Int = 1, right: Int = 1, joiner: String = "i")(callback: Buffer[(Int, Int, Double)] => Unit): Unit = {
      JobTest(new InnerProductJob(_))
        .source(Tsv("input0"), in1)
        .source(Tsv("input1"), in2)
        .arg("left", left.toString)
        .arg("right", right.toString)
        .arg("joiner", joiner)
        .sink[(Int, Int, Double)](Tsv("output")) { outBuf =>
          callback(outBuf)
        }
        .run
        .finish
    }

    "correctly compute product with 1 left block and 1 right block" in {
      runJobWithArguments() { outBuf =>
        outBuf.toSet shouldBe correctOutput
      }
    }
    "correctly compute product with multiple left and right blocks" in {
      runJobWithArguments(left = 3, right = 7) { outBuf =>
        outBuf.toSet shouldBe correctOutput
      }
    }
    "correctly compute product with a valid LeftJoin" in {
      runJobWithArguments(right = 7, joiner = "l") { outBuf =>
        outBuf.toSet shouldBe correctOutput
      }
    }
    "throw an exception when used with OuterJoin" in {
      an[InvalidJoinModeException] should be thrownBy runJobWithArguments(joiner = "o") { _ => }
    }
    "throw an exception when used with an invalid LeftJoin" in {
      an[InvalidJoinModeException] should be thrownBy runJobWithArguments(joiner = "l", left = 2) { _ => }
    }
    "throw an exception when used with an invalid RightJoin" in {
      an[InvalidJoinModeException] should be thrownBy runJobWithArguments(joiner = "r", right = 2) { _ => }
    }
  }
}
| sriramkrishnan/scalding | scalding-core/src/test/scala/com/twitter/scalding/BlockJoinTest.scala | Scala | apache-2.0 | 3,354 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.runtime.java8
// Specialization bridge for Function0 with an unboxed Short result: callers aware
// of the specialization invoke apply$mcS$sp directly and avoid boxing, while the
// generic apply() delegates to it and boxes the Short.
@FunctionalInterface trait JFunction0$mcS$sp extends Function0[Any] with Serializable {
  def apply$mcS$sp: Short
  override def apply(): Any = scala.runtime.BoxesRunTime.boxToShort(apply$mcS$sp)
}
| martijnhoekstra/scala | src/library/scala/runtime/java8/JFunction0$mcS$sp.scala | Scala | apache-2.0 | 517 |
package com.ajjpj.adiagram.geometry
import com.ajjpj.adiagram.ADiagramSpec
/** Unit tests for [[Vector2]]: construction, unit conversion and arithmetic. */
class Vector2Spec extends ADiagramSpec {

  "A Vector2" should "contain its dimensions and unit" in {
    val vec = Vector2(1, 2, LenUnit.pt)
    vec.x shouldBe (1.0 +- eps)
    vec.y shouldBe (2.0 +- eps)
    vec.unit shouldBe LenUnit.pt
  }

  it should "convert to another LenUnit" in {
    // 1 inch = 72 pt, so (1, 2) inches becomes (72, 144) pt.
    val converted = Vector2(1, 2, LenUnit.inch).inUnit(LenUnit.pt)
    converted.x shouldBe (72.0 +- eps)
    converted.y shouldBe (144.0 +- eps)
    converted.unit shouldBe LenUnit.pt
  }

  it should "calculate the half way to another Vector2" in {
    // The result keeps the unit of the left-hand operand (inches).
    val start = Vector2(1, 2, LenUnit.inch)
    val end = Vector2(144, 288, LenUnit.pt)
    val midpoint = start halfWayTo end
    midpoint.x shouldBe (1.5 +- eps)
    midpoint.y shouldBe (3.0 +- eps)
    midpoint.unit shouldBe LenUnit.inch
  }

  it should "calculate its inverse" in {
    val inverted = Vector2(1, 2, LenUnit.mm).inverse
    inverted.x shouldBe (-1.0 +- eps)
    inverted.y shouldBe (-2.0 +- eps)
    inverted.unit shouldBe LenUnit.mm
  }

  it should "add another vector" in {
    val lhs = Vector2(1, 2, LenUnit.inch)
    val rhs = Vector2(144, 288, LenUnit.pt)
    val sum = lhs + rhs
    sum.x shouldBe (3.0 +- eps)
    sum.y shouldBe (6.0 +- eps)
    sum.unit shouldBe LenUnit.inch
  }

  it should "subtract another vector" in {
    val lhs = Vector2(5, 3, LenUnit.inch)
    val rhs = Vector2(144, 288, LenUnit.pt)
    val difference = lhs - rhs
    difference.x shouldBe (3.0 +- eps)
    difference.y shouldBe (-1.0 +- eps)
    difference.unit shouldBe LenUnit.inch
  }
}
| arnohaase/a-diagram | src/test/scala/com/ajjpj/adiagram/geometry/Vector2Spec.scala | Scala | apache-2.0 | 1,473 |
package me.flygare.routes
import akka.http.scaladsl.model.headers.RawHeader
import akka.http.scaladsl.server.Directives._
import me.flygare.utils.HttpConnection
import me.flygare.handlers._
import me.flygare.models._
import me.flygare.utils.JsonSupport._
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
/**
 * Top-level HTTP router for the person service: wraps the /dblogic/persons
 * routes with blanket CORS response headers.
 */
object MainRouter extends HttpConnection {
  // Handler performing the actual person lookup/creation.
  val personHandler = new PersonHandler
  val routes =
    // Every response produced by this router carries these CORS headers.
    respondWithDefaultHeaders(RawHeader("Access-Control-Allow-Origin", "*"), RawHeader("Access-Control-Allow-Methods", "GET, PUT, POST, DELETE"), RawHeader("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept")) {
      pathPrefix("dblogic") {
        path("persons") {
          get {
            // GET /dblogic/persons?rows=N — returns up to `rows` persons.
            // NOTE(review): rows.toInt throws on non-numeric input, which will
            // surface as a server error rather than a 400 — confirm intended.
            parameters('rows) {
              (rows) =>
                complete(personHandler.getPersons(rows.toInt))
            }
          } ~
            post {
              entity(as[Person]) {
                person => {
                  // NOTE(review): the result of createPerson is discarded; if it is
                  // asynchronous, the response below may be sent before the person
                  // is actually persisted — confirm against PersonHandler.
                  personHandler.createPerson(person)
                  complete(s"The person you sent were: $person")
                }
              }
            }
        }
      }
    }
}
| flygare/Minopt | PersonService/src/main/scala/me/flygare/routes/MainRouter.scala | Scala | mit | 1,163 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.cluster.main
import org.apache.gearpump.cluster.client.ClientContext
import org.apache.gearpump.util.MasterClientCommand
// Internal tool to restart an application
/**
 * Command-line entry point that asks the master to replay an application
 * from its current minimum clock (low watermark).
 */
object Replay extends MasterClientCommand with ArgumentsParser {

  override val options: Array[(String, CLIOption[Any])] = Array(
    "appid" -> CLIOption("<application id>", required = true),
    // For document purpose only, OPTION_CONFIG option is not used here.
    // OPTION_CONFIG is parsed by parent shell command "Gear" transparently.
    Gear.OPTION_CONFIG -> CLIOption("custom configuration file", required = false,
      defaultValue = None))

  override val description = "Replay the application from current min clock(low watermark)"

  def main(akkaConf: Config, args: Array[String]): Unit = {
    // parse(...) yields null when the arguments are invalid; wrapping in Option
    // means we only contact the master after a successful parse.
    Option(parse(args)).foreach { parsed =>
      val clientContext = ClientContext(akkaConf)
      clientContext.replayFromTimestampWindowTrailingEdge(parsed.getInt("appid"))
      clientContext.close()
    }
  }
}
| manuzhang/incubator-gearpump | core/src/main/scala/org/apache/gearpump/cluster/main/Replay.scala | Scala | apache-2.0 | 1,817 |
//
// Copyright 2013, Martin Pokorny <martin@truffulatree.org>
//
// This Source Code Form is subject to the terms of the Mozilla Public License,
// v. 2.0. If a copy of the MPL was not distributed with this file, You can
// obtain one at http://mozilla.org/MPL/2.0/.
//
package org.truffulatree.scampi2
import scala.language.existentials
import scala.collection.mutable
import scala.ref.WeakReference
import org.bridj.Pointer
import java.io.{File => JFile}
trait FileComponent {
mpi2: Scampi2 with Mpi2LibraryComponent =>
/**
 * Immutable snapshot of a file view, as returned by [[File.view]] and consumed
 * by [[File.setView]] (mirrors the arguments of MPI_File_set_view).
 *
 * @param disp     displacement of the view within the file.
 * @param etype    elementary datatype of the view.
 * @param filetype datatype describing the layout of the file data.
 * @param datarep  data representation name (e.g. "native").
 */
case class FileView(
    disp: mpi2.lib.MPI_Offset,
    etype: mpi2.Datatype[_],
    filetype: mpi2.Datatype[_],
    datarep: String)
/**
 * Scala wrapper around an MPI file handle (MPI_File).
 *
 * The constructor opens the file immediately via MPI_File_open and registers the
 * instance in the [[File]] companion registry; call [[close]] to release the handle.
 * Methods map one-to-one onto the MPI I/O routines. For each blocking read/write
 * there is a `do…` variant that discards the returned status (MPI_STATUS_IGNORE).
 *
 * @param comm     communicator over which the file is opened (collective operation).
 * @param filePath path of the file to open.
 * @param openMode access-mode flags combined via FileMode.amode.
 * @param openInfo info hints passed to MPI_File_open (defaults to InfoNull).
 */
class File(
    comm: mpi2.IntraComm,
    filePath: JFile,
    openMode: Seq[mpi2.FileMode.FileMode],
    openInfo: mpi2.Info = mpi2.InfoNull)
    extends mpi2.WithErrHandler {

  // --- Lifecycle and handle management ---

  // Native storage for the MPI_File handle; initialised to MPI_FILE_NULL so that
  // isNull is accurate even if MPI_File_open fails.
  protected final val handlePtr: Pointer[mpi2.lib.MPI_File] = {
    val result = mpi2.allocateFile()
    result.set(mpi2.lib.MPI_FILE_NULL)
    result
  }

  // Current raw handle value.
  protected[scampi2] final def handle = handlePtr(0)

  // Open the file as part of construction. Errors are reported against the
  // communicator, since this file's own error handler is not yet installed.
  mpi2.mpiCall(
    mpi2.lib.MPI_File_open(
      comm.handle,
      Pointer.pointerToCString(filePath.getPath).as(classOf[Byte]),
      mpi2.FileMode.amode(openMode),
      openInfo.handle,
      handlePtr),
    CommException.curried(comm))
  File.register(this)

  // Error-handler plumbing required by WithErrHandler.
  type ErrHandlerType = FileErrHandler

  protected var currentErrHandler: FileErrHandler = File.defaultErrHandler

  errHandler = currentErrHandler

  protected def mpiSetErrhandler(errhandler: mpi2.lib.MPI_Errhandler): Int =
    mpi2.lib.MPI_File_set_errhandler(handle, errhandler)

  // Exception factory used for all subsequent MPI calls on this file.
  protected final val selfException = mpi2.FileException.curried(this)

  protected def mpiCall(c: => Int) = mpi2.mpiCall(c, selfException)

  /** Closes the file and removes it from the companion registry; no-op if already closed. */
  final def close() {
    if (!isNull) {
      File.remove(this)
      mpiCall(mpi2.lib.MPI_File_close(handlePtr))
    }
  }

  /** True when the underlying handle is MPI_FILE_NULL (i.e. closed or never opened). */
  final def isNull: Boolean = handle == mpi2.lib.MPI_FILE_NULL

  // --- File attributes ---

  /** Current file size (MPI_File_get_size). */
  def size: mpi2.lib.MPI_Offset =
    withOutVar { result: Pointer[mpi2.lib.MPI_Offset] =>
      mpiCall(mpi2.lib.MPI_File_get_size(handle, result))
      result(0)
    }

  /** Resizes the file (MPI_File_set_size). */
  def size_=(sz: mpi2.lib.MPI_Offset) {
    mpiCall(mpi2.lib.MPI_File_set_size(handle, sz))
  }

  /** Preallocates storage for the file (MPI_File_preallocate). */
  def preallocate(sz: mpi2.lib.MPI_Offset) {
    mpiCall(mpi2.lib.MPI_File_preallocate(handle, sz))
  }

  /** Group of processes that opened the file (MPI_File_get_group). */
  def group: mpi2.Group = withOutVar { group: Pointer[mpi2.lib.MPI_Group] =>
    mpiCall(mpi2.lib.MPI_File_get_group(handle, group))
    Group(group(0))
  }

  /** Access mode flags, decoded from the MPI_File_get_amode bitmask. */
  def amode: Seq[mpi2.FileMode.FileMode] = withOutVar { flagsp: Pointer[Int] =>
    mpiCall(mpi2.lib.MPI_File_get_amode(handle, flagsp))
    var result = List.empty[mpi2.FileMode.FileMode]
    val flags = flagsp(0)
    var mode = 1
    // Walk single-bit masks until all set bits have been consumed.
    while (flags >= mode) {
      if ((flags & mode) != 0) result = mpi2.FileMode(mode) :: result
      mode <<= 1
    }
    result
  }

  /** Info hints currently associated with the file (MPI_File_get_info). */
  def info: mpi2.Info = {
    val result = new mpi2.Info
    mpiCall(mpi2.lib.MPI_File_get_info(handle, result.handlePtr))
    result
  }

  def info_=(info: mpi2.Info) {
    mpiCall(mpi2.lib.MPI_File_set_info(handle, info.handle))
  }

  // --- File views ---

  /** Current file view (MPI_File_get_view), with datatypes resolved via Datatype.lookup. */
  def view: FileView =
    withOutVar { disp: Pointer[mpi2.lib.MPI_Offset] =>
      val dts = mpi2.allocateDatatype(2)
      val datarep = Pointer.allocateBytes(mpi2.lib.MPI_MAX_DATAREP_STRING + 1)
      try {
        val etype = dts
        val filetype = dts.next(1)
        mpiCall(
          mpi2.lib.MPI_File_get_view(
            handle,
            disp,
            etype,
            filetype,
            datarep.as(classOf[Byte])))
        FileView(
          disp(0),
          mpi2.Datatype.lookup(etype(0)),
          mpi2.Datatype.lookup(filetype(0)),
          datarep.getCString)
      } finally {
        // Release the temporary native buffers regardless of call outcome.
        dts.release()
        datarep.release()
      }
    }

  def view_=(view: FileView) { setView(view) }

  /** Sets the file view (MPI_File_set_view), optionally with info hints. */
  def setView(view: FileView, info: Info = InfoNull) {
    withInString(view.datarep) { datarep: Pointer[Byte] =>
      mpiCall(
        mpi2.lib.MPI_File_set_view(
          handle,
          view.disp,
          view.etype.handle,
          view.filetype.handle,
          datarep,
          info.handle))
    }
  }

  // --- Data access with explicit offsets ---

  /** Blocking read at an explicit offset, returning the status. */
  def readAt(
    offset: mpi2.lib.MPI_Offset,
    buff: mpi2.ValueBuffer[_]): mpi2.Status = {
    val status = mpi2.newStatus()
    mpiCall(
      mpi2.lib.MPI_File_read_at(
        handle,
        offset,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        Pointer.pointerTo(status(0))))
    new mpi2.Status(status(0))
  }

  /** Like [[readAt]] but discards the status (MPI_STATUS_IGNORE). */
  def doReadAt(offset: mpi2.lib.MPI_Offset, buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_read_at(
        handle,
        offset,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        mpi2.lib.MPI_STATUS_IGNORE))
  }

  /** Collective blocking read at an explicit offset. */
  def readAtAll(
    offset: mpi2.lib.MPI_Offset,
    buff: mpi2.ValueBuffer[_]): mpi2.Status = {
    val status = mpi2.newStatus()
    mpiCall(
      mpi2.lib.MPI_File_read_at_all(
        handle,
        offset,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        Pointer.pointerTo(status(0))))
    new mpi2.Status(status(0))
  }

  def doReadAtAll(offset: mpi2.lib.MPI_Offset, buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_read_at_all(
        handle,
        offset,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        mpi2.lib.MPI_STATUS_IGNORE))
  }

  /** Blocking write at an explicit offset, returning the status. */
  def writeAt(
    offset: mpi2.lib.MPI_Offset,
    buff: mpi2.ValueBuffer[_]): mpi2.Status = {
    val status = mpi2.newStatus()
    mpiCall(
      mpi2.lib.MPI_File_write_at(
        handle,
        offset,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        Pointer.pointerTo(status(0))))
    new mpi2.Status(status(0))
  }

  def doWriteAt(offset: mpi2.lib.MPI_Offset, buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_write_at(
        handle,
        offset,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        mpi2.lib.MPI_STATUS_IGNORE))
  }

  /** Collective blocking write at an explicit offset. */
  def writeAtAll(
    offset: mpi2.lib.MPI_Offset,
    buff: mpi2.ValueBuffer[_]): mpi2.Status = {
    val status = mpi2.newStatus()
    mpiCall(
      mpi2.lib.MPI_File_write_at_all(
        handle,
        offset,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        Pointer.pointerTo(status(0))))
    new mpi2.Status(status(0))
  }

  def doWriteAtAll(offset: mpi2.lib.MPI_Offset, buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_write_at_all(
        handle,
        offset,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        mpi2.lib.MPI_STATUS_IGNORE))
  }

  /** Nonblocking read at an explicit offset; completion is observed via the returned request. */
  def ireadAt(
    offset: mpi2.lib.MPI_Offset,
    buff: mpi2.ValueBuffer[_]): mpi2.Request = {
    val result = new mpi2.Request
    mpiCall(
      mpi2.lib.MPI_File_iread_at(
        handle,
        offset,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        result.handlePtr))
    result
  }

  /** Nonblocking write at an explicit offset. */
  def iwriteAt(
    offset: mpi2.lib.MPI_Offset,
    buff: mpi2.ValueBuffer[_]): mpi2.Request = {
    val result = new mpi2.Request
    mpiCall(
      mpi2.lib.MPI_File_iwrite_at(
        handle,
        offset,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        result.handlePtr))
    result
  }

  // --- Data access with the individual file pointer ---

  /** Blocking read at the individual file pointer. */
  def read(buff: mpi2.ValueBuffer[_]): mpi2.Status = {
    val status = mpi2.newStatus()
    mpiCall(
      mpi2.lib.MPI_File_read(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        Pointer.pointerTo(status(0))))
    new mpi2.Status(status(0))
  }

  def doRead(buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_read(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        mpi2.lib.MPI_STATUS_IGNORE))
  }

  /** Collective blocking read at the individual file pointer. */
  def readAll(buff: mpi2.ValueBuffer[_]): mpi2.Status = {
    val status = mpi2.newStatus()
    mpiCall(
      mpi2.lib.MPI_File_read_all(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        Pointer.pointerTo(status(0))))
    new mpi2.Status(status(0))
  }

  def doReadAll(buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_read_all(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        mpi2.lib.MPI_STATUS_IGNORE))
  }

  /** Blocking write at the individual file pointer. */
  def write(buff: mpi2.ValueBuffer[_]): mpi2.Status = {
    val status = mpi2.newStatus()
    mpiCall(
      mpi2.lib.MPI_File_write(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        Pointer.pointerTo(status(0))))
    new mpi2.Status(status(0))
  }

  def doWrite(buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_write(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        mpi2.lib.MPI_STATUS_IGNORE))
  }

  /** Collective blocking write at the individual file pointer. */
  def writeAll(buff: mpi2.ValueBuffer[_]): mpi2.Status = {
    val status = mpi2.newStatus()
    mpiCall(
      mpi2.lib.MPI_File_write_all(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        Pointer.pointerTo(status(0))))
    new mpi2.Status(status(0))
  }

  def doWriteAll(buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_write_all(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        mpi2.lib.MPI_STATUS_IGNORE))
  }

  /** Nonblocking read at the individual file pointer. */
  def iread(buff: mpi2.ValueBuffer[_]): mpi2.Request = {
    val result = new mpi2.Request
    mpiCall(
      mpi2.lib.MPI_File_iread(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        result.handlePtr))
    result
  }

  /** Nonblocking write at the individual file pointer. */
  def iwrite(buff: mpi2.ValueBuffer[_]): mpi2.Request = {
    val result = new mpi2.Request
    mpiCall(
      mpi2.lib.MPI_File_iwrite(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        result.handlePtr))
    result
  }

  /** Moves the individual file pointer (MPI_File_seek). */
  def seek(offset: mpi2.lib.MPI_Offset, whence: mpi2.Seek.Seek) {
    mpiCall(mpi2.lib.MPI_File_seek(handle, offset, whence.id))
  }

  /** Current individual file pointer position. */
  def position: mpi2.lib.MPI_Offset =
    withOutVar { result: Pointer[mpi2.lib.MPI_Offset] =>
      mpiCall(mpi2.lib.MPI_File_get_position(handle, result))
      result(0)
    }

  /** Converts a view-relative offset to an absolute byte offset (MPI_File_get_byte_offset). */
  def byteOffset(offset: mpi2.lib.MPI_Offset): mpi2.lib.MPI_Offset =
    withOutVar { result: Pointer[mpi2.lib.MPI_Offset] =>
      mpiCall(mpi2.lib.MPI_File_get_byte_offset(handle, offset, result))
      result(0)
    }

  // --- Data access with the shared file pointer ---

  def readShared(buff: mpi2.ValueBuffer[_]): mpi2.Status = {
    val status = mpi2.newStatus()
    mpiCall(
      mpi2.lib.MPI_File_read_shared(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        Pointer.pointerTo(status(0))))
    new mpi2.Status(status(0))
  }

  def doReadShared(buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_read_shared(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        mpi2.lib.MPI_STATUS_IGNORE))
  }

  def writeShared(buff: mpi2.ValueBuffer[_]): mpi2.Status = {
    val status = mpi2.newStatus()
    mpiCall(
      mpi2.lib.MPI_File_write_shared(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        Pointer.pointerTo(status(0))))
    new mpi2.Status(status(0))
  }

  def doWriteShared(buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_write_shared(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        mpi2.lib.MPI_STATUS_IGNORE))
  }

  def ireadShared(buff: mpi2.ValueBuffer[_]): mpi2.Request = {
    val result = new mpi2.Request
    mpiCall(
      mpi2.lib.MPI_File_iread_shared(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        result.handlePtr))
    result
  }

  def iwriteShared(buff: mpi2.ValueBuffer[_]): mpi2.Request = {
    val result = new mpi2.Request
    mpiCall(
      mpi2.lib.MPI_File_iwrite_shared(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        result.handlePtr))
    result
  }

  /** Collective, rank-ordered read using the shared file pointer. */
  def readOrdered(buff: mpi2.ValueBuffer[_]): mpi2.Status = {
    val status = mpi2.newStatus()
    mpiCall(
      mpi2.lib.MPI_File_read_ordered(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        Pointer.pointerTo(status(0))))
    new mpi2.Status(status(0))
  }

  def doReadOrdered(buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_read_ordered(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        mpi2.lib.MPI_STATUS_IGNORE))
  }

  /** Collective, rank-ordered write using the shared file pointer. */
  def writeOrdered(buff: mpi2.ValueBuffer[_]): mpi2.Status = {
    val status = mpi2.newStatus()
    mpiCall(
      mpi2.lib.MPI_File_write_ordered(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        Pointer.pointerTo(status(0))))
    new mpi2.Status(status(0))
  }

  def doWriteOrdered(buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_write_ordered(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle,
        mpi2.lib.MPI_STATUS_IGNORE))
  }

  /** Moves the shared file pointer (MPI_File_seek_shared). */
  def seekShared(offset: mpi2.lib.MPI_Offset, whence: mpi2.Seek.Seek) {
    mpiCall(mpi2.lib.MPI_File_seek_shared(handle, offset, whence.id))
  }

  /** Current shared file pointer position. */
  def positionShared: mpi2.lib.MPI_Offset =
    withOutVar { result: Pointer[mpi2.lib.MPI_Offset] =>
      mpiCall(mpi2.lib.MPI_File_get_position_shared(handle, result))
      result(0)
    }

  // --- Split collective data access (each *Begin must be paired with its *End) ---

  def readAtAllBegin(offset: mpi2.lib.MPI_Offset, buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_read_at_all_begin(
        handle,
        offset,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle))
  }

  def readAtAllEnd(buff: mpi2.ValueBuffer[_]): mpi2.Status = {
    val status = mpi2.newStatus()
    mpiCall(
      mpi2.lib.MPI_File_read_at_all_end(
        handle,
        buff.pointer,
        Pointer.pointerTo(status(0))))
    new mpi2.Status(status(0))
  }

  def doReadAtAllEnd(buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_read_at_all_end(
        handle,
        buff.pointer,
        mpi2.lib.MPI_STATUS_IGNORE))
  }

  def writeAtAllBegin(offset: mpi2.lib.MPI_Offset, buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_write_at_all_begin(
        handle,
        offset,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle))
  }

  def writeAtAllEnd(buff: mpi2.ValueBuffer[_]): mpi2.Status = {
    val status = mpi2.newStatus()
    mpiCall(
      mpi2.lib.MPI_File_write_at_all_end(
        handle,
        buff.pointer,
        Pointer.pointerTo(status(0))))
    new mpi2.Status(status(0))
  }

  def doWriteAtAllEnd(buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_write_at_all_end(
        handle,
        buff.pointer,
        mpi2.lib.MPI_STATUS_IGNORE))
  }

  def readAllBegin(buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_read_all_begin(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle))
  }

  def readAllEnd(buff: mpi2.ValueBuffer[_]): mpi2.Status = {
    val status = mpi2.newStatus()
    mpiCall(
      mpi2.lib.MPI_File_read_all_end(
        handle,
        buff.pointer,
        Pointer.pointerTo(status(0))))
    new mpi2.Status(status(0))
  }

  def doReadAllEnd(buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_read_all_end(
        handle,
        buff.pointer,
        mpi2.lib.MPI_STATUS_IGNORE))
  }

  def writeAllBegin(buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_write_all_begin(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle))
  }

  def writeAllEnd(buff: mpi2.ValueBuffer[_]): mpi2.Status = {
    val status = mpi2.newStatus()
    mpiCall(
      mpi2.lib.MPI_File_write_all_end(
        handle,
        buff.pointer,
        Pointer.pointerTo(status(0))))
    new mpi2.Status(status(0))
  }

  def doWriteAllEnd(buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_write_all_end(
        handle,
        buff.pointer,
        mpi2.lib.MPI_STATUS_IGNORE))
  }

  def readOrderedBegin(buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_read_ordered_begin(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle))
  }

  def readOrderedEnd(buff: mpi2.ValueBuffer[_]): mpi2.Status = {
    val status = mpi2.newStatus()
    mpiCall(
      mpi2.lib.MPI_File_read_ordered_end(
        handle,
        buff.pointer,
        Pointer.pointerTo(status(0))))
    new mpi2.Status(status(0))
  }

  def doReadOrderedEnd(buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_read_ordered_end(
        handle,
        buff.pointer,
        mpi2.lib.MPI_STATUS_IGNORE))
  }

  def writeOrderedBegin(buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_write_ordered_begin(
        handle,
        buff.pointer,
        buff.valueCount,
        buff.datatype.handle))
  }

  def writeOrderedEnd(buff: mpi2.ValueBuffer[_]): mpi2.Status = {
    val status = mpi2.newStatus()
    mpiCall(
      mpi2.lib.MPI_File_write_ordered_end(
        handle,
        buff.pointer,
        Pointer.pointerTo(status(0))))
    new mpi2.Status(status(0))
  }

  def doWriteOrderedEnd(buff: mpi2.ValueBuffer[_]) {
    mpiCall(
      mpi2.lib.MPI_File_write_ordered_end(
        handle,
        buff.pointer,
        mpi2.lib.MPI_STATUS_IGNORE))
  }

  // --- Consistency and miscellaneous ---

  /** Extent of a datatype in the file's current data representation (MPI_File_get_type_extent). */
  def typeExtent(datatype: mpi2.Datatype[_]): mpi2.lib.MPI_Aint =
    withOutVar { result: Pointer[mpi2.lib.MPI_Aint] =>
      mpiCall(
        mpi2.lib.MPI_File_get_type_extent(handle, datatype.handle, result))
      result(0)
    }

  /** Whether atomic access mode is enabled (MPI_File_get_atomicity). */
  def atomicity: Boolean = withOutVar { result: Pointer[Int] =>
    mpiCall(mpi2.lib.MPI_File_get_atomicity(handle, result))
    result(0) != 0
  }

  def atomicity_=(state: Boolean) {
    mpiCall(mpi2.lib.MPI_File_set_atomicity(handle, if (state) 1 else 0))
  }

  /** Flushes cached file data to the storage device (MPI_File_sync). */
  def sync() {
    mpiCall(mpi2.lib.MPI_File_sync(handle))
  }
}
/**
 * Companion registry mapping MPI file handles to open [[File]] instances, plus
 * file deletion and the process-wide default file error handler.
 */
object File {
  // Weak references: registration must not keep an otherwise-unreachable File alive.
  private val files: mutable.Map[mpi2.lib.MPI_File, WeakReference[File]] =
    mutable.Map.empty

  /** Records a newly opened file so [[lookup]] can resolve its handle. */
  def register(file: File) {
    files.synchronized {
      // NOTE(review): the message reads "may have" but the check enforces
      // "must not have" — the wording looks inverted.
      require(!file.isNull, "Registered file may have a null handle")
      files(file.handle) = WeakReference(file)
    }
  }

  /** Removes a file from the registry (called from [[File.close]]). */
  protected[scampi2] def remove(file: File) {
    files.synchronized { files -= file.handle }
  }

  /** Resolves an MPI file handle to its registered, still-open [[File]], if any. */
  def lookup(file: mpi2.lib.MPI_File): Option[File] = files.synchronized {
    if (files.contains(file)) {
      files(file) match {
        case WeakReference(f) if !f.isNull => Some(f)
        case _ => None
      }
    } else None
  }

  /** Deletes the named file (MPI_File_delete). */
  def delete(filePath: JFile, info: Info = mpi2.InfoNull) {
    withInString(filePath.getPath) { f: Pointer[Byte] =>
      mpi2.mpiCall(mpi2.lib.MPI_File_delete(f, info.handle))
    }
  }

  private var currentErrHandler: mpi2.FileErrHandler = FileErrHandler.Return

  /** Error handler installed on newly created [[File]] instances. */
  def defaultErrHandler: mpi2.FileErrHandler = synchronized {
    currentErrHandler
  }

  /** Sets the default error handler, also installing it on MPI_FILE_NULL. */
  def defaultErrHandler_=(eh: mpi2.FileErrHandler) {
    synchronized {
      currentErrHandler = eh
      mpi2.mpiCall(
        mpi2.lib.MPI_File_set_errhandler(mpi2.lib.MPI_FILE_NULL, eh.handle))
    }
  }
}
/** Marker trait for error handlers attachable to [[File]] objects. */
trait FileErrHandler extends mpi2.ErrHandler
// We use Option[File] arguments in FileUserErrHandler.fn so that
// FileUserErrHandler may be a default error handler.
/**
 * File error handler backed by a user-supplied function.
 *
 * `fn` receives the file (if registered) and the MPI error code, and returns
 * the replacement file (if any) plus the error code to propagate back to MPI.
 */
class FileUserErrHandler(fn: Function2[Option[File], Int, (Option[File], Int)])
    extends FileErrHandler
    with mpi2.UserErrHandler {
  // The error handler should only be called within the context of
  // an mpiCall function.
  def handleError(file: Pointer[mpi2.lib.MPI_File], err: Pointer[Int]) {
    fn(File.lookup(file(0)), err(0)) match {
      case (Some(newfile), code) => {
        // Write the results back through the pointers so MPI sees the update.
        file(0) = newfile.handle
        err(0) = code
      }
      case (None, code) =>
        err(0) = code
    }
  }
  // Presumably held in a val so the callback outlives registration with the
  // native library (i.e. is not collected) — TODO confirm BridJ semantics.
  private val errhandlerFunction =
    mpi2.lib.MPI_File_errhandler_function(handleError)
  mpi2.mpiCall(
    mpi2.lib.MPI_File_create_errhandler(
      Pointer.pointerTo(errhandlerFunction),
      handlePtr))
}
/** Predefined file error handlers wrapping MPI's built-in handlers. */
object FileErrHandler {
  /** Aborts the program on error (MPI_ERRORS_ARE_FATAL). */
  object Abort extends FileErrHandler {
    handlePtr.set(mpi2.lib.MPI_ERRORS_ARE_FATAL)
  }
  /** Returns error codes to the caller (MPI_ERRORS_RETURN). */
  object Return extends FileErrHandler {
    handlePtr.set(mpi2.lib.MPI_ERRORS_RETURN)
  }
}
}
| mpokorny/scampi | src/main/scala/org/truffulatree/scampi2/FileComponent.scala | Scala | mpl-2.0 | 21,911 |
/*
* Copyright (c) 2014 Paul Bernard
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Spectrum Finance is based in part on:
* QuantLib. http://quantlib.org/
*
*/
package org.quantintel.ql.time
/** Used to represent the named days of a typical calendar.
*
* @author Paul Bernard
*/
object Weekday extends Enumeration {

  type Weekday = Value

  // Days carry explicit ids 1 (Sunday) through 7 (Saturday).
  val SUNDAY: Weekday = Value(1)
  val MONDAY: Weekday = Value(2)
  val TUESDAY: Weekday = Value(3)
  val WEDNESDAY: Weekday = Value(4)
  val THURSDAY: Weekday = Value(5)
  val FRIDAY: Weekday = Value(6)
  val SATURDAY: Weekday = Value(7)

  /** Looks up the day with the given id (1..7); throws otherwise. */
  def valueOf(weekday: Int) : Weekday =
    values
      .find(_.id == weekday)
      .getOrElse(throw new Exception("Valid units = 1 to 7"))
}
| quantintel/spectrum | financial/src/main/scala/org/quantintel/ql/time/Weekday.scala | Scala | apache-2.0 | 1,433 |
package org.jetbrains.sbt
package project.template
import java.awt.FlowLayout
import java.awt.event.{ActionEvent, ActionListener}
import java.io.File
import javax.swing.border.EmptyBorder
import javax.swing.{Box, JCheckBox, JLabel, JPanel}
import com.intellij.ide.util.projectWizard.{ModuleBuilder, ModuleWizardStep, SdkSettingsStep, SettingsStep}
import com.intellij.openapi.application.ApplicationManager
import com.intellij.openapi.externalSystem.importing.ImportSpecBuilder
import com.intellij.openapi.externalSystem.service.execution.ProgressExecutionMode
import com.intellij.openapi.externalSystem.service.project.wizard.AbstractExternalModuleBuilder
import com.intellij.openapi.externalSystem.settings.{AbstractExternalSystemSettings, ExternalSystemSettingsListener}
import com.intellij.openapi.externalSystem.util.{ExternalSystemApiUtil, ExternalSystemUtil}
import com.intellij.openapi.fileEditor.FileDocumentManager
import com.intellij.openapi.module.{JavaModuleType, ModifiableModuleModel, Module, ModuleType}
import com.intellij.openapi.projectRoots.{JavaSdk, SdkTypeId}
import com.intellij.openapi.roots.ModifiableRootModel
import com.intellij.openapi.util.Condition
import com.intellij.openapi.util.io.FileUtil._
import com.intellij.openapi.util.text.StringUtil
import com.intellij.openapi.vfs.LocalFileSystem
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.project.Platform.{Dotty, Scala}
import org.jetbrains.plugins.scala.project.{Platform, Version, Versions}
import org.jetbrains.sbt.project.SbtProjectSystem
import org.jetbrains.sbt.project.settings.SbtProjectSettings
/**
* User: Dmitry Naydanov, Pavel Fatin
* Date: 11/23/13
*/
/** New-project wizard builder that creates an SBT-based module: writes the
  * build.sbt / project template on disk, wires the version/platform pickers
  * into the wizard UI, and triggers an initial external-system import.
  */
class SbtModuleBuilder extends AbstractExternalModuleBuilder[SbtProjectSettings](SbtProjectSystem.Id, new SbtProjectSettings) {
  // Selections made in the wizard UI; written by SdkSettingsStep.updateDataModel below.
  private var sbtVersion = Versions.DefaultSbtVersion
  private var scalaPlatform = Platform.Default
  private var scalaVersion = Versions.DefaultScalaVersion
  def getModuleType: ModuleType[_ <: ModuleBuilder] = JavaModuleType.getModuleType
  // Writes the project template before delegating the actual module creation.
  override def createModule(moduleModel: ModifiableModuleModel): Module = {
    val root = getModuleFileDirectory.toFile
    if (root.exists) {
      createProjectTemplateIn(root, getName, scalaPlatform, scalaVersion, sbtVersion)
      updateModulePath()
    }
    super.createModule(moduleModel)
  }
  // TODO customize the path in UI when IDEA-122951 will be implemented
  // Moves the .iml file into the SBT modules directory, lower-cased.
  private def updateModulePath() {
    val file = getModuleFilePath.toFile
    val path = file.getParent + "/" + Sbt.ModulesDirectory + "/" + file.getName.toLowerCase
    setModuleFilePath(path)
  }
  // Builds the wizard page: SBT/platform/Scala version combo boxes plus
  // source-download checkboxes, embedded in an SDK-selection step.
  override def modifySettingsStep(settingsStep: SettingsStep): ModuleWizardStep = {
    val sbtVersionComboBox = new SComboBox()
    val scalaPlatformComboBox = new SComboBox()
    val scalaVersionComboBox = new SComboBox()
    // Version lists are fetched over the network under a modal progress dialog.
    val sbtVersions = withProgressSynchronously("Fetching SBT versions")(_ => Versions.loadSbtVersions)
    sbtVersionComboBox.setItems(sbtVersions)
    scalaPlatformComboBox.setItems(Platform.Values)
    scalaVersionComboBox.setTextRenderer(Version.abbreviate)
    def loadScalaVersions(): Array[String] = {
      def platform = scalaPlatformComboBox.getSelectedItem.asInstanceOf[Platform]
      withProgressSynchronously(s"Fetching ${platform.name} versions")(_ => Versions.loadScalaVersions(platform))
    }
    scalaVersionComboBox.setItems(loadScalaVersions())
    // Re-fetch the Scala versions whenever the platform (Scala/Dotty) changes.
    scalaPlatformComboBox.addActionListener(new ActionListener {
      override def actionPerformed(e: ActionEvent): Unit = {
        scalaVersionComboBox.setItems(loadScalaVersions())
      }
    })
    val resolveClassifiersCheckBox = applyTo(new JCheckBox(SbtBundle("sbt.settings.sources")))(
      _.setToolTipText("Download Scala standard library sources (useful for editing the source code)")
    )
    val resolveSbtClassifiersCheckBox = applyTo(new JCheckBox(SbtBundle("sbt.settings.sources")))(
      _.setToolTipText("Download SBT sources (useful for editing the project definition)")
    )
    // Restrict the SDK chooser to plain Java SDKs; commit the UI state on "Finish".
    val step = new SdkSettingsStep(settingsStep, this, new Condition[SdkTypeId] {
      def value(t: SdkTypeId): Boolean = t != null && t.isInstanceOf[JavaSdk]
    }) {
      override def updateDataModel() {
        sbtVersion = sbtVersionComboBox.getSelectedItem.asInstanceOf[String]
        scalaPlatform = scalaPlatformComboBox.getSelectedItem.asInstanceOf[Platform]
        scalaVersion = scalaVersionComboBox.getSelectedItem.asInstanceOf[String]
        settingsStep.getContext setProjectJdk myJdkComboBox.getSelectedJdk
        getExternalProjectSettings.setResolveClassifiers(resolveClassifiersCheckBox.isSelected)
        getExternalProjectSettings.setResolveJavadocs(false)
        getExternalProjectSettings.setResolveSbtClassifiers(resolveSbtClassifiersCheckBox.isSelected)
        getExternalProjectSettings.setUseAutoImport(false)
        getExternalProjectSettings.setCreateEmptyContentRootDirectories(false)
      }
    }
    resolveClassifiersCheckBox.setSelected(true)
    resolveSbtClassifiersCheckBox.setSelected(false)
    val sbtVersionPanel = applyTo(new JPanel(new FlowLayout(FlowLayout.LEFT, 0, 0)))(
      _.add(sbtVersionComboBox),
      _.add(resolveSbtClassifiersCheckBox)
    )
    val scalaVersionPanel = applyTo(new JPanel(new FlowLayout(FlowLayout.LEFT, 0, 0)))(
      _.setBorder(new EmptyBorder(1, 0, 0, 0)),
      _.add(scalaPlatformComboBox),
      _.add(Box.createHorizontalStrut(4)),
      _.add(scalaVersionComboBox),
      _.add(resolveClassifiersCheckBox)
    )
    settingsStep.addSettingsField(SbtBundle("sbt.settings.sbt"), sbtVersionPanel)
    settingsStep.addSettingsField(SbtBundle("sbt.settings.scala"), scalaVersionPanel)
    // TODO Remove the label patching when the External System will use the concise and proper labels natively
    Option(sbtVersionPanel.getParent).foreach { parent =>
      parent.getComponents.toSeq.foreachDefined {
        case label: JLabel if label.getText == "Project SDK:" =>
          label.setText("JDK:")
          label.setDisplayedMnemonic('J')
        case label: JLabel if label.getText.startsWith("Project ") && label.getText.length > 8 =>
          label.setText(label.getText.substring(8) |> (s => s.substring(0, 1).toUpperCase + s.substring(1)))
      }
    }
    step
  }
  // Creates build.sbt, project/build.properties and the standard src layout.
  // Bails out silently if the files/directories cannot be created.
  private def createProjectTemplateIn(root: File, name: String, platform: Platform, scalaVersion: String, sbtVersion: String) {
    val buildFile = root / Sbt.BuildFile
    val projectDir = root / Sbt.ProjectDirectory
    val propertiesFile = projectDir / Sbt.PropertiesFile
    if (!buildFile.createNewFile() ||
      !projectDir.mkdir()) return
    (root / "src" / "main" / "scala").mkdirs()
    (root / "src" / "test" / "scala").mkdirs()
    writeToFile(buildFile, SbtModuleBuilder.formatProjectDefinition(name, platform, scalaVersion))
    writeToFile(propertiesFile, SbtModuleBuilder.formatSbtProperties(sbtVersion))
  }
  override def getNodeIcon = Sbt.Icon
  // Registers the content root, links the SBT project to the external system,
  // and schedules a background import when auto-import is off.
  override def setupRootModel(model: ModifiableRootModel) {
    val contentPath = getContentEntryPath
    if (StringUtil.isEmpty(contentPath)) return
    val contentRootDir = contentPath.toFile
    createDirectory(contentRootDir)
    val fileSystem = LocalFileSystem.getInstance
    val vContentRootDir = fileSystem.refreshAndFindFileByIoFile(contentRootDir)
    if (vContentRootDir == null) return
    model.addContentEntry(vContentRootDir)
    model.inheritSdk()
    val settings =
      ExternalSystemApiUtil.getSettings(model.getProject, SbtProjectSystem.Id).
        asInstanceOf[AbstractExternalSystemSettings[_ <: AbstractExternalSystemSettings[_, SbtProjectSettings, _],
          SbtProjectSettings, _ <: ExternalSystemSettingsListener[SbtProjectSettings]]]
    val externalProjectSettings = getExternalProjectSettings
    externalProjectSettings.setExternalProjectPath(getContentEntryPath)
    settings.linkProject(externalProjectSettings)
    if (!externalProjectSettings.isUseAutoImport) {
      // Save documents first so the just-written template files are on disk
      // when the refresh runs.
      FileDocumentManager.getInstance.saveAllDocuments()
      ApplicationManager.getApplication.invokeLater(new Runnable() {
        override def run(): Unit =
          ExternalSystemUtil.refreshProjects(
            new ImportSpecBuilder(model.getProject, SbtProjectSystem.Id)
              .forceWhenUptodate()
              .use(ProgressExecutionMode.IN_BACKGROUND_ASYNC)
          )
      })
    }
  }
}
/** Text templates written into the generated SBT project. */
private object SbtModuleBuilder {
  // Renders build.sbt; the Dotty variant pins the compiler bridge and
  // organization overrides needed by early Dotty tooling.
  def formatProjectDefinition(name: String, platform: Platform, scalaVersion: String): String = platform match {
    case Scala =>
      s"""name := "$name"
         |
         |version := "1.0"
         |
         |scalaVersion := "$scalaVersion"
      """
        .stripMargin
    case Dotty =>
      s"""scalaVersion := "$scalaVersion"
         |
         |scalaOrganization := "ch.epfl.lamp"
         |
         |scalaBinaryVersion := "2.11"
         |
         |scalaOrganization in updateSbtClassifiers := (scalaOrganization in Global).value
         |
         |ivyScala ~= (_ map (_ copy (overrideScalaVersion = false)))
         |
         |libraryDependencies += "ch.epfl.lamp" % "dotty_2.11" % scalaVersion.value % "scala-tool"
         |
         |scalaCompilerBridgeSource := ("ch.epfl.lamp" % "dotty-sbt-bridge" % scalaVersion.value % "component").sources()"""
        .stripMargin
  }
  // Renders project/build.properties.
  def formatSbtProperties(sbtVersion: String) = s"sbt.version = $sbtVersion"
}
package debop4s.core.io
import debop4s.core.AbstractCoreFunSuite
import debop4s.core.io.model.{Company, CompanyEntity}
/** Round-trip tests for [[PicklingSerializer]] (scala-pickling based). */
class PicklingSerializerFunSuite extends AbstractCoreFunSuite {

  val serializer = new PicklingSerializer()

  // A case class should survive a serialize/deserialize round trip unchanged.
  test("case class serialize") {
    val com = CompanyEntity(0, "구글", "google")
    val ser = serializer.serialize(com)
    val converted = serializer.deserialize(ser, classOf[CompanyEntity])
    converted should not be null
    converted shouldEqual com
  }
  //
  // NOTE: Scala Pickling does not support boxed Java types — Integer, Double, Float, etc.!!!
  //
  // A plain (non-case) class with Java field types is expected to fail;
  // the test documents the failure mode rather than the desired behavior.
  test("class serialize") {
    intercept[ArrayIndexOutOfBoundsException] {
      val com = new Company()
      com.code = "HCT"
      com.name = "HealthConnect"
      com.employeeCount = 50
      val ser = serializer.serialize(com)
      val converted = serializer.deserialize(ser, classOf[CompanyEntity])
      converted should not be null
      converted shouldEqual com
    }
  }
}
| debop/debop4s | debop4s-core/src/test/scala/debop4s/core/io/PicklingSerializerFunSuite.scala | Scala | apache-2.0 | 992 |
package fpinscala.gettingstarted
// A comment!
/* Another comment */
/** A documentation comment */
/** Chapter "Getting started" examples: absolute value, factorial, Fibonacci,
  * and a generic result formatter.
  */
object MyModule {

  /** Absolute value of `n`. */
  def abs(n: Int): Int =
    if (n < 0) -n
    else n

  private def formatAbs(x: Int) = {
    val msg = "The absolute value of %d is %d"
    msg.format(x, abs(x))
  }

  def main1(args: Array[String]): Unit =
    println(formatAbs(-42))

  // A definition of factorial, using a local, tail recursive function
  def factorial(n: Int): Int = {
    @annotation.tailrec
    def go(n: Int, acc: Int): Int =
      if (n <= 0) acc
      else go(n-1, n*acc)
    go(n, 1)
  }

  // Another implementation of `factorial`, this time with a `while` loop
  def factorial2(n: Int): Int = {
    var acc = 1
    var i = n
    while (i > 0) { acc *= i; i -= 1 }
    acc
  }

  /** Exercise 1: nth Fibonacci number, tail-recursively.
    * fib(0) == 0, fib(1) == 1, fib(n) == fib(n-1) + fib(n-2).
    */
  def fib(n: Int): Int = {
    @annotation.tailrec
    def go(b1: Int, b2: Int, p: Int): Int =
      if (p == 0) b1
      else go(b2, b1 + b2, p - 1)
    go(0, 1, n)
  }

  // Prints fib(0) through fib(14); the repetitive println list was folded
  // into a loop with identical output.
  def main(args: Array[String]) {
    for (i <- 0 to 14)
      println(fib(i))
  }

  // This definition and `formatAbs` are very similar..
  // BUGFIX: the message previously read "The absolute value of %d is %d."
  // (copy-paste from formatAbs); it now names the operation it reports.
  private def formatFactorial(n: Int) = {
    val msg = "The factorial of %d is %d."
    msg.format(n, factorial(n))
  }

  // We can generalize `formatAbs` and `formatFactorial` to
  // accept a _function_ as a parameter
  def formatResult(name: String, n: Int, f: Int => Int) = {
    val msg = "The %s of %d is %d."
    msg.format(name, n, f(n))
  }
}
/** Demonstrates that `formatResult` works uniformly for `abs` and `factorial`. */
object FormatAbsAndFactorial {
  import MyModule._
  // Now we can use our general `formatResult` function
  // with both `abs` and `factorial`
  def main(args: Array[String]): Unit = {
    println(formatResult("absolute value", -42, abs))
    println(formatResult("factorial", 7, factorial))
  }
}
// Functions get passed around so often in FP that it's
// convenient to have syntax for constructing a function
// *without* having to give it a name
// Functions get passed around so often in FP that it's
// convenient to have syntax for constructing a function
// *without* having to give it a name
/** Shows the progressively shorter forms of Scala function literals,
  * from fully annotated down to placeholder (`_`) syntax.
  */
object AnonymousFunctions {
  import MyModule._
  // Some examples of anonymous functions:
  def main(args: Array[String]): Unit = {
    println(formatResult("absolute value", -42, abs))
    println(formatResult("factorial", 7, factorial))
    println(formatResult("increment", 7, (x: Int) => x + 1))
    println(formatResult("increment2", 7, (x) => x + 1))
    println(formatResult("increment3", 7, x => x + 1))
    println(formatResult("increment4", 7, _ + 1))
    println(formatResult("increment5", 7, x => { val r = x + 1; r }))
  }
}
object MonomorphicBinarySearch {

  // First, a binary search implementation, specialized to `Double`,
  // another primitive type in Scala, representing 64-bit floating
  // point numbers

  // Ideally, we could generalize this to work for any `Array` type,
  // so long as we have some way of comparing elements of the `Array`

  /** Binary search over a sorted array of doubles.
    *
    * @return the index of `key`, or `-mid - 1` (a negative value) where `mid`
    *         is the last probed midpoint when `key` is absent.
    */
  def binarySearch(ds: Array[Double], key: Double): Int = {
    @annotation.tailrec
    def go(low: Int, mid: Int, high: Int): Int = {
      if (low > high) -mid - 1
      else {
        // BUGFIX: `(low + high) / 2` overflows Int for very large arrays;
        // `low + (high - low) / 2` is equivalent for valid indices but safe.
        val mid2 = low + (high - low) / 2
        val d = ds(mid2) // We index into an array using the same
                         // syntax as function application
        if (d == key) mid2
        else if (d > key) go(low, mid2, mid2-1)
        else go(mid2 + 1, mid2, high)
      }
    }
    go(0, 0, ds.length - 1)
  }
}
object PolymorphicFunctions {

  // Here's a polymorphic version of `binarySearch`, parameterized on
  // a function for testing whether an `A` is greater than another `A`.
  /** Binary search over `as` sorted ascending w.r.t. `gt` ("greater than").
    * Returns the index of `key`, or a negative value when absent.
    */
  def binarySearch[A](as: Array[A], key: A, gt: (A,A) => Boolean): Int = {
    @annotation.tailrec
    def go(low: Int, mid: Int, high: Int): Int = {
      if (low > high) -mid - 1
      else {
        // BUGFIX: `(low + high) / 2` can overflow Int for huge arrays;
        // this form is equivalent for all valid indices but overflow-safe.
        val mid2 = low + (high - low) / 2
        val a = as(mid2)
        val greater = gt(a, key)
        if (!greater && !gt(key,a)) mid2
        else if (greater) go(low, mid2, mid2-1)
        else go(mid2 + 1, mid2, high)
      }
    }
    go(0, 0, as.length - 1)
  }

  // Exercise 2: Implement a polymorphic function to check whether
  // an `Array[A]` is sorted
  /** True iff every element is `gt` its predecessor (strictly increasing
    * under `gt`); vacuously true for empty and single-element arrays.
    */
  def isSorted[A](as: Array[A], gt: (A,A) => Boolean): Boolean = {
    @annotation.tailrec
    def go(n: Int): Boolean = {
      if (n >= as.length) true
      else if (! gt(as(n),as(n-1))) false
      else go(n+1)
    }
    go(1)
  }

  def main(args: Array[String]) {
    println(isSorted(Array(0,1,4,6,30,34), (a: Int,b: Int) => a > b))
  }

  // Polymorphic functions are often so constrained by their type
  // that they only have one implementation! Here's an example:

  // Exercise 3: Implement `partial1`.
  def partial1[A,B,C](a: A, f: (A,B) => C): B => C =
    (b:B) => f(a, b)

  // NOTE: these run (and print) as a side effect of object initialization;
  // kept as-is since removing them would change observable behavior.
  val t = partial1(1, (a:Int,b:Int) => a + b)
  val x = t(2)
  println(x)

  // Exercise 4: Implement `curry`.
  // Note that `=>` associates to the right, so we could
  // write the return type as `A => B => C`
  def curry[A,B,C](f: (A, B) => C): A => (B => C) = {
    //a => (b => f(a, b))
    (a:A) => (b:B) => f(a,b)
  }

  // NB: The `Function2` trait has a `curried` method already

  // Exercise 5: Implement `uncurry`
  def uncurry[A,B,C](f: A => B => C): (A, B) => C = {
    (a, b) => f(a)(b)
  }

  /*
  NB: There is a method on the `Function` object in the standard library,
  `Function.uncurried` that you can use for uncurrying.

  Note that we can go back and forth between the two forms. We can curry
  and uncurry and the two forms are in some sense "the same". In FP jargon,
  we say that they are _isomorphic_ ("iso" = same; "morphe" = shape, form),
  a term we inherit from category theory.
  */

  // Exercise 6: Implement `compose`
  def compose[A,B,C](f: B => C, g: A => B): A => C =
    a => f(g(a))

  case class Message(value: String){
  }

  case class Endpoint(prompt: String){
    def send(m: Message) {
      println(this.prompt + " " + m.value)
    }
  }

  // Partially applies `send`: fixes the message, leaves the endpoint open.
  def route(m:Message): (Endpoint) => Unit =
  {
    (e: Endpoint) => e.send(m)
  }
}
| fpinscala-muc/fpinscala-mhofsche | exercises/src/main/scala/fpinscala/gettingstarted/GettingStarted.scala | Scala | mit | 6,208 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.tree.loss
import org.apache.spark.annotation.{DeveloperApi, Since}
/**
* :: DeveloperApi ::
* Class for absolute error loss calculation (for regression).
*
* The absolute (L1) error is defined as:
* |y - F(x)|
* where y is the label and F(x) is the model prediction for features x.
*/
@Since("1.2.0")
@DeveloperApi
object AbsoluteError extends Loss {

  /**
   * Gradient of the absolute (L1) loss with respect to the prediction F(x):
   * sign(F(x) - y).
   * @param prediction Predicted label.
   * @param label True label.
   * @return Loss gradient
   */
  @Since("1.2.0")
  override def gradient(prediction: Double, label: Double): Double =
    if (prediction > label) 1.0 else -1.0

  // Absolute error |y - F(x)| for a single point.
  override private[spark] def computeError(prediction: Double, label: Double): Double =
    math.abs(label - prediction)
}
| xieguobin/Spark_2.0.0_cn1 | mllib/tree/loss/AbsoluteError.scala | Scala | apache-2.0 | 1,770 |
package org.aja.tantra.examples.concurrency.threads
/**
* Created by mageswaran on 27/3/16.
*/
//Url: http://www.ibm.com/developerworks/library/j-jvmc1/index.html
import java.net.{Socket, ServerSocket}
import java.util.concurrent.{Executors, ExecutorService}
import java.util.Date
/** Accept loop that hands each incoming connection to a fixed-size pool.
  *
  * @param port     TCP port to listen on
  * @param poolSize number of worker threads handling connections
  */
class NetworkService(port: Int, poolSize: Int) extends Runnable {
  val serverSocket = new ServerSocket(port)
  val pool: ExecutorService = Executors.newFixedThreadPool(poolSize)

  def run() {
    try {
      while (true) {
        // This will block until a connection comes in.
        val socket = serverSocket.accept()
        pool.execute(new Handler(socket))
      }
    } finally {
      // BUGFIX: the listening socket was previously leaked on exit —
      // only the pool was shut down. Close both; socket close errors are
      // ignored since we are tearing down anyway.
      try serverSocket.close() catch { case _: java.io.IOException => () }
      pool.shutdown()
    }
  }
}
/** Per-connection worker: writes the handling thread's name to the client. */
class Handler(socket: Socket) extends Runnable {

  // Bytes sent to the client: the current thread's name plus a "\\n" suffix.
  def message = (Thread.currentThread.getName() + "\\n").getBytes

  def run() {
    val out = socket.getOutputStream
    out.write(message)
    out.close()
  }
}
// Entry point: starts the demo server on port 2020 with 2 worker threads.
// Note `run` is called directly (blocking this thread), not on a new Thread.
object ExecutorsExamples extends App {
  (new NetworkService(2020, 2)).run
}
| Mageswaran1989/aja | src/examples/scala/org/aja/tantra/examples/concurrency/threads/ExecutorsExamples.scala | Scala | apache-2.0 | 1,002 |
package uk.gov.digital.ho.proving.financialstatus.audit
import java.text.SimpleDateFormat
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.databind.SerializationFeature
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import org.bson.Document
import org.slf4j.Logger
import org.slf4j.LoggerFactory
import org.springframework.boot.actuate.audit.AuditEvent
trait LoggingAuditEventBsonMapper extends NewLineRemover {
private val LOGGER: Logger = LoggerFactory.getLogger(classOf[LoggingAuditEventBsonMapper])
val AUDIT_EVENT_LOG_MARKER: String = "AUDIT"
private val mapper: ObjectMapper = new ObjectMapper()
mapper.registerModule(DefaultScalaModule)
mapper.registerModule(new JavaTimeModule())
mapper.setDateFormat(new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ"))
mapper.disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS)
mapper.enable(SerializationFeature.INDENT_OUTPUT)
private def jsonOf(event: AuditEvent): String = mapper.writeValueAsString(event)
def bsonOf(event: AuditEvent): Document = {
val json = jsonOf(event)
val jsonOnOneLine = removeNewlines(json)
LOGGER.info(s"$AUDIT_EVENT_LOG_MARKER: $jsonOnOneLine")
Document.parse(json)
}
}
trait NewLineRemover {
  // Collapses line-break sequences into single spaces so multi-line JSON fits
  // on one log line.
  // NOTE(review): as written, the pattern matches literal backslash-r /
  // backslash-n character pairs rather than real CR/LF characters — confirm
  // the intended escaping level (a plain "\r\n|\r|\n" regex) against actual
  // log output; this may be a source-encoding artifact.
  def removeNewlines(originalString: String): String = originalString.replaceAll("\\\\r\\\\n|\\\\r|\\\\n", " ")
}
| UKHomeOffice/pttg-fs-api | src/main/scala/uk/gov/digital/ho/proving/financialstatus/audit/LoggingAuditEventBsonMapper.scala | Scala | mit | 1,423 |
/*
* Copyright (c) 2013 Scott Abernethy.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package util
object FileUtil {

  /** Lower-cased abbreviation (at most 3 characters) of the file-name part
    * of `in`, i.e. everything after the last '/' (or all of `in` if none).
    */
  def abbr(in: String): String = {
    val pathPart = in.lastIndexOf('/')
    val filePart = if (pathPart > 0) in.substring(pathPart + 1) else in
    // `take(3)` on a String already yields a String — the redundant
    // `.toString` call was removed.
    filePart.take(3).toLowerCase
  }

  // val zeroWidthSpace = "​"
  val zeroWidthSpace = " "

  /** Currently the identity; the zero-width-space insertion (to allow soft
    * line wrapping of long names) is kept below for reference.
    */
  def splitable(in: String): String = {
    //in.replaceAll("\\\\.", "." + zeroWidthSpace).replaceAll("_", "_" + zeroWidthSpace).replaceAll("/", "/" + zeroWidthSpace)
    in
  }
}
| scott-abernethy/opener-of-the-way | app/util/FileUtil.scala | Scala | gpl-3.0 | 1,164 |
/**
* This code is generated using [[https://www.scala-sbt.org/contraband/ sbt-contraband]].
*/
// DO NOT EDIT MANUALLY
package sbt.internal.bsp.codec
import _root_.sjsonnew.{ Unbuilder, Builder, JsonFormat, deserializationError }
// GENERATED by sbt-contraband (see file header) — code must not be hand-edited;
// regenerate from the schema instead. Comments below are review annotations only.
/** sjson-new codec for [[sbt.internal.bsp.ScalaMainClassesResult]]:
  * reads/writes the `items` vector and optional `originId` field.
  */
trait ScalaMainClassesResultFormats { self: sbt.internal.bsp.codec.ScalaMainClassesItemFormats with sjsonnew.BasicJsonProtocol =>
implicit lazy val ScalaMainClassesResultFormat: JsonFormat[sbt.internal.bsp.ScalaMainClassesResult] = new JsonFormat[sbt.internal.bsp.ScalaMainClassesResult] {
  override def read[J](__jsOpt: Option[J], unbuilder: Unbuilder[J]): sbt.internal.bsp.ScalaMainClassesResult = {
    __jsOpt match {
      case Some(__js) =>
      unbuilder.beginObject(__js)
      val items = unbuilder.readField[Vector[sbt.internal.bsp.ScalaMainClassesItem]]("items")
      val originId = unbuilder.readField[Option[String]]("originId")
      unbuilder.endObject()
      sbt.internal.bsp.ScalaMainClassesResult(items, originId)
      case None =>
      // Absent JSON is an error: this codec has no default value to fall back on.
      deserializationError("Expected JsObject but found None")
    }
  }
  override def write[J](obj: sbt.internal.bsp.ScalaMainClassesResult, builder: Builder[J]): Unit = {
    builder.beginObject()
    builder.addField("items", obj.items)
    builder.addField("originId", obj.originId)
    builder.endObject()
  }
}
}
| sbt/sbt | protocol/src/main/contraband-scala/sbt/internal/bsp/codec/ScalaMainClassesResultFormats.scala | Scala | apache-2.0 | 1,309 |
package scala.implicits
/** A special class used to implement negation in implicit search.
*
* Consider the problem of using implicit `i1` for a query type `D` if an implicit
* for some other class `C` is available, and using an implicit `i2` if no implicit
* value of type `C` is available. If we do not want to prioritize `i1` and `i2` by
* putting them in different traits we can instead define the following:
*
* given i1: D(using ev: C) = ...
* given i2: D(using ev: Not[C]) = ...
*
* `Not` is treated specially in implicit search, similar to the way logical negation
* is treated in Prolog: The implicit search for `Not[C]` succeeds if and only if the implicit
* search for `C` fails.
*
* In Scala 2 this form of negation can be simulated by setting up a conditional
* ambiguous implicit and an unconditional fallback, the way it is done with the
* `default`, `amb1` and `amb2` methods below. Due to the way these two methods are
* defined, `Not` is also usable from Scala 2.
*
* In Dotty, ambiguity is a global error, and therefore cannot be used to implement negation.
* Instead, `Not` is treated natively in implicit search.
*/
// The constructor is private: `Not` instances are only created via `Not.value`.
final class Not[+T] private ()
// Low-priority parent of `object Not` so that, under Scala 2 emulation, the
// fallback loses to the ambiguous pair declared in `Not` when `T` is found.
trait LowPriorityNot {

  /** A fallback method used to emulate negation in Scala 2 */
  given default[T] as Not[T] = Not.value
}
object Not extends LowPriorityNot {

  /** A value of type `Not` to signal a successful search for `Not[C]` (i.e. a failing
   *  search for `C`). A reference to this value will be explicitly constructed by Dotty's
   *  implicit search algorithm
   */
  def value: Not[Nothing] = new Not[Nothing]()

  // When an implicit `T` exists, both `amb1` and `amb2` apply, making `Not[T]`
  // ambiguous (i.e. unavailable) in Scala 2; bodies are never executed.
  /** One of two ambiguous methods used to emulate negation in Scala 2 */
  given amb1[T](using ev: T) as Not[T] = ???

  /** One of two ambiguous methods used to emulate negation in Scala 2 */
  given amb2[T](using ev: T) as Not[T] = ???
}
| som-snytt/dotty | library/src/scala/implicits/Not.scala | Scala | apache-2.0 | 1,884 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hbase.examples
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.{Result, Scan, HTable}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.sql.hbase.KeyColumn
import org.apache.spark.sql.hbase.util.{BinaryBytesUtils, HBaseKVHelper}
import org.apache.spark.sql.types.{DoubleType, IntegerType, StringType}
/**
* This example explains how to write data to Hbase table for Astro backend schema.
* For example the following table is created in Astro with following schema.
* CREATE TABLE teacher(grade int, class int, subject string, teacher_name string,
* teacher_age int, PRIMARY KEY (grade, class, subject))
* MAPPED BY (hbase_teacher, COLS=[teacher_name=teachercf.name, teacher_age=teachercf.age]);
* Astro uses its own encode and decode of keys, so we should Astro APIs to convert them
*/
object ReadDataFromHbaseWithAstroSchema {

  /** Scans `hbase_teacher`, decoding Astro-encoded row keys (grade, class,
    * subject) and the name/age columns, printing each row.
    */
  def main(args: Array[String]): Unit = {
    val config = HBaseConfiguration.create
    val table = new HTable(config, "hbase_teacher")
    try {
      val scan = new Scan();
      // Scanning the required columns
      scan.addColumn(Bytes.toBytes("teachercf"), Bytes.toBytes("name"));
      scan.addColumn(Bytes.toBytes("teachercf"), Bytes.toBytes("age"))
      // Getting the scan result
      val scanner = table.getScanner(scan);
      try {
        // Reading values from scan result
        var result = scanner.next()
        while (result != null) {
          //Get the row key
          val rowKey = result.getRow
          //Decode the row keys.The columns which are specified in PRIMARY KEY
          val keys = HBaseKVHelper.decodingRawKeyColumns(rowKey,
            Seq(KeyColumn("grade", IntegerType, 0),
              KeyColumn("class", IntegerType, 1),
              KeyColumn("subject", StringType, 2)))
          val grade = BinaryBytesUtils.toInt(rowKey, keys(0)._1)
          val _class = BinaryBytesUtils.toInt(rowKey, keys(1)._1)
          val subject = BinaryBytesUtils.toUTF8String(rowKey, keys(2)._1, keys(2)._2)
          //Decode column keys
          val nameKey = result.getValue(Bytes.toBytes("teachercf"), Bytes.toBytes("name"))
          val name = BinaryBytesUtils.toUTF8String(nameKey, 0, nameKey.length)
          val age = BinaryBytesUtils.toInt(result.getValue(Bytes.toBytes("teachercf"), Bytes.toBytes("age")), 0)
          //print the results
          println("grade: "+grade+"|class: "+_class+"|subject: "+subject +"|name: "+name+"|age: "+age)
          result = scanner.next()
        }
      } finally {
        // BUGFIX: the scanner was previously never closed, leaking the
        // server-side scanner lease.
        scanner.close()
      }
    } finally {
      // BUGFIX: the table (and its connection resources) was never closed.
      table.close()
    }
  }
}
| jackylk/astro | examples/src/main/scala/org/apache/spark/sql/hbase/examples/ReadDataFromHbaseWithAstroSchema.scala | Scala | apache-2.0 | 3,297 |
package com.equalinformation.poc.akka.scala.track1.tellaskforward
import akka.actor
import akka.actor.Actor
/**
* Created by bpupadhyaya on 2/12/16.
*/
/** Placeholder actor for the tell/ask/forward demo: currently matches String
  * messages and discards them (the `forward` call is left commented out).
  */
class SampleForwardActor extends Actor {
  def receive = {
    case message: String => {
      // actor.forward(message)
    }
  }
}
| bpupadhyaya/AkkaPOC | src/main/scala/com/equalinformation/poc/akka/scala/track1/tellaskforward/SampleForwardActor.scala | Scala | mit | 292 |
package user
/** Credentials submitted by the login form.
  *
  * @param email    the user's e-mail address (login identifier)
  * @param password the user's plaintext password as submitted
  */
case class LoginDetails (
  email: String,
  password: String
)
package com.twitter.finatra.validation
import com.twitter.finatra.conversions.time._
import com.twitter.finatra.validation.ValidationResult._
import org.joda.time.DateTime
object CommonMethodValidations {

  /** Validates an optional time range: both endpoints present delegates to the
    * concrete-range check below; both absent is valid (no range requested);
    * exactly one present is invalid.
    */
  def validateTimeRange(
    startTime: Option[DateTime],
    endTime: Option[DateTime],
    startTimeProperty: String,
    endTimeProperty: String): ValidationResult = {

    (startTime, endTime) match {
      case (Some(start), Some(end)) =>
        validateTimeRange(start, end, startTimeProperty, endTimeProperty)
      case (None, None) =>
        Valid
      case _ =>
        Invalid(
          "both %s and %s are required for a valid range".format(
            startTimeProperty,
            endTimeProperty))
    }
  }

  /** Valid iff `startTime` is strictly before `endTime`. */
  def validateTimeRange(
    startTime: DateTime,
    endTime: DateTime,
    startTimeProperty: String,
    endTimeProperty: String): ValidationResult = {

    val inOrder = startTime < endTime
    ValidationResult(
      inOrder,
      "%s [%s] must be after %s [%s]".format(
        endTimeProperty,
        endTime.utcIso8601,
        startTimeProperty,
        startTime.utcIso8601))
  }
}
package mrtjp.core.inventory
import net.minecraft.inventory.{IInventory, ISidedInventory}
import net.minecraft.tileentity.TileEntity
import net.minecraft.util.Direction
import net.minecraftforge.common.capabilities.Capability
import net.minecraftforge.common.util.LazyOptional
import net.minecraftforge.items.CapabilityItemHandler._
import net.minecraftforge.items.IItemHandler
import net.minecraftforge.items.wrapper.{EmptyHandler, SidedInvWrapper, InvWrapper => MCFInvWrapper}
import java.util.{HashMap => JHashMap}
/** Mixes Forge item-handler capability support into an inventory tile entity:
  * exposes the IInventory through ITEM_HANDLER_CAPABILITY, with per-side
  * wrappers for ISidedInventory implementors.
  */
trait TInventoryCapablilityTile extends TileEntity with IInventory {
  // Side-agnostic wrapper, created once and reused for every null-side query.
  private val globalCap: LazyOptional[IItemHandler] = LazyOptional.of(() => new MCFInvWrapper(this))
  // Lazily-built cache of one wrapper per queried side.
  private val sideMap = new JHashMap[Direction, LazyOptional[_]]()

  override def getCapability[T](cap: Capability[T], side: Direction): LazyOptional[T] =
    if (cap == ITEM_HANDLER_CAPABILITY) {
      // A null side means "internal" access: serve the unsided wrapper.
      if (side == null) {
        return globalCap.cast()
      }
      sideMap.computeIfAbsent(side, s => {
        LazyOptional.of(() => this match {
          case inv: ISidedInventory =>
            // Sided inventories get a wrapper that honors side restrictions.
            new SidedInvWrapper(inv, side)
          case _ =>
            // Plain IInventory tiles expose nothing through sided queries.
            EmptyHandler.INSTANCE
        })
      }).cast()
    } else {
      super.getCapability(cap, side)
    }
}
| MrTJP/MrTJPCore | src/main/scala/mrtjp/core/inventory/TInventoryCapablilityTile.scala | Scala | lgpl-3.0 | 1,380 |
package isabelle.eclipse.ui.preferences
import isabelle.Outer_Syntax
import isabelle.eclipse.ui.editors.IsabellePartitions
/** Definitions of Isabelle syntax classes and translations from
* Isabelle markup/tokens/etc (objects below).
*
* @author Andrius Velykis
*/
object IsabelleSyntaxClasses {

  // One IsabelleSyntaxClass per highlightable token/markup kind; the second
  // argument is the preference-key prefix used with the suffixes defined below.
  val DEFAULT = IsabelleSyntaxClass("Others", "syntax.default")
  val UNDEFINED = IsabelleSyntaxClass("Undefined", "syntax.undef")
  val COMMENT = IsabelleSyntaxClass("Comments", "syntax.comment")
  val INNER_COMMENT = IsabelleSyntaxClass("Inner Comment", "syntax.innerComment")
  val VERBATIM = IsabelleSyntaxClass("Verbatim", "syntax.verbatim")
  val STRING = IsabelleSyntaxClass("Strings", "syntax.string")
  val INNER_STRING = IsabelleSyntaxClass("Inner Strings", "syntax.innerString")
  val KEYWORD = IsabelleSyntaxClass("Keywords", "syntax.keyword")
  val KEYWORD2 = IsabelleSyntaxClass("Keywords 2", "syntax.keyword2")
  val OPERATOR = IsabelleSyntaxClass("Operators", "syntax.op")
  val LITERAL = IsabelleSyntaxClass("Literals", "syntax.literal")
  val DELIMITER = IsabelleSyntaxClass("Delimiters", "syntax.delimiter")
  val TYPE = IsabelleSyntaxClass("Type Variables", "syntax.typevar")
  val FREE = IsabelleSyntaxClass("Free Variables", "syntax.free")
  val SKOLEM = IsabelleSyntaxClass("Skolem Variables", "syntax.skolem")
  val BOUND = IsabelleSyntaxClass("Bound Variables", "syntax.bound")
  val VAR = IsabelleSyntaxClass("Variables", "syntax.var")
  val DYN_FACT = IsabelleSyntaxClass("Dynamic Facts", "syntax.dynFact")
  val ANTIQ = IsabelleSyntaxClass("Antiquotations", "syntax.antiq")
//  val CLASS = IsabelleSyntaxClass("Class Entities", "syntax.entity.class")
  val DOCUMENT_SOURCE = IsabelleSyntaxClass("Document Source", "syntax.docSource")
  val CMD = IsabelleSyntaxClass("Proof Commands", "syntax.cmd.cmd")
  val CMD_SCRIPT = IsabelleSyntaxClass("Proof Script Commands", "syntax.cmd.script")
  val CMD_GOAL = IsabelleSyntaxClass("Structured Proof Commands", "syntax.cmd.goal")
  val ML_SOURCE = IsabelleSyntaxClass("ML Source", "syntax.ml.source")
  val ML_KEYWORD = IsabelleSyntaxClass("ML Keywords", "syntax.ml.keyword")
  val ML_NUMERAL = IsabelleSyntaxClass("ML Numerals", "syntax.ml.num")
  val ML_STRING = IsabelleSyntaxClass("ML Strings", "syntax.ml.string")
  val ML_COMMENT = IsabelleSyntaxClass("ML Comments", "syntax.ml.comment")
  val ACTIVE = IsabelleSyntaxClass("Isabelle Action Links", "syntax.active")
  val DIALOG_SELECTED = IsabelleSyntaxClass("Isabelle Selected Dialog", "syntax.dialog.selected")

  // Groups of syntax classes as shown in the preference UI.
  case class Category(name: String, children: List[IsabelleSyntaxClass])

  val isabelleCategory = Category("Isabelle", List(
    STRING, INNER_STRING,
    KEYWORD, KEYWORD2,
    OPERATOR, LITERAL, DELIMITER, TYPE, FREE, SKOLEM, BOUND, VAR, DYN_FACT, ANTIQ,
    CMD, CMD_SCRIPT, CMD_GOAL,
    DEFAULT))
  val mlCategory = Category("ML", List(
    ML_SOURCE, ML_KEYWORD, ML_NUMERAL, ML_STRING, ML_COMMENT))
  val commentsCategory = Category("Comments", List(
    COMMENT, INNER_COMMENT, VERBATIM, DOCUMENT_SOURCE))
  val actionsCategory = Category("Actions", List(
    ACTIVE, DIALOG_SELECTED))

  val categories = List(isabelleCategory, mlCategory, commentsCategory, actionsCategory)

  // Flattened, de-duplicated list of every class appearing in any category.
  val ALL_SYNTAX_CLASSES = (categories map (_.children)).flatten.distinct

  // Preference-key suffixes appended to each class's base key to address the
  // individual style attributes (colors, font style, underline, ...).
  val COLOR_SUFFIX = ".color"
  val COLOR_ENABLED_SUFFIX = ".colorEnabled"
  val BACKGROUND_COLOR_SUFFIX = ".bgColor"
  val BACKGROUND_COLOR_ENABLED_SUFFIX = ".bgColorEnabled"
  val BOLD_SUFFIX = ".bold"
  val ITALIC_SUFFIX = ".italic"
  val STRIKETHROUGH_SUFFIX = ".strikethrough"
  val UNDERLINE_SUFFIX = ".underline"
  val UNDERLINE_STYLE_SUFFIX = ".underlineStyle"
}
/** Maps document partition types (see [[IsabellePartitions]]) to their
  * syntax highlighting classes; unknown partitions map to `UNDEFINED`.
  */
object IsabellePartitionToSyntaxClass {
  import IsabellePartitions._

  // Lookup table replacing a pattern match; both string-partition types
  // share the STRING class, and the default covers everything else.
  private val partitionClasses: Map[String, IsabelleSyntaxClass] = Map(
    ISABELLE_COMMENT -> IsabelleSyntaxClasses.COMMENT,
    ISABELLE_VERBATIM -> IsabelleSyntaxClasses.VERBATIM,
    ISABELLE_STRING -> IsabelleSyntaxClasses.STRING,
    ISABELLE_ALTSTRING -> IsabelleSyntaxClasses.STRING,
    ISABELLE_KEYWORD -> IsabelleSyntaxClasses.KEYWORD
  ).withDefaultValue(IsabelleSyntaxClasses.UNDEFINED)

  def apply(partition: String): IsabelleSyntaxClass = partitionClasses(partition)
}
/** Maps Isabelle outer-syntax tokens to their syntax highlighting classes.
  *
  * Commands are classified via the keyword kind reported by the prover's
  * outer syntax; all other tokens are classified by their token kind.
  */
object IsabelleTokenToSyntaxClass {
  import isabelle.Token.Kind._
  import isabelle.Keyword._

  // Keyword-kind -> class for command tokens; anything not listed is a
  // plain proof command (CMD).
  val commandClasses: Map[String, IsabelleSyntaxClass] = Map(
    THY_END -> IsabelleSyntaxClasses.KEYWORD2,
    THY_SCRIPT -> IsabelleSyntaxClasses.CMD_SCRIPT,
    PRF_SCRIPT -> IsabelleSyntaxClasses.CMD_SCRIPT,
    PRF_ASM -> IsabelleSyntaxClasses.CMD_GOAL,
    PRF_ASM_GOAL -> IsabelleSyntaxClasses.CMD_GOAL
  ).withDefaultValue(IsabelleSyntaxClasses.CMD)

  // Token-kind -> class for non-command tokens; unlisted kinds (including
  // the commented-out identifier kinds) fall back to UNDEFINED.
  val tokenClasses: Map[Value, IsabelleSyntaxClass] = Map(
    KEYWORD -> IsabelleSyntaxClasses.KEYWORD2,
    //    IDENT -> IsabelleSyntaxClasses.VAR,
    //    LONG_IDENT -> IsabelleSyntaxClasses.VAR,
    //    SYM_IDENT -> IsabelleSyntaxClasses.VAR,
    //    VAR -> IsabelleSyntaxClasses.VAR,
    //    TYPE_IDENT -> IsabelleSyntaxClasses.TYPE,
    //    TYPE_VAR -> IsabelleSyntaxClasses.TYPE,
    NAT -> IsabelleSyntaxClasses.UNDEFINED,
    FLOAT -> IsabelleSyntaxClasses.UNDEFINED,
    STRING -> IsabelleSyntaxClasses.STRING,
    ALT_STRING -> IsabelleSyntaxClasses.STRING,
    VERBATIM -> IsabelleSyntaxClasses.VERBATIM,
    SPACE -> IsabelleSyntaxClasses.UNDEFINED,
    COMMENT -> IsabelleSyntaxClasses.COMMENT //,
  //    ERROR -> IsabelleSyntaxClasses.UNDEFINED
  ).withDefaultValue(IsabelleSyntaxClasses.UNDEFINED)

  /** Classifies a single token within the given outer syntax. */
  def apply(syntax: Outer_Syntax, token: isabelle.Token): IsabelleSyntaxClass =
    if (token.is_command) commandClasses(syntax.keyword_kind(token.content).getOrElse(""))
    else if (token.is_operator) IsabelleSyntaxClasses.OPERATOR
    else tokenClasses(token.kind)
}
/** Maps Isabelle semantic markup names (from the prover) to syntax
  * highlighting classes. Three maps are kept separate so callers can
  * distinguish plain markup, interactive "action" markup, and source-region
  * markup; `allMarkupClasses` merges them with an UNDEFINED default.
  */
object IsabelleMarkupToSyntaxClass {

  // Markup name for a selected dialog option — not defined in
  // isabelle.Markup, so declared locally.
  val DIALOG_SELECTED = "dialog-selected"

  import isabelle.Markup._

  // Ordinary semantic markup.
  val markupClasses: Map[String, IsabelleSyntaxClass] = Map(
    KEYWORD1 -> IsabelleSyntaxClasses.KEYWORD,
    KEYWORD2 -> IsabelleSyntaxClasses.KEYWORD2,
    LITERAL -> IsabelleSyntaxClasses.LITERAL,
    DELIMITER -> IsabelleSyntaxClasses.DELIMITER,
    TFREE -> IsabelleSyntaxClasses.TYPE,
    TVAR -> IsabelleSyntaxClasses.TYPE,
    FREE -> IsabelleSyntaxClasses.FREE,
    SKOLEM -> IsabelleSyntaxClasses.SKOLEM,
    BOUND -> IsabelleSyntaxClasses.BOUND,
    VAR -> IsabelleSyntaxClasses.VAR,
    INNER_STRING -> IsabelleSyntaxClasses.INNER_STRING,
    INNER_COMMENT -> IsabelleSyntaxClasses.INNER_COMMENT,
    DYNAMIC_FACT -> IsabelleSyntaxClasses.DYN_FACT,
    ANTIQ -> IsabelleSyntaxClasses.ANTIQ,
    ML_KEYWORD -> IsabelleSyntaxClasses.ML_KEYWORD,
    ML_DELIMITER -> IsabelleSyntaxClasses.DELIMITER,
    ML_NUMERAL -> IsabelleSyntaxClasses.ML_NUMERAL,
    ML_CHAR -> IsabelleSyntaxClasses.ML_STRING,
    ML_STRING -> IsabelleSyntaxClasses.ML_STRING,
    ML_COMMENT -> IsabelleSyntaxClasses.ML_COMMENT
  )

  // Interactive markup: clickable links and prover dialogs.
  val actionMarkupClasses: Map[String, IsabelleSyntaxClass] = Map(
    BROWSER -> IsabelleSyntaxClasses.ACTIVE,
    GRAPHVIEW -> IsabelleSyntaxClasses.ACTIVE,
    SENDBACK -> IsabelleSyntaxClasses.ACTIVE,
    DIALOG -> IsabelleSyntaxClasses.ACTIVE,
    DIALOG_SELECTED -> IsabelleSyntaxClasses.DIALOG_SELECTED
  )

  // Markup for embedded source regions (document and ML source).
  val sourceMarkupClasses: Map[String, IsabelleSyntaxClass] = Map(
    //    VERBATIM -> IsabelleSyntaxClasses.VERBATIM,
    //    STRING -> IsabelleSyntaxClasses.STRING,
    //    ALTSTRING -> IsabelleSyntaxClasses.STRING,
    // `doc_source` is used in Isabelle2013, will change to `DOCUMENT_SOURCE` in the next release.
    // see http://isabelle.in.tum.de/repos/isabelle/rev/e09446d3caca
    "doc_source" -> IsabelleSyntaxClasses.DOCUMENT_SOURCE,
    DOCUMENT_SOURCE -> IsabelleSyntaxClasses.DOCUMENT_SOURCE,
    ML_SOURCE -> IsabelleSyntaxClasses.ML_SOURCE
  )

  // Union of all three maps; unknown markup falls back to UNDEFINED.
  val allMarkupClasses: Map[String, IsabelleSyntaxClass] =
    (markupClasses ++ sourceMarkupClasses ++ actionMarkupClasses).
      withDefaultValue(IsabelleSyntaxClasses.UNDEFINED)

  def apply(markupType: String): IsabelleSyntaxClass = allMarkupClasses(markupType)
}
| andriusvelykis/isabelle-eclipse | isabelle.eclipse.ui/src/isabelle/eclipse/ui/preferences/IsabelleSyntaxClasses.scala | Scala | epl-1.0 | 7,960 |
package org.ensime.model
import org.ensime.api._
import org.ensime.util.file._
import org.apache.commons.vfs2.FileObject
import org.ensime.core.RichPresentationCompiler
import org.ensime.indexer.DatabaseService._
import org.ensime.indexer.EnsimeVFS
import scala.collection.mutable
import scala.reflect.internal.util.{ NoPosition, Position, RangePosition }
import scala.tools.nsc.io.AbstractFile
trait ModelBuilders { self: RichPresentationCompiler =>
import rootMirror.RootPackage
// Session-local cache of inspected types. Each type gets a stable integer
// id so clients can refer back to it; the reverse map guarantees the same
// type always yields the same id.
private val typeCache = new mutable.HashMap[Int, Type]
private val typeCacheReverse = new mutable.HashMap[Type, Int]

/** Drops all cached type ids (both directions). */
def clearTypeCache(): Unit = {
  typeCache.clear()
  typeCacheReverse.clear()
}

/** Looks up a previously cached type by its id, if present. */
def typeById(id: Int): Option[Type] = {
  typeCache.get(id)
}

/** Returns the id for `tpe`, assigning a fresh id (ids start at 1) the
  * first time a type is seen.
  *
  * Uses `getOrElseUpdate` so the reverse map is consulted only once,
  * instead of the previous `contains` check followed by `apply`.
  */
def cacheType(tpe: Type): Int = {
  typeCacheReverse.getOrElseUpdate(tpe, {
    val id = typeCache.size + 1
    typeCache(id) = tpe
    id
  })
}
/** Finds a source position for `sym`, falling back to the symbol's
  * companion (class <-> object) when the symbol itself yields none.
  *
  * @param sym the symbol to locate (may be `NoSymbol`)
  * @param needPos how precise (and how expensive) the lookup may be
  */
def locateSymbolPos(sym: Symbol, needPos: PosNeeded): Option[SourcePosition] = {
  _locateSymbolPos(sym, needPos).orElse({
    logger.debug(s"search $sym: Try Companion")
    sym.companionSymbol match {
      case NoSymbol => None
      case s: Symbol => _locateSymbolPos(s, needPos)
    }
  })
}
/** Locates the definition position of `sym` directly (no companion
  * fallback — see `locateSymbolPos`).
  *
  * A position already attached to the symbol is used as-is. Otherwise the
  * search index is consulted, but only when `needPos` is `PosNeededYes`,
  * because that path is comparatively expensive (index query, possibly
  * extracting sources from archives).
  */
def _locateSymbolPos(sym: Symbol, needPos: PosNeeded): Option[SourcePosition] = {
  if (sym == NoSymbol || needPos == PosNeededNo)
    None
  else if (sym.pos != NoPosition) {
    if (needPos == PosNeededYes || needPos == PosNeededAvail) {
      OffsetSourcePositionHelper.fromPosition(sym.pos)
    } else
      Some(EmptySourcePosition())
  } else {
    // only perform operations if actively requested - this is comparatively expensive
    if (needPos == PosNeededYes) {
      // we might need this for some Java fqns but we need some evidence
      // val name = genASM.jsymbol(sym).fullName
      val name = symbolIndexerName(sym)
      val hit = search.findUnique(name)
      logger.debug(s"search: $name = $hit")
      hit.flatMap(LineSourcePositionHelper.fromFqnSymbol(_)(config, vfs)).flatMap { sourcePos =>
        // For Scala sources, refine the line-based index hit into an exact
        // offset by asking the compiler for the link position.
        if (sourcePos.file.getName.endsWith(".scala"))
          askLinkPos(sym, AbstractFile.getFile(sourcePos.file)).
            flatMap(pos => OffsetSourcePositionHelper.fromPosition(pos))
        else
          Some(sourcePos)
      }
    } else
      None
  }
}
/** When inspecting a type, transforms a raw list of `TypeMember`s into a
  * sorted list of `InterfaceInfo` objects, each with its own list of sorted
  * member infos.
  *
  * Members are grouped by their owner type, owners are ordered by the
  * subtype relation, and within each owner the members are sorted by name
  * and split into nested types, fields, constructors and methods.
  *
  * @param members the raw members reported by the compiler
  * @param parents parent types that should appear even when they
  *                contributed no visible members
  */
def prepareSortedInterfaceInfo(members: Iterable[Member], parents: Iterable[Type]): Iterable[InterfaceInfo] = {
  // ...filtering out non-visible and non-type members
  val visMembers: Iterable[TypeMember] = members.flatMap {
    case m @ TypeMember(sym, tpe, true, _, _) => List(m)
    case _ => List.empty
  }

  // Ensure every parent appears in the result, even with no members.
  val parentMap = parents.map(_.typeSymbol -> List[TypeMember]()).toMap
  val membersMap = visMembers.groupBy {
    case TypeMember(sym, _, _, _, _) => sym.owner
  }

  // Create a list of pairs [(typeSym, membersOfSym)]
  val membersByOwner = (parentMap ++ membersMap).toList.sortWith {
    // Sort the pairs on the subtype relation
    case ((s1, _), (s2, _)) => s1.tpe <:< s2.tpe
  }

  membersByOwner.map {
    case (ownerSym, members) =>

      // If all the members in this interface were
      // provided by the same view, remember that
      // view for later display to user.
      val byView = members.groupBy(_.viaView)
      val viaView = if (byView.size == 1) {
        byView.keys.headOption.filter(_ != NoSymbol)
      } else { None }

      // Do one top level sort by name on members, before
      // subdividing into kinds of members.
      val sortedMembers = members.toList.sortWith { (a, b) =>
        a.sym.nameString <= b.sym.nameString
      }

      // Convert type members into NamedTypeMemberInfos
      // and divide into different kinds..
      val nestedTypes = new mutable.ArrayBuffer[NamedTypeMemberInfo]()
      val constructors = new mutable.ArrayBuffer[NamedTypeMemberInfo]()
      val fields = new mutable.ArrayBuffer[NamedTypeMemberInfo]()
      val methods = new mutable.ArrayBuffer[NamedTypeMemberInfo]()

      for (tm <- sortedMembers) {
        val info = NamedTypeMemberInfo(tm)
        val decl = info.declAs
        if (decl == DeclaredAs.Method) {
          // A member named "this" is a constructor.
          if (info.name == "this") {
            constructors += info
          } else {
            methods += info
          }
        } else if (decl == DeclaredAs.Field) {
          fields += info
        } else if (decl == DeclaredAs.Class || decl == DeclaredAs.Trait ||
          decl == DeclaredAs.Interface || decl == DeclaredAs.Object) {
          nestedTypes += info
        }
      }

      // Presentation order: nested types, fields, constructors, methods.
      val sortedInfos = nestedTypes ++ fields ++ constructors ++ methods

      new InterfaceInfo(TypeInfo(ownerSym.tpe, PosNeededAvail, sortedInfos), viaView.map(_.name.toString))
  }
}
/** Builders for `PackageInfo` values describing Scala packages and their
  * members.
  */
object PackageInfo {

  /** Info for the root package (`_root_`). */
  def root: PackageInfo = fromSymbol(RootPackage)

  /** Resolves a dotted package path, returning `nullInfo` when the package
    * cannot be found.
    */
  def fromPath(path: String): PackageInfo =
    packageSymFromPath(path) match {
      case Some(packSym) => fromSymbol(packSym)
      case None => nullInfo
    }

  // TODO THIS SHOULD NOT EXIST
  val nullInfo = new PackageInfo("NA", "NA", List.empty)

  // Package members are always presented sorted by name.
  private def sortedMembers(items: Iterable[EntityInfo]) = {
    items.toList.sortBy(_.name)
  }

  /** Builds the info for a package symbol, including its sorted members.
    * The root package is reported under the stable names "root"/"_root_".
    */
  def fromSymbol(sym: Symbol): PackageInfo = {
    val members = sortedMembers(packageMembers(sym).flatMap(packageMemberInfoFromSym))
    if (sym.isRoot || sym.isRootPackage) {
      new PackageInfo("root", "_root_", members)
    } else {
      new PackageInfo(sym.name.toString, sym.fullName, members)
    }
  }

  /** Converts a package member symbol to an `EntityInfo`:
    * sub-packages become `PackageInfo`s, visible classes/traits/objects
    * become `TypeInfo`s, and synthetic (`$`) or typeless symbols are
    * skipped.
    */
  def packageMemberInfoFromSym(sym: Symbol): Option[EntityInfo] = {
    try {
      if (sym == RootPackage) {
        Some(root)
      } else if (sym.hasPackageFlag) {
        Some(fromSymbol(sym))
      } else if (!sym.nameString.contains("$") && (sym != NoSymbol) && (sym.tpe != NoType)) {
        if (sym.isClass || sym.isTrait || sym.isModule ||
          sym.isModuleClass || sym.isPackageClass) {
          Some(TypeInfo(sym.tpe, PosNeededAvail))
        } else {
          None
        }
      } else {
        None
      }
    } catch {
      // Interrogating compiler symbols can fail in many recoverable ways;
      // treat any such failure as "no info". Previously this caught
      // Throwable, which also swallowed fatal errors and the compiler's
      // control-flow throwables (ControlThrowable) — those must propagate.
      case scala.util.control.NonFatal(_) => None
    }
  }
}
/** Builders for `TypeInfo` values describing compiler types. */
object TypeInfo {

  // use needPos=PosNeededYes sparingly as it potentially causes lots of I/O
  /** Builds a `TypeInfo` for `typ`, caching the type for later lookup by id.
    *
    * Method/poly types become `ArrowTypeInfo`s; everything else becomes a
    * `BasicTypeInfo`. Existential quantification is stripped.
    *
    * @param typ the compiler type to describe
    * @param needPos whether (and how precisely) to locate the definition
    * @param members pre-computed member infos to attach, if any
    */
  def apply(typ: Type, needPos: PosNeeded = PosNeededNo, members: Iterable[EntityInfo] = List.empty): TypeInfo = {
    val tpe = typ match {
      // TODO: Instead of throwing away this information, would be better to
      // alert the user that the type is existentially quantified.
      case et: ExistentialType => et.underlying
      case t => t
    }

    // Describes a plain (non-arrow) type, locating its definition and the
    // id of its enclosing class, if any.
    def basicTypeInfo(tpe: Type): BasicTypeInfo = {
      val typeSym = tpe.typeSymbol
      // For object types, locate the source module rather than the
      // synthetic module class.
      val symbolToLocate = if (typeSym.isModuleClass) typeSym.sourceModule else typeSym
      val symPos = locateSymbolPos(symbolToLocate, needPos)
      val outerTypeId = outerClass(typeSym).map(s => cacheType(s.tpe))

      new BasicTypeInfo(
        typeShortName(tpe),
        cacheType(tpe),
        declaredAs(typeSym),
        typeFullName(tpe),
        tpe.typeArgs.map(TypeInfo(_)),
        members,
        symPos,
        outerTypeId
      )
    }

    tpe match {
      case tpe: MethodType => ArrowTypeInfo(tpe)
      case tpe: PolyType => ArrowTypeInfo(tpe)
      // A nullary method (e.g. a getter) is described by its result type.
      case tpe: NullaryMethodType => basicTypeInfo(tpe.resultType)
      case tpe: Type => basicTypeInfo(tpe)
      case _ => nullInfo
    }
  }

  // Placeholder info for unknown/unavailable types.
  def nullInfo = {
    new BasicTypeInfo("NA", -1, DeclaredAs.Nil, "NA", List.empty, List.empty, None, None)
  }
}
/** Builder for `ParamSectionInfo` from one compiler parameter list. */
object ParamSectionInfo {
  def apply(params: Iterable[Symbol]): ParamSectionInfo = {
    // Pair each parameter name with its type info, and record whether this
    // section is an implicit parameter list.
    val namedTypes = params.map { param => (param.nameString, TypeInfo(param.tpe)) }
    val isImplicitSection = params.exists(_.isImplicit)
    new ParamSectionInfo(namedTypes, isImplicitSection)
  }
}
/** Builders for `CallCompletionInfo`, describing a callable's parameter
  * sections and result type (used for call-tip completion).
  */
object CallCompletionInfo {

  /** Describes `tpe` if it is callable (method or polymorphic type);
    * returns the placeholder info otherwise.
    */
  def apply(tpe: Type): CallCompletionInfo = {
    tpe match {
      case tpe: MethodType => apply(tpe.paramss.map(ParamSectionInfo.apply), tpe.finalResultType)
      case tpe: PolyType => apply(tpe.paramss.map(ParamSectionInfo.apply), tpe.finalResultType)
      case _ => nullInfo()
    }
  }

  def apply(paramSections: List[ParamSectionInfo], finalResultType: Type): CallCompletionInfo = {
    new CallCompletionInfo(
      TypeInfo(finalResultType),
      paramSections
    )
  }

  // Placeholder for non-callable types.
  def nullInfo() = {
    new CallCompletionInfo(TypeInfo.nullInfo, List.empty)
  }
}
/** Builder for `SymbolInfo`, describing a single compiler symbol: its
  * (full or local) name, definition position, type, and owner type id.
  */
object SymbolInfo {
  def apply(sym: Symbol): SymbolInfo = {
    // Ask the compiler for the symbol's type; fall back to NoType when the
    // ask fails.
    val tpe = askOption(sym.tpe).getOrElse(NoType)

    val nameString = sym.nameString
    val isTypeLike =
      sym.isClass || sym.isTrait || sym.isModule || sym.isModuleClass || sym.isPackageClass
    // Type-like entities are reported under their full type name; other
    // symbols use the simple name for both fields.
    val (name, localName) =
      if (isTypeLike) (typeFullName(tpe), nameString)
      else (nameString, nameString)

    val ownerTpe =
      if (sym.owner != NoSymbol && sym.owner.tpe != NoType) Some(sym.owner.tpe)
      else None

    new SymbolInfo(
      name,
      localName,
      locateSymbolPos(sym, PosNeededYes),
      TypeInfo(tpe, PosNeededAvail),
      isArrowType(tpe),
      ownerTpe.map(cacheType)
    )
  }
}
/** Builders for `CompletionInfo`, one entry in a completion popup. */
object CompletionInfo {

  def apply(
    name: String,
    tpeSig: CompletionSignature,
    tpeId: Int,
    isCallable: Boolean,
    relevance: Int,
    toInsert: Option[String]
  ) = new CompletionInfo(
    name, tpeSig, tpeId, isCallable, relevance, toInsert
  )

  /** Builds a completion entry from a symbol, using the symbol's own type. */
  def fromSymbol(sym: Symbol, relevance: Int): CompletionInfo =
    CompletionInfo.fromSymbolAndType(sym, sym.tpe, relevance)

  /** Builds a completion entry from a symbol with an explicitly supplied
    * type (e.g. the type as seen from the completion site).
    */
  def fromSymbolAndType(sym: Symbol, tpe: Type, relevance: Int): CompletionInfo = {
    CompletionInfo(
      sym.nameString,
      completionSignatureForType(tpe),
      cacheType(tpe.underlying),
      isArrowType(tpe.underlying),
      relevance,
      None
    )
  }
}
/** Builder for `NamedTypeMemberInfo` from a compiler `TypeMember`. */
object NamedTypeMemberInfo {
  def apply(m: TypeMember): NamedTypeMemberInfo = {
    val decl = declaredAs(m.sym)
    // Only record that a position exists; the actual location is resolved
    // lazily elsewhere.
    val pos = if (m.sym.pos == NoPosition) None else Some(EmptySourcePosition())
    // Signature strings are only meaningful for methods.
    val signatureString = if (decl == DeclaredAs.Method) Some(m.sym.signatureString) else None
    new NamedTypeMemberInfo(m.sym.nameString, TypeInfo(m.tpe), pos, signatureString, decl)
  }
}
/** Builders for `ArrowTypeInfo`, describing callable (method/poly) types
  * as parameter sections plus a result type.
  */
object ArrowTypeInfo {

  /** Describes `tpe` if it is a method or polymorphic type; returns the
    * placeholder info otherwise.
    */
  def apply(tpe: Type): ArrowTypeInfo = {
    tpe match {
      case tpe: MethodType => apply(tpe, tpe.paramss.map(ParamSectionInfo.apply), tpe.finalResultType)
      case tpe: PolyType => apply(tpe, tpe.paramss.map(ParamSectionInfo.apply), tpe.finalResultType)
      case _ => nullInfo()
    }
  }

  def apply(tpe: Type, paramSections: List[ParamSectionInfo], finalResultType: Type): ArrowTypeInfo = {
    new ArrowTypeInfo(
      tpe.toString(),
      cacheType(tpe),
      TypeInfo(tpe.finalResultType),
      paramSections
    )
  }

  // Placeholder for non-callable types.
  def nullInfo() = {
    new ArrowTypeInfo("NA", -1, TypeInfo.nullInfo, List.empty)
  }
}
}
object LineSourcePositionHelper {
  // HACK: the emacs client currently can't open files in jars
  // so we extract to the cache and report that as the source
  // see the hack in the RichPresentationCompiler

  import org.ensime.util.RichFileObject._
  import org.ensime.util.io._

  /** Returns a plain file for `fo`: the local file itself, or — when `fo`
    * lives inside an archive — a read-only copy extracted under the cache
    * directory (created on first access).
    */
  private def possiblyExtractFile(fo: FileObject)(implicit config: EnsimeConfig): File =
    fo.pathWithinArchive match {
      case None => fo.asLocalFile
      case Some(path) =>
        // subpath expected by the client
        val file = (config.cacheDir / "dep-src" / "source-jars" / path)
        if (!file.exists) {
          // create and populate the file if it does not exist
          // https://github.com/ensime/ensime-server/issues/761
          file.getParentFile.mkdirs()
          file.outputStream().drain(fo.getContent.getInputStream)
          file.setWritable(false)
        }
        file
    }

  /** Builds a line-based source position from an indexed symbol, extracting
    * archived sources if necessary. `None` when the symbol has no source.
    * A missing line number defaults to 0.
    */
  def fromFqnSymbol(sym: FqnSymbol)(implicit config: EnsimeConfig, vfs: EnsimeVFS): Option[LineSourcePosition] =
    (sym.sourceFileObject, sym.line, sym.offset) match {
      case (None, _, _) => None
      case (Some(fo), lineOpt, offsetOpt) =>
        val f = possiblyExtractFile(fo)
        Some(new LineSourcePosition(f, lineOpt.getOrElse(0)))
    }
}
object OffsetSourcePositionHelper {
  import org.ensime.util.file._

  /** Converts a compiler `Position` into an offset-based source position,
    * returning `None` for the undefined position.
    */
  def fromPosition(p: Position): Option[OffsetSourcePosition] =
    if (p == NoPosition) None
    else Some(new OffsetSourcePosition(File(p.source.file.path).canon, p.point))
}
/** Converts a compiler `RangePosition` (point + start/end offsets) into the
  * wire-protocol `ERangePosition`.
  */
object ERangePositionHelper {
  def fromRangePosition(rp: RangePosition): ERangePosition = new ERangePosition(rp.source.path, rp.point, rp.start, rp.end)
}
| eddsteel/ensime | core/src/main/scala/org/ensime/model/ModelBuilders.scala | Scala | gpl-3.0 | 13,181 |
package com.awesomesauce.minecraft.forge.core.components
import com.awesomesauce.minecraft.forge.core.lib.TAwesomeSauceMod
import com.awesomesauce.minecraft.forge.core.lib.item.Description
import com.awesomesauce.minecraft.forge.core.lib.util.ItemUtil
@Mod(modid = "AwesomeSauceComponents", name = "AwesomeSauceComponents", version = "0.1.0", modLanguage = "scala")
object AwesomeSauceComponents extends TAwesomeSauceMod {
// Grinder recipe table (ore-dict name -> output stack); populated in
// postInit and left null if that fails.
var grinderRecipes: scala.collection.mutable.Map[String, ItemStack] = null

// Items and blocks registered by this mod; assigned during init().
var ingotImpureAwesomeite: Item = null
var ingotPureAwesomeite: Item = null
var dustImpureAwesomeite: Item = null

// NOTE(review): `noTab` is immediately followed by assigning a tab —
// the two look contradictory; confirm the intended creative-tab setup.
noTab
tab = CreativeTabs.tabMaterials

var dustPureAwesomeite: Item = null
var flour: Item = null
var nuggetImpureAwesomeite: Item = null
var blockImpureAwesomeite: Block = null
var nuggetPureAwesomeite: Item = null
var blockPureAwesomeite: Block = null
var awesomeCore: Block = null
var awesomeiteHammer: Item = null

// Mod metadata injected by Forge.
@Mod.Metadata("AwesomeSauceComponents")
var metadata:
ModMetadata = null

// FML lifecycle hooks, forwarded to the TAwesomeSauceMod base.
@EventHandler
def aspri(e: FMLPreInitializationEvent) = super.awesomesaucepreinit(e)
@EventHandler
def asi(e: FMLInitializationEvent) = super.awesomesauceinit(e)
@EventHandler
def aspoi(e: FMLPostInitializationEvent) = super.awesomesaucepostinit(e)
/** Looks up the first item registered under an ore-dictionary name, or
  * `null` when nothing is registered (or the lookup fails).
  *
  * Centralises the try/catch-to-null pattern previously duplicated in
  * every accessor below. NOTE(review): returning null preserves the
  * original behaviour; callers must be prepared for it.
  */
private def firstOreItemOrNull(oreName: String): Item =
  try {
    OreDictionary.getOres(oreName).get(0).getItem
  } catch {
    case e: Exception => null
  }

// Ingots resolved via the ore dictionary (null when absent).
def ingotBronze = firstOreItemOrNull("ingotBronze")
def ingotCopper = firstOreItemOrNull("ingotCopper")
def ingotSilver = firstOreItemOrNull("ingotSilver")
def ingotTin = firstOreItemOrNull("ingotTin")

override def preInit = {}

/** Builds the grinder recipe table once all mods have registered their
  * items. Any failure (e.g. a missing dust) leaves `grinderRecipes`
  * untouched (null), as before.
  */
override def postInit = {
  try {
    grinderRecipes = scala.collection.mutable.Map(
      "oreIron" -> new ItemStack(dustIron, 2),
      "oreGold" -> new ItemStack(dustGold, 2),
      "oreTin" -> new ItemStack(dustTin, 2),
      "oreCopper" -> new ItemStack(dustCopper, 2),
      "oreSilver" -> new ItemStack(dustSilver, 2),
      "wheat" -> new ItemStack(flour))
  }
  catch {
    case e: Exception => {}
  }
}

// Dusts resolved via the ore dictionary (null when absent).
def dustCopper = firstOreItemOrNull("dustCopper")
def dustSilver = firstOreItemOrNull("dustSilver")
def dustTin = firstOreItemOrNull("dustTin")
def dustIron = firstOreItemOrNull("dustIron")
def dustGold = firstOreItemOrNull("dustGold")
/** Registers all items, blocks and recipes for this mod.
  *
  * Registration order matters: flour first, then the impure awesomeite
  * chain (dust -> ingot -> nugget -> block), the hammer, then the pure
  * chain, and finally the awesome core block.
  */
override def init() = {
  // Flour: smelts into bread; also registered under several ore-dict names.
  flour = ItemUtil.makeItem(this, "flour", true)
  flour.asInstanceOf[Description].addUsage("Smelting", "Smelted into bread.")
  ItemUtil.addSmelting(this, new ItemStack(flour), new ItemStack(Items.bread), 2.0F)
  OreDictionary.registerOre("flour", flour)
  OreDictionary.registerOre("dustWheat", flour)
  OreDictionary.registerOre("wheat", Items.wheat)
  OreDictionary.registerOre("diamond", Items.diamond)
  OreDictionary.registerOre("flint", Items.flint)

  // Impure awesomeite: ingot, dust (smelts to ingot), nugget and block,
  // with 9:1 conversions between nugget/ingot/block.
  ingotImpureAwesomeite = ItemUtil.makeItem(this, "ingotImpureAwesomeite",
    true)
  ItemUtil.addDescription(this, "awesomeite", ingotImpureAwesomeite.asInstanceOf[Description])
  OreDictionary.registerOre("ingotAwesomeite", ingotImpureAwesomeite)
  dustImpureAwesomeite = ItemUtil.makeItem(this, "dustImpureAwesomeite",
    true)
  ItemUtil.addDescription(this, "awesomeite", dustImpureAwesomeite.asInstanceOf[Description])
  OreDictionary.registerOre("dustAwesomeite", dustImpureAwesomeite)
  ItemUtil.addSmelting(this, new ItemStack(dustImpureAwesomeite), new ItemStack(
    ingotImpureAwesomeite), 2F)
  ItemUtil.addRecipe(this, new ShapelessOreRecipe(new ItemStack(dustImpureAwesomeite, 6),
    "dustBronze", "dustElectrum", "dustInvar", "dustLead",
    "dustRedstone", "dustRedstone", "dustRedstone", "dustRedstone"))
  nuggetImpureAwesomeite = ItemUtil.makeItem(this, "nuggetImpureAwesomeite", true)
  ItemUtil.addDescription(this, "awesomeite", nuggetImpureAwesomeite.asInstanceOf[Description])
  OreDictionary.registerOre("nuggetAwesomeite", nuggetImpureAwesomeite)
  ItemUtil.addRecipe(this, new ShapelessOreRecipe(new ItemStack(nuggetImpureAwesomeite, 9), "ingotImpureAwesomeite"))
  ItemUtil.addRecipe(this, new ShapedOreRecipe(new ItemStack(ingotImpureAwesomeite), "xxx", "xxx", "xxx", Character.valueOf('x'), "nuggetImpureAwesomeite"))
  blockImpureAwesomeite = ItemUtil.makeBlock(this, "blockImpureAwesomeite", Material.iron, true).setCreativeTab(CreativeTabs.tabBlock)
  ItemUtil.addRecipe(this, new ShapelessOreRecipe(new ItemStack(ingotImpureAwesomeite, 9), "blockImpureAwesomeite"))
  ItemUtil.addRecipe(this, new ShapedOreRecipe(new ItemStack(blockImpureAwesomeite), "xxx", "xxx", "xxx", Character.valueOf('x'), "ingotImpureAwesomeite"))

  // The hammer crafting tool.
  awesomeiteHammer = ItemUtil.makeItem(this, "awesomeiteHammer", true)
  ItemUtil.addRecipe(this, new ShapedOreRecipe(new ItemStack(awesomeiteHammer), "xx", "yx", Character.valueOf('x'), "nuggetAwesomeite", Character.valueOf('y'), "stickWood"))

  // Impure dust recipes: richer recipes when the alloy ingots exist
  // (e.g. Thermal Expansion installed), otherwise a vanilla fallback.
  if (OreDictionary.getOres("ingotBronze").size() > 0 && OreDictionary.getOres("ingotElectrum").size() > 0 && OreDictionary.getOres("ingotInvar").size() > 0 && OreDictionary.getOres("ingotLead").size() > 0) {
    ItemUtil.addRecipe(this, new ShapelessOreRecipe(new ItemStack(dustImpureAwesomeite, 6),
      "dustBronze", "dustElectrum", "dustInvar", "dustLead",
      "dustRedstone", "dustRedstone", "dustRedstone", "dustRedstone"))
    ItemUtil.addRecipe(this, new ShapelessOreRecipe(new ItemStack(dustImpureAwesomeite, 6), "ingotBronze", "ingotElectrum", "ingotInvar", "ingotLead",
      "dustRedstone", "dustRedstone", "dustRedstone", "dustRedstone", "awesomeiteHammer"))
  }
  else {
    ItemUtil.addRecipe(this, new ShapelessOreRecipe(new ItemStack(dustImpureAwesomeite, 2), "ingotIron", "ingotGold", "dustRedstone", "dustRedstone", "dustRedstone", "dustRedstone"))
  }

  // Pure awesomeite chain, mirroring the impure one.
  ingotPureAwesomeite = ItemUtil.makeItem(this, "ingotPureAwesomeite", true)
  ItemUtil.addDescription(this, "pureAwesomeite", ItemUtil.addDescription(this, "awesomeite", ingotPureAwesomeite.asInstanceOf[Description]))
  OreDictionary.registerOre("ingotAwesomeite", ingotPureAwesomeite)
  dustPureAwesomeite = ItemUtil.makeItem(this, "dustPureAwesomeite", true)
  ItemUtil.addDescription(this, "pureAwesomeite", ItemUtil.addDescription(this, "awesomeite", dustPureAwesomeite.asInstanceOf[Description]))
  ItemUtil.addSmelting(this, new ItemStack(dustPureAwesomeite), new ItemStack(ingotPureAwesomeite), 2F)
  ItemUtil.addRecipe(this, new ShapelessOreRecipe(new ItemStack(dustPureAwesomeite, 6), "dustImpureAwesomeite", "dustImpureAwesomeite", "dustImpureAwesomeite", "diamond", "flint"))
  OreDictionary.registerOre("dustAwesomeite", dustPureAwesomeite)
  nuggetPureAwesomeite = ItemUtil.makeItem(this, "nuggetPureAwesomeite", true)
  OreDictionary.registerOre("nuggetAwesomeite", nuggetPureAwesomeite)
  ItemUtil.addDescription(this, "pureAwesomeite", ItemUtil.addDescription(this, "awesomeite", nuggetPureAwesomeite.asInstanceOf[Description]))
  ItemUtil.addRecipe(this, new ShapelessOreRecipe(new ItemStack(nuggetPureAwesomeite, 9), "ingotPureAwesomeite"))
  ItemUtil.addRecipe(this, new ShapedOreRecipe(new ItemStack(ingotPureAwesomeite), "xxx", "xxx", "xxx", Character.valueOf('x'), "nuggetPureAwesomeite"))
  blockPureAwesomeite = ItemUtil.makeBlock(this, "blockPureAwesomeite", Material.iron, true).setCreativeTab(CreativeTabs.tabBlock)
  // NOTE(review): this recipe references ore name "blockAwesomeite", but no
  // visible registerOre call uses that name (the block above is
  // "blockPureAwesomeite") — confirm the ore name is registered elsewhere.
  ItemUtil.addRecipe(this, new ShapelessOreRecipe(new ItemStack(ingotPureAwesomeite, 9), "blockAwesomeite"))
  ItemUtil.addRecipe(this, new ShapedOreRecipe(new ItemStack(blockPureAwesomeite), "xxx", "xxx", "xxx", Character.valueOf('x'), "ingotPureAwesomeite"))

  // Awesome core block.
  awesomeCore = ItemUtil.makeBlock(this, "awesomeCore", Material.iron, true).setCreativeTab(CreativeTabs.tabBlock)
  ItemUtil.addRecipe(this, new ShapedOreRecipe(new ItemStack(awesomeCore), "xyx", "yyy", "xyx", Character.valueOf('x'), "ingotAwesomeite", Character.valueOf('y'), "dustRedstone"))
}
// NOTE(review): unlike the other ore-dictionary accessors above, this one
// is NOT guarded by try/catch and will throw when no bronze dust is
// registered — confirm whether it should return null like its siblings.
def dustBronze = OreDictionary.getOres("dustBronze").get(0).getItem

// Mod identity and creative-tab icon, required by TAwesomeSauceMod.
def getTabIconItem: () => Item = () => ingotPureAwesomeite
def getTextureDomain: String = "awesomesaucecomponents"
def getModName: String = "AwesomeSauceComponents"
def getModID: String = "AwesomeSauceComponents"
}
| AwesomeSauceMods/AwesomeSauceCore | main/scala/com/awesomesauce/minecraft/forge/core/components/AwesomeSauceComponents.scala | Scala | mit | 8,969 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import sbt._
import sbt.Keys.version
import com.typesafe.tools.mima.core._
import com.typesafe.tools.mima.core.MissingClassProblem
import com.typesafe.tools.mima.core.MissingTypesProblem
import com.typesafe.tools.mima.core.ProblemFilters._
import com.typesafe.tools.mima.plugin.MimaKeys.{mimaBinaryIssueFilters, mimaPreviousArtifacts, mimaFailOnNoPrevious}
/** sbt helpers configuring MiMa (binary-compatibility checking) for Spark
  * modules: problem-filter builders plus the per-project settings.
  */
object MimaBuild {

  /** Excludes a single member (method or field) from MiMa checking, in all
    * the problem categories that can report it.
    */
  def excludeMember(fullName: String) = Seq(
    ProblemFilters.exclude[MissingMethodProblem](fullName),
    // Sometimes excluded methods have default arguments and
    // they are translated into public methods/fields($default$) in generated
    // bytecode. It is not possible to exhaustively list everything.
    // But this should be okay.
    ProblemFilters.exclude[MissingMethodProblem](fullName+"$default$2"),
    ProblemFilters.exclude[MissingMethodProblem](fullName+"$default$1"),
    ProblemFilters.exclude[MissingFieldProblem](fullName),
    ProblemFilters.exclude[IncompatibleResultTypeProblem](fullName),
    ProblemFilters.exclude[IncompatibleMethTypeProblem](fullName),
    ProblemFilters.exclude[IncompatibleFieldTypeProblem](fullName)
  )

  // Exclude a single class
  def excludeClass(className: String) = Seq(
    ProblemFilters.exclude[Problem](className + ".*"),
    ProblemFilters.exclude[MissingClassProblem](className),
    ProblemFilters.exclude[MissingTypesProblem](className)
  )

  // Exclude a Spark class, that is in the package org.apache.spark
  def excludeSparkClass(className: String) = {
    excludeClass("org.apache.spark." + className)
  }

  // Exclude a Spark package, that is in the package org.apache.spark
  def excludeSparkPackage(packageName: String) = {
    ProblemFilters.exclude[Problem]("org.apache.spark." + packageName + ".*")
  }

  /** Collects all ABI problem filters to ignore: the static defaults, the
    * generated per-class and per-member exclude files (one entry per line,
    * produced by the build), and the version-specific `MimaExcludes`.
    */
  def ignoredABIProblems(base: File, currentSparkVersion: String) = {

    // Excludes placed here will be used for all Spark versions
    val defaultExcludes = Seq()

    // Read package-private excludes from file
    val classExcludeFilePath = file(base.getAbsolutePath + "/.generated-mima-class-excludes")
    val memberExcludeFilePath = file(base.getAbsolutePath + "/.generated-mima-member-excludes")

    val ignoredClasses: Seq[String] =
      if (!classExcludeFilePath.exists()) {
        Seq()
      } else {
        IO.read(classExcludeFilePath).split("\n")
      }

    val ignoredMembers: Seq[String] =
      if (!memberExcludeFilePath.exists()) {
        Seq()
      } else {
        IO.read(memberExcludeFilePath).split("\n")
      }

    defaultExcludes ++ ignoredClasses.flatMap(excludeClass) ++
    ignoredMembers.flatMap(excludeMember) ++ MimaExcludes.excludes(currentSparkVersion)
  }

  /** MiMa settings for one project: compare against the published
    * `previousSparkVersion` artifact and apply the ignore filters.
    */
  def mimaSettings(sparkHome: File, projectRef: ProjectRef): Seq[Setting[_]] = {
    val organization = "org.apache.spark"
    val previousSparkVersion = "3.0.0"
    val project = projectRef.project
    val fullId = "spark-" + project + "_2.12"

    Seq(
      mimaFailOnNoPrevious := true,
      mimaPreviousArtifacts := Set(organization % fullId % previousSparkVersion),
      mimaBinaryIssueFilters ++= ignoredABIProblems(sparkHome, version.value)
    )
  }

}
| wangmiao1981/spark | project/MimaBuild.scala | Scala | apache-2.0 | 3,957 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package docs.home.scaladsl.serialization.v2a

// Schema evolution example: v2a adds an optional `discount` field to the
// v1 ItemAdded event. The //#add-optional markers delimit the snippet
// included in the documentation — keep them intact.
//#add-optional
case class ItemAdded(shoppingCartId: String, productId: String, quantity: Int, discount: Option[BigDecimal])
//#add-optional
| rcavalcanti/lagom | docs/manual/scala/guide/cluster/code/docs/home/scaladsl/serialization/v2a/ItemAdded.scala | Scala | apache-2.0 | 265 |
/**
* (c) Copyright 2013 WibiData, Inc.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kiji.express.flow
import java.io.OutputStream
import java.util.Properties
import scala.Some
import scala.collection.mutable.Buffer
import scala.collection.JavaConverters.asScalaIteratorConverter
import scala.collection.JavaConverters.collectionAsScalaIterableConverter
import scala.collection.JavaConverters.mapAsScalaMapConverter
import scala.collection.JavaConverters.mapAsJavaMapConverter
import cascading.flow.FlowProcess
import cascading.scheme.SinkCall
import cascading.tap.Tap
import cascading.tuple.Fields
import cascading.tuple.Tuple
import cascading.tuple.TupleEntry
import com.twitter.scalding.AccessMode
import com.twitter.scalding.Mode
import com.twitter.scalding.Read
import com.twitter.scalding.Source
import com.twitter.scalding.Write
import com.twitter.scalding.Test
import com.twitter.scalding.HadoopTest
import com.twitter.scalding.Hdfs
import com.twitter.scalding.Local
import com.google.common.base.Objects
import cascading.flow.hadoop.util.HadoopUtil
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.mapred.JobConf
import org.kiji.annotations.ApiAudience
import org.kiji.annotations.ApiStability
import org.kiji.schema.EntityIdFactory
import org.kiji.schema.Kiji
import org.kiji.schema.KijiColumnName
import org.kiji.schema.KijiDataRequest
import org.kiji.schema.KijiTableReader
import org.kiji.schema.KijiTableWriter
import org.kiji.schema.KijiTable
import org.kiji.schema.KijiRowScanner
import org.kiji.schema.KijiRowData
import org.kiji.schema.KijiURI
import org.kiji.schema.layout.ColumnReaderSpec
import org.kiji.schema.KijiDataRequest.Column
import org.kiji.schema.KijiTableReader.KijiScannerOptions
import org.kiji.express.flow.framework.BaseKijiScheme
import org.kiji.express.flow.framework.DirectKijiSinkContext
import org.kiji.express.flow.framework.KijiTap
import org.kiji.express.flow.framework.KijiScheme
import org.kiji.express.flow.framework.LocalKijiTap
import org.kiji.express.flow.framework.LocalKijiScheme
import org.kiji.express.flow.util.ResourceUtil
/**
* A read or write view of a Kiji table.
*
* A Scalding `Source` provides a view of a data source that can be read as Scalding tuples. It
* is comprised of a Cascading tap [[cascading.tap.Tap]], which describes where the data is and how
* to access it, and a Cascading Scheme [[cascading.scheme.Scheme]], which describes how to read
* and interpret the data.
*
* When reading from a Kiji table, a `KijiSource` will provide a view of a Kiji table as a
* collection of tuples that correspond to rows from the Kiji table. Which columns will be read
* and how they are associated with tuple fields can be configured,
* as well as the time span that cells retrieved must belong to.
*
* When writing to a Kiji table, a `KijiSource` views a Kiji table as a collection of tuples that
* correspond to cells from the Kiji table. Each tuple to be written must provide a cell address
* by specifying a Kiji `EntityID` in the tuple field `entityId`, a value to be written in a
* configurable field, and (optionally) a timestamp in a configurable field.
*
* End-users cannot directly obtain instances of `KijiSource`. Instead,
* they should use the factory methods provided as part of the [[org.kiji.express.flow]] module.
*
* @param tableAddress is a Kiji URI addressing the Kiji table to read or write to.
* @param timeRange that cells read must belong to. Ignored when the source is used to write.
* @param timestampField is the name of a tuple field that will contain cell timestamp when the
* source is used for writing. Specify `None` to write all cells at the current time.
* @param inputColumns is a one-to-one mapping from field names to Kiji columns. The columns in the
* map will be read into their associated tuple fields.
* @param outputColumns is a one-to-one mapping from field names to Kiji columns. Values from the
* tuple fields will be written to their associated column.
* @param rowRangeSpec is the specification for which interval of rows to scan.
* @param rowFilterSpec is the specification for which row filter to apply.
*/
@ApiAudience.Framework
@ApiStability.Stable
final class KijiSource private[express] (
    val tableAddress: String,
    val timeRange: TimeRangeSpec,
    val timestampField: Option[Symbol],
    val inputColumns: Map[Symbol, ColumnInputSpec] = Map(),
    val outputColumns: Map[Symbol, ColumnOutputSpec] = Map(),
    val rowRangeSpec: RowRangeSpec = RowRangeSpec.All,
    val rowFilterSpec: RowFilterSpec = RowFilterSpec.NoFilter
) extends Source {
  import KijiSource._

  /** The URI of the target Kiji table. */
  private val uri: KijiURI = KijiURI.newBuilder(tableAddress).build()

  /** A Kiji scheme intended to be used with Scalding/Cascading's hdfs mode. */
  val kijiScheme: KijiScheme =
      new KijiScheme(
          tableAddress,
          timeRange,
          timestampField,
          // Schemes are keyed by field-name strings, so Symbol keys are converted here.
          convertKeysToStrings(inputColumns),
          convertKeysToStrings(outputColumns),
          rowRangeSpec,
          rowFilterSpec)

  /** A Kiji scheme intended to be used with Scalding/Cascading's local mode. */
  val localKijiScheme: LocalKijiScheme =
      new LocalKijiScheme(
          uri,
          timeRange,
          timestampField,
          convertKeysToStrings(inputColumns),
          convertKeysToStrings(outputColumns),
          rowRangeSpec,
          rowFilterSpec)

  /**
   * Create a connection to the physical data source (also known as a Tap in Cascading)
   * which, in this case, is a [[org.kiji.schema.KijiTable]].
   *
   * The tap returned depends both on the Scalding run mode (production hdfs/local vs.
   * the two test modes) and, in test modes, on whether the source is being read or written.
   *
   * @param readOrWrite Specifies if this source is to be used for reading or writing.
   * @param mode Specifies which job runner/flow planner is being used.
   * @return A tap to use for this data source.
   */
  override def createTap(readOrWrite: AccessMode)(implicit mode: Mode): Tap[_, _, _] = {
    /** Combination of normal input columns and input versions of the output columns (the latter are
     * needed for reading back written results) */
    def getInputColumnsForTesting: Map[String, ColumnInputSpec] = {
      val columnsFromReads = inputColumnSpecifyAllData(convertKeysToStrings(inputColumns))
      val columnsFromWrites = inputColumnSpecifyAllData(
          convertKeysToStrings(outputColumns)
              .mapValues { colSpec: ColumnOutputSpec =>
                ColumnInputSpec(colSpec.columnName.toString, schemaSpec = colSpec.schemaSpec)
              })
      columnsFromReads ++ columnsFromWrites
    }

    mode match {
      // Production taps.
      case Hdfs(_,_) => new KijiTap(uri, kijiScheme).asInstanceOf[Tap[_, _, _]]
      case Local(_) => new LocalKijiTap(uri, localKijiScheme).asInstanceOf[Tap[_, _, _]]

      // Test taps.
      case HadoopTest(conf, buffers) => readOrWrite match {
        case Read => {
          // Read-side test: pre-populate the test table from the buffered tuples so the
          // job under test can read them back through the production scheme.
          val scheme = kijiScheme
          populateTestTable(uri, buffers(this), scheme.getSourceFields, conf)
          new KijiTap(uri, scheme).asInstanceOf[Tap[_, _, _]]
        }
        case Write => {
          // Write-side test: the test scheme also requests input versions of the output
          // columns so written results can be inspected afterwards.
          val scheme = new TestKijiScheme(
              uri,
              timestampField,
              getInputColumnsForTesting,
              convertKeysToStrings(outputColumns),
              rowRangeSpec,
              rowFilterSpec)
          new KijiTap(uri, scheme).asInstanceOf[Tap[_, _, _]]
        }
      }
      case Test(buffers) => readOrWrite match {
        // Use Kiji's local tap and scheme when reading.
        case Read => {
          val scheme = localKijiScheme
          populateTestTable(
              uri,
              buffers(this),
              scheme.getSourceFields,
              HBaseConfiguration.create())
          new LocalKijiTap(uri, scheme).asInstanceOf[Tap[_, _, _]]
        }
        // After performing a write, use TestLocalKijiScheme to populate the output buffer.
        case Write => {
          val scheme = new TestLocalKijiScheme(
              buffers(this),
              uri,
              timeRange,
              timestampField,
              getInputColumnsForTesting,
              convertKeysToStrings(outputColumns),
              rowRangeSpec,
              rowFilterSpec)
          new LocalKijiTap(uri, scheme).asInstanceOf[Tap[_, _, _]]
        }
      }
      case _ => throw new RuntimeException("Trying to create invalid tap")
    }
  }

  override def toString: String =
      Objects
          .toStringHelper(this)
          .add("tableAddress", tableAddress)
          .add("timeRangeSpec", timeRange)
          .add("timestampField", timestampField)
          .add("inputColumns", inputColumns)
          .add("outputColumns", outputColumns)
          .add("rowRangeSpec", rowRangeSpec)
          .add("rowFilterSpec", rowFilterSpec)
          .toString

  // Equality compares every constructor field; hashCode below is kept consistent with it.
  override def equals(obj: Any): Boolean = obj match {
    case other: KijiSource => (
        tableAddress == other.tableAddress
        && inputColumns == other.inputColumns
        && outputColumns == other.outputColumns
        && timestampField == other.timestampField
        && timeRange == other.timeRange
        && rowRangeSpec == other.rowRangeSpec
        && rowFilterSpec == other.rowFilterSpec)
    case _ => false
  }

  override def hashCode(): Int =
      Objects.hashCode(
          tableAddress,
          inputColumns,
          outputColumns,
          timestampField,
          timeRange,
          rowRangeSpec,
          rowFilterSpec)
}
/**
* Contains a private, inner class used by [[org.kiji.express.flow.KijiSource]] when working with
* tests.
*/
@ApiAudience.Framework
@ApiStability.Stable
private[express] object KijiSource {
  /**
   * Construct a KijiSource and create a connection to the physical data source
   * (also known as a Tap in Cascading) which, in this case, is a [[org.kiji.schema.KijiTable]].
   * This method is meant to be used by kiji-express-cascading's Java TapBuilder.
   * Scala users ought to construct and create their taps via the provided class methods.
   *
   * @param tableAddress is a Kiji URI addressing the Kiji table to read or write to.
   * @param timeRange that cells read must belong to. Ignored when the source is used to write.
   * @param timestampField is the name of a tuple field that will contain cell timestamp when the
   *     source is used for writing. Specify `None` to write all cells at the current time.
   * @param inputColumns is a one-to-one mapping from field names to Kiji columns.
   *     The columns in the map will be read into their associated tuple fields.
   * @param outputColumns is a one-to-one mapping from field names to Kiji columns. Values from the
   *     tuple fields will be written to their associated column.
   * @return A tap to use for this data source.
   */
  private[express] def makeTap(
      tableAddress: String,
      timeRange: TimeRangeSpec,
      timestampField: String,
      inputColumns: java.util.Map[String, ColumnInputSpec],
      outputColumns: java.util.Map[String, ColumnOutputSpec]
  ): Tap[_, _, _] = {
    // NOTE(review): Symbol(timestampField) is evaluated before Option(...) wraps it, so a
    // null timestampField would NPE here rather than become None — confirm callers never pass null.
    val kijiSource = new KijiSource(
        tableAddress,
        timeRange,
        Option(Symbol(timestampField)),
        inputColumns.asScala.toMap
            .map{ case (symbolName, column) => (Symbol(symbolName), column) },
        outputColumns.asScala.toMap
            .map{ case (symbolName, column) => (Symbol(symbolName), column) }
    )
    new KijiTap(kijiSource.uri, kijiSource.kijiScheme)
  }

  /**
   * Convert a column mapping keyed by Scala symbols into the same mapping keyed by the
   * symbols' string names (the form expected by the schemes).
   *
   * @param columnMap Mapping from field name symbol to column definition.
   * @return Map from field name string to column definition.
   */
  private[express] def convertKeysToStrings[T <: Any](columnMap: Map[Symbol, T]): Map[String, T] =
      columnMap.map { case (symbol, column) => (symbol.name, column) }

  // Test specific code below here.

  /**
   * Takes a buffer containing rows and writes them to the table at the specified uri.
   *
   * @param tableUri of the table to populate.
   * @param rows Tuples to write to populate the table with.
   * @param fields Field names for elements in the tuple. The first field is assumed to hold
   *     the row's EntityId; the remaining fields each hold a timeline of cells.
   * @param configuration defining the cluster to use.
   */
  private def populateTestTable(
      tableUri: KijiURI,
      rows: Option[Buffer[Tuple]],
      fields: Fields,
      configuration: Configuration) {
    ResourceUtil.doAndRelease(Kiji.Factory.open(tableUri)) { kiji: Kiji =>
      // Layout to get the default reader schemas from.
      val layout = ResourceUtil.withKijiTable(tableUri, configuration) { table: KijiTable =>
        table.getLayout
      }

      val eidFactory = EntityIdFactory.getFactory(layout)

      // Write the desired rows to the table.
      ResourceUtil.withKijiTableWriter(tableUri, configuration) { writer: KijiTableWriter =>
        rows.toSeq.flatten.foreach { row: Tuple =>
          val tupleEntry = new TupleEntry(fields, row)
          val iterator = fields.iterator()

          // Get the entity id field.
          val entityIdField = iterator.next().toString
          val entityId = tupleEntry
              .getObject(entityIdField)
              .asInstanceOf[EntityId]

          // Iterate through fields in the tuple, adding each one.
          while (iterator.hasNext) {
            val field = iterator.next().toString

            // Get the timeline to be written.
            val cells: Seq[FlowCell[Any]] = tupleEntry
                .getObject(field)
                .asInstanceOf[Seq[FlowCell[Any]]]

            // Write the timeline to the table.
            cells.foreach { cell: FlowCell[Any] =>
              writer.put(
                  entityId.toJavaEntityId(eidFactory),
                  cell.family,
                  cell.qualifier,
                  cell.version,
                  cell.datum
              )
            }
          }
        }
      }
    }
  }

  /**
   * Build a copy of a column input spec that requests every version of the column
   * (maxVersions = Integer.MAX_VALUE), preserving its filter, paging and schema specs.
   *
   * @param col the input spec to widen.
   * @return an equivalent spec requesting all versions.
   */
  private[express] def newGetAllData(col: ColumnInputSpec): ColumnInputSpec = {
    ColumnInputSpec(
        col.columnName.toString,
        Integer.MAX_VALUE,
        col.filterSpec,
        col.pagingSpec,
        col.schemaSpec)
  }

  /**
   * Returns a map from field name to column input spec where the column input spec has been
   * configured as an output column.
   *
   * This is used in tests, when we use KijiScheme to read tuples from a Kiji table, and we want
   * to read all data in all of the columns, so the test can inspect all data in the table.
   *
   * @param columns to transform.
   * @return transformed map where the column input specs are configured for output.
   */
  private def inputColumnSpecifyAllData(
      columns: Map[String, ColumnInputSpec]): Map[String, ColumnInputSpec] = {
    columns.mapValues(newGetAllData)
        // Need this to make the Map serializable (issue with mapValues)
        .map(identity)
  }

  /**
   * A LocalKijiScheme that loads rows in a table into the provided buffer. This class
   * should only be used during tests.
   *
   * @param buffer to fill with post-job table rows for tests.
   * @param timeRange of timestamps to read from each column.
   * @param timestampField is the name of a tuple field that will contain cell timestamp when the
   *     source is used for writing. Specify `None` to write all cells at the current time.
   * @param inputColumns is a map of Scalding field name to ColumnInputSpec.
   * @param outputColumns is a map of ColumnOutputSpec to Scalding field name.
   * @param rowRangeSpec is the specification for which interval of rows to scan.
   * @param rowFilterSpec is the specification for which row filter to apply.
   */
  private class TestLocalKijiScheme(
      val buffer: Option[Buffer[Tuple]],
      uri: KijiURI,
      timeRange: TimeRangeSpec,
      timestampField: Option[Symbol],
      inputColumns: Map[String, ColumnInputSpec],
      outputColumns: Map[String, ColumnOutputSpec],
      rowRangeSpec: RowRangeSpec,
      rowFilterSpec: RowFilterSpec)
      extends LocalKijiScheme(
          uri,
          timeRange,
          timestampField,
          inputColumnSpecifyAllData(inputColumns),
          outputColumns,
          rowRangeSpec,
          rowFilterSpec) {
    // After the sink finishes, scan the whole table back into `buffer` so tests can
    // inspect everything that was written.
    override def sinkCleanup(
        process: FlowProcess[Properties],
        sinkCall: SinkCall[DirectKijiSinkContext, OutputStream]) {
      // flush table writer
      sinkCall.getContext.writer.flush()
      // Store the output table.
      val conf: JobConf =
          HadoopUtil.createJobConf(process.getConfigCopy, new JobConf(HBaseConfiguration.create()))

      // Read table into buffer.
      ResourceUtil.withKijiTable(uri, conf) { table: KijiTable =>
        // We also want the entire time range, so the test can inspect all data in the table.
        val request: KijiDataRequest =
            BaseKijiScheme.buildRequest(table.getLayout, TimeRangeSpec.All, inputColumns.values)

        // Carry each requested column's reader spec over to the table reader.
        val overrides: Map[KijiColumnName, ColumnReaderSpec] =
            request
                .getColumns
                .asScala
                .map { column: Column => (column.getColumnName, column.getReaderSpec)}
                .toMap
        val tableReader: KijiTableReader = table.getReaderFactory.readerBuilder()
            .withColumnReaderSpecOverrides(overrides.asJava)
            .buildAndOpen()

        ResourceUtil.doAndClose(tableReader) { reader =>
          // Set up scanning options.
          val eidFactory = EntityIdFactory.getFactory(table.getLayout)
          val scannerOptions = new KijiScannerOptions()
          scannerOptions.setKijiRowFilter(
              rowFilterSpec.toKijiRowFilter.getOrElse(null))
          scannerOptions.setStartRow(
            rowRangeSpec.startEntityId match {
              case Some(entityId) => entityId.toJavaEntityId(eidFactory)
              case None => null
            }
          )
          scannerOptions.setStopRow(
            rowRangeSpec.limitEntityId match {
              case Some(entityId) => entityId.toJavaEntityId(eidFactory)
              case None => null
            }
          )
          ResourceUtil.doAndClose(reader.getScanner(request, scannerOptions)) {
            scanner: KijiRowScanner =>
              scanner.iterator().asScala.foreach { row: KijiRowData =>
                val tuple = KijiScheme.rowToTuple(
                    inputColumns,
                    getSourceFields,
                    timestampField,
                    row)

                val newTupleValues = tuple
                    .iterator()
                    .asScala
                    .map {
                      // This converts stream into a list to force the stream to compute all of the
                      // transformations that have been applied lazily to it. This is necessary
                      // because some of the transformations applied in KijiScheme#rowToTuple have
                      // dependencies on an open connection to a schema table.
                      case stream: Stream[_] => stream.toList
                      case x => x
                    }
                    .toSeq

                buffer.foreach { _ += new Tuple(newTupleValues: _*) }
              }
          }
        }
      }
      super.sinkCleanup(process, sinkCall)
    }
  }

  /**
   * Merges an input column mapping with an output column mapping producing an input column mapping.
   * This is used to configure input columns for reading back written data on a source that has just
   * been used as a sink.
   *
   * Output entries win on key collision, requesting all versions with the output's schema spec.
   *
   * @param inputs describing which columns to request and what fields to associate them with.
   * @param outputs describing which columns fields should be output to.
   * @return a merged mapping from field names to input column requests.
   */
  private def mergeColumnMapping(
      inputs: Map[String, ColumnInputSpec],
      outputs: Map[String, ColumnOutputSpec]
  ): Map[String, ColumnInputSpec] = {
    // Fold one output entry into the accumulated input mapping.
    def mergeEntry(
        inputs: Map[String, ColumnInputSpec],
        entry: (String, ColumnOutputSpec)
    ): Map[String, ColumnInputSpec] = {
      val (fieldName, columnRequest) = entry
      val input = ColumnInputSpec(
          column = columnRequest.columnName.getName,
          maxVersions = Int.MaxValue,
          schemaSpec = columnRequest.schemaSpec
      )
      inputs + ((fieldName, input))
    }
    outputs.foldLeft(inputs)(mergeEntry)
  }

  /**
   * A KijiScheme that loads rows in a table into the provided buffer. This class should only be
   * used during tests.
   *
   * @param timestampField is the name of a tuple field that will contain cell timestamp when the
   *     source is used for writing. Specify `None` to write all cells at the current time.
   * @param inputColumns Scalding field name to column input spec mapping.
   * @param outputColumns Scalding field name to column output spec mapping.
   * @param rowRangeSpec is the specification for which interval of rows to scan.
   * @param rowFilterSpec is the specification for which filter to apply.
   */
  private class TestKijiScheme(
      uri: KijiURI,
      timestampField: Option[Symbol],
      inputColumns: Map[String, ColumnInputSpec],
      outputColumns: Map[String, ColumnOutputSpec],
      rowRangeSpec: RowRangeSpec,
      rowFilterSpec: RowFilterSpec)
      extends KijiScheme(
          uri.toString,
          TimeRangeSpec.All,
          timestampField,
          mergeColumnMapping(inputColumns, outputColumns),
          outputColumns,
          rowRangeSpec,
          rowFilterSpec) {
  }
}
| kijiproject/kiji-express | kiji-express/src/main/scala/org/kiji/express/flow/KijiSource.scala | Scala | apache-2.0 | 22,185 |
package core.scala
import java.util.Arrays
/**
* provides simple space filling curves methods
*/
object SfcFunction {

  /** Number of usable bits in a non-negative Long z-order key (the sign bit is excluded). */
  val LongZKeyBits: Int = 63

  /**
   * Builds a function that maps a rectangle to its symmetric cyclic z-order key:
   * 1. get center point
   * 2. normalize to unit cube
   * 3. map to integer array
   * 4. compute z-order key
   *
   * @param universe bounding rectangle of the whole data space.
   * @param universeSideLength per-dimension side lengths of the universe.
   * @return function from rectangle to symmetric z-order value as long.
   */
  def getSymmetricCyclicZOrderKeyFunction(universe: RectangleTuple, universeSideLength : Array[Double])= (r : RectangleTuple ) => {
    val intPoint = normalize(universe, universeSideLength, r)
    computeSymmetricCyclicZOrderKey(intPoint)
  }

  /**
   * Builds a key function for rectangles already normalized to the unit cube:
   * 1. get center point
   * 2. map to integer array
   * 3. compute z-order key
   *
   * @return function from rectangle to symmetric z-order value as long.
   */
  def getSymmetricCyclicZOrderKeyFunctionNormalizedInput() = (r : RectangleTuple) => {
    val center : Array[Double] = r.getCenter()
    computeSymmetricCyclicZOrderKey ( mapToIntValues (center))
  }

  /**
   * Normalizes a rectangle's center to the unit cube and maps it to integer coordinates.
   *
   * Since we have 63 bits for a key, each dimension gets (63 / dimension) - 1 bits of
   * resolution for the integer mapping.
   *
   * NOTE(review): `1 << shift` is an Int shift; for dimension == 1 the shift is 62, which
   * Int shift semantics reduce mod 32 — 1-D keys would be wrong. Confirm callers always
   * use dimension >= 2 (shift <= 30 is safe).
   *
   * @param universe bounding rectangle of the whole data space.
   * @param universeSideLength per-dimension side lengths of the universe.
   * @param r the rectangle to normalize.
   * @return normalized integer point coordinates.
   */
  def normalize(universe: RectangleTuple, universeSideLength : Array[Double], r : RectangleTuple): Array[Int] = {
    val leftLowerUniversePoint = universe.left
    val center : Array[Double] = r.getCenter()
    // shift into the universe's frame and normalize to the unit cube
    for (i <- 0 until center.length) {
      center(i) = (center(i) - leftLowerUniversePoint(i)) / universeSideLength(i)
    }
    // scale unit coordinates up to the per-dimension integer resolution
    val shift : Int = (LongZKeyBits / r.dimension) - 1
    val factor : Int = 1 << shift
    center.map ( d => (d * (factor)).intValue())
  }

  /**
   * Maps a point already normalized to the unit cube onto integer coordinates.
   *
   * @param normalizedPoint point represented as array of double values in [0, 1].
   * @return array of integers.
   */
  def mapToIntValues( normalizedPoint : Array[Double]): Array[Int] = {
    val shift : Int = (LongZKeyBits / normalizedPoint.length) - 1
    val factor : Int = 1 << shift
    normalizedPoint.map ( d => (d * (factor)).intValue())
  }

  /**
   * Simple symmetric cyclic z-order function (non optimized): interleaves the bits of all
   * dimensions, cycling through dimensions from the most significant bit downwards.
   *
   * @param point integer coordinates, one entry per dimension.
   * @return symmetric z-order value as long.
   */
  def computeSymmetricCyclicZOrderKey(point: Array[Int]): Long = {
    val bitPerDim : Int = LongZKeyBits / point.length
    var sfcKey: Long = 0L
    val dimension : Int = point.length
    // per-dimension mask starting at each dimension's most significant usable bit
    val dimMasks : Array[Int] = new Array[Int](point.length)
    Arrays.fill(dimMasks, 1 << (bitPerDim - 1))
    var k = dimension * bitPerDim
    for (i <- 0 until (dimension * bitPerDim)) {
      var pointI = point(i % dimension) // cyclic symmetric
      pointI = pointI & dimMasks(i % dimension)
      dimMasks(i % dimension) = dimMasks(i % dimension) >> 1
      if (pointI != 0) {
        // BUG FIX: this must be a Long shift. The previous Int expression `1 << (k-1)`
        // masks the shift distance to 5 bits, so for k - 1 >= 31 (any multi-dimensional
        // key uses up to 62 bits) it set the wrong bit, or sign-extended 1 << 31 into a
        // negative Long and corrupted the whole key.
        sfcKey |= 1L << (k - 1)
      }
      k -= 1
    }
    sfcKey
  }
}
/*
* Copyright 2016 Coral realtime streaming analytics (http://coral-streaming.github.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.coral.actors
import io.coral.actors.transform.ZscoreActor
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.JObject
import scala.collection.mutable.{Map => mMap, ListBuffer}
/**
* Class that represents a collect definition.
* A collect definition is a JSON array stating the external data
* that a Coral actor needs to fulfill a trigger request.
* These fields are specified in the constructor of the actor.
*/
object CollectDef {
  implicit val formats = org.json4s.DefaultFormats

  // Human-readable validation error messages, returned to the caller in the "errors" array.
  val cannotCollectFromSelf = "There are actors that try to collect state from themselves. " +
      "Please make sure that no actors collect state from themselves."
  val noCollectSpecified = "There are actors with missing collect definitions. Please make sure that all " +
      "actors that expect a collect definition have defined one."
  val collectsWithEmptyAlias = "There are collect definitions with an empty or missing 'alias' value. " +
      "Please make sure that each collect definition has a filled in 'alias' value."
  val collectsWithEmptyFrom = "There are collect definitions with an empty or missing 'from' value. " +
      "Please make sure that each collect definition has a filled in 'from' value."
  val collectsWithEmptyField = "There are collect definitions with an empty or missing 'field' value. " +
      "Please make sure that each collect definition has a filled in 'field' value."
  val duplicateAliases = "There are duplicate alias names. " +
      "Please make sure that each alias name is unique."
  val aliasNotMatchingDefinition = "There are alias names that do not match the definition of " +
      "the actor to which the collect definition belongs. Please make sure that you check the " +
      "documentation for the required names for each collect field."
  val notAlphaNumeric = "There are field, alias or from values which do not consist solely of " +
      "alphanumeric characters. Please make sure each of these values is a " +
      "string value with only alphanumeric characters."
  val fromNonExistingActor = "A collect is specified that collects from an actor that is " +
      "not present in the runtime definition. Please make sure that each collect 'from' field refers to an" +
      "actor name that actually exists in the runtime definition."

  /**
   * Checks whether the supplied actor constructor is valid.
   * If the actor collects information from other actors, the list of
   * collect aliases is used to check if they are all present.
   * @param json The JSON constructor of the actor
   * @param expectedAliases The expected aliases of that actor.
   *                        Each alias must occur exactly once in the constructor.
   * @return Either a Left(JObject) with errors or a Right(true) if it is
   *         a valid collect definition for that actor.
   */
  def validCollectDef(json: JObject, expectedAliases: List[String]): Either[JObject, Boolean] = {
    // True when the "collect" array is absent or empty.
    def noCollect(json: JObject): Boolean = {
      val collect = (json \\ "collect").extractOpt[JArray]
      val noneAtAll = !collect.isDefined
      val empty = if (noneAtAll) true else collect.get.children.size == 0
      noneAtAll || empty
    }

    // True when the same alias appears more than once.
    def doubleAliases(json: JObject): Boolean = {
      val list = getAll(json, "alias")
      list.distinct.size != list.size
    }

    // True when the aliases do not match the set required by the actor's type.
    // NOTE(review): this looks up aliases per actor type itself, rather than using the
    // `expectedAliases` parameter — confirm the two are meant to stay independent.
    def aliasesDoNotMatch(json: JObject): Boolean = {
      val aliases: List[String] = getAll(json, "alias")
      val actorType = (json \\ "type").extractOpt[String]

      if (!actorType.isDefined) {
        false
      } else {
        val expectedAliases = actorType.get match {
          case "zscore" => ZscoreActor.collectAliases
          case _ => List()
          // Add other actors with collect aliases here
        }

        aliases.sorted != expectedAliases.sorted
      }
    }

    def emptyAliases(json: JObject): Boolean = {
      empty(json, "alias")
    }

    def emptyFroms(json: JObject): Boolean = {
      empty(json, "from")
    }

    def emptyFields(json: JObject): Boolean = {
      empty(json, "field")
    }

    // Collect the string values of `value` across all entries of the "collect" array,
    // skipping entries where the key is absent.
    def getAll(json: JObject, value: String): List[String] = {
      val array = (json \\ "collect").extractOpt[JArray]

      array match {
        case None => List()
        case Some(a) =>
          a.asInstanceOf[JArray].children.map(c => {
            val x = (c \\ value).extractOpt[String]
            if (x.isDefined) x.get else null
          }).filter(_ != null)
      }
    }

    // True when any collect entry defines `value` as a string.
    // NOTE(review): not referenced by any check below — possibly dead code; confirm.
    def defined(json: JObject, value: String): Boolean = {
      forAnyCollect(c => {
        val result = (c \\ value).extractOpt[String].isDefined
        result
      }, false)
    }

    // True when any collect entry has `value` missing or equal to the empty string.
    def empty(json: JObject, value: String): Boolean = {
      forAnyCollect(c => {
        val v = (c \\ value).extractOpt[String]

        if (v.isDefined) {
          if (v.get.isEmpty) true else false
        } else true
      }, false)
    }

    // Applies the predicate to each collect entry; `default` is returned when
    // there is no "collect" array at all.
    def forAnyCollect(f: JValue => Boolean, default: Boolean): Boolean = {
      val array = (json \\ "collect").extractOpt[JArray]

      array match {
        case None => default
        case Some(a) => a.children.exists(f)
      }
    }

    // True when any alias/from/field value contains a non-alphanumeric character
    // (underscores allowed).
    def notOnlyAlphaNumeric(json: JObject): Boolean = {
      val alphaNumeric = "^[a-zA-Z0-9_]*$"

      !getAll(json, "alias").forall(_.matches(alphaNumeric)) ||
      !getAll(json, "from").forall(_.matches(alphaNumeric)) ||
      !getAll(json, "field").forall(_.matches(alphaNumeric))
    }

    // True when a collect's "from" points at this actor's own name.
    def referringToSelf(json: JObject): Boolean = {
      val selfName = (json \\ "name").extract[String]
      getAll(json, "from").contains(selfName)
    }

    var errors = ListBuffer.empty[String]

    if (expectedAliases.length == 0) {
      throw new Exception("Cannot validate collect definition with empty alias list.")
    }

    if (noCollect(json)) {
      // If this happens, don't bother with checking the rest
      errors += noCollectSpecified
    } else {
      val emptyAlias = emptyAliases(json)
      val emptyFrom = emptyFroms(json)
      val emptyField = emptyFields(json)
      val duplicateAlias = doubleAliases(json)

      if (emptyAlias) errors += collectsWithEmptyAlias
      if (emptyFrom) errors += collectsWithEmptyFrom
      if (emptyField) errors += collectsWithEmptyField
      if (duplicateAlias) errors += duplicateAliases
      if (notOnlyAlphaNumeric(json)) errors += notAlphaNumeric
      if (referringToSelf(json)) errors += cannotCollectFromSelf

      // Only complain about not matching definition if other errors are fixed
      if (!emptyAlias && !emptyFrom && !emptyField && !duplicateAlias) {
        if (aliasesDoNotMatch(json)) errors += aliasNotMatchingDefinition
      }
    }

    if (errors.isEmpty) {
      Right(true)
    } else {
      Left(("success" -> false) ~ ("errors" -> JArray(errors.map(JString(_)).toList)))
    }
  }

  /**
   * Parse a CollectDef from a JSON constructor.
   * @param json The JSON constructor of the CoralActor to parse the
   *             CollectDef from.
   * @return Some(def) if parsing succeeded, None otherwise.
   */
  def get(json: JObject): Option[CollectDef] = {
    val collects = (json \\ "params" \\ "collect").extractOpt[JArray]
    val result = new CollectDef()

    // NOTE(review): when no collect array is present, the `None => None` branch value is
    // discarded and Some(empty CollectDef) is still returned below — confirm this is intended.
    collects match {
      case Some(array) =>
        array.arr.foreach(c => {
          val alias = (c \\ "alias").extractOpt[String]
          val from = (c \\ "from").extractOpt[String]
          val field = (c \\ "field").extractOpt[String]
          val data = (c \\ "data").extractOpt[JObject]

          if (alias.isDefined && from.isDefined) {
            if (field.isDefined) {
              // It is a static field query
              result.putFieldCollectDef(alias.get, (from.get, field.get))
            } else if (data.isDefined) {
              // It is a JSON request
              result.putJsonCollectDef(alias.get, (from.get, data.get))
            }
          } else {
            // If just one field is missing, fail
            // (nonlocal return: exits `get` from inside the foreach closure)
            return None
          }
        })
      case None => None
    }

    Some(result)
  }
}
/**
* Represents the "collect" definition of a CoralActor.
* The collect definition states all external data dependencies
* that a CoralActor has on other actors.
*
* There are two types of collect definitions:
* - fields: Collect a static field from the state of another actor
* - jsons: Give another actor a JSON object, let it process
* the JSON object and return with the answer
*/
class CollectDef {

  /**
   * Static-field collect definitions: alias -> (source actor name, state field name).
   */
  val fields = mMap.empty[String, (String, String)]

  /**
   * JSON-request collect definitions: alias -> (target actor name, JSON payload).
   */
  val jsons = mMap.empty[String, (String, JObject)]

  /**
   * Registers a JSON collect definition under the given alias.
   *
   * @param alias Name under which the definition can later be looked up.
   * @param actorAndJson Target actor name paired with the JSON object to send it.
   */
  def putJsonCollectDef(alias: String, actorAndJson: (String, JObject)): Option[(String, JObject)] =
    jsons.put(alias, actorAndJson)

  /**
   * Registers a static-field collect definition under the given alias.
   *
   * @param alias Name under which the definition can later be looked up.
   * @param actorAndField Source actor name paired with the state field to read.
   */
  def putFieldCollectDef(alias: String, actorAndField: (String, String)): Option[(String, String)] =
    fields.put(alias, actorAndField)

  /**
   * Looks up a static-field collect definition.
   *
   * @param alias Alias of the definition.
   * @return Some((actor name, field)) when registered, None otherwise.
   */
  def getFieldCollectDef(alias: String): Option[(String, String)] =
    fields.get(alias)

  /**
   * Looks up a JSON collect definition.
   *
   * @param alias Alias of the definition.
   * @return Some((actor name, JSON)) when registered, None otherwise.
   */
  def getJsonCollectDef(alias: String): Option[(String, JObject)] =
    jsons.get(alias)
}
/**
* Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package akka
import language.implicitConversions
import akka.actor.ActorSystem
import scala.concurrent.duration.{ Duration, FiniteDuration }
import scala.reflect.ClassTag
import scala.collection.immutable
import java.util.concurrent.TimeUnit.MILLISECONDS
package object testkit {
  /**
   * Mutes the given event filters on the system's event stream, runs `block`, waits for
   * every filter to see its expected events (within TestEventFilterLeeway), and unmutes
   * the filters again in a finally block. Throws AssertionError listing the filters that
   * did not complete in time.
   */
  def filterEvents[T](eventFilters: Iterable[EventFilter])(block: ⇒ T)(implicit system: ActorSystem): T = {
    def now = System.currentTimeMillis

    system.eventStream.publish(TestEvent.Mute(eventFilters.to[immutable.Seq]))
    try {
      val result = block

      val testKitSettings = TestKitSettings //@note IMPLEMENT IN SCALA.JS TestKitExtension(system)
      // Deadline shared across all filters: each awaitDone gets the remaining leeway.
      val stop = now + testKitSettings.TestEventFilterLeeway.toMillis
      val failed = eventFilters filterNot (_.awaitDone(Duration(stop - now, MILLISECONDS))) map ("Timeout (" + testKitSettings.TestEventFilterLeeway + ") waiting for " + _)
      if (failed.nonEmpty)
        throw new AssertionError("Filter completion error:\\n" + failed.mkString("\\n"))

      result
    } finally {
      // Always restore event publishing, even when block or a filter check throws.
      system.eventStream.publish(TestEvent.UnMute(eventFilters.to[immutable.Seq]))
    }
  }

  /** Varargs convenience overload of [[filterEvents]]. */
  def filterEvents[T](eventFilters: EventFilter*)(block: ⇒ T)(implicit system: ActorSystem): T = filterEvents(eventFilters.toSeq)(block)

  /** Intercepts (and mutes) occurrences of exception type T logged while running `block`. */
  def filterException[T <: Throwable](block: ⇒ Unit)(implicit system: ActorSystem, t: ClassTag[T]): Unit = EventFilter[T]() intercept (block)

  /**
   * Scala API. Scale timeouts (durations) during tests with the configured
   * 'akka.test.timefactor'.
   * Implicit class providing `dilated` method.
   * {{{
   * import scala.concurrent.duration._
   * import akka.testkit._
   * 10.milliseconds.dilated
   * }}}
   * Corresponding Java API is available in JavaTestKit.dilated()
   */
  implicit class TestDuration(val duration: FiniteDuration) extends AnyVal {
    def dilated(implicit system: ActorSystem): FiniteDuration =
      (duration * /** @note IMPLEMENT IN SCALA.JS TestKitExtension(system) */ TestKitSettings.TestTimeFactor).asInstanceOf[FiniteDuration]
  }
}
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.features.avro
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, InputStream}
import org.apache.avro.io.{BinaryDecoder, DecoderFactory, DirectBinaryEncoder, EncoderFactory}
import org.locationtech.geomesa.features.SerializationOption.SerializationOption
import org.locationtech.geomesa.features.SimpleFeatureSerializer
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
/**
* @param sft the simple feature type to encode
* @param options the options to apply when encoding
*/
/**
 * Serializes simple features to Avro binary form.
 *
 * A single direct binary encoder and a single output buffer are reused across
 * calls: simple features are small, so no intermediate buffering is needed.
 *
 * @param sft the simple feature type to encode
 * @param options the options to apply when encoding
 */
class AvroFeatureSerializer(sft: SimpleFeatureType, val options: Set[SerializationOption] = Set.empty)
    extends SimpleFeatureSerializer {

  private val featureWriter = new AvroSimpleFeatureWriter(sft, options)

  // Output buffer shared across serialize calls; reset at the start of each call.
  private val buffer = new ByteArrayOutputStream()
  // Encoder instance handed back to the factory for reuse (null on first call).
  private var encoder: DirectBinaryEncoder = null

  override def serialize(feature: SimpleFeature): Array[Byte] = {
    buffer.reset()
    val enc = EncoderFactory.get().directBinaryEncoder(buffer, encoder).asInstanceOf[DirectBinaryEncoder]
    encoder = enc
    featureWriter.write(feature, enc)
    enc.flush()
    buffer.toByteArray
  }

  // Deserialization is intentionally unsupported on this one-way instance.
  override def deserialize(bytes: Array[Byte]): SimpleFeature =
    throw new NotImplementedError("This instance only handles serialization")
}
/**
* @param original the simple feature type that was encoded
* @param projected the simple feature type to project to when decoding
* @param options the options what were applied when encoding
*/
class ProjectingAvroFeatureDeserializer(original: SimpleFeatureType, projected: SimpleFeatureType,
val options: Set[SerializationOption] = Set.empty)
extends SimpleFeatureSerializer {
// Reader that decodes features written with `original` while projecting them to `projected`.
private val reader = new FeatureSpecificReader(original, projected, options)
// Serialization is intentionally unsupported on this one-way instance.
override def serialize(feature: SimpleFeature): Array[Byte] =
throw new NotImplementedError("This instance only handles deserialization")
override def deserialize(bytes: Array[Byte]): SimpleFeature = decode(new ByteArrayInputStream(bytes))
// Decoder reused across decode calls (null on first call). Because this field is
// shared mutable state, instances are not safe for concurrent use — callers must
// serialize access.
private var reuse: BinaryDecoder = null
// Decodes one simple feature from the stream, reusing the cached binary decoder.
def decode(is: InputStream) = {
reuse = DecoderFactory.get().directBinaryDecoder(is, reuse)
reader.read(null, reuse)
}
}
/**
* @param sft the simple feature type to decode
* @param options the options what were applied when encoding
*/
// Non-projecting deserializer: decodes into the same type the features were encoded with.
class AvroFeatureDeserializer(sft: SimpleFeatureType, options: Set[SerializationOption] = Set.empty)
extends ProjectingAvroFeatureDeserializer(sft, sft, options)
| ronq/geomesa | geomesa-features/geomesa-feature-avro/src/main/scala/org/locationtech/geomesa/features/avro/AvroFeatureSerializer.scala | Scala | apache-2.0 | 3,075 |
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// Licence: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.indexer
import org.ensime.fixture._
import org.ensime.util.EnsimeSpec
import org.ensime.util.file._
// Integration spec verifying that SourceResolver locates source files for JDK,
// dependency, and project classes. `find` comes from SourceResolverTestUtils.
class SourceResolverSpec extends EnsimeSpec
with SharedEnsimeVFSFixture
with SharedSourceResolverFixture
with SourceResolverTestUtils {
// Fixture hook: the test project configuration the shared resolver is built from.
def original = EnsimeConfigFixture.SimpleTestProject
"SourceResolver" should "resolve java sources in J2SE" in withSourceResolver { implicit r =>
find("java.lang", "String.java") shouldBe Some("/java/lang/String.java")
}
it should "resolve scala sources in the project dependencies" in withSourceResolver { implicit r =>
find("scala.collection.immutable", "List.scala") shouldBe
Some("/scala/collection/immutable/List.scala")
find("org.scalatest", "FunSpec.scala") shouldBe
Some("/org/scalatest/FunSpec.scala")
}
it should "resolve sources in the project" in withSourceResolver { (c, r) =>
implicit val config = c
implicit val resolver = r
find("org.example.Foo", "Foo.scala") shouldBe
Some((scalaMain / "org/example/Foo.scala").getAbsolutePath)
}
// NOTE(review): the description below reads "should should resolve..." because of the
// duplicated word in the string; fixing it would change the reported test name.
it should "should resolve files in parent directories in the project" in withSourceResolver { (c, r) =>
implicit val config = c
implicit val resolver = r
find("org.example", "bad-convention.scala") shouldBe
Some((scalaMain / "bad-convention.scala").getAbsolutePath)
}
}
/** Shared helpers for source-resolver specs. */
trait SourceResolverTestUtils {

  /**
   * Resolves `file` within package `pkg` using the implicit resolver and renders
   * the hit as a path string: entries located inside an archive use the file
   * object's path, plain local files use their absolute filesystem path.
   */
  def find(pkg: String, file: String)(implicit resolver: SourceResolver) = {
    import org.ensime.util.RichFileObject._
    val packageName = PackageName(pkg.split('.').toList)
    val source = RawSource(Some(file), None)
    resolver.resolve(packageName, source).map { fo =>
      if (fo.pathWithinArchive.isDefined) fo.getName.getPath
      else fo.asLocalFile.getAbsolutePath
    }
  }
}
| d1egoaz/ensime-sbt | src/sbt-test/sbt-ensime/ensime-server/core/src/it/scala/org/ensime/indexer/SourceResolverSpec.scala | Scala | apache-2.0 | 1,895 |
/*
* Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.http
import javax.inject.Inject
import play.api.ApplicationLoader.DevContext
import play.api.http.Status._
import play.api.inject.{ Binding, BindingKey }
import play.api.libs.streams.Accumulator
import play.api.mvc._
import play.api.routing.Router
import play.api.{ Configuration, Environment, OptionalDevContext }
import play.core.j.{ JavaHandler, JavaHandlerComponents, JavaHttpRequestHandlerDelegate }
import play.core.{ DefaultWebCommands, WebCommands }
import play.utils.Reflect
/**
* Primary entry point for all HTTP requests on Play applications.
*/
trait HttpRequestHandler {
/**
* Get a handler for the given request.
*
* In addition to retrieving a handler for the request, the request itself may be modified - typically it will be
* tagged with routing information. It is also acceptable to simply return the request as is. Play will switch to
* using the returned request from this point on in its request handling.
*
* The reason why the API allows returning a modified request, rather than just wrapping the Handler in a new Handler
* that modifies the request, is so that Play can pass this request to other handlers, such as error handlers, or
* filters, and they will get the tagged/modified request.
*
* @param request The request to handle
* @return The possibly modified/tagged request, and a handler to handle it
*/
def handlerForRequest(request: RequestHeader): (RequestHeader, Handler)
/**
* Adapt this to a Java HttpRequestHandler, wrapping it in a delegate usable from Java code.
*/
def asJava = new JavaHttpRequestHandlerDelegate(this)
}
object HttpRequestHandler {
// Builds the DI bindings for the configured request handler class
// (config key "play.http.requestHandler"), supporting both the Scala trait and
// the Java interface via the reflection helper.
def bindingsFromConfiguration(environment: Environment, configuration: Configuration): Seq[Binding[_]] = {
Reflect.bindingsFromConfiguration[HttpRequestHandler, play.http.HttpRequestHandler, play.core.j.JavaHttpRequestHandlerAdapter, play.http.DefaultHttpRequestHandler, JavaCompatibleHttpRequestHandler](
environment,
configuration, "play.http.requestHandler", "RequestHandler")
}
}
/** Provides the DI binding for the configured Java `ActionCreator`, when one is configured. */
object ActionCreator {
  import play.http.{ ActionCreator, DefaultActionCreator }

  /**
   * Looks up the "play.http.actionCreator" configuration entry and, when a class
   * is resolved, binds `ActionCreator` to it; returns no bindings otherwise.
   */
  def bindingsFromConfiguration(environment: Environment, configuration: Configuration): Seq[Binding[_]] = {
    val maybeConfigured = Reflect.configuredClass[ActionCreator, ActionCreator, DefaultActionCreator](
      environment,
      configuration, "play.http.actionCreator", "ActionCreator")
    maybeConfigured match {
      case None => Seq[Binding[_]]()
      case Some(either) =>
        // Both sides of the Either carry a usable implementation class.
        val impl = either match {
          case Left(clazz)  => clazz
          case Right(clazz) => clazz
        }
        Seq(BindingKey(classOf[ActionCreator]).to(impl))
    }
  }
}
/**
* Implementation of a [HttpRequestHandler] that always returns NotImplemented results
*/
object NotImplementedHttpRequestHandler extends HttpRequestHandler {
// Every request is answered with a 501 Not Implemented result; the request is returned unmodified.
def handlerForRequest(request: RequestHeader) = request -> EssentialAction(_ => Accumulator.done(Results.NotImplemented))
}
/**
* A base implementation of the [[HttpRequestHandler]] that handles Scala actions. If you use Java actions in your
* application, you should override [[JavaCompatibleHttpRequestHandler]]; otherwise you can override this for your
* custom handler.
*
* Technically, this is not the default request handler that Play uses, rather, the [[JavaCompatibleHttpRequestHandler]]
* is the default one, in order to provide support for Java actions.
*/
class DefaultHttpRequestHandler(
webCommands: WebCommands,
optDevContext: Option[DevContext],
router: Router,
errorHandler: HttpErrorHandler,
configuration: HttpConfiguration,
filters: Seq[EssentialFilter]) extends HttpRequestHandler {
// DI constructor: unwraps the injectable OptionalDevContext and HttpFilters wrappers
// into the plain Option/Seq taken by the primary constructor.
@Inject
def this(
webCommands: WebCommands,
optDevContext: OptionalDevContext,
router: Router,
errorHandler: HttpErrorHandler,
configuration: HttpConfiguration,
filters: HttpFilters) = {
this(webCommands, optDevContext.devContext, router, errorHandler, configuration, filters.filters)
}
@deprecated("Use the main DefaultHttpRequestHandler constructor", "2.7.0")
def this(router: Router, errorHandler: HttpErrorHandler, configuration: HttpConfiguration, filters: HttpFilters) = {
this(new DefaultWebCommands, None, router, errorHandler, configuration, filters.filters)
}
@deprecated("Use the main DefaultHttpRequestHandler constructor", "2.7.0")
def this(router: Router, errorHandler: HttpErrorHandler, configuration: HttpConfiguration, filters: EssentialFilter*) = {
this(new DefaultWebCommands, None, router, errorHandler, configuration, filters)
}
// The configured context path, normalized to have no trailing '/'.
private val context = configuration.context.stripSuffix("/")
/** Work out whether a path is handled by this application. */
private def inContext(path: String): Boolean = {
// Assume context is a string without a trailing '/'.
// Handle four cases:
// * context.isEmpty
// - There is no context, everything is in context, short circuit all other checks
// * !path.startsWith(context)
// - Either path is shorter than context or starts with a different prefix.
// * path.startsWith(context) && path.length == context.length
// - Path is equal to context.
// * path.startsWith(context) && path.charAt(context.length) == '/')
// - Path starts with context followed by a '/' character.
context.isEmpty ||
(path.startsWith(context) && (path.length == context.length || path.charAt(context.length) == '/'))
}
override def handlerForRequest(request: RequestHeader): (RequestHeader, Handler) = {
// Builds an action that ignores the request body and delegates to the error handler
// with the given client-error status code.
def handleWithStatus(status: Int) = ActionBuilder.ignoringBody.async(BodyParsers.utils.empty)(req =>
errorHandler.onClientError(req, status)
)
/**
* Call the router to get the handler, but with a couple of types of fallback.
* First, if a HEAD request isn't explicitly routed try routing it as a GET
* request. Second, if no routing information is present, fall back to a 404
* error.
*/
def routeWithFallback(request: RequestHeader): Handler = {
routeRequest(request).getOrElse {
request.method match {
// We automatically permit HEAD requests against any GETs without the need to
// add an explicit mapping in Routes. Since we couldn't route the HEAD request,
// try to get a Handler for the equivalent GET request instead. Notes:
// 1. The handler returned will still be passed a HEAD request when it is
// actually evaluated.
// 2. When the endpoint is to a WebSocket connection, the handler returned
// will result in a Bad Request. That is because, while we can translate
// GET requests to HEAD, we can't do that for WebSockets, since there is
// no way (or reason) to Upgrade the connection. For more information see
// https://tools.ietf.org/html/rfc6455#section-1.3
case HttpVerbs.HEAD => {
routeRequest(request.withMethod(HttpVerbs.GET)) match {
case Some(handler: Handler) => handler match {
case ws: WebSocket => handleWithStatus(BAD_REQUEST)
case _ => handler
}
case None => handleWithStatus(NOT_FOUND)
}
}
case _ =>
// An Action for a 404 error
handleWithStatus(NOT_FOUND)
}
}
}
// If we've got a BuildLink (i.e. if we're running in dev mode) then run the WebCommands.
// The WebCommands will have a chance to intercept the request and override the result.
// This is used by, for example, the evolutions code to present an evolutions UI to the
// user when the access the web page through a browser.
//
// In prod mode this code will not be run.
val webCommandResult: Option[Result] = optDevContext.flatMap { devContext: DevContext =>
webCommands.handleWebCommand(request, devContext.buildLink, devContext.buildLink.projectPath)
}
// Look at the result of the WebCommand and either short-circuit the result or apply
// the routes, filters, actions, etc.
webCommandResult match {
case Some(r) =>
// A WebCommand returned a result
(request, ActionBuilder.ignoringBody { r })
case None =>
// 1. Query the router to get a handler
// 2. Resolve handlers that preprocess the request
// 3. Modify the handler to do filtering, if necessary
// 4. Again resolve any handlers that do preprocessing
// The order here matters: stages are resolved both before and after filtering
// so that both the routed handler and any filter-added stages see the request.
val routedHandler = routeWithFallback(request)
val (preprocessedRequest, preprocessedHandler) = Handler.applyStages(request, routedHandler)
val filteredHandler = filterHandler(preprocessedRequest, preprocessedHandler)
val (preprocessedPreprocessedRequest, preprocessedFilteredHandler) = Handler.applyStages(preprocessedRequest, filteredHandler)
(preprocessedPreprocessedRequest, preprocessedFilteredHandler)
}
}
/**
* Apply any filters to the given handler.
*/
@deprecated("Use filterHandler(RequestHeader, Handler) instead", "2.6.0")
protected def filterHandler(next: RequestHeader => Handler): (RequestHeader => Handler) = {
(request: RequestHeader) =>
next(request) match {
case action: EssentialAction if inContext(request.path) => filterAction(action)
case handler => handler
}
}
/**
* Update the given handler so that when the handler is run any filters will also be run. The
* default behavior is to wrap all [[play.api.mvc.EssentialAction]]s by calling `filterAction`, but to leave
* other kinds of handlers unchanged.
*/
protected def filterHandler(request: RequestHeader, handler: Handler): Handler = {
handler match {
case action: EssentialAction if inContext(request.path) => filterAction(action)
case handler => handler
}
}
/**
* Apply filters to the given action.
*/
protected def filterAction(next: EssentialAction): EssentialAction = {
// foldRight so that the first filter in the sequence becomes the outermost wrapper.
filters.foldRight(next)(_ apply _)
}
/**
* Called when an HTTP request has been received.
*
* The default is to use the application router to find the appropriate action.
*
* This method can be overridden if you want to provide some custom routing strategies, for example, using different
* routers based on various request parameters.
*
* @param request The request
* @return A handler to handle the request, if one can be found
*/
def routeRequest(request: RequestHeader): Option[Handler] = {
router.handlerFor(request)
}
}
/**
* A Java compatible HTTP request handler.
*
* If a router routes to Java actions, it will return instances of [[play.core.j.JavaHandler]]. This takes an instance
* of [[play.core.j.JavaHandlerComponents]] to supply the necessary infrastructure to invoke a Java action, and returns
* a new [[play.api.mvc.Handler]] that the core of Play knows how to handle.
*
* If your application routes to Java actions, then you must use this request handler as the base class as is or as
* the base class for your custom [[HttpRequestHandler]].
*/
class JavaCompatibleHttpRequestHandler(
webCommands: WebCommands,
optDevContext: Option[DevContext],
router: Router,
errorHandler: HttpErrorHandler,
configuration: HttpConfiguration,
filters: Seq[EssentialFilter],
handlerComponents: JavaHandlerComponents)
extends DefaultHttpRequestHandler(webCommands, optDevContext, router, errorHandler, configuration, filters) {
// DI constructor: unwraps the injectable OptionalDevContext and HttpFilters wrappers.
@Inject
def this(
webCommands: WebCommands,
optDevContext: OptionalDevContext,
router: Router,
errorHandler: HttpErrorHandler,
configuration: HttpConfiguration,
filters: HttpFilters,
handlerComponents: JavaHandlerComponents) = {
this(webCommands, optDevContext.devContext, router, errorHandler, configuration, filters.filters, handlerComponents)
}
@deprecated("Use the main JavaCompatibleHttpRequestHandler constructor", "2.7.0")
def this(router: Router, errorHandler: HttpErrorHandler,
configuration: HttpConfiguration, filters: HttpFilters, handlerComponents: JavaHandlerComponents) = {
this(new DefaultWebCommands, new OptionalDevContext(None), router, errorHandler, configuration, filters, handlerComponents)
}
// This is a Handler that, when evaluated, converts its underlying JavaHandler into
// another handler by supplying it the Java handler components.
private class MapJavaHandler(nextHandler: Handler) extends Handler.Stage {
override def apply(requestHeader: RequestHeader): (RequestHeader, Handler) = {
// First, preprocess the request and our handler so we can get the underlying handler
val (preprocessedRequest, preprocessedHandler) = Handler.applyStages(requestHeader, nextHandler)
// Next, if the underlying handler is a JavaHandler, get its real handler
val mappedHandler: Handler = preprocessedHandler match {
case javaHandler: JavaHandler => javaHandler.withComponents(handlerComponents)
case other => other
}
(preprocessedRequest, mappedHandler)
}
}
override def routeRequest(request: RequestHeader): Option[Handler] = {
// Override the usual routing logic so that any JavaHandlers are
// rewritten.
super.routeRequest(request).map(new MapJavaHandler(_))
}
}
| Shenker93/playframework | framework/src/play/src/main/scala/play/api/http/HttpRequestHandler.scala | Scala | apache-2.0 | 13,241 |
package nlpdata.datasets
import nlpdata.util._
package object wiki1k {

  /** Source domains covered by the wiki1k corpus. */
  val wiki1kDomains = List("wikipedia", "wikinews")

  object Parsing {

    /**
     * Reads a Wiki1kFile from its serialized line-based form.
     *
     * Expected layout: the first three lines are the page id, revision id and
     * title; the rest is the text, with blank lines separating paragraphs and
     * one pre-tokenized sentence per line (tokens separated by single spaces).
     *
     * @param path  the path identifying the file being read
     * @param lines the file's lines, in order
     */
    def readFile(path: Wiki1kPath, lines: Iterator[String]): Wiki1kFile = {
      // next() advances the iterator; use parens to mark the side effect.
      val id = lines.next()
      val revId = lines.next()
      val title = lines.next()

      // `lines` is accumulated in reverse while scanning, so restore order here
      // and attach each sentence's paragraph/sentence indices to its path.
      def makeParagraph(paragraphNum: Int, lines: List[String]) =
        lines.reverse.zipWithIndex.map {
          case (line, index) =>
            Wiki1kSentence(Wiki1kSentencePath(path, paragraphNum, index), line.split(" ").toVector)
        }.toVector

      val paragraphs = {
        // Fold over the remaining lines, closing the current paragraph whenever
        // a blank line is seen. Paragraphs are collected in reverse order.
        val (mostPs, lastParagraphNum, extraLines) =
          lines.foldLeft((List.empty[Vector[Wiki1kSentence]], 0, List.empty[String])) {
            case ((curParagraphs, curParagraphNum, curLines), nextLine) =>
              if (nextLine.isEmpty) {
                val curSentences = makeParagraph(curParagraphNum, curLines)
                (curSentences :: curParagraphs, curParagraphNum + 1, Nil)
              } else {
                (curParagraphs, curParagraphNum, nextLine :: curLines)
              }
          }
        // If the file does not end with a blank line, flush the trailing paragraph.
        if (extraLines.isEmpty) mostPs.reverse.toVector
        else {
          val newP = makeParagraph(lastParagraphNum, extraLines)
          (newP :: mostPs).reverse.toVector
        }
      }
      Wiki1kFile(path, id, revId, title, paragraphs)
    }
  }
}
| julianmichael/nlpdata | nlpdata/src/nlpdata/datasets/wiki1k/package.scala | Scala | mit | 1,378 |
/**
* Copyright 2012-2013 greencheek.org (www.greencheek.org)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.greencheek.jms.yankeedo.structure.scenario
import org.greencheek.jms.yankeedo.structure.dsl.Dsl._
import akka.actor.{Props, ActorRef}
import org.greencheek.jms.yankeedo.scenarioexecution.{StartExecutingScenarios, ReturnScenarioActorSystems, ScenarioActorSystems, ScenariosExecutionManager}
import akka.util.Timeout
import akka.pattern.ask
import scala.concurrent.duration._
import scala.concurrent.Await
import java.util.concurrent.TimeUnit
import akka.camel.CamelMessage
import org.specs2.runner.JUnitRunner
import org.junit.runner.RunWith
import org.greencheek.jms.yankeedo.scenarioexecution.consumer.messageprocessor.CamelMessageProcessor
/**
* Created by dominictootell on 15/03/2014.
*/
@RunWith(classOf[JUnitRunner])
// Integration spec: verifies that when a message processor throws, the message is
// consumed or left on the queue according to the processor's consumerOnError flag.
class TestMessageProcessorExceptionHandlingSpec extends BrokerBasedSpec {
val myContext = WithActorSystem();
"Producing messages" >> {
"Check that message is not consumed when exception throw, but message processor says not to consume" in myContext {
// 100 produced, consumer fails and does NOT consume => all 100 stay on the queue.
doTestConsumer(myContext,false,"queuewithnoconsume",100,1) should beTrue
}
"Check that message is consumed when exception throw, but message processor says to consume" in myContext {
// 100 produced, consumer fails but consumes anyway => 99 remain on the queue.
doTestConsumer(myContext,true,"queuewithconsume",99,1) should beTrue
}
// Runs one consumer scenario (expecting a single message) and one producer scenario
// (sending 100 persistent messages) against the embedded broker, then checks the
// remaining queue depth and how many messages reached the failing processor.
def doTestConsumer(testContext : WithActorSystem, consumeOnException: Boolean, queueName : String, messagesExpectedOnQueue : Int,
messageExceptedToBeSentForProcessing : Int) : Boolean = {
val appLatch = testContext.latch
val actorSystem = testContext.actorSystem
val messageProcessor = new MessageProcessorThrowingException(consumeOnException)
val producerScenario1 = createScenario(
"Consumer 1 message scenario" connect_to "tcp://localhost:" + port + "?daemon=true&jms.closeTimeout=200"
until_no_of_messages_consumed 1
consume from queue queueName
with_message_consumer messageProcessor
prefetch 1
)
val producerScenario2 = createScenario(
"Product 100 messages scenario" connect_to "tcp://localhost:" + port + "?daemon=true&jms.closeTimeout=200"
until_no_of_messages_sent 100
produce to queue queueName
with_persistent_delivery
)
val scenarioExecutor : ActorRef = actorSystem.actorOf(Props(new ScenariosExecutionManager(appLatch,ScenarioContainer(producerScenario1,producerScenario2))))
scenarioExecutor ! StartExecutingScenarios
implicit val timeout = Timeout(2,SECONDS)
val future = scenarioExecutor ? ReturnScenarioActorSystems
val result = Await.result(future, timeout.duration).asInstanceOf[ScenarioActorSystems]
result should not beNull
val actorSystemSize = result.actorSystems.size
actorSystemSize should beEqualTo(2)
var ok : Boolean = false
// Wait for both scenarios to finish; a timeout leaves ok == false and fails below.
// NOTE(review): the InterruptedException (and any other) is deliberately swallowed here;
// the subsequent `ok should beTrue` reports the failure instead.
try {
ok = appLatch.await(15,TimeUnit.SECONDS)
} catch {
case e: Exception => {
}
}
ok should beTrue
val map = broker.getBroker.getDestinationMap()
getMessageCountForQueueDestination(map,queueName) should beEqualTo(messagesExpectedOnQueue)
messageProcessor.numberOfMessagesProcessed should beEqualTo(messageExceptedToBeSentForProcessing)
true
}
}
// Test double: always throws from process(), counting invocations; consumerOnError
// reports whether the failed message should still be consumed.
class MessageProcessorThrowingException(val consumeOnException : Boolean = true) extends CamelMessageProcessor {
// Written by the consumer thread, read by the test thread, hence @volatile.
@volatile var _numberOfMessagesProcessed : Int = 0
def process(message: CamelMessage) {
_numberOfMessagesProcessed+=1
throw new Exception("MessageProcessorThrowingException")
}
def consumerOnError: Boolean = consumeOnException
def numberOfMessagesProcessed : Int = {
_numberOfMessagesProcessed
}
}
}
| tootedom/yankeedo | yankeedo-core/src/test/scala/org/greencheek/jms/yankeedo/structure/scenario/TestMessageProcessorExceptionHandlingSpec.scala | Scala | apache-2.0 | 4,372 |
/*
* IntSelector.scala
* Selection of an integer uniformly from 0 to a variable upper bound.
*
* Created By: Avi Pfeffer (apfeffer@cra.com)
* Creation Date: Oct 17, 2011
*
* Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email figaro@cra.com for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.library.compound
import com.cra.figaro.language._
import com.cra.figaro.util._
import com.cra.figaro.algorithm._
import com.cra.figaro.algorithm.factored._
import com.cra.figaro.algorithm.lazyfactored._
/**
* An IntSelector represents the selection of an integer from 0 (inclusive) to a variable upper bound (exclusive). The
* upper bound is an element represented by the bound argument. The IntSelector class has been defined so that
* (1) the value is always uniformly distributed in the range, no matter how the upper bound changes, and
* (2) it attempts to avoid changing the value as much as it possibly can as the upper bound changes.
* This latter property makes the class useful in an algorithm like Metropolis-Hastings, where we would like to
* change as little as possible as we make a proposal.
*/
class IntSelector(name: Name[Int], counter: Element[Int], collection: ElementCollection)
extends Element[Int](name, collection) with IfArgsCacheable[Int] with ValuesMaker[Int] with ProbFactorMaker {
// We achieve the two properties by making the randomness a random stream of doubles and selecting the index
// within range that has the highest randomness. If the bound changes, the double associated with the index
// does not change, so quite often the highest index will stay the same.
type Randomness = Stream[Double]
def args = List(counter)
def generateRandomness(): Randomness = Stream.continually(random.nextDouble())
// The value is the argmax over the first `counter.value` doubles of the stream,
// which is uniform over 0 until counter.value.
def generateValue(rand: Randomness): Int = argmax(rand take counter.value)
// Possible values are 0 until the maximum known counter value; the result is
// starred if the counter's value set is starred (or has no regular values yet).
def makeValues(depth: Int): ValueSet[Int] = {
val counterValues = LazyValues(universe)(counter, depth - 1)
if (counterValues.regularValues.nonEmpty) {
val maxCounter = counterValues.regularValues.max
val all = List.tabulate(maxCounter)(i => i).toSet
if (counterValues.hasStar) ValueSet.withStar(all); else ValueSet.withoutStar(all)
} else { ValueSet.withStar(Set()) }
}
// Factor over (this, counter): probability 1/counter for each in-range value,
// 0 for out-of-range values, and 1.0 when either variable is non-regular (star).
def makeFactors: List[Factor[Double]] = {
val thisVar = Variable(this)
val counterVar = Variable(counter)
val comb = Factory.make[Double](List(thisVar, counterVar))
comb.fillByRule((l: List[Any]) => {
val xvalue0 :: xvalue1 :: _ = l.asInstanceOf[List[Extended[Int]]]
if (xvalue0.isRegular && xvalue1.isRegular) {
if (xvalue0.value < xvalue1.value) 1.0/xvalue1.value; else 0.0
} else 1.0
})
List(comb)
}
}
object IntSelector {
/**
* Create an IntSelector using the counter element as the (exclusive) upper bound.
*/
def apply(counter: Element[Int])(implicit name: Name[Int], collection: ElementCollection) =
new IntSelector(name, counter, collection)
}
| bruttenberg/figaro | Figaro/src/main/scala/com/cra/figaro/library/compound/IntSelector.scala | Scala | bsd-3-clause | 3,166 |
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package kafka.api
import java.io.File
import java.{lang, util}
import java.util.concurrent.{ConcurrentHashMap, TimeUnit}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}
import java.util.{Collections, Properties}
import kafka.api.GroupedUserPrincipalBuilder._
import kafka.api.GroupedUserQuotaCallback._
import kafka.server._
import kafka.utils.JaasTestUtils.ScramLoginModule
import kafka.utils.{JaasTestUtils, Logging, TestUtils}
import kafka.zk.ConfigEntityChangeNotificationZNode
import org.apache.kafka.clients.admin.{AdminClient, AdminClientConfig}
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.{Cluster, Reconfigurable}
import org.apache.kafka.common.config.SaslConfigs
import org.apache.kafka.common.errors.SaslAuthenticationException
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.security.auth._
import org.apache.kafka.common.security.scram.ScramCredential
import org.apache.kafka.server.quota._
import org.junit.Assert._
import org.junit.{After, Before, Test}
import scala.collection.mutable.ArrayBuffer
import scala.collection.JavaConverters._
class CustomQuotaCallbackTest extends IntegrationTestHarness with SaslSetup {
override protected def securityProtocol = SecurityProtocol.SASL_SSL
override protected def listenerName = new ListenerName("CLIENT")
override protected def interBrokerListenerName: ListenerName = new ListenerName("BROKER")
override protected lazy val trustStoreFile = Some(File.createTempFile("truststore", ".jks"))
override val consumerCount: Int = 0
override val producerCount: Int = 0
override val serverCount: Int = 2
private val kafkaServerSaslMechanisms = Seq("SCRAM-SHA-256")
private val kafkaClientSaslMechanism = "SCRAM-SHA-256"
override protected val serverSaslProperties = Some(kafkaServerSaslProperties(kafkaServerSaslMechanisms, kafkaClientSaslMechanism))
override protected val clientSaslProperties = Some(kafkaClientSaslProperties(kafkaClientSaslMechanism))
private val adminClients = new ArrayBuffer[AdminClient]()
private var producerWithoutQuota: KafkaProducer[Array[Byte], Array[Byte]] = _
val defaultRequestQuota = 1000
val defaultProduceQuota = 2000 * 1000 * 1000
val defaultConsumeQuota = 1000 * 1000 * 1000
@Before
override def setUp() {
// Start the SASL/SCRAM infrastructure before the brokers come up.
startSasl(jaasSections(kafkaServerSaslMechanisms, Some("SCRAM-SHA-256"), KafkaSasl, JaasTestUtils.KafkaServerContextName))
// Effectively disable the built-in default quotas so only the custom callback throttles.
this.serverConfig.setProperty(KafkaConfig.ProducerQuotaBytesPerSecondDefaultProp, Long.MaxValue.toString)
this.serverConfig.setProperty(KafkaConfig.ConsumerQuotaBytesPerSecondDefaultProp, Long.MaxValue.toString)
// Install the custom quota callback and the principal builder that groups users.
this.serverConfig.setProperty(KafkaConfig.ClientQuotaCallbackClassProp, classOf[GroupedUserQuotaCallback].getName)
this.serverConfig.setProperty(s"${listenerName.configPrefix}${KafkaConfig.PrincipalBuilderClassProp}",
classOf[GroupedUserPrincipalBuilder].getName)
this.serverConfig.setProperty(KafkaConfig.DeleteTopicEnableProp, "true")
super.setUp()
brokerList = TestUtils.bootstrapServers(servers, listenerName)
// Producer authenticates as the SCRAM admin user created before broker start.
producerConfig.put(SaslConfigs.SASL_JAAS_CONFIG,
ScramLoginModule(JaasTestUtils.KafkaScramAdmin, JaasTestUtils.KafkaScramAdminPassword).toString)
producerWithoutQuota = createNewProducer
producers += producerWithoutQuota
}
@After
override def tearDown(): Unit = {
// Close producers and consumers without waiting for requests to complete
// to avoid waiting for throttled responses
producers.foreach(_.close(0, TimeUnit.MILLISECONDS))
producers.clear()
consumers.foreach(_.close(0, TimeUnit.MILLISECONDS))
consumers.clear()
super.tearDown()
}
override def configureSecurityBeforeServersStart() {
super.configureSecurityBeforeServersStart()
// SCRAM credentials must exist in ZooKeeper before the brokers start so that
// broker-side SASL authentication succeeds at startup.
zkClient.makeSurePersistentPathExists(ConfigEntityChangeNotificationZNode.path)
createScramCredentials(zkConnect, JaasTestUtils.KafkaScramAdmin, JaasTestUtils.KafkaScramAdminPassword)
}
@Test
// End-to-end exercise of the custom quota callback: walks through quota overrides,
// override deletion, default-quota changes, partition-count-based scaling and
// dynamic reconfiguration, asserting throttled/unthrottled behavior at each step.
def testCustomQuotaCallback() {
// Large quota override, should not throttle
var brokerId = 0
var user = createGroupWithOneUser("group0_user1", brokerId)
user.configureAndWaitForQuota(1000000, 2000000)
quotaLimitCalls.values.foreach(_.set(0))
user.produceConsume(expectProduceThrottle = false, expectConsumeThrottle = false)
// ClientQuotaCallback#quotaLimit is invoked by each quota manager once for each new client
assertEquals(1, quotaLimitCalls(ClientQuotaType.PRODUCE).get)
assertEquals(1, quotaLimitCalls(ClientQuotaType.FETCH).get)
assertTrue(s"Too many quotaLimit calls $quotaLimitCalls", quotaLimitCalls(ClientQuotaType.REQUEST).get <= serverCount)
// Large quota updated to small quota, should throttle
user.configureAndWaitForQuota(9000, 3000)
user.produceConsume(expectProduceThrottle = true, expectConsumeThrottle = true)
// Quota override deletion - verify default quota applied (large quota, no throttling)
user = addUser("group0_user2", brokerId)
user.removeQuotaOverrides()
user.waitForQuotaUpdate(defaultProduceQuota, defaultConsumeQuota, defaultRequestQuota)
user.removeThrottleMetrics() // since group was throttled before
user.produceConsume(expectProduceThrottle = false, expectConsumeThrottle = false)
// Make default quota smaller, should throttle
user.configureAndWaitForQuota(8000, 2500, divisor = 1, group = None)
user.produceConsume(expectProduceThrottle = true, expectConsumeThrottle = true)
// Configure large quota override, should not throttle
user = addUser("group0_user3", brokerId)
user.configureAndWaitForQuota(2000000, 2000000)
user.removeThrottleMetrics() // since group was throttled before
user.produceConsume(expectProduceThrottle = false, expectConsumeThrottle = false)
// Quota large enough for one partition, should not throttle
brokerId = 1
user = createGroupWithOneUser("group1_user1", brokerId)
user.configureAndWaitForQuota(8000 * 100, 2500 * 100)
user.produceConsume(expectProduceThrottle = false, expectConsumeThrottle = false)
// Create large number of partitions on another broker, should result in throttling on first partition
val largeTopic = "group1_largeTopic"
createTopic(largeTopic, numPartitions = 99, leader = 0)
user.waitForQuotaUpdate(8000, 2500, defaultRequestQuota)
user.produceConsume(expectProduceThrottle = true, expectConsumeThrottle = true)
// Remove quota override and test default quota applied with scaling based on partitions
user = addUser("group1_user2", brokerId)
user.waitForQuotaUpdate(defaultProduceQuota / 100, defaultConsumeQuota / 100, defaultRequestQuota)
user.removeThrottleMetrics() // since group was throttled before
user.produceConsume(expectProduceThrottle = false, expectConsumeThrottle = false)
user.configureAndWaitForQuota(8000 * 100, 2500 * 100, divisor=100, group = None)
user.produceConsume(expectProduceThrottle = true, expectConsumeThrottle = true)
// Remove the second topic with large number of partitions, verify no longer throttled
adminZkClient.deleteTopic(largeTopic)
user = addUser("group1_user3", brokerId)
user.waitForQuotaUpdate(8000 * 100, 2500 * 100, defaultRequestQuota)
user.removeThrottleMetrics() // since group was throttled before
user.produceConsume(expectProduceThrottle = false, expectConsumeThrottle = false)
// Alter configs of custom callback dynamically
val adminClient = createAdminClient()
val newProps = new Properties
newProps.put(GroupedUserQuotaCallback.DefaultProduceQuotaProp, "8000")
newProps.put(GroupedUserQuotaCallback.DefaultFetchQuotaProp, "2500")
TestUtils.alterConfigs(servers, adminClient, newProps, perBrokerConfig = false)
user.waitForQuotaUpdate(8000, 2500, defaultRequestQuota)
user.produceConsume(expectProduceThrottle = true, expectConsumeThrottle = true)
// Each broker should have created exactly one callback instance.
assertEquals(serverCount, callbackInstances.get)
}
/**
 * Creates a quota group containing a single user plus a single-partition topic
 * whose leader lives on the given broker, then configures the default group quota.
 * @param firstUser First user to create in the group
 * @param brokerId The broker id to use as leader of the partition
 */
private def createGroupWithOneUser(firstUser: String, brokerId: Int): GroupedUser = {
  val groupedUser = addUser(firstUser, brokerId)
  createTopic(groupedUser.topic, numPartitions = 1, leader = brokerId)
  // group = None configures the default quota entity rather than a group override
  groupedUser.configureAndWaitForQuota(defaultProduceQuota, defaultConsumeQuota, divisor = 1, group = None)
  groupedUser
}
/** Creates `topic` with `numPartitions` partitions, each led by broker `leader`. */
private def createTopic(topic: String, numPartitions: Int, leader: Int): Unit = {
  val assignment = Map((0 until numPartitions).map(partition => (partition, Seq(leader))): _*)
  TestUtils.createTopic(zkClient, topic, assignment, servers)
}
/**
 * Builds an AdminClient authenticated with the SCRAM admin credentials and
 * registers it in `adminClients` for cleanup at the end of the test.
 */
private def createAdminClient(): AdminClient = {
  val config = new util.HashMap[String, Object]
  config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
    TestUtils.bootstrapServers(servers, new ListenerName("BROKER")))
  for ((key, value) <- clientSecurityProps("admin-client").asInstanceOf[util.Map[Object, Object]].asScala) {
    config.put(key.toString, value)
  }
  config.put(SaslConfigs.SASL_JAAS_CONFIG,
    ScramLoginModule(JaasTestUtils.KafkaScramAdmin, JaasTestUtils.KafkaScramAdminPassword).toString)
  val client = AdminClient.create(config)
  adminClients += client
  client
}
/** Produces `numRecords` records to `topic` via the producer that has no quota applied. */
private def produceWithoutThrottle(topic: String, numRecords: Int): Unit = {
  for (i <- 0 until numRecords) {
    val payload = i.toString.getBytes
    producerWithoutQuota.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, null, null, payload))
  }
}
/**
 * Creates SCRAM credentials for `user`, waits until every broker has cached
 * them, and builds a producer and consumer authenticated as that user.
 *
 * @param user User principal name, expected in the form "group_name"
 * @param leader Index into `servers` of the broker used as the user's leader node
 * @return a GroupedUser wrapping the clients and the leader broker
 */
private def addUser(user: String, leader: Int): GroupedUser = {
  val password = s"$user:secret"
  createScramCredentials(zkConnect, user, password)
  // Credentials propagate asynchronously; wait for each broker's cache so that
  // client authentication doesn't fail spuriously.
  servers.foreach { server =>
    val cache = server.credentialProvider.credentialCache.cache(kafkaClientSaslMechanism, classOf[ScramCredential])
    TestUtils.waitUntilTrue(() => cache.get(user) != null, "SCRAM credentials not created")
  }
  val userGroup = group(user)
  val topic = s"${userGroup}_topic"
  val producerClientId = s"$user:producer-client-id"
  // Fixed: this previously duplicated the producer client-id, so the consumer
  // reused "producer-client-id" instead of having a distinct client-id.
  val consumerClientId = s"$user:consumer-client-id"
  producerConfig.put(ProducerConfig.CLIENT_ID_CONFIG, producerClientId)
  producerConfig.put(SaslConfigs.SASL_JAAS_CONFIG, ScramLoginModule(user, password).toString)
  val producer = createNewProducer
  producers += producer
  consumerConfig.put(ConsumerConfig.CLIENT_ID_CONFIG, consumerClientId)
  consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, s"$user-group")
  consumerConfig.put(SaslConfigs.SASL_JAAS_CONFIG, ScramLoginModule(user, password).toString)
  val consumer = createNewConsumer
  consumers += consumer
  GroupedUser(user, userGroup, topic, servers(leader), producerClientId, consumerClientId, producer, consumer)
}
/**
 * Producer/consumer pair for a user that belongs to a quota group. Group-level
 * quotas are written to ZooKeeper under the user quota entity "<group>_".
 */
case class GroupedUser(user: String, userGroup: String, topic: String, leaderNode: KafkaServer,
producerClientId: String, consumerClientId: String,
producer: KafkaProducer[Array[Byte], Array[Byte]],
consumer: KafkaConsumer[Array[Byte], Array[Byte]]) extends
QuotaTestClients(topic, leaderNode, producerClientId, consumerClientId, producer, consumer) {
override def userPrincipal: KafkaPrincipal = GroupedUserPrincipal(user, userGroup)
// Throttle metrics are tagged only with the group name: the custom callback
// defines quotas per group, not per (user, client-id).
override def quotaMetricTags(clientId: String): Map[String, String] = {
Map(GroupedUserQuotaCallback.QuotaGroupTag -> userGroup)
}
// Writes produce/fetch/request quota overrides for this user's group.
override def overrideQuotas(producerQuota: Long, consumerQuota: Long, requestQuota: Double): Unit = {
configureQuota(userGroup, producerQuota, consumerQuota, requestQuota)
}
// Removes all quota overrides for the group by writing an empty property set.
override def removeQuotaOverrides(): Unit = {
adminZkClient.changeUserOrUserClientIdConfig(quotaEntityName(userGroup), new Properties)
}
// Persists the given quotas under the group's quota entity in ZooKeeper.
def configureQuota(userGroup: String, producerQuota: Long, consumerQuota: Long, requestQuota: Double): Unit = {
val quotaProps = quotaProperties(producerQuota, consumerQuota, requestQuota)
adminZkClient.changeUserOrUserClientIdConfig(quotaEntityName(userGroup), quotaProps)
}
// Configures quotas and waits until brokers report the scaled value
// (configured quota / divisor). group = None targets the default entity "_".
def configureAndWaitForQuota(produceQuota: Long, fetchQuota: Long, divisor: Int = 1,
group: Option[String] = Some(userGroup)): Unit = {
configureQuota(group.getOrElse(""), produceQuota, fetchQuota, defaultRequestQuota)
waitForQuotaUpdate(produceQuota / divisor, fetchQuota / divisor, defaultRequestQuota)
}
// Produces and consumes records, verifying the expected broker-side throttling.
// Client metrics are not verified since quota metric tags are group-based.
def produceConsume(expectProduceThrottle: Boolean, expectConsumeThrottle: Boolean): Unit = {
val numRecords = 1000
val produced = produceUntilThrottled(numRecords, waitForRequestCompletion = false)
verifyProduceThrottle(expectProduceThrottle, verifyClientMetric = false)
// make sure there are enough records on the topic to test consumer throttling
produceWithoutThrottle(topic, numRecords - produced)
consumeUntilThrottled(numRecords, waitForRequestCompletion = false)
verifyConsumeThrottle(expectConsumeThrottle, verifyClientMetric = false)
}
// Removes the leader broker's throttle sensors for this group so a later user
// in the same group starts from a clean (unthrottled) state.
def removeThrottleMetrics(): Unit = {
def removeSensors(quotaType: QuotaType, clientId: String): Unit = {
// NOTE(review): sensor names are presumably those created by the broker's
// quota managers ("<type>ThrottleTime-<tags>" / "<type>-<tags>") — confirm.
val sensorSuffix = quotaMetricTags(clientId).values.mkString(":")
leaderNode.metrics.removeSensor(s"${quotaType}ThrottleTime-$sensorSuffix")
leaderNode.metrics.removeSensor(s"$quotaType-$sensorSuffix")
}
removeSensors(QuotaType.Produce, producerClientId)
removeSensors(QuotaType.Fetch, consumerClientId)
removeSensors(QuotaType.Request, producerClientId)
removeSensors(QuotaType.Request, consumerClientId)
}
// ZooKeeper quota entity name for a group: the group name followed by '_'.
private def quotaEntityName(userGroup: String): String = s"${userGroup}_"
}
}
object GroupedUserPrincipalBuilder {
  /**
   * Returns the group prefix of `str` (the text before the first underscore),
   * or the empty string when there is no underscore or it is the first character.
   */
  def group(str: String): String = {
    val separator = str.indexOf("_")
    if (separator > 0) str.substring(0, separator) else ""
  }
}
/**
 * Principal builder that attaches the user's group (the prefix before the first
 * underscore) to SASL-authenticated principals; users without a group become
 * plain USER principals. Non-SASL protocols are rejected.
 */
class GroupedUserPrincipalBuilder extends KafkaPrincipalBuilder {
  override def build(context: AuthenticationContext): KafkaPrincipal = {
    context.securityProtocol match {
      case SecurityProtocol.SASL_PLAINTEXT | SecurityProtocol.SASL_SSL =>
        val user = context.asInstanceOf[SaslAuthenticationContext].server().getAuthorizationID
        group(user) match {
          case "" => new KafkaPrincipal(KafkaPrincipal.USER_TYPE, user)
          case userGroup => GroupedUserPrincipal(user, userGroup)
        }
      case securityProtocol =>
        throw new IllegalStateException(s"Unexpected security protocol $securityProtocol")
    }
  }
}
/** USER-type KafkaPrincipal that additionally carries the user's quota group. */
case class GroupedUserPrincipal(user: String, userGroup: String) extends KafkaPrincipal(KafkaPrincipal.USER_TYPE, user)
object GroupedUserQuotaCallback {
// Metric tag used to identify the quota group in broker quota metrics.
val QuotaGroupTag = "group"
// Dynamically reconfigurable config names for the default group quotas.
val DefaultProduceQuotaProp = "default.produce.quota"
val DefaultFetchQuotaProp = "default.fetch.quota"
// Empty tag map: signals an unlimited quota for the client.
val UnlimitedQuotaMetricTags = Collections.emptyMap[String, String]
// Counts quotaLimit() invocations per quota type, asserted by the test.
val quotaLimitCalls = Map(
ClientQuotaType.PRODUCE -> new AtomicInteger,
ClientQuotaType.FETCH -> new AtomicInteger,
ClientQuotaType.REQUEST -> new AtomicInteger
)
// Total number of callback instances created (expected: one per broker).
val callbackInstances = new AtomicInteger
}
/**
* Quota callback for a grouped user. Both user principals and topics of each group
* are prefixed with the group name followed by '_'. This callback defines quotas of different
* types at the group level. Group quotas are configured in ZooKeeper as user quotas with
* the entity name "${group}_". Default group quotas are configured in ZooKeeper as user quotas
* with the entity name "_".
*
* Default group quotas may also be configured using the configuration options
* "default.produce.quota" and "default.fetch.quota" which can be reconfigured dynamically
* without restarting the broker. This tests custom reconfigurable options for quota callbacks,
*/
/**
 * Reconfigurable ClientQuotaCallback that applies quotas per user group and
 * scales produce/fetch quotas by the fraction of the group's partitions led by
 * this broker. Instance creations and quotaLimit() calls are counted for tests.
 */
class GroupedUserQuotaCallback extends ClientQuotaCallback with Reconfigurable with Logging {
var brokerId: Int = -1
// Set when a default quota changes via reconfigure(); drained by quotaResetRequired().
val customQuotasUpdated = ClientQuotaType.values.toList
.map(quotaType =>(quotaType -> new AtomicBoolean)).toMap
// Configured quota per group for each quota type; the "" key holds the default quota.
val quotas = ClientQuotaType.values.toList
.map(quotaType => (quotaType -> new ConcurrentHashMap[String, Double])).toMap
// Fraction of each group's partitions whose leader is this broker.
val partitionRatio = new ConcurrentHashMap[String, Double]()
override def configure(configs: util.Map[String, _]): Unit = {
brokerId = configs.get(KafkaConfig.BrokerIdProp).toString.toInt
// Counted so the test can assert one callback instance per broker.
callbackInstances.incrementAndGet
}
override def reconfigurableConfigs: util.Set[String] = {
Set(DefaultProduceQuotaProp, DefaultFetchQuotaProp).asJava
}
// Rejects reconfiguration when a configured value is not parseable as a Long.
override def validateReconfiguration(configs: util.Map[String, _]): Unit = {
reconfigurableConfigs.asScala.foreach(configValue(configs, _))
}
// Stores updated default quotas under the "" key and marks every quota type as
// updated so cached quota bounds are recomputed.
override def reconfigure(configs: util.Map[String, _]): Unit = {
configValue(configs, DefaultProduceQuotaProp).foreach(value => quotas(ClientQuotaType.PRODUCE).put("", value))
configValue(configs, DefaultFetchQuotaProp).foreach(value => quotas(ClientQuotaType.FETCH).put("", value))
customQuotasUpdated.values.foreach(_.set(true))
}
// Returns the config value as a Long, or None if the key is absent.
private def configValue(configs: util.Map[String, _], key: String): Option[Long] = {
val value = configs.get(key)
if (value != null) Some(value.toString.toLong) else None
}
// Tags metrics with the group name when a quota applies to it; otherwise
// returns the empty tag map, meaning the client is not throttled.
override def quotaMetricTags(quotaType: ClientQuotaType, principal: KafkaPrincipal, clientId: String): util.Map[String, String] = {
principal match {
case groupPrincipal: GroupedUserPrincipal =>
val userGroup = groupPrincipal.userGroup
val quotaLimit = quotaOrDefault(userGroup, quotaType)
if (quotaLimit != null)
Map(QuotaGroupTag -> userGroup).asJava
else
UnlimitedQuotaMetricTags
case _ =>
UnlimitedQuotaMetricTags
}
}
// Returns the (scaled) quota for the tagged group, or null for unlimited.
override def quotaLimit(quotaType: ClientQuotaType, metricTags: util.Map[String, String]): lang.Double = {
// Invocation count is asserted by the test.
quotaLimitCalls(quotaType).incrementAndGet
val group = metricTags.get(QuotaGroupTag)
if (group != null) quotaOrDefault(group, quotaType) else null
}
// Recomputes each group's partition ratio from cluster metadata. Returns true
// when any ratio changed, telling the broker to refresh quota bounds.
// ConcurrentHashMap.put returns the previous value (null on first insert), so
// the comparison is true on first insert or when the ratio changed.
override def updateClusterMetadata(cluster: Cluster): Boolean = {
val topicsByGroup = cluster.topics.asScala.groupBy(group)
!topicsByGroup.forall { case (group, groupTopics) =>
val groupPartitions = groupTopics.flatMap(topic => cluster.partitionsForTopic(topic).asScala)
val totalPartitions = groupPartitions.size
val partitionsOnThisBroker = groupPartitions.count { p => p.leader != null && p.leader.id == brokerId }
val multiplier = if (totalPartitions == 0)
1
else if (partitionsOnThisBroker == 0)
1.0 / totalPartitions
else
partitionsOnThisBroker.toDouble / totalPartitions
partitionRatio.put(group, multiplier) != multiplier
}
}
override def updateQuota(quotaType: ClientQuotaType, quotaEntity: ClientQuotaEntity, newValue: Double): Unit = {
quotas(quotaType).put(userGroup(quotaEntity), newValue)
}
override def removeQuota(quotaType: ClientQuotaType, quotaEntity: ClientQuotaEntity): Unit = {
quotas(quotaType).remove(userGroup(quotaEntity))
}
// Reports (and clears) the "default quota changed" flag set by reconfigure().
override def quotaResetRequired(quotaType: ClientQuotaType): Boolean = customQuotasUpdated(quotaType).getAndSet(false)
def close(): Unit = {}
// Extracts the group from a USER quota entity; other entity types are unsupported.
private def userGroup(quotaEntity: ClientQuotaEntity): String = {
val configEntity = quotaEntity.configEntities.get(0)
if (configEntity.entityType == ClientQuotaEntity.ConfigEntityType.USER)
group(configEntity.name)
else
throw new IllegalArgumentException(s"Config entity type ${configEntity.entityType} is not supported")
}
// Group quota if configured, else the default ("" key); null means unlimited.
// quotaLimit is Any-typed because ConcurrentHashMap.get returns null when absent.
private def quotaOrDefault(group: String, quotaType: ClientQuotaType): lang.Double = {
val quotaMap = quotas(quotaType)
var quotaLimit: Any = quotaMap.get(group)
if (quotaLimit == null)
quotaLimit = quotaMap.get("")
if (quotaLimit != null) scaledQuota(quotaType, group, quotaLimit.asInstanceOf[Double]) else null
}
// Request quotas are returned unscaled; bandwidth quotas are multiplied by the
// group's partition ratio. A missing ratio yields 0.0 (unboxed null), in which
// case the configured quota is used as-is.
private def scaledQuota(quotaType: ClientQuotaType, group: String, configuredQuota: Double): Double = {
if (quotaType == ClientQuotaType.REQUEST)
configuredQuota
else {
val multiplier = partitionRatio.get(group)
if (multiplier <= 0.0) configuredQuota else configuredQuota * multiplier
}
}
}
| sebadiaz/kafka | core/src/test/scala/integration/kafka/api/CustomQuotaCallbackTest.scala | Scala | apache-2.0 | 21,082 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.utils
import org.apache.flink.api.common.functions.MapFunction
import org.apache.flink.api.common.io.OutputFormat
import org.apache.flink.api.common.state.{ListState, ListStateDescriptor}
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.tuple.{Tuple2 => JTuple2}
import org.apache.flink.api.java.typeutils.{RowTypeInfo, TupleTypeInfo}
import org.apache.flink.configuration.Configuration
import org.apache.flink.runtime.state.{FunctionInitializationContext, FunctionSnapshotContext}
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction
import org.apache.flink.streaming.api.datastream.{DataStream, DataStreamSink}
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction
import org.apache.flink.table.api.Types
import org.apache.flink.table.dataformat.{BaseRow, DataFormatConverters, GenericRow}
import org.apache.flink.table.planner.utils.BaseRowTestUtil
import org.apache.flink.table.runtime.types.TypeInfoLogicalTypeConverter
import org.apache.flink.table.runtime.typeutils.BaseRowTypeInfo
import org.apache.flink.table.sinks._
import org.apache.flink.table.types.utils.TypeConversions
import org.apache.flink.types.Row
import _root_.java.lang.{Boolean => JBoolean}
import _root_.java.util.TimeZone
import _root_.java.util.concurrent.atomic.AtomicInteger
import _root_.scala.collection.JavaConverters._
import _root_.scala.collection.mutable
import _root_.scala.collection.mutable.ArrayBuffer
/**
 * Global registry of results collected by test sinks. Each sink instance is
 * handed a unique id and three result buckets, each keyed by subtask index.
 */
object StreamTestSink {

  // Monotonically increasing id handed out to each new sink instance.
  private[utils] val idCounter: AtomicInteger = new AtomicInteger(0)

  private[utils] val globalResults =
    mutable.HashMap.empty[Int, mutable.Map[Int, ArrayBuffer[String]]]
  private[utils] val globalRetractResults =
    mutable.HashMap.empty[Int, mutable.Map[Int, ArrayBuffer[String]]]
  private[utils] val globalUpsertResults =
    mutable.HashMap.empty[Int, mutable.Map[Int, mutable.Map[String, String]]]

  /** Allocates a fresh sink id and registers empty result buckets for it. */
  private[utils] def getNewSinkId: Int = {
    val sinkId = idCounter.getAndIncrement()
    this.synchronized {
      globalResults(sinkId) = mutable.HashMap.empty[Int, ArrayBuffer[String]]
      globalRetractResults(sinkId) = mutable.HashMap.empty[Int, ArrayBuffer[String]]
      globalUpsertResults(sinkId) = mutable.HashMap.empty[Int, mutable.Map[String, String]]
    }
    sinkId
  }

  /** Drops every recorded result for all sinks. */
  def clear(): Unit = {
    globalResults.clear()
    globalRetractResults.clear()
    globalUpsertResults.clear()
  }
}
/**
 * Base class for test sinks that collect emitted records as strings and keep
 * them in operator state, so results survive restore in exactly-once tests.
 * Results are published into StreamTestSink's global maps under this sink's
 * id, keyed by subtask index.
 */
abstract class AbstractExactlyOnceSink[T] extends RichSinkFunction[T] with CheckpointedFunction {
// Operator state backing localResults across checkpoints.
protected var resultsState: ListState[String] = _
protected var localResults: ArrayBuffer[String] = _
// Unique sink id; its allocation also registers the global result buckets.
protected val idx: Int = StreamTestSink.getNewSinkId
// Stashed copies of the global maps, filled by clearAndStashGlobalResults().
protected var globalResults: mutable.Map[Int, ArrayBuffer[String]] = _
protected var globalRetractResults: mutable.Map[Int, ArrayBuffer[String]] = _
protected var globalUpsertResults: mutable.Map[Int, mutable.Map[String, String]] = _
override def initializeState(context: FunctionInitializationContext): Unit = {
resultsState = context.getOperatorStateStore
.getListState(new ListStateDescriptor[String]("sink-results", Types.STRING))
localResults = mutable.ArrayBuffer.empty[String]
// On restore, repopulate from the previously checkpointed results.
if (context.isRestored) {
for (value <- resultsState.get().asScala) {
localResults += value
}
}
val taskId = getRuntimeContext.getIndexOfThisSubtask
StreamTestSink.synchronized(
StreamTestSink.globalResults(idx) += (taskId -> localResults)
)
}
override def snapshotState(context: FunctionSnapshotContext): Unit = {
resultsState.clear()
for (value <- localResults) {
resultsState.add(value)
}
}
// Moves this sink's buckets out of the global maps into local fields exactly
// once, so results stay readable after the StreamTestSink entries are removed.
protected def clearAndStashGlobalResults(): Unit = {
if (globalResults == null) {
StreamTestSink.synchronized {
globalResults = StreamTestSink.globalResults.remove(idx).get
globalRetractResults = StreamTestSink.globalRetractResults.remove(idx).get
globalUpsertResults = StreamTestSink.globalUpsertResults.remove(idx).get
}
}
}
// Flattens the results of all subtasks into a single list.
protected def getResults: List[String] = {
clearAndStashGlobalResults()
val result = ArrayBuffer.empty[String]
this.globalResults.foreach {
case (_, list) => result ++= list
}
result.toList
}
}
/** Exactly-once test sink that records each BaseRow as a formatted string. */
final class TestingAppendBaseRowSink(
    rowTypeInfo: BaseRowTypeInfo, tz: TimeZone)
  extends AbstractExactlyOnceSink[BaseRow] {

  def this(rowTypeInfo: BaseRowTypeInfo) = this(rowTypeInfo, TimeZone.getTimeZone("UTC"))

  override def invoke(value: BaseRow): Unit = {
    localResults += BaseRowTestUtil.baseRowToString(value, rowTypeInfo, tz)
  }

  def getAppendResults: List[String] = getResults
}
/** Exactly-once test sink collecting the rows of an append-only stream as strings. */
final class TestingAppendSink(tz: TimeZone) extends AbstractExactlyOnceSink[Row] {

  def this() = this(TimeZone.getTimeZone("UTC"))

  override def invoke(value: Row): Unit = {
    localResults += TestSinkUtil.rowToString(value, tz)
  }

  def getAppendResults: List[String] = getResults
}
/**
 * Exactly-once test sink for upsert streams. Keeps raw "(flag,row)" strings in
 * localResults and the materialized key -> row view in localUpsertResults; the
 * map is checkpointed as an alternating key/value list.
 */
final class TestingUpsertSink(keys: Array[Int], tz: TimeZone)
extends AbstractExactlyOnceSink[(Boolean, BaseRow)] {
private var upsertResultsState: ListState[String] = _
private var localUpsertResults: mutable.Map[String, String] = _
private var fieldTypes: Array[TypeInformation[_]] = _
def this(keys: Array[Int]) {
this(keys, TimeZone.getTimeZone("UTC"))
}
// Must be set before records arrive; used to build the BaseRow -> Row converter.
def configureTypes(fieldTypes: Array[TypeInformation[_]]): Unit = {
this.fieldTypes = fieldTypes
}
override def initializeState(context: FunctionInitializationContext): Unit = {
super.initializeState(context)
upsertResultsState = context.getOperatorStateStore.getListState(
new ListStateDescriptor[String]("sink-upsert-results", Types.STRING))
localUpsertResults = mutable.HashMap.empty[String, String]
// Restore the key -> row map from alternating key/value state entries.
if (context.isRestored) {
var key: String = null
var value: String = null
for (entry <- upsertResultsState.get().asScala) {
if (key == null) {
key = entry
} else {
value = entry
localUpsertResults += (key -> value)
key = null
value = null
}
}
// A trailing key without a value means the checkpointed state is incomplete.
if (key != null) {
throw new RuntimeException("The resultState is corrupt.")
}
}
val taskId = getRuntimeContext.getIndexOfThisSubtask
StreamTestSink.synchronized {
StreamTestSink.globalUpsertResults(idx) += (taskId -> localUpsertResults)
}
}
override def snapshotState(context: FunctionSnapshotContext): Unit = {
super.snapshotState(context)
upsertResultsState.clear()
// Flatten the map into alternating key/value entries.
for ((key, value) <- localUpsertResults) {
upsertResultsState.add(key)
upsertResultsState.add(value)
}
}
// Applies one upsert message: converts the internal BaseRow to an external Row
// for formatting, records the raw tuple, then updates the key -> row view.
// Deleting a key that was never inserted is an error (typically parallelism > 1).
override def invoke(d: (Boolean, BaseRow)): Unit = {
this.synchronized {
val wrapRow = new GenericRow(2)
wrapRow.setField(0, d._1)
wrapRow.setField(1, d._2)
val converter =
DataFormatConverters.getConverterForDataType(
TypeConversions.fromLegacyInfoToDataType(
new TupleTypeInfo(Types.BOOLEAN, new RowTypeInfo(fieldTypes: _*))))
.asInstanceOf[DataFormatConverters.DataFormatConverter[BaseRow, JTuple2[JBoolean, Row]]]
val v = converter.toExternal(wrapRow)
val rowString = TestSinkUtil.rowToString(v.f1, tz)
val tupleString = "(" + v.f0.toString + "," + rowString + ")"
localResults += tupleString
// Key string is built from the key columns only.
val keyString = TestSinkUtil.rowToString(Row.project(v.f1, keys), tz)
if (v.f0) {
localUpsertResults += (keyString -> rowString)
} else {
val oldValue = localUpsertResults.remove(keyString)
if (oldValue.isEmpty) {
throw new RuntimeException("Tried to delete a value that wasn't inserted first. " +
"This is probably an incorrectly implemented test. " +
"Try to set the parallelism of the sink to 1.")
}
}
}
}
def getRawResults: List[String] = getResults
// Returns the final upserted values (one per key) across all subtasks.
def getUpsertResults: List[String] = {
clearAndStashGlobalResults()
val result = ArrayBuffer.empty[String]
this.globalUpsertResults.foreach {
case (_, map) => map.foreach(result += _._2)
}
result.toList
}
}
/**
 * UpsertStreamTableSink for tests, backed by TestingUpsertSink. Can optionally
 * assert the key fields and append-only flag that the planner derives.
 */
final class TestingUpsertTableSink(val keys: Array[Int], val tz: TimeZone)
extends UpsertStreamTableSink[BaseRow] {
var fNames: Array[String] = _
var fTypes: Array[TypeInformation[_]] = _
var sink = new TestingUpsertSink(keys, tz)
// When set, setKeyFields/setIsAppendOnly validate the planner-provided values.
var expectedKeys: Option[Array[String]] = None
var expectedIsAppendOnly: Option[Boolean] = None
def this(keys: Array[Int]) {
this(keys, TimeZone.getTimeZone("UTC"))
}
// Verifies planner-provided keys against expectedKeys (order-insensitive).
// Skips all validation when expectedKeys is unset.
override def setKeyFields(keys: Array[String]): Unit = {
if (expectedKeys.isDefined && keys == null) {
throw new AssertionError("Provided key fields should not be null.")
} else if (expectedKeys.isEmpty) {
return
}
val expectedStr = expectedKeys.get.sorted.mkString(",")
val keysStr = keys.sorted.mkString(",")
if (!expectedStr.equals(keysStr)) {
throw new AssertionError(
s"Provided key fields($keysStr) do not match expected keys($expectedStr)")
}
}
// Verifies the planner-provided append-only flag when an expectation is set.
override def setIsAppendOnly(isAppendOnly: JBoolean): Unit = {
if (expectedIsAppendOnly.isEmpty) {
return
}
if (expectedIsAppendOnly.get != isAppendOnly) {
throw new AssertionError("Provided isAppendOnly does not match expected isAppendOnly")
}
}
override def getRecordType: TypeInformation[BaseRow] =
new BaseRowTypeInfo(fTypes.map(TypeInfoLogicalTypeConverter.fromTypeInfoToLogicalType), fNames)
override def getFieldNames: Array[String] = fNames
override def getFieldTypes: Array[TypeInformation[_]] = fTypes
// Converts the (flag, row) Java tuples to Scala tuples and forwards them to
// the sink, preserving the input parallelism on both operators.
override def consumeDataStream(
dataStream: DataStream[JTuple2[JBoolean, BaseRow]]): DataStreamSink[_] = {
dataStream.map(new MapFunction[JTuple2[JBoolean, BaseRow], (Boolean, BaseRow)] {
override def map(value: JTuple2[JBoolean, BaseRow]): (Boolean, BaseRow) = {
(value.f0, value.f1)
}
})
.setParallelism(dataStream.getParallelism)
.addSink(sink)
.name(s"TestingUpsertTableSink(keys=${
if (keys != null) {
"(" + keys.mkString(",") + ")"
} else {
"null"
}
})")
.setParallelism(dataStream.getParallelism)
}
override def emitDataStream(dataStream: DataStream[JTuple2[JBoolean, BaseRow]]): Unit = {
consumeDataStream(dataStream)
}
// Returns a configured copy; the underlying sink instance is shared so results
// collected by the copy remain visible through this instance as well.
override def configure(
fieldNames: Array[String],
fieldTypes: Array[TypeInformation[_]]): TestingUpsertTableSink = {
val copy = new TestingUpsertTableSink(keys, tz)
copy.fNames = fieldNames
copy.fTypes = fieldTypes
sink.configureTypes(fieldTypes)
copy.sink = sink
copy
}
def getRawResults: List[String] = sink.getRawResults
def getUpsertResults: List[String] = sink.getUpsertResults
}
/** AppendStreamTableSink backed by TestingAppendSink, for assertions in tests. */
final class TestingAppendTableSink(tz: TimeZone) extends AppendStreamTableSink[Row] {
  var fNames: Array[String] = _
  var fTypes: Array[TypeInformation[_]] = _
  var sink = new TestingAppendSink(tz)
  var outputFormat = new TestingOutputFormat[Row](tz)

  def this() = this(TimeZone.getTimeZone("UTC"))

  override def consumeDataStream(dataStream: DataStream[Row]): DataStreamSink[_] = {
    dataStream
      .addSink(sink)
      .name("TestingAppendTableSink")
      .setParallelism(dataStream.getParallelism)
  }

  override def emitDataStream(dataStream: DataStream[Row]): Unit = consumeDataStream(dataStream)

  override def getOutputType: TypeInformation[Row] = new RowTypeInfo(fTypes, fNames)

  override def getFieldNames: Array[String] = fNames

  override def getFieldTypes: Array[TypeInformation[_]] = fTypes

  /** Returns a configured copy; the sink and output format instances are shared. */
  override def configure(
      fieldNames: Array[String],
      fieldTypes: Array[TypeInformation[_]]): TestingAppendTableSink = {
    val configured = new TestingAppendTableSink(tz)
    configured.fNames = fieldNames
    configured.fTypes = fieldTypes
    configured.outputFormat = outputFormat
    configured.sink = sink
    configured
  }

  def getAppendResults: List[String] = sink.getAppendResults

  def getResults: List[String] = sink.getAppendResults
}
/**
 * OutputFormat that records written values as strings in StreamTestSink's
 * global result map, keyed by this format's sink id and the task number.
 */
class TestingOutputFormat[T](tz: TimeZone)
extends OutputFormat[T] {
val index: Int = StreamTestSink.getNewSinkId
var localRetractResults: ArrayBuffer[String] = _
def this() {
this(TimeZone.getTimeZone("UTC"))
}
protected var globalResults: mutable.Map[Int, ArrayBuffer[String]] = _
def configure(var1: Configuration): Unit = {}
def open(taskNumber: Int, numTasks: Int): Unit = {
localRetractResults = mutable.ArrayBuffer.empty[String]
StreamTestSink.synchronized {
StreamTestSink.globalResults(index) += (taskNumber -> localRetractResults)
}
}
// Formats a record as a string. NOTE(review): the JTuple2 type arguments are
// erased at runtime, so that case matches any JTuple2; f1 is assumed to be a Row.
def writeRecord(value: T): Unit = localRetractResults += {
value match {
case r: Row => TestSinkUtil.rowToString(r, tz)
case tp: JTuple2[java.lang.Boolean, Row] =>
"(" + tp.f0.toString + "," + TestSinkUtil.rowToString(tp.f1, tz) + ")"
case _ => ""
}
}
def close(): Unit = {}
// Moves this format's bucket out of the global map exactly once.
protected def clearAndStashGlobalResults(): Unit = {
if (globalResults == null) {
StreamTestSink.synchronized {
globalResults = StreamTestSink.globalResults.remove(index).get
}
}
}
// Flattens results of all tasks into a single list.
def getResults: List[String] = {
clearAndStashGlobalResults()
val result = ArrayBuffer.empty[String]
this.globalResults.foreach {
case (_, list) => result ++= list
}
result.toList
}
}
/**
 * Exactly-once test sink for retract streams: accumulate messages add rows and
 * retract messages remove one earlier occurrence. Raw "(flag,row)" strings go
 * into localResults; the materialized rows into localRetractResults, both
 * checkpointed.
 */
class TestingRetractSink(tz: TimeZone)
extends AbstractExactlyOnceSink[(Boolean, Row)] {
protected var retractResultsState: ListState[String] = _
protected var localRetractResults: ArrayBuffer[String] = _
def this() {
this(TimeZone.getTimeZone("UTC"))
}
override def initializeState(context: FunctionInitializationContext): Unit = {
super.initializeState(context)
retractResultsState = context.getOperatorStateStore.getListState(
new ListStateDescriptor[String]("sink-retract-results", Types.STRING))
localRetractResults = mutable.ArrayBuffer.empty[String]
// On restore, rebuild the materialized view from checkpointed state.
if (context.isRestored) {
for (value <- retractResultsState.get().asScala) {
localRetractResults += value
}
}
val taskId = getRuntimeContext.getIndexOfThisSubtask
StreamTestSink.synchronized {
StreamTestSink.globalRetractResults(idx) += (taskId -> localRetractResults)
}
}
override def snapshotState(context: FunctionSnapshotContext): Unit = {
super.snapshotState(context)
retractResultsState.clear()
for (value <- localRetractResults) {
retractResultsState.add(value)
}
}
// Applies one retract-stream message: true adds the row, false removes one
// earlier occurrence; retracting a row that was never added is an error
// (usually caused by running the sink with parallelism > 1).
override def invoke(v: (Boolean, Row)): Unit = {
this.synchronized {
val tupleString = "(" + v._1.toString + "," + TestSinkUtil.rowToString(v._2, tz) + ")"
localResults += tupleString
val rowString = TestSinkUtil.rowToString(v._2, tz)
if (v._1) {
localRetractResults += rowString
} else {
val index = localRetractResults.indexOf(rowString)
if (index >= 0) {
localRetractResults.remove(index)
} else {
throw new RuntimeException("Tried to retract a value that wasn't added first. " +
"This is probably an incorrectly implemented test. " +
"Try to set the parallelism of the sink to 1.")
}
}
}
}
def getRawResults: List[String] = getResults
// Returns the materialized (post-retraction) rows across all subtasks.
def getRetractResults: List[String] = {
clearAndStashGlobalResults()
val result = ArrayBuffer.empty[String]
this.globalRetractResults.foreach {
case (_, list) => result ++= list
}
result.toList
}
}
/** RetractStreamTableSink backed by TestingRetractSink, for assertions in tests. */
final class TestingRetractTableSink(tz: TimeZone) extends RetractStreamTableSink[Row] {
  var fNames: Array[String] = _
  var fTypes: Array[TypeInformation[_]] = _
  var sink = new TestingRetractSink(tz)

  def this() = this(TimeZone.getTimeZone("UTC"))

  override def consumeDataStream(
      dataStream: DataStream[JTuple2[JBoolean, Row]]): DataStreamSink[_] = {
    // Convert the Java (flag, row) tuples to Scala tuples before sinking,
    // keeping the input parallelism on both operators.
    val scalaTuples = dataStream.map(new MapFunction[JTuple2[JBoolean, Row], (Boolean, Row)] {
      override def map(value: JTuple2[JBoolean, Row]): (Boolean, Row) = (value.f0, value.f1)
    }).setParallelism(dataStream.getParallelism)
    scalaTuples
      .addSink(sink)
      .name("TestingRetractTableSink")
      .setParallelism(dataStream.getParallelism)
  }

  override def emitDataStream(dataStream: DataStream[JTuple2[JBoolean, Row]]): Unit =
    consumeDataStream(dataStream)

  override def getRecordType: TypeInformation[Row] =
    new RowTypeInfo(fTypes, fNames)

  override def getFieldNames: Array[String] = fNames

  override def getFieldTypes: Array[TypeInformation[_]] = fTypes

  /** Returns a configured copy; the underlying sink instance is shared. */
  override def configure(
      fieldNames: Array[String],
      fieldTypes: Array[TypeInformation[_]]): TestingRetractTableSink = {
    val configured = new TestingRetractTableSink(tz)
    configured.fNames = fieldNames
    configured.fTypes = fieldTypes
    configured.sink = sink
    configured
  }

  def getRawResults: List[String] = sink.getRawResults

  def getRetractResults: List[String] = sink.getRetractResults
}
| fhueske/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/runtime/utils/StreamTestSink.scala | Scala | apache-2.0 | 17,887 |
/*******************************************************************************
Copyright (c) 2013-2014, KAIST, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.typing.models.DOMHtml
import kr.ac.kaist.jsaf.analysis.typing._
import kr.ac.kaist.jsaf.analysis.typing.domain._
import kr.ac.kaist.jsaf.analysis.typing.domain.{BoolFalse => F, BoolTrue => T}
import kr.ac.kaist.jsaf.analysis.typing.models._
import org.w3c.dom.Node
import org.w3c.dom.Element
import kr.ac.kaist.jsaf.analysis.typing.Helper
import kr.ac.kaist.jsaf.analysis.cfg.{CFG, CFGExpr, InternalError}
import kr.ac.kaist.jsaf.analysis.typing.models.AbsConstValue
import kr.ac.kaist.jsaf.analysis.typing.models.DOMCore.{DOMElement, DOMNodeList, DOMNamedNodeMap}
import kr.ac.kaist.jsaf.analysis.typing.models.DOMObject.CSSStyleDeclaration
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._
import kr.ac.kaist.jsaf.Shell
object HTMLImageElement extends DOM {
private val name = "HTMLImageElement"
/* predefined locations */
val loc_cons = newSystemRecentLoc(name + "Cons")
val loc_proto = newSystemRecentLoc(name + "Proto")
val loc_ins = newSystemRecentLoc(name + "Ins")
/* constructor */
private val prop_cons: List[(String, AbsProperty)] = List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Function")))),
("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue))),
("@hasinstance", AbsConstValue(PropValueNullTop)),
// HTML 5 HTMLElement.Image() constructor
("@construct", AbsInternalFunc("HTMLImageElement.Image")),
("length", AbsConstValue(PropValue(ObjectValue(Value(AbsNumber.alpha(0)), F, F, F)))),
("prototype", AbsConstValue(PropValue(ObjectValue(Value(loc_proto), F, F, F))))
)
/* instance */
private val prop_ins: List[(String, AbsProperty)] =
HTMLElement.getInsList2() ++ List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
("@proto", AbsConstValue(PropValue(ObjectValue(loc_proto, F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue))),
// DOM Level 1
("lowSrc", AbsConstValue(PropValue(ObjectValue(StrTop, T, T, T)))),
("name", AbsConstValue(PropValue(ObjectValue(StrTop, T, T, T)))),
("align", AbsConstValue(PropValue(ObjectValue(StrTop, T, T, T)))),
("alt", AbsConstValue(PropValue(ObjectValue(StrTop, T, T, T)))),
("border", AbsConstValue(PropValue(ObjectValue(StrTop, T, T, T)))),
("isMap", AbsConstValue(PropValue(ObjectValue(BoolTop, T, T, T)))),
("longDesc", AbsConstValue(PropValue(ObjectValue(StrTop, T, T, T)))),
("src", AbsConstValue(PropValue(ObjectValue(StrTop, T, T, T)))),
("useMap", AbsConstValue(PropValue(ObjectValue(StrTop, T, T, T)))),
("height", AbsConstValue(PropValue(ObjectValue(NumTop, T, T, T)))),
("hspace", AbsConstValue(PropValue(ObjectValue(NumTop, T, T, T)))),
("vspace", AbsConstValue(PropValue(ObjectValue(NumTop, T, T, T)))),
("width", AbsConstValue(PropValue(ObjectValue(NumTop, T, T, T)))),
("naturalWidth", AbsConstValue(PropValue(ObjectValue(UInt, F, T, T)))),
("naturalHeight", AbsConstValue(PropValue(ObjectValue(UInt, F, T, T))))
)
/* prorotype */
private val prop_proto: List[(String, AbsProperty)] = List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
("@proto", AbsConstValue(PropValue(ObjectValue(Value(HTMLElement.loc_proto), F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue)))
)
/* global */
private val prop_global: List[(String, AbsProperty)] = List(
(name, AbsConstValue(PropValue(ObjectValue(loc_cons, T, F, T)))),
// HTML 5 HTMLElement.Image() constructor
("Image", AbsConstValue(PropValue(ObjectValue(loc_cons, T, F, T))))
)
def getInitList(): List[(Loc, List[(String, AbsProperty)])] = if(Shell.params.opt_Dommodel2) List(
(loc_cons, prop_cons), (loc_proto, prop_proto), (GlobalLoc, prop_global), (loc_ins, prop_ins)
) else List(
(loc_cons, prop_cons), (loc_proto, prop_proto), (GlobalLoc, prop_global) )
def getSemanticMap(): Map[String, SemanticFun] = {
Map(
// HTML 5 HTMLElement.Image() constructor
// WHATWG HTML Living Standard - Section 4.8.1 The Img Element
// http://www.whatwg.org/specs/web-apps/current-work/multipage/embedded-content-1.html#dom-image
("HTMLImageElement.Image" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
if(Shell.params.opt_Dommodel2) {
((Helper.ReturnStore(h, Value(loc_ins)), ctx), (he, ctxe))
}
else {
val lset_env = h(SinglePureLocalLoc)("@env")._2._2
val set_addr = lset_env.foldLeft[Set[Address]](Set())((a, l) => a + locToAddr(l))
if (set_addr.size > 1) throw new InternalError("API heap allocation: Size of env address is " + set_addr.size)
val addr_env = (cp._1._1, set_addr.head)
val addr1 = cfg.getAPIAddress(addr_env, 0)
val addr2 = cfg.getAPIAddress(addr_env, 1)
val addr3 = cfg.getAPIAddress(addr_env, 2)
val lset_this = h(SinglePureLocalLoc)("@this")._2._2
// locations for 'childNodes', 'attributes', and 'style' property of a new created element
val l_childNodes = addrToLoc(addr1, Recent)
val l_attributes = addrToLoc(addr2, Recent)
val l_style = addrToLoc(addr3, Recent)
val h1 = HTMLTopElement.setInsLoc(h, lset_this)
val (h_1, ctx_1) = Helper.Oldify(h1, ctx, addr1)
val (h_2, ctx_2) = Helper.Oldify(h_1, ctx_1, addr2)
val (h_3, ctx_3) = Helper.Oldify(h_2, ctx_2, addr3)
/* arguments */
// argument length
val arglen = Operator.ToUInt32(getArgValue(h_3, ctx_3, args, "length"))
// optional arguments for width and height
val (width, height) = arglen.getAbsCase match {
case AbsBot => (NumBot, NumBot)
case _ => AbsNumber.getUIntSingle(arglen) match {
// no arguments
case Some(n) if n == 0 =>
(AbsNumber.alpha(0), AbsNumber.alpha(0))
// one argument for width
case Some(n) if n ==1 =>
(Helper.toNumber(Helper.toPrimitive_better(h_3, getArgValue(h_3, ctx_3, args, "0"))), AbsNumber.alpha(0))
// two arguments for width and height
case Some(n) if n > 1 =>
(Helper.toNumber(Helper.toPrimitive_better(h_3, getArgValue(h_3, ctx_3, args, "0"))),
Helper.toNumber(Helper.toPrimitive_better(h_3, getArgValue(h_3, ctx_3, args, "1"))))
case _ => (NumTop, NumTop)
}
}
// create a new HTMLImageElement
if(width </ NumBot && height </ NumBot) {
val h_4 = lset_this.foldLeft(h_3)((_h, l) => {
val newimgobj_list = default_getInsList:::DOMElement.getInsList(PropValue(ObjectValue(AbsString.alpha("IMG"), F, T, T)))
val newimgobj = newimgobj_list.foldLeft(Obj.empty)((obj, prop) =>
if(prop._1=="width")
obj.update("width", PropValue(ObjectValue(width, T, T, T)))
else if(prop._1=="height")
obj.update("height", PropValue(ObjectValue(height, T, T, T)))
else
obj.update(prop._1, prop._2)
)
// 'childNodes' update
val childNodes_list = DOMNodeList.getInsList(0)
val childNodes = childNodes_list.foldLeft(Obj.empty)((x, y) => x.update(y._1, y._2))
// 'attibutes' update
val attributes_list = DOMNamedNodeMap.getInsList(0)
val attributes = attributes_list.foldLeft(Obj.empty)((x, y) => x.update(y._1, y._2))
// 'style' update
val style_list = CSSStyleDeclaration.getInsList()
val style = style_list.foldLeft(Obj.empty)((x, y) => x.update(y._1, y._2))
val newimgobj_up = newimgobj.update("childNodes", PropValue(ObjectValue(l_childNodes, F, T, T))).update(
"attributes", PropValue(ObjectValue(l_attributes, F, T, T))).update(
"style", PropValue(ObjectValue(l_style, T, T, T)))
_h.update(l_childNodes, childNodes).update(l_attributes, attributes).update(l_style, style).update(l, newimgobj_up)
})
((Helper.ReturnStore(h_4, Value(lset_this)), ctx_3), (he, ctxe))
}
else
((HeapBot, ContextBot), (he, ctxe))
}
}))
)
}
def getPreSemanticMap(): Map[String, SemanticFun] = {
Map()
}
def getDefMap(): Map[String, AccessFun] = {
Map()
}
def getUseMap(): Map[String, AccessFun] = {
Map()
}
/* semantics */
// no function
/* instance */
override def getInstance(cfg: CFG): Option[Loc] = Some(newRecentLoc())
/* list of properties in the instance object */
override def getInsList(node: Node): List[(String, PropValue)] = node match {
case e: Element =>
// This object has all properties of the HTMLElement object
HTMLElement.getInsList(node) ++ List(
("@class", PropValue(AbsString.alpha("Object"))),
("@proto", PropValue(ObjectValue(loc_proto, F, F, F))),
("@extensible", PropValue(BoolTrue)),
// DOM Level 1
("name", PropValue(ObjectValue(AbsString.alpha(e.getAttribute("name")), T, T, T))),
("align", PropValue(ObjectValue(AbsString.alpha(e.getAttribute("align")), T, T, T))),
("alt", PropValue(ObjectValue(AbsString.alpha(e.getAttribute("alt")), T, T, T))),
("border", PropValue(ObjectValue(AbsString.alpha(e.getAttribute("border")), T, T, T))),
("isMap", PropValue(ObjectValue((if(e.getAttribute("isMap")=="true") T else F), T, T, T))),
("longDesc", PropValue(ObjectValue(AbsString.alpha(e.getAttribute("longDesc")), T, T, T))),
("src", PropValue(ObjectValue(AbsString.alpha(e.getAttribute("src")), T, T, T))),
("useMap", PropValue(ObjectValue(AbsString.alpha(e.getAttribute("useMap")), T, T, T))),
// Modified in DOM Level 2
("height", PropValue(ObjectValue(Helper.toNumber(PValue(AbsString.alpha(e.getAttribute("height")))), T, T, T))),
("hspace", PropValue(ObjectValue(Helper.toNumber(PValue(AbsString.alpha(e.getAttribute("hspace")))), T, T, T))),
("vspace", PropValue(ObjectValue(Helper.toNumber(PValue(AbsString.alpha(e.getAttribute("vspace")))), T, T, T))),
("width", PropValue(ObjectValue(Helper.toNumber(PValue(AbsString.alpha(e.getAttribute("width")))), T, T, T))))
case _ => {
System.err.println("* Warning: " + node.getNodeName + " cannot have instance objects.")
List()
}
}
def getInsList(name: PropValue, align: PropValue, alt: PropValue, border: PropValue, isMap: PropValue,
longDesc: PropValue, src: PropValue, useMap: PropValue, height: PropValue, hspace: PropValue,
vspace: PropValue, width: PropValue, xpath: PropValue): List[(String, PropValue)] = List(
("@class", PropValue(AbsString.alpha("Object"))),
("@proto", PropValue(ObjectValue(loc_proto, F, F, F))),
("@extensible", PropValue(BoolTrue)),
// DOM Level 1
("name", name),
("align", align),
("alt", alt),
("border", border),
("isMap", isMap),
("longDesc", longDesc),
("src", src),
("useMap", useMap),
// Modified in DOM Level 2
("height", height),
("hspace", hspace),
("vspace", vspace),
("width", width),
("xpath", xpath)
)
override def default_getInsList(): List[(String, PropValue)] = {
val name = PropValue(ObjectValue(AbsString.alpha(""), T, T, T))
val align = PropValue(ObjectValue(AbsString.alpha(""), T, T, T))
val alt = PropValue(ObjectValue(AbsString.alpha(""), T, T, T))
val border = PropValue(ObjectValue(AbsString.alpha(""), T, T, T))
val isMap = PropValue(ObjectValue(BoolFalse, T, T, T))
val longDesc = PropValue(ObjectValue(AbsString.alpha(""), T, T, T))
val src = PropValue(ObjectValue(AbsString.alpha(""), T, T, T))
val useMap = PropValue(ObjectValue(AbsString.alpha(""), T, T, T))
val height = PropValue(ObjectValue(AbsNumber.alpha(0), T, T, T))
val hspace = PropValue(ObjectValue(NumTop, T, T, T))
val vspace = PropValue(ObjectValue(NumTop, T, T, T))
val width = PropValue(ObjectValue(AbsNumber.alpha(0), T, T, T))
val xpath = PropValue(ObjectValue(AbsString.alpha(""), F, F, F))
// This object has all properties of the HTMLElement object
HTMLElement.default_getInsList :::
getInsList(name, align, alt, border, isMap, longDesc, src, useMap, height, hspace, vspace, width, xpath)
}
}
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/analysis/typing/models/DOMHtml/HTMLImageElement.scala | Scala | bsd-3-clause | 13,189 |
package japgolly.scalajs.react.core
import japgolly.scalajs.react._
import japgolly.scalajs.react.test.TestUtil._
import japgolly.scalajs.react.vdom.html_<^._
object ScalaSpecificHooksTest {
import HooksTest._
// TODO: https://github.com/lampepfl/dotty/issues/12663
// I swapped the order of the last two hooks to avoid use of a CtxFn after a DynamicNextStep.
def testCustomHook(): Unit = {
val counter = new Counter
val hookS = CustomHook[Int].useStateBy(identity).buildReturning(_.hook1)
val hookE = CustomHook[Int].useEffectBy(counter.incCB(_)).build
val comp = ScalaFnComponent.withHooks[PI]
.custom(hookE(10))
.custom(hookS(3)) // <--------------------------------------- s1
.custom(hookS.contramap[PI](_.pi)) // <---------------------- s2
.customBy((p, s, _) => hookE(p.pi + s.value))
.customBy($ => hookE($.props.pi + $.hook1.value + 1))
.customBy($ => hookS($.props.pi + $.hook1.value + 1)) // <--- s3
.render((_, s1, s2, s3) =>
<.div(
s"${s1.value}:${s2.value}:${s3.value}",
<.button(^.onClick --> s1.modState(_ + 1))
)
)
test(comp(PI(5))) { t =>
t.assertText("3:5:9")
assertEq(counter.value, 10 + (5+3) + (5+3+1))
counter.value = 0
t.clickButton()
t.assertText("4:5:9")
assertEq(counter.value, 10 + (5+4) + (5+4+1))
}
}
}
| japgolly/scalajs-react | tests/src/test/scala-3/japgolly/scalajs/react/core/ScalaSpecificHooksTest.scala | Scala | apache-2.0 | 1,393 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.mimir
import quasar.yggdrasil._
import quasar.yggdrasil.bytecode._
trait OpFinderModule[M[+ _]] extends Instructions with TableModule[M] with TableLibModule[M] {
import instructions._
trait OpFinder {
def op1ForUnOp(op: UnaryOperation): library.Op1
def op2ForBinOp(op: BinaryOperation): Option[library.Op2]
}
}
trait StdLibOpFinderModule[M[+ _]] extends Instructions with StdLibModule[M] with OpFinderModule[M] {
import instructions._
import library._
trait StdLibOpFinder extends OpFinder {
override def op1ForUnOp(op: UnaryOperation) = op match {
case BuiltInFunction1Op(op1) => op1
case New | WrapArray => sys.error("assertion error")
case Comp => Unary.Comp
case Neg => Unary.Neg
case _ => sys.error(s"Unexpected op $op")
}
override def op2ForBinOp(op: BinaryOperation) = {
import instructions._
op match {
case BuiltInFunction2Op(op2) => Some(op2)
case Add => Some(Infix.Add)
case Sub => Some(Infix.Sub)
case Mul => Some(Infix.Mul)
case Div => Some(Infix.Div)
case Mod => Some(Infix.Mod)
case Pow => Some(Infix.Pow)
case Lt => Some(Infix.Lt)
case LtEq => Some(Infix.LtEq)
case Gt => Some(Infix.Gt)
case GtEq => Some(Infix.GtEq)
case Eq | instructions.NotEq => None
case Or => Some(Infix.Or)
case And => Some(Infix.And)
case WrapObject | JoinObject | JoinArray | ArraySwap | DerefMetadata | DerefObject | DerefArray => None
case _ => sys.error(s"Unexpected op $op")
}
}
}
}
| drostron/quasar | mimir/src/main/scala/quasar/mimir/OpFinder.scala | Scala | apache-2.0 | 3,556 |
package nounou.io.neuralynx
import java.io.File
import breeze.io.{ByteConverterLittleEndian, RandomAccessFile}
import breeze.linalg.{DenseVector => DV, convert}
import nounou.elements.NNElement
import nounou.elements.data.{NNDataChannel, NNData, NNDataChannelFilestream}
import nounou.elements.ranges.SampleRangeValid
import nounou.elements.traits.{NNDataScale, NNDataTiming}
import nounou.io.{FileLoader, FileSaver}
/**
* @author ktakagaki
*/
class FileAdapterNCS extends FileAdapterNeuralynx with FileSaver {
// <editor-fold defaultstate="collapsed" desc=" misc methods for FileLoader/FileSaver ">
override val canLoadExtensions = Array("ncs")
override def canSaveClass(obj: NNElement): Boolean = obj match {
case x: NNData => true
case x: NNDataChannel => true
case _ => false
}
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc=" record structure ">
/**Number of samples per record*/
final val recordSampleCount= 512
final val recordBytes = 1044
/**Size of non-data bytes at head of each record*/
final val recordNonDataHead = recordBytes - recordSampleCount * 2
override def recordStartByte(record: Int) = (headerBytes + recordBytes * record)
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc=" load ">
override def load( file: File ): Array[NNElement] = {
val xBits = 1024
lazy val xBitsD = xBits.toDouble
val absOffset = 0D
val absUnit: String = "microV"
//val absGain = ???
fHand = new RandomAccessFile(file, "r")(ByteConverterLittleEndian)
// <editor-fold desc="parse the Neuralynx header">
nlxHeaderLoad()
val tempAcqEntName = nlxHeaderParserS("AcqEntName", "NoName")
val tempRecordSize = nlxHeaderParserI("RecordSize", "0")
require(tempRecordSize == recordBytes,
s"NCS file with non-standard record size: $tempRecordSize")
/**Sample rate, Hz*/
val sampleRate = nlxHeaderParserD("SamplingFrequency", "1")
require(sampleRate >= 1000d, //tempSampleFreqD == sampleRate,
s"NCS file with non-standard sampling frequency: $sampleRate")
val tempADBitVolts = nlxHeaderParserD("ADBitVolts", "1")
// </editor-fold>
/**The number of records in the ncs file, based on the file length*/
val tempNoRecords = ((fHand.length - headerBytes).toDouble/tempRecordSize.toDouble).toInt
/**Standard timestamp increment for contiguous records*/
val tempRecTSIncrement = (1000000D * tempRecordSize.toDouble/sampleRate).toLong
var rec = 0
var dwChannelNum0: Long = 0
def readNCSRecordHeaderCheckAndReturnTS() = {
val returnTS = fHand.readUInt64Shifted()
//dwChannelNumber... must advance by 4 bytes anyway
if(rec == 0) {
//for the first record, just read, no checks
dwChannelNum0 = fHand.readUInt32
} else {
val dwChannelNum = fHand.readUInt32
loggerRequire(dwChannelNum0 == dwChannelNum,
s"Cannot read *.ncs files with multiple recording channels ($dwChannelNum, $dwChannelNum0) yet!")
}
//fHand.jumpBytes(4)
//dwSampleFreq
val dwSampleFreq = fHand.readUInt32.toDouble
require(dwSampleFreq == sampleRate,
s"Reported sampling frequency $dwSampleFreq for rec $rec is different from header $sampleRate)"
)
//dwNumValidSamples
val dwNumValidSamples = fHand.readUInt32
require(dwNumValidSamples == recordSampleCount,
s"Currently can only deal with records which are $recordSampleCount samples long, $dwNumValidSamples is error in rec $rec.")
returnTS
}
// <editor-fold defaultstate="collapsed" desc=" Loop through the file and process record start timestamps ">
// <editor-fold defaultstate="collapsed" desc=" First record dealt with separately ">
fHand.seek( headerBytes )
///qwTimeStamp
var thisRecTS = readNCSRecordHeaderCheckAndReturnTS()//fHand.readUInt64Shifted()
var lastRecTS = thisRecTS
var tempStartTimestamps = Vector[Long]( lastRecTS )
var tempLengths = Vector[Int]() //tempLengths defined with -1 at header for looping convenience, will be dropped later
var tempSegmentStartFrame = 0
// //snSamples
// fHand.jumpBytes(recordSampleCount*2)
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc=" read loop ">
rec = 1 //already dealt with rec=0
//var lastRecJump = 1
//var lastTriedJump = 4096
while(rec < tempNoRecords){
fHand.seek( recordStartByte(rec) )
//qwTimeStamp
thisRecTS = readNCSRecordHeaderCheckAndReturnTS()
//ToDo 3: Implement cases where timestamps skip just a slight amount d/t DAQ problems
if(thisRecTS > lastRecTS + tempRecTSIncrement/*lastRecJump=1*/){
//new segment!
//Append timestamp for record rec as a new segment start
tempStartTimestamps = tempStartTimestamps :+ thisRecTS
//Append length of previous segment as segment length
tempLengths = tempLengths :+ (rec*512 - tempSegmentStartFrame)
//New segment's start frame
tempSegmentStartFrame = rec*512
} else { } //advanced correctly within segment
//reset marker for lastTS
lastRecTS = thisRecTS
rec += 1 //this will cause break in while if on lastValid record
}
// <editor-fold defaultstate="collapsed" desc=" backup old while with skipping">
//
// while(rec < tempNoRecords){
//
// fHand.seek( recordStartByte(rec) )
// //qwTimeStamp
// thisRecTS = readNCSRecordHeaderCheckAndReturnTS()
//
// //ToDo 3: Implement cases where timestamps skip just a slight amount d/t DAQ problems
// if(thisRecTS > lastRecTS + tempRecTSIncrement*lastRecJump){
//
// //jumped over too many records!
// if( lastRecJump != 1 ){
// //Went over change in segment, rewind and try with step of 1
// rec = rec - lastRecJump + 1
// fHand.seek( recordStartByte(rec) )
// lastRecJump = 1
//
// //qwTimeStamp
// thisRecTS = fHand.readUInt64Shifted
//
// if(thisRecTS > lastRecTS + tempRecTSIncrement/*lastRecJump*/){
// //We got the correct start of a segment, with lastRecJump of 1!!!
//
// //Append timestamp for record rec as a new segment start
// tempStartTimestamps = tempStartTimestamps :+ thisRecTS
// //Append length of previous segment as segment length
// tempLengths = tempLengths :+ (rec*512 - tempSegmentStartFrame)
// //New segment's start frame
// tempSegmentStartFrame = rec*512
//
// //reset next jump attempt count
// lastTriedJump = 4096
//
// } else {
// //We went ahead by lastRecJump = 1, but the record was just one frame ahead in the same jump
// if( lastTriedJump > 1 ){
// //Jump less at next loop
// lastTriedJump = lastTriedJump / 2
// }
// }
//
// } else {
// //lastRecJump = 1, we've found the start of a new segment
//
// //Append timestamp for record rec as a new segment start
// tempStartTimestamps = tempStartTimestamps :+ thisRecTS
// //Append length of previous segment as segment length
// tempLengths = tempLengths :+ (rec*512 - tempSegmentStartFrame)
// //New segment's start frame
// tempSegmentStartFrame = rec*512
//
// //reset next jump attempt count
// lastTriedJump = 4096
//
// }
//
// } //else { } //advanced correctly within segment
//
// //reset marker for lastTS
// lastRecTS = thisRecTS
//
// // <editor-fold defaultstate="collapsed" desc=" VARIOUS CHECKS, NOT NECESSARY ">
// //dwChannelNumber
// fHand.jumpBytes(4)
// //dwSampleFreq
// val dwSampleFreq = fHand.readUInt32
// require(dwSampleFreq == sampleRate,
// s"Reported sampling frequency for record $rec, $dwSampleFreq, " +
// s"is different from file sampling frequency $sampleRate )" )
// //dwNumValidSamples
// val dwNumValidSamples = fHand.readUInt32
// require(dwNumValidSamples == recordSampleCount,
// s"Currently can only deal with records which are $recordSampleCount samples long.")
// // </editor-fold>
//
// // <editor-fold defaultstate="collapsed" desc=" loop 'rec' advancement ">
// if( rec == tempNoRecords -1 ){
// //was on lastValid record
// rec += 1 //this will cause break in while
// } else if (rec + lastTriedJump < tempNoRecords ) {
// //try the jump in lastTriedJump
// lastRecJump = lastTriedJump
// rec += lastRecJump
// } else {
// //jump to the end of the file
// lastRecJump = tempNoRecords-1-rec
// lastTriedJump = lastRecJump
// rec += lastRecJump
// }
// // </editor-fold>
//
//
// }
// </editor-fold>
//Last record cleanup: Append length of previous segment as segment length
tempLengths = tempLengths :+ (tempNoRecords*512 - tempSegmentStartFrame)
// </editor-fold>
//println("tempADBitVolts " + tempADBitVolts)
val nnDataChannelNCS = new NNDataChannelNCS(
fileHandle = fHand,
new NNDataTiming(sampleRate, tempLengths.toArray,
tempStartTimestamps.toArray, BigInt(9223372036854775807L)+1),
NNDataScale.apply(Short.MinValue.toInt*xBits, Short.MaxValue.toInt*xBits,
absGain = 1.0E6 * tempADBitVolts / xBitsD,
absOffset = 0d,
absUnit = "microV"),
channelName = tempAcqEntName)
//println("absGain " + xDataChannelNCS.scale.absGain)
logger.info( "loaded {}", nnDataChannelNCS )
Array[NNElement]( nnDataChannelNCS )
}
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc=" save ">
/** Actual saving of file.
* @param fileName if the filename does not end with the correct extension, it will be appended. If it exists, it will be given a postscript.
*/
override def save(data: Array[NNElement], fileName: String): Unit = ???
// </editor-fold>
// /** Factory method returning single instance. */
// override def create(): FileLoader = FileAdapterNCS.instance
}
object FileAdapterNCS {
val instance = new FileAdapterNCS
def load( file: String ): Array[NNElement] = instance.load(file)
def save(data: Array[NNElement], fileName: String): Unit = instance.save(data, fileName)
}
/**A specialized immutable [[nounou.elements.data.NNDataChannelFilestream]] for NCS files.
*/
class NNDataChannelNCS( override val fileHandle: RandomAccessFile,
timingEntry: NNDataTiming, scaleEntry: NNDataScale,
override val channelName: String)
extends NNDataChannelFilestream{
val t = FileAdapterNCS.instance
setTiming( timingEntry )
setScale( scaleEntry )
// <editor-fold defaultstate="collapsed" desc=" recordIndex ">
def recordIndexStartByte(record: Int, index: Int) = {
t.recordStartByte(record) + 20L + (index * 2)
}
def fsToRecordIndex(frame: Int, segment: Int) = {
val cumFrame = timing.segmentStartFrame(segment) + frame
( cumFrame / t.recordSampleCount, cumFrame % t.recordSampleCount)
}
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc=" data implementations ">
override def readPointImpl(frame: Int, segment: Int): Int = {
val (record, index) = fsToRecordIndex( frame, segment )
fileHandle.seek( recordIndexStartByte( record, index ) )
fileHandle.readInt16 * scale.xBits
}
override def readTraceDVImpl(range: SampleRangeValid): DV[Int] = {
//println("XDataChannelNCS " + range.toString())
var (currentRecord: Int, currentIndex: Int) = fsToRecordIndex(range.start, range.segment)
val (endReadRecord: Int, endReadIndex: Int) = fsToRecordIndex(range.last, range.segment) //range is inclusive of lastValid
//println( "curr " + (currentRecord, currentIndex).toString )
//println( "end " + (endReadRecord, endReadIndex).toString )
//ToDo1 program step
//val step = range.step
val tempRet = DV.zeros[Int](range.last-range.start+1)//range.length)//DV[Int]()
var currentTempRetPos = 0
fileHandle.seek( recordIndexStartByte(currentRecord, currentIndex) )
if(currentRecord == endReadRecord){
//if the whole requested trace fits in one record
val writeLen = (endReadIndex - currentIndex) + 1
val writeEnd = currentTempRetPos + writeLen
// println( "writeLen " + writeLen.toString + " writeEnd " + writeEnd.toString )
//ToDo 3: improve breeze dv requirement documentation
tempRet(currentTempRetPos until writeEnd ) := convert( DV(fileHandle.readInt16(writeLen)), Int) * scale.xBits
currentTempRetPos = writeEnd
} else {
//if the requested trace spans multiple records
//read data contained in first record
var writeEnd = currentTempRetPos + (512 - currentIndex)
tempRet(currentTempRetPos until writeEnd ) := convert( DV(fileHandle.readInt16(512 - currentIndex)), Int) * scale.xBits
currentRecord += 1
currentTempRetPos = writeEnd
fileHandle.jumpBytes(t.recordNonDataHead)
//read data from subsequent records, excluding lastValid record
while (currentRecord < endReadRecord) {
writeEnd = currentTempRetPos + 512
tempRet(currentTempRetPos until writeEnd ) := convert( DV(fileHandle.readInt16(512 /*- currentIndex*/)), Int) * scale.xBits
currentRecord += 1
currentTempRetPos = writeEnd
fileHandle.jumpBytes(t.recordNonDataHead)
}
//read data contained in lastValid record
writeEnd = currentTempRetPos + endReadIndex + 1
tempRet(currentTempRetPos until writeEnd ) := convert( DV(fileHandle.readInt16(endReadIndex + 1)), Int) * scale.xBits
}
tempRet( 0 until tempRet.length by range.step )
// </editor-fold>
}
}
| ktakagaki/nounou.rebooted150527 | src/main/scala/nounou/io/neuralynx/FileAdapterNCS.scala | Scala | apache-2.0 | 14,151 |
// Copyright (c) 2016 Ben Zimmer. All rights reserved.
// Functions for more complex tracking of tasks.
package bdzimmer.secondary.export.controller
import scala.collection.immutable.Seq
import bdzimmer.secondary.export.model.WorldItems.{WorldItem, CollectionItem}
import bdzimmer.secondary.export.model.WorldItems
import bdzimmer.secondary.export.model.Tags.Task
import bdzimmer.secondary.export.model.Tags
import bdzimmer.secondary.export.view.{Markdown, Html, Bootstrap, WebResource}
// TODO: actual date class for dates
case class TaskTableItem(
kind: String,
desc: String,
item: WorldItem,
group: WorldItem,
log: Option[String],
start: Option[String],
done: Option[String])
object Tasks {
val MatcherShorthand = "^\\\\s*([-|+])\\\\s+(.+)$".r
val TasksStyles =
s"""<script src="${WebResource.Jquery.localRelFilename}"></script>""" + "\\n" +
s"""<script src="${WebResource.DataTablesJs.localRelFilename}" charset="utf-8"></script>""" + "\\n" +
s"""<link href="${WebResource.DataTablesCss.localRelFilename}" rel="stylesheet">""" + "\\n"
def render(
master: WorldItem,
tagsMap: Map[Int, Map[Int, Tags.ParsedTag]],
shorthand: Boolean,
recursive: Boolean,
mode: String
): String = {
val (items, groups) = if (recursive) {
val items = WorldItems.collectionToList(master)
val groups = master match {
case x: CollectionItem => x.children.flatMap(
group => WorldItems.collectionToList(group).map(item => (item.uid, group))).toMap
case _ => Map[Int, WorldItem]()
}
(items, groups)
} else {
(List(master), Map[Int, WorldItem]())
}
val tasksFromTags = for {
item <- items
taskTag <- tagsMap(item.uid).values.collect({case x: Tags.Task => x})
} yield {
(taskTag, item, groups.getOrElse(item.uid, master))
}
val allTasks = if (shorthand) {
val tasksFromShorthand = for {
item <- items
line <- item.notes.split("\\n")
m <- MatcherShorthand.findAllMatchIn(line)
} yield {
val kind = if (m.group(1).equals("-")) "todo" else "started"
val desc = m.group(2)
(Tags.Task(kind, desc, None, None, None, 0), item, groups.getOrElse(item.uid, master))
}
tasksFromTags ++ tasksFromShorthand
} else {
tasksFromTags
}
// get invalid tags
def getInvalidTags(item: WorldItem): List[String] = {
tagsMap(item.uid).values.collect({
case x: Tags.ParseError => s"${x.msg} in tag '${x.tag.kind}'"}).toList
}
def taskList(todoFunc: WorldItem => List[String]): String = {
Html.listGroup(items
.map(x => (x, todoFunc(x)))
.filter(_._2.length > 0)
.map(x => {
Html.listItem(
RenderPages.textLinkPage(x._1) +
Html.listGroup(x._2.map(text => Html.listItem(Markdown.processLine(text)))))}))
}
if (mode.equals("count")) {
allTasks.count(_._1.kind.equals("todo")) + " todo, " +
allTasks.count(_._1.kind.equals("started")) + " started"
} else {
val extras = if (mode.equals("all")) {
val emptyNotes = Bootstrap.column(
Bootstrap.Column6,
Html.h4("Empty Notes") +
Html.listGroup(items
.filter(_.notes.equals(""))
.map(x => Html.listItem(RenderPages.textLinkPage(x)))))
val invalidTags = Bootstrap.column(
Bootstrap.Column6,
Html.h4("Invalid Tags") +
taskList(getInvalidTags))
emptyNotes + invalidTags
} else {
""
}
Bootstrap.row(
Bootstrap.column(Bootstrap.Column12, Tasks.table(allTasks.map(x => Tasks.createTask(x._1, x._2, x._3)))) +
extras
)
}
}
def createTask(s: Task, item: WorldItem, group: WorldItem): TaskTableItem = {
TaskTableItem(s.kind, s.desc, item, group, s.log, s.start, s.done)
}
def table(tasks: Seq[TaskTableItem]): String = {
val head = List(
Html.b("Group"),
Html.b("Item"),
Html.b("Status"),
Html.b("Description"),
Html.b("Log"),
Html.b("Start"),
Html.b("Done")).map(_ + Html.nbsp + Html.nbsp)
val body = tasks.sortBy(_.log).map(task => {
List(
RenderPages.textLinkPage(task.group),
RenderPages.textLinkPage(task.item),
Html.b(task.kind.capitalize),
Markdown.processLine(task.desc),
task.log.getOrElse(""),
task.start.getOrElse(""),
task.done.getOrElse("")).map(_ + Html.nbsp + Html.nbsp)
}).toList
val styles = List(
"vertical-align: top; white-space: nowrap",
"vertical-align: top; white-space: nowrap",
"vertical-align: top; white-space: nowrap",
"vertical-align: top",
"vertical-align: top; white-space: nowrap",
"vertical-align: top; white-space: nowrap",
"vertical-align: top; white-space: nowrap")
Html.table(Some(head), body, Some(styles), Some("tasks"), Some("dataTable display"), None, None) +
TasksStyles +
"""<script>$(document).ready(function() {$('#tasks').dataTable({"pageLength": 30, "stateSave": true});});</script>"""
}
}
| bdzimmer/secondary | src/main/scala/bdzimmer/secondary/export/controller/Tasks.scala | Scala | bsd-3-clause | 5,282 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.businessmatching.updateservice.add
import cats.data.OptionT
import cats.implicits._
import connectors.DataCacheConnector
import controllers.{AmlsBaseController, CommonPlayDependencies}
import javax.inject.{Inject, Singleton}
import models.businessmatching.updateservice.ServiceChangeRegister
import models.businessmatching.{AccountancyServices, BillPaymentServices, TelephonePaymentService, TrustAndCompanyServices}
import models.flowmanagement.{AddBusinessTypeFlowModel, NeedMoreInformationPageId}
import play.api.mvc.MessagesControllerComponents
import services.flowmanagement.Router
import utils.{AuthAction, ControllerHelper}
import views.html.businessmatching.updateservice.add.new_service_information
import scala.concurrent.{ExecutionContext, Future}
@Singleton
class NeedMoreInformationController @Inject()(authAction: AuthAction,
val ds: CommonPlayDependencies,
implicit val dataCacheConnector: DataCacheConnector,
val router: Router[AddBusinessTypeFlowModel],
val cc: MessagesControllerComponents,
new_service_information: new_service_information)(implicit ec: ExecutionContext) extends AmlsBaseController(ds, cc) {
def get() = authAction.async {
implicit request =>
(for {
model <- OptionT(dataCacheConnector.fetch[ServiceChangeRegister](request.credId, ServiceChangeRegister.key))
activity <- OptionT.fromOption[Future](model.addedActivities)
cacheMap <- OptionT(dataCacheConnector.fetchAll(request.credId))
} yield {
val activityNames = activity map {
_.getMessage()
}
val isTdiOrBpspPresent = activity exists {
case BillPaymentServices | TelephonePaymentService => true
case _ => false
}
val isAspOrTcspPresent = activity exists {
case AccountancyServices | TrustAndCompanyServices => true
case _ => false
}
val subSectors = model.addedSubSectors.getOrElse(Set.empty)
Ok(new_service_information(activityNames, ControllerHelper.supervisionComplete(cacheMap), subSectors, isTdiOrBpspPresent, isAspOrTcspPresent))
}) getOrElse InternalServerError("Get: Unable to show New Service Information page")
}
def post() = authAction.async {
implicit request =>
(for {
route <- OptionT.liftF(router.getRoute(request.credId, NeedMoreInformationPageId, new AddBusinessTypeFlowModel))
_ <- OptionT.liftF(dataCacheConnector.removeByKey[ServiceChangeRegister](request.credId, ServiceChangeRegister.key))
} yield route) getOrElse InternalServerError("Post: Cannot retrieve data: Add : NewServiceInformationController")
}
}
| hmrc/amls-frontend | app/controllers/businessmatching/updateservice/add/NeedMoreInformationController.scala | Scala | apache-2.0 | 3,533 |
package org.workcraft.plugins.cpog.scala.serialisation
import org.workcraft.plugins.cpog.scala.{nodes => M}
import org.workcraft.plugins.cpog.scala.{VisualArc => MVisualArc}
import org.workcraft.plugins.cpog.scala.nodes.{snapshot => P}
import org.workcraft.scala.StorageManager
import org.workcraft.plugins.cpog.optimisation.booleanvisitors.VariableReplacer
import org.workcraft.scala.Util._
import org.workcraft.plugins.cpog.optimisation.BooleanFormula
import java.awt.geom.Point2D
import pcollections.TreePVector
import pcollections.PVector
/**
 * Rebuilds an editable CPOG model from its persisted snapshot representation,
 * wrapping each stored value in a [[StorageManager]] cell and re-binding formula
 * variables to the freshly created model variables.
 */
object SnapshotLoader {

  /** Folds an Iterable into a persistent pcollections vector. */
  def makePVector[A](iter : Iterable[A]) =
    iter.foldLeft(TreePVector.empty[A] : PVector[A])((v : PVector[A], a : A) => v.plus(a))

  /**
   * Loads a snapshot CPOG into a live model.
   *
   * @param cpog the persisted snapshot
   * @param sm   storage manager used to create mutable cells for every value
   * @return the reconstructed editable CPOG
   */
  def load(cpog : P.CPOG, sm : StorageManager) : org.workcraft.plugins.cpog.scala.CPOG = {
    def loadVisualProperties(prop : P.VisualProperties) = {
      val P.VisualProperties(label, labelPositioning, position) = prop
      M.VisualProperties(sm.create(label), sm.create(labelPositioning), sm.create(position))
    }
    val P.CPOG (variables, vertices, arcs, rhoClauses) = cpog
    // Variables must be built first: vertex/arc conditions reference them.
    val mVariables = variables.map({ case (k, P.Variable(state, visual)) => (k, new M.Variable(sm.create(state), loadVisualProperties(visual))) }).toMap
    // Rewrites persisted variable ids inside a formula to the new model variables.
    def formulaReplacer(formula : BooleanFormula[P.Id[P.Variable]]) = VariableReplacer.replace(asFunctionObject(mVariables.apply), formula)
    // can't use mapValues here because it creates a view!
    // (removed a leftover debug println that logged every vertex creation)
    val mVertices = vertices.map({ case (k, P.Vertex(condition, visual)) => (k, new M.Vertex(sm.create(formulaReplacer(condition)), loadVisualProperties(visual))) })
    def loadVisualArc(arc : P.VisualArc) = arc match {
      case P.VisualArc.Bezier(cp1, cp2) => MVisualArc.Bezier(sm.create(cp1), sm.create(cp2))
      case P.VisualArc.Polyline(cps) => MVisualArc.Polyline(for(cp <- cps) yield sm.create(cp))
    }
    val mArcs = arcs.map({case P.Arc(first, second, condition, visual) => new M.Arc(mVertices(first), mVertices(second), sm.create(formulaReplacer(condition)), sm.create(loadVisualArc(visual)))})
    val mRhoClauses = for(P.RhoClause(formula, visual) <- rhoClauses) yield M.RhoClause(sm.create(formulaReplacer(formula)), loadVisualProperties(visual))
    org.workcraft.plugins.cpog.scala.CPOG(sm, mVariables.values.toList, mVertices.values.toList, mRhoClauses.toList, mArcs.toList)
  }
}
| tuura/workcraft-2.2 | CpogsPlugin/src/main/scala/org/workcraft/plugins/cpog/scala/serialisation/SnapshotLoader.scala | Scala | gpl-3.0 | 2,393 |
package hydrograph.engine.spark.operation.handler
import hydrograph.engine.spark.core.reusablerow._
import hydrograph.engine.transformation.schema
import hydrograph.engine.transformation.schema.{Field, Schema}
import hydrograph.engine.transformation.userfunctions.base.GroupCombineTransformBase
import org.apache.spark.sql.Row
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types.{StructField, _}
import scala.collection.JavaConversions._
/**
* The Class GroupCombineCustomHandler.
*
* @author Bitwise
*
*/
// Spark UDAF adapter: delegates the aggregation life-cycle (initialize/update/merge/
// evaluate) to a user-supplied GroupCombineTransformBase, translating Spark Rows to
// Hydrograph reusable rows in both directions. Each stage wraps failures in a
// RuntimeException that carries the offending row for diagnostics.
case class GroupCombineCustomHandler(groupCombineTransform: GroupCombineTransformBase, inSchema: StructType, outSchema: StructType, isDeterministic: Boolean) extends UserDefinedAggregateFunction {
// Buffer schema is computed once from the transform; reused by the buffer mapper below.
val bufSchemaVal = bufferSchema
def inputSchema: StructType = inSchema
def deterministic = isDeterministic
// Mappers cache the field-name -> position translation for each row role.
val bufferMapper = new RowToReusableMapper(bufSchemaVal, bufSchemaVal.fieldNames)
val inputMapper = new RowToReusableMapper(inSchema, inSchema.fieldNames)
val outputMapper = new RowToReusableMapper(dataType.asInstanceOf[StructType], dataType.asInstanceOf[StructType].fieldNames)
// Lets the transform seed the aggregation buffer.
def initialize(buffer: MutableAggregationBuffer) = {
try {
var brr = BufferReusableRow(buffer, bufferMapper)
groupCombineTransform.initialize(brr)
} catch {
case e: Exception => throw new RuntimeException("Exception in initialize() for Transform Class:[\\"" + groupCombineTransform + "\\"] for row:[\\"" + buffer.toString() + "\\"] error being:" ,e)
}
}
// Folds one input row into the buffer.
def update(buffer: MutableAggregationBuffer, input: Row) = {
try {
val brr = BufferReusableRow(buffer, bufferMapper)
groupCombineTransform.update(brr, InputReusableRow(input, inputMapper))
} catch {
case e: Exception => throw new RuntimeException("Exception in update() for Transform Class:[\\"" + groupCombineTransform + "\\"] for row:[\\"" + input.toString() + "\\"] error being:" ,e)
}
}
// Combines two partial aggregation buffers (buffer2 is read-only).
def merge(buffer1: MutableAggregationBuffer, buffer2: Row) = {
try {
val brr = BufferReusableRow(buffer1, bufferMapper)
val irr = InputReusableRow(buffer2, bufferMapper)
groupCombineTransform.merge(brr, irr)
} catch {
case e: Exception => throw new RuntimeException("Exception in merge() for Transform Class:[\\"" + groupCombineTransform + "\\"] for row:[\\"" + buffer1.toString() + "\\"] error being:" ,e)
}
}
// Produces the final output row from the completed buffer.
def evaluate(buffer: Row) = {
val output = new Array[Any](outSchema.size)
try {
val orr: OutputReusableRow = OutputReusableRow(output, outputMapper)
val irr = InputReusableRow(buffer, bufferMapper)
groupCombineTransform.evaluate(irr, orr)
} catch {
case e: Exception => throw new RuntimeException("Exception in evaluate() for Transform Class:[\\"" + groupCombineTransform + "\\"] for row:[\\"" + buffer.toString() + "\\"] error being:" ,e)
}
Row.fromSeq(output)
}
def bufferSchema = createBufferSchema(groupCombineTransform)
// Asks the transform for its buffer schema (given Hydrograph views of the input and
// output schemas) and converts it back to a Spark StructType.
// NOTE(review): the intermediate scala Map does not guarantee field ordering matches
// the transform's declaration order — confirm ordering is irrelevant downstream.
def createBufferSchema(aggregatorTransformBase: GroupCombineTransformBase): StructType = {
val inputSchema:Schema=new Schema
inSchema.foreach(sf=>inputSchema.addField(new Field.Builder(sf.name,getJavaDataType(sf.dataType)).build()))
val outputSchema:Schema=new Schema
outSchema.foreach(sf=>outputSchema.addField(new Field.Builder(sf.name,getJavaDataType(sf.dataType)).build()))
val bufferSchema: Schema = aggregatorTransformBase.initBufferSchema(inputSchema,outputSchema)
var bufferFieldMap: Map[String, Field] = Map()
for (bufferField <- bufferSchema.getSchema) {
bufferFieldMap += bufferField._1 -> bufferField._2
}
val array: Array[StructField] = new Array[StructField](bufferFieldMap.size())
var i: Int = 0
for (bs <- bufferFieldMap.values) {
array(i) = new StructField(bs.getFieldName, getSparkDataType(bs.getFieldType.toString, bs.getFieldFormat, bs.getFieldPrecision, bs.getFieldScale))
i = i + 1
}
StructType(array)
}
// Spark -> Hydrograph type mapping (TimestampType and DateType both map to Date).
def getJavaDataType(structType: DataType): schema.DataType = structType match {
case _:IntegerType=> schema.DataType.Integer
case _:StringType=>schema.DataType.String
case _:LongType=> schema.DataType.Long
case _:ShortType=> schema.DataType.Short
case _:BooleanType=> schema.DataType.Boolean
case _:FloatType=> schema.DataType.Float
case _:DoubleType=> schema.DataType.Double
case _:TimestampType=> schema.DataType.Date
case _:DateType=> schema.DataType.Date
case _:DecimalType=> schema.DataType.BigDecimal
}
// Hydrograph -> Spark type mapping; a Date whose format carries a time component
// (hours/minutes/seconds) becomes a TimestampType.
def getSparkDataType(dataType: String, format: String, precision: Int, scale: Int): DataType = dataType match {
case "Integer" => DataTypes.IntegerType
case "String" => DataTypes.StringType
case "Long" => DataTypes.LongType
case "Short" => DataTypes.ShortType
case "Boolean" => DataTypes.BooleanType
case "Float" => DataTypes.FloatType
case "Double" => DataTypes.DoubleType
case "Date" if format.matches(".*[H|m|s|S].*") => DataTypes.TimestampType
case "Date" => DataTypes.DateType
case "BigDecimal" => DataTypes.createDecimalType(checkPrecision(precision), scale)
}
// -999 is the sentinel for "unspecified precision"; fall back to the Spark maximum (38).
def checkPrecision(precision: Int): Int = if (precision == -999) 38 else precision
def dataType: DataType = outSchema
}
| capitalone/Hydrograph | hydrograph.engine/hydrograph.engine.spark/src/main/scala/hydrograph/engine/spark/operation/handler/GroupCombineCustomHandler.scala | Scala | apache-2.0 | 5,325 |
package com.highperformancespark.examples.wordcount
/**
* What sort of big data book would this be if we didn't mention wordcount?
*/
import org.apache.spark.rdd._
object WordCount {
// bad idea: uses group by key — shuffles every (word, 1) pair before summing,
// moving far more data than necessary.
def badIdea(rdd: RDD[String]): RDD[(String, Int)] = {
val words = rdd.flatMap(_.split(" "))
val wordPairs = words.map((_, 1))
val grouped = wordPairs.groupByKey()
val wordCounts = grouped.mapValues(_.sum)
wordCounts
}
// good idea: doesn't use group by key — reduceByKey combines map-side first.
// The tag:: / end:: comments are asciidoctor include markers used by the book;
// do not remove or rename them.
//tag::simpleWordCount[]
def simpleWordCount(rdd: RDD[String]): RDD[(String, Int)] = {
val words = rdd.flatMap(_.split(" "))
val wordPairs = words.map((_, 1))
val wordCounts = wordPairs.reduceByKey(_ + _)
wordCounts
}
//end::simpleWordCount[]
/**
 * Come up with word counts but filter out the illegal tokens and stop words
 */
//tag::wordCountStopwords[]
def withStopWordsFiltered(rdd : RDD[String], illegalTokens : Array[Char],
stopWords : Set[String]): RDD[(String, Int)] = {
// Split on the illegal characters as well as spaces, then normalise case.
val seperators = illegalTokens ++ Array[Char](' ')
val tokens: RDD[String] = rdd.flatMap(_.split(seperators).
map(_.trim.toLowerCase))
val words = tokens.filter(token =>
!stopWords.contains(token) && (token.length > 0) )
val wordPairs = words.map((_, 1))
val wordCounts = wordPairs.reduceByKey(_ + _)
wordCounts
}
//end::wordCountStopwords[]
}
| mahmoudhanafy/high-performance-spark-examples | src/main/scala/com/high-performance-spark-examples/wordcount/WordCount.scala | Scala | apache-2.0 | 1,406 |
package com.arcusys.learn.liferay.update.version300
import java.sql.Connection
import com.arcusys.learn.liferay.update.version300.migrations.scorm.ScormUserMigration
import com.arcusys.valamis.persistence.common.SlickProfile
import com.arcusys.valamis.persistence.impl.scorm.schema.ScormUserComponent
import org.scalatest.{BeforeAndAfter, FunSuite}
import scala.slick.driver.{H2Driver, JdbcProfile}
import scala.slick.jdbc.{JdbcBackend, StaticQuery}
/**
 * Tests for [[ScormUserMigration]]: verifies that rows from the legacy
 * Learn_LFUser table are copied into the new scorm-user table.
 * Runs against an in-memory H2 database.
 */
class ScormUserMigrationTest(val driver: JdbcProfile)
  extends FunSuite
  with BeforeAndAfter
  with ScormUserComponent
  with SlickProfile {

  /** No-argument constructor used by the test runner. */
  def this() {
    this(H2Driver)
  }

  import driver.simple._

  val db = Database.forURL("jdbc:h2:mem:migrationTest", driver = "org.h2.Driver")
  // A connection is held open for the duration of each test so the :mem:
  // database survives between the independent sessions opened below.
  var connection: Connection = _

  before {
    connection = db.source.createConnection()
    db.withSession { implicit s =>
      StaticQuery.updateNA(
        """create table Learn_LFUser (
          lfid LONG not null primary key,
          id_ INTEGER null,
          name TEXT null,
          preferredAudioLevel DOUBLE null,
          preferredLanguage TEXT null,
          preferredDeliverySpeed DOUBLE null,
          preferredAudioCaptioning INTEGER null
        );"""
      ).execute
      scormUsersTQ.ddl.create
    }
  }

  after {
    db.withSession { implicit s =>
      StaticQuery.updateNA(
        """drop table Learn_LFUser;"""
      ).execute
      scormUsersTQ.ddl.drop
    }
    connection.close()
  }

  // (removed two unused vals `courseId` and `lessonOwnerId` left over from an
  // earlier revision of this suite)

  test("empty source table") {
    db.withSession{implicit s =>
      val migration = new ScormUserMigration(db, driver)
      migration.migrate()
      val size = scormUsersTQ.length.run
      assert(0 == size)
    }
  }

  test("migrate") {
    val userId = 10882
    val name = "LB"
    val preferredAudioLevel: Double = 1.0D
    val preferredLanguage: String = "fi"
    val preferredDeliverySpeed: Double = 2D
    val preferredAudioCaptioning: Int = 1
    db.withSession { implicit s =>
      addScormUser(1, userId, name, preferredAudioLevel, preferredLanguage, preferredDeliverySpeed, preferredAudioCaptioning)
      val migration = new ScormUserMigration(db, driver)
      migration.migrate()
      val grades = scormUsersTQ.list
      assert(1 == grades.length)
      val g = grades.head
      assert(userId == g.userId)
      assert(name == g.name)
      assert(Some(preferredAudioLevel) == g.preferredAudioLevel)
      assert(Some(preferredLanguage) == g.preferredLanguage)
      assert(Some(preferredDeliverySpeed) == g.preferredDeliverySpeed)
      assert(Some(preferredAudioCaptioning) == g.preferredAudioCaptioning)
    }
  }

  /** Inserts one legacy row. Values are interpolated directly into the SQL,
    * which is acceptable only because inputs are test constants. */
  private def addScormUser(lfid: Long,
                           id: Int,
                           name: String,
                           preferredAudioLevel: Double,
                           preferredLanguage: String,
                           preferredDeliverySpeed: Double,
                           preferredAudioCaptioning: Int
                          )(implicit s: JdbcBackend#Session): Unit = {
    StaticQuery.updateNA(
      s"""insert into Learn_LFUser
        (lfid, id_ , name, preferredAudioLevel, preferredLanguage, preferredDeliverySpeed, preferredAudioCaptioning)
        values ($lfid, $id , '$name', $preferredAudioLevel, '$preferredLanguage', $preferredDeliverySpeed, $preferredAudioCaptioning);"""
    ).execute
  }
}
| igor-borisov/valamis | learn-portlet/src/test/scala/com/arcusys/learn/liferay/update/version300/ScormUserMigrationTest.scala | Scala | gpl-3.0 | 3,447 |
package bad.robot.radiate.monitor
import java.util.Arrays.asList
import java.util.concurrent.CopyOnWriteArrayList
import bad.robot.radiate.{Activity, Progress, Status}
import bad.robot.radiate.Error
/**
 * Thread-safe [[Observable]] backed by a [[CopyOnWriteArrayList]], so observers can
 * be registered and removed concurrently with notifications without external locking.
 */
class ThreadSafeObservable extends Observable {
  private val observers = new CopyOnWriteArrayList[Observer]

  /** Registers the given observers; returns true if the list changed. */
  def addObservers(observers: Observer*): Boolean = {
    this.observers.addAll(asList(observers:_*))
  }

  def addObservers(observers: List[Observer]) {
    observers.foreach(this.observers.add)
  }

  /** Unregisters the given observers; returns true if the list changed.
    * Bug fix: the varargs must be expanded with `:_*` — the original passed the whole
    * Seq to asList as a single element, so removeAll never matched any observer. */
  def removeObservers(observers: Observer*): Boolean = {
    this.observers.removeAll(asList(observers:_*))
  }

  def removeAllObservers() {
    observers.clear()
  }

  // The notify* overloads fan the event out to every registered observer;
  // CopyOnWriteArrayList iteration is safe against concurrent modification.
  def notifyObservers(status: Status) {
    observers.forEach((observer: Observer) => observer.update(this, status))
  }

  def notifyObservers(exception: Exception) {
    observers.forEach((observer: Observer) => observer.update(this, exception))
  }

  def notifyObservers(error: Error) {
    observers.forEach((observer: Observer) => observer.update(this, error))
  }

  def notifyObservers(information: Information) {
    observers.forEach((observer: Observer) => observer.update(this, information))
  }

  def notifyObservers(activity: Activity, progress: Progress) {
    observers.forEach((observer: Observer) => observer.update(this, activity, progress))
  }
}
} | tobyweston/radiate | src/main/scala/bad/robot/radiate/monitor/ThreadSafeObservable.scala | Scala | apache-2.0 | 1,352 |
/*
* Copyright (c) 2017 Magomed Abdurakhmanov, Hypertino
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*
*/
package com.hypertino.hyperbus.util
import java.security.SecureRandom
import java.util.concurrent.atomic.AtomicInteger
// more unique than UUID & CPU hungry because of SecureRandom
// guarantees to grow monotonically until process is restarted
// More unique than a UUID but CPU-hungry because of SecureRandom.
// Ids grow monotonically until the process restarts (coarse timestamp + counter prefix).
object IdGenerator extends IdGeneratorBase {
  private val random = new SecureRandom()
  private val counter = new AtomicInteger(random.nextInt(65536))

  /** Builds an id from a 10-second-granularity timestamp, a monotonic counter,
    * and four words of secure randomness. */
  def create(): String = {
    val builder = new StringBuilder(30)
    val coarseTime = (System.currentTimeMillis() / 10000l & 0xFFFFFFFFl).toInt
    appendInt(builder, coarseTime)
    appendInt(builder, counter.incrementAndGet())
    (1 to 4).foreach(_ => appendInt(builder, random.nextInt()))
    builder.toString()
  }
}
| hypertino/hyperbus | hyperbus/src/main/scala/com/hypertino/hyperbus/util/IdGenerator.scala | Scala | mpl-2.0 | 1,019 |
/*
* Copyright 2013-2015 Websudos, Limited.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Explicit consent must be obtained from the copyright owner, Outworkers Limited before any redistribution is made.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.websudos.phantom.builder.query.prepared
import com.websudos.phantom.PhantomSuite
import com.websudos.phantom.dsl._
import com.websudos.phantom.tables._
import com.websudos.util.testing._
// Integration tests for phantom's prepared SELECT queries: verifies that a prepared
// statement can be built, bound, and executed against Cassandra for single- and
// multi-clause WHEREs and for primitive column types.
class PreparedSelectQueryTest extends PhantomSuite {
override def beforeAll(): Unit = {
super.beforeAll()
System.setProperty("user.timezone", "Canada/Pacific") // perform these tests in non utc timezone
TestDatabase.recipes.insertSchema()
TestDatabase.articlesByAuthor.insertSchema()
TestDatabase.primitives.insertSchema()
// The Cassandra 2.2 primitive types table only exists on protocol v4+ clusters.
if(session.v4orNewer) {
TestDatabase.primitivesCassandra22.insertSchema()
}
}
it should "serialise and execute a prepared select statement with the correct number of arguments" in {
val recipe = gen[Recipe]
val query = TestDatabase.recipes.select.p_where(_.url eqs ?).prepare()
val operation = for {
truncate <- TestDatabase.recipes.truncate.future
insertDone <- TestDatabase.recipes.store(recipe).future()
select <- query.bind(recipe.url).one()
} yield select
operation.successful {
items => {
items shouldBe defined
items.value shouldEqual recipe
}
}
}
// Two bind markers: both partition components must be bound, and each binding
// must retrieve the row stored under that (author, category) pair.
it should "serialise and execute a prepared statement with 2 arguments" in {
val sample = gen[Article]
val sample2 = gen[Article]
val owner = gen[UUID]
val category = gen[UUID]
val category2 = gen[UUID]
val query = TestDatabase.articlesByAuthor.select.p_where(_.author_id eqs ?).p_and(_.category eqs ?).prepare()
val op = for {
store <- TestDatabase.articlesByAuthor.store(owner, category, sample).future()
store2 <- TestDatabase.articlesByAuthor.store(owner, category2, sample2).future()
get <- query.bind(owner, category).one()
get2 <- query.bind(owner, category2).one()
} yield (get, get2)
whenReady(op) {
case (res, res2) => {
res shouldBe defined
res.value shouldEqual sample
res2 shouldBe defined
res2.value shouldEqual sample2
}
}
}
it should "serialise and execute a primitives prepared select statement with the correct number of arguments" in {
val primitive = gen[Primitive]
val query = TestDatabase.primitives.select.p_where(_.pkey eqs ?).prepare()
val operation = for {
truncate <- TestDatabase.primitives.truncate.future
insertDone <- TestDatabase.primitives.store(primitive).future()
select <- query.bind(primitive.pkey).one()
} yield select
operation.successful {
items => {
items shouldBe defined
items.value shouldEqual primitive
}
}
}
// Guarded at class-definition time: the test is only registered on v4+ clusters.
if(session.v4orNewer) {
it should "serialise and execute a primitives cassandra 2.2 prepared select statement with the correct number of arguments" in {
val primitive = gen[PrimitiveCassandra22]
val query = TestDatabase.primitivesCassandra22.select.p_where(_.pkey eqs ?).prepare()
val operation = for {
truncate <- TestDatabase.primitivesCassandra22.truncate.future
insertDone <- TestDatabase.primitivesCassandra22.store(primitive).future()
select <- query.bind(primitive.pkey).one()
} yield select
operation.successful {
items => {
items shouldBe defined
items.value shouldEqual primitive
}
}
}
}
}
| levinson/phantom | phantom-dsl/src/test/scala/com/websudos/phantom/builder/query/prepared/PreparedSelectQueryTest.scala | Scala | bsd-2-clause | 4,852 |
package edu.berkeley.nlp.entity.wiki
import edu.berkeley.nlp.entity.coref.Mention
import edu.berkeley.nlp.futile.util.Counter
/** Strategy interface for entity linking ("wikification"): resolving coreference
  * mentions in a document to Wikipedia titles. */
trait Wikifier {
// Predicted Wikipedia title for the mention (NIL handling is implementation-defined).
def wikify(docName: String, ment: Mention): String;
// Candidate title set for the mention.
def wikifyGetTitleSet(docName: String, ment: Mention): Seq[String];
// Prior weights over candidate titles, for consumption by a joint model.
def wikifyGetPriorForJointModel(docName: String, ment: Mention): Counter[String];
// Oracle hooks: record gold-standard outcomes for evaluation/diagnostics.
def oracleWikifyNil(docName: String, ment: Mention);
def oracleWikify(docName: String, ment: Mention, goldTitles: Seq[String]);
// Prints any diagnostics accumulated by the oracle calls (side-effecting).
def printDiagnostics();
}
| malcolmgreaves/berkeley-entity | src/main/java/edu/berkeley/nlp/entity/wiki/Wikifier.scala | Scala | gpl-3.0 | 529 |
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.mandar2812.dynaml.examples
import breeze.linalg.DenseVector
import io.github.mandar2812.dynaml.graphics.charts.Highcharts._
import io.github.mandar2812.dynaml.DynaMLPipe
import io.github.mandar2812.dynaml.evaluation.RegressionMetrics
import io.github.mandar2812.dynaml.graph.FFNeuralGraph
import io.github.mandar2812.dynaml.models.neuralnets.CommitteeNetwork
import io.github.mandar2812.dynaml.pipes.DataPipe
import org.apache.log4j.Logger
/**
* Created by mandar on 11/2/16.
*/
// Experiment driver: trains a committee of feed-forward neural networks on OMNI
// space-weather time-series data for one year and evaluates one-hour-ahead
// predictions on another year, printing metrics and rendering comparison plots.
object TestCommitteeNNOmni {
  // Convenience entry point: packs the numeric optimiser hyper-parameters into the
  // string option map expected by runExperiment.
  def apply(
    year: Int,
    yeartest: Int,
    hidden: Int = 1,
    nCounts: List[Int] = List(),
    acts: List[String],
    delta: Int,
    timeLag: Int,
    stepAhead: Int,
    num_training: Int,
    num_test: Int,
    column: Int,
    stepSize: Double = 0.05,
    maxIt: Int = 200,
    mini: Double = 1.0,
    alpha: Double = 0.0,
    regularization: Double = 0.5
  ): Unit =
    runExperiment(
      year,
      yeartest,
      hidden,
      nCounts,
      acts,
      delta,
      timeLag,
      stepAhead,
      num_training,
      num_test,
      column,
      Map(
        "tolerance" -> "0.0001",
        "step" -> stepSize.toString,
        "maxIterations" -> maxIt.toString,
        "miniBatchFraction" -> mini.toString,
        "momentum" -> alpha.toString,
        "regularization" -> regularization.toString
      )
    )
  // Loads OMNI data for the train/test years, builds the committee model, trains it,
  // and returns one row of summary metrics. `nCounts` x `act` forms the grid of
  // committee member architectures (hidden-unit count x activation).
  def runExperiment(
    year: Int = 2006,
    yearTest: Int = 2007,
    hidden: Int = 2,
    nCounts: List[Int] = List(),
    act: List[String],
    deltaT: Int = 2,
    timelag: Int = 0,
    stepPred: Int = 3,
    num_training: Int = 200,
    num_test: Int = 50,
    column: Int = 40,
    opt: Map[String, String]
  ): Seq[Seq[AnyVal]] = {
    //Load Omni data into a stream
    //Extract the time and Dst values
    val logger = Logger.getLogger(this.getClass)
    //pipe training data to model and then generate test predictions
    //create RegressionMetrics instance and produce plots
    // Receives ((trainingSet, testSet), (means, stdDevs)) from the standardisation
    // pipe; the last element of means/stdDevs corresponds to the target column and
    // is used to un-standardise predictions before computing metrics.
    val modelTrainTest =
      (trainTest: (
        (
          Iterable[(DenseVector[Double], Double)],
          Iterable[(DenseVector[Double], Double)]
        ),
        (DenseVector[Double], DenseVector[Double])
      )) => {
        // One committee member per (hidden-unit count, activation) combination.
        val configs = for (c <- nCounts; a <- act) yield (c, a)
        val networks = configs.map(couple => {
          FFNeuralGraph(
            trainTest._1._1.head._1.length,
            1,
            1,
            List(couple._2, "recLinear"),
            List(couple._1)
          )
        })
        val transform = DataPipe(
          (d: Stream[(DenseVector[Double], Double)]) =>
            d.map(el => (el._1, DenseVector(el._2)))
        )
        val model =
          new CommitteeNetwork[Stream[(DenseVector[Double], Double)]](
            trainTest._1._1.toStream,
            transform,
            networks: _*
          )
        model.baseOptimizer
          .setStepSize(opt("step").toDouble)
          .setNumIterations(opt("maxIterations").toInt)
          .setMomentum(opt("momentum").toDouble)
          .setRegParam(opt("regularization").toDouble)
        model.learn()
        val res = model.test(trainTest._1._2.toStream)
        // Extract (prediction, actual) pairs and map them back to original units.
        val scoresAndLabelsPipe =
          DataPipe(
            (res: Seq[(DenseVector[Double], DenseVector[Double])]) =>
              res.map(i => (i._1(0), i._2(0))).toList
          ) >
            DataPipe(
              (list: List[(Double, Double)]) =>
                list.map { l =>
                  (
                    l._1 * trainTest._2._2(-1) + trainTest._2._1(-1),
                    l._2 * trainTest._2._2(-1) + trainTest._2._1(-1)
                  )
                }
            )
        val scoresAndLabels = scoresAndLabelsPipe.run(res)
        val metrics =
          new RegressionMetrics(scoresAndLabels, scoresAndLabels.length)
        metrics.print()
        metrics.generatePlots()
        //Plotting time series prediction comparisons
        line((1 to scoresAndLabels.length).toList, scoresAndLabels.map(_._2))
        hold()
        line((1 to scoresAndLabels.length).toList, scoresAndLabels.map(_._1))
        legend(List("Time Series", "Predicted Time Series (one hour ahead)"))
        unhold()
        // Also evaluate predictions of hour-to-hour increments (first differences).
        val incrementsPipe = DataPipe(
          (list: List[(Double, Double)]) =>
            list
              .sliding(2)
              .map(i => (i(1)._1 - i.head._1, i(1)._2 - i.head._2))
              .toList
        )
        val increments = incrementsPipe.run(scoresAndLabels)
        val incrementMetrics =
          new RegressionMetrics(increments, increments.length)
        logger.info("Results for Prediction of increments")
        incrementMetrics.print()
        incrementMetrics.generatePlots()
        line((1 to increments.length).toList, increments.map(_._2))
        hold()
        line((1 to increments.length).toList, increments.map(_._1))
        legend(
          List(
            "Increment Time Series",
            "Predicted Increment Time Series (one hour ahead)"
          )
        )
        unhold()
        // Single summary row for this experiment configuration.
        Seq(
          Seq(
            year,
            yearTest,
            deltaT,
            1,
            num_training,
            num_test,
            metrics.mae,
            metrics.rmse,
            metrics.Rsq,
            metrics.corr,
            metrics.modelYield
          )
        )
      }
    // Raw OMNI CSV -> cleaned feature columns (sentinel values treated as missing)
    // -> hourly time series -> (features, target) pairs via the delta embedding.
    val preProcessPipe = DynaMLPipe.fileToStream >
      DynaMLPipe.replaceWhiteSpaces >
      DynaMLPipe.extractTrainingFeatures(
        List(0, 1, 2, column),
        Map(
          16 -> "999.9",
          21 -> "999.9",
          24 -> "9999.",
          23 -> "999.9",
          40 -> "99999",
          22 -> "9999999.",
          25 -> "999.9",
          28 -> "99.99",
          27 -> "9.999",
          39 -> "999",
          45 -> "99999.99",
          46 -> "99999.99",
          47 -> "99999.99"
        )
      ) > DynaMLPipe.removeMissingLines >
      DynaMLPipe.extractTimeSeries((year, day, hour) => (day * 24) + hour) >
      DynaMLPipe.deltaOperation(deltaT, timelag)
    // Apply the same preprocessing to both files, split, standardise, then train/test.
    val trainTestPipe = DynaMLPipe.duplicate(preProcessPipe) >
      DynaMLPipe.splitTrainingTest(num_training, num_test) >
      DynaMLPipe.trainTestGaussianStandardization >
      DataPipe(modelTrainTest)
    trainTestPipe.run(
      ("data/omni2_" + year + ".csv", "data/omni2_" + yearTest + ".csv")
    )
  }
}
| transcendent-ai-labs/DynaML | dynaml-examples/src/main/scala/io/github/mandar2812/dynaml/examples/TestCommitteeNNOmni.scala | Scala | apache-2.0 | 7,164 |
package chapter21
import scala.LowPriorityImplicits
import scala.LowPriorityImplicits
/**
* 21.7 여러 변환을 사용하는 경우
*
* 스칼라 2.7버전까지는 컴파일러가 처리를 거부했으나, 2.8부터는 가능한 변환 중
* 하나가 다른 하나보다 절대적으로 더 구체적이라면, 컴파일러는 더 구체적인 것을 선택한다.
*
* 메소드가 하나는 String을, 다른 하나는 Any를 받을 수 있다면, 최종적으로 String쪽을
* 선택한다. 더 구체적인 경우는 두 규칙을 보면 된다.
*
* - 전자의 인자 타입이 후자의 서브타입이다.
* - 두 변환 모두 메소드인데, 전자를 둘러싼 클래스가 후자를 둘러싼 클래스를 확장한다.
*
* 이 주제를 다시 검토해 규칙을 변경하게 된 동기는, 자바 컬렉션과 스칼라 컬렉션, 그리고 문자열의
* 상호작용성을 향상하기 위해서 였다.
*/
object c21_i07 extends App {
val cba = "abc".reverse
/*
 * Intuitively one would expect the type of cba to be String, but in Scala 2.7
 * "abc" was implicitly converted to a Scala collection, and reversing that
 * yields a collection again, so cba's type was a collection. There was also an
 * implicit conversion back to String, but it did not solve every problem.
 *
 * For example, before Scala 2.8, "abc" == "abc".reverse.reverse was false.
 *
 * Since then, a more specific implicit conversion exists from String to a new
 * type, StringOps. StringOps provides reverse and many other methods that
 * return a String rather than a collection. The conversion to StringOps lives
 * in Predef, whereas the conversion to Scala collections lives in a new class,
 * LowPriorityImplicits.
 *
 * And Predef extends LowPriorityImplicits:
 *
 *   object Predef extends LowPriorityImplicits with DeprecatedPredef ..
 *
 * Therefore the StringOps conversion is the more specific one and wins.
 */
}
package fr.inria.spirals.sigma.ttc14.fixml.objlang.support
import fr.inria.spirals.sigma.ttc14.fixml.objlang.IntegerLiteral;
import fr.unice.i3s.sigma.support.EMFProxyBuilder;
import fr.unice.i3s.sigma.support.EMFScalaSupport;
/** Generated Sigma/EMF support mixin: exposes the ObjLang IntegerLiteral EClass with
  * an idiomatic apply/unapply factory and an implicit proxy builder. */
trait IntegerLiteralScalaSupport extends EMFScalaSupport {
type IntegerLiteral = fr.inria.spirals.sigma.ttc14.fixml.objlang.IntegerLiteral
protected implicit val _integerliteralProxyBuilder = new EMFProxyBuilder[IntegerLiteral](ObjLang._objlangBuilder)
object IntegerLiteral {
// Factory: only sets the value feature when it differs from the EMF default (0).
def apply(value: Int = 0): IntegerLiteral = {
val _instance = ObjLang._objlangBuilder.create[IntegerLiteral]
if (value != 0) _instance.setValue(value)
_instance
}
// Extractor for pattern matching on the literal's value.
def unapply(that: IntegerLiteral): Option[(Int)] =
Some((that.getValue))
}
}
// Standalone default instance of the support trait, for direct import.
object IntegerLiteralScalaSupport extends IntegerLiteralScalaSupport
| fikovnik/ttc14-fixml-sigma | ttc14-fixml-extension-3/src-gen/fr/inria/spirals/sigma/ttc14/fixml/objlang/support/IntegerLiteralScalaSupport.scala | Scala | epl-1.0 | 886 |
package io.finch.request
import com.twitter.finagle.httpx.Request
import com.twitter.util.{Await, Future}
import org.scalatest.{Matchers, FlatSpec}
// Specs for finch's required request-parameter readers: presence/validity errors,
// map / embedFlatMap combinators (including arrow syntax), and typed parsing of
// Boolean/Int/Long/Float/Double params (NotParsed on malformed input).
class RequiredParamSpec extends FlatSpec with Matchers {
"A RequiredParam" should "be properly parsed if it exists" in {
val request: Request = Request(("foo", "5"))
val futureResult: Future[String] = param("foo")(request)
Await.result(futureResult) shouldBe "5"
}
it should "produce an error if the param is empty" in {
val request: Request = Request(("foo", ""))
val futureResult: Future[String] = param("foo")(request)
a [NotValid] shouldBe thrownBy(Await.result(futureResult))
}
it should "produce an error if the param does not exist" in {
val request: Request = Request(("bar", "foo"))
val futureResult: Future[String] = param("foo")(request)
a [NotPresent] shouldBe thrownBy(Await.result(futureResult))
}
it should "have a matching RequestItem" in {
val p = "foo"
param(p).item shouldBe items.ParamItem(p)
}
// ~> is sugar for map; ~~> is sugar for embedFlatMap.
it should "return the correct result when mapped over" in {
val request: Request = Request(("foo", "5"))
val reader: RequestReader[String] = param("foo").map(_ * 3)
Await.result(reader(request)) shouldBe "555"
}
it should "return the correct result when mapped over with arrow syntax" in {
val request: Request = Request(("foo", "5"))
val reader: RequestReader[String] = param("foo") ~> (_ * 3)
Await.result(reader(request)) shouldBe "555"
}
it should "return the correct result when embedFlatMapped over" in {
val request: Request = Request(("foo", "5"))
val reader: RequestReader[String] = param("foo").embedFlatMap { foo =>
Future.value(foo * 4)
}
Await.result(reader(request)) shouldBe "5555"
}
it should "return the correct result when embedFlatMapped over with arrow syntax" in {
val request: Request = Request(("foo", "5"))
val reader: RequestReader[String] = param("foo") ~~> { foo =>
Future.value(foo * 4)
}
Await.result(reader(request)) shouldBe "5555"
}
"A RequiredBooleanParam" should "be parsed as a boolean" in {
val request: Request = Request(("foo", "true"))
val futureResult: Future[Boolean] = param("foo").as[Boolean].apply(request)
Await.result(futureResult) shouldBe true
}
it should "produce an error if the param is not a boolean" in {
val request: Request = Request(("foo", "5"))
val futureResult: Future[Boolean] = param("foo").as[Boolean].apply(request)
a [NotParsed] shouldBe thrownBy(Await.result(futureResult))
}
"A RequiredIntParam" should "be parsed as an integer" in {
val request: Request = Request(("foo", "5"))
val futureResult: Future[Int] = param("foo").as[Int].apply(request)
Await.result(futureResult) shouldBe 5
}
it should "produce an error if the param is not an integer" in {
val request: Request = Request(("foo", "non-number"))
val futureResult: Future[Int] = param("foo").as[Int].apply(request)
a [NotParsed] shouldBe thrownBy(Await.result(futureResult))
}
"A RequiredLongParam" should "be parsed as a long" in {
val request: Request = Request(("foo", "9000000000000000"))
val futureResult: Future[Long] = param("foo").as[Long].apply(request)
Await.result(futureResult) shouldBe 9000000000000000L
}
it should "produce an error if the param is not a long" in {
val request: Request = Request(("foo", "non-number"))
val futureResult: Future[Long] = param("foo").as[Long].apply(request)
a [NotParsed] shouldBe thrownBy(Await.result(futureResult))
}
"A RequiredFloatParam" should "be parsed as a float" in {
val request: Request = Request(("foo", "5.123"))
val futureResult: Future[Float] = param("foo").as[Float].apply(request)
Await.result(futureResult) shouldBe 5.123f
}
it should "produce an error if the param is not a float" in {
val request: Request = Request(("foo", "non-number"))
val futureResult: Future[Float] = param("foo").as[Float].apply(request)
a [NotParsed] shouldBe thrownBy(Await.result(futureResult))
}
"A RequiredDoubleParam" should "be parsed as a double" in {
val request: Request = Request(("foo", "100.0"))
val futureResult: Future[Double] = param("foo").as[Double].apply(request)
Await.result(futureResult) shouldBe 100.0
}
it should "produce an error if the param is not a double" in {
val request: Request = Request(("foo", "non-number"))
val futureResult: Future[Double] = param("foo").as[Double].apply(request)
a [NotParsed] shouldBe thrownBy(Await.result(futureResult))
}
}
| peel/finch | core/src/test/scala/io/finch/request/RequiredParamSpec.scala | Scala | apache-2.0 | 4,637 |
package abeel.genometools.kmer
import java.io.File
import net.sf.samtools.SAMFileReader
import atk.compbio.DNAString
import scala.collection.JavaConversions._
import java.io.PrintWriter
import abeel.genometools.Main
import net.sf.samtools.SAMFileReader.ValidationStringency
import atk.util.TimeInterval
import atk.compbio.DNAHash
object ReduceKmer extends Main {
  // Immutable CLI configuration, populated by the scopt parser in main().
  // count is the minimum occurrence threshold below which kmers are dropped.
  case class Config(val inputFile: File = null, val outputFile: File = null, val count: Int = 5)
  // One-line description surfaced by the Main framework's help output.
  override val description = "Tool to reduce kmer file size by filtering by count."
  override val version = """
2016/09/27 Initial version included in genometools
"""
/**
 * Entry point: parses command-line options into a Config and, if parsing
 * succeeds, delegates to processFile.
 */
override def main(args: Array[String]) {
  val parser = new scopt.OptionParser[Config]("java -jar genometools.jar reducekmer") {
    // NOTE(review): processFile reads the input as tab-separated <kmer>\t<count>
    // text lines, not BAM, so the help text now says "kmer" instead of the
    // previous, misleading "Input BAM file".
    opt[File]('i', "input") required () action { (x, c) => c.copy(inputFile = x) } text ("Input kmer count file. ")
    opt[File]('o', "output") required () action { (x, c) => c.copy(outputFile = x) } text ("File where you want the output to be written")
    opt[Int]('c', "count") action { (x, c) => c.copy(count = x) } text ("Minimum count to keep, default = " + new Config().count)
  }
  parser.parse(args, Config()) map { config =>
    // Both options are marked required; these assumptions are a second line of defense.
    assume(config.inputFile != null)
    assume(config.outputFile != null)
    processFile(config)
  }
}
/**
 * Streams the tab-separated kmer/count input file and writes only the entries
 * whose count is at least config.count. Prints a progress line every 100000
 * kmers and appends processed/discarded totals as '#' comment lines.
 *
 * Fixes vs. the original: `counter` now starts at 0 and is incremented as each
 * line is consumed, so the "# Processed" summary reports the true line count
 * (it previously over-reported by one); the writer is closed in a finally
 * block so the output file is released even if a line fails to parse.
 */
private def processFile(config: Config) {
  val pw = new PrintWriter(config.outputFile)
  try {
    pw.println(generatorInfo(config))
    var counter = 0
    val startTime = System.currentTimeMillis()
    var discard = 0
    for (line <- tLinesIterator(config.inputFile)) {
      // Input format: <kmer><TAB><count>; toInt throws on malformed counts.
      val arr = line.split("\\t")
      val (kmer, count) = arr(0) -> arr(1).toInt
      counter += 1
      if (counter % 100000 == 0) {
        val interval = System.currentTimeMillis() - startTime
        // The `.1` in the divisor avoids division by zero on very fast runs.
        println("Processing: " + counter + "\\t" + new TimeInterval(interval) + "\\t" + nf.format(counter * 1000L / (interval + .1)) + " kmers/s")
      }
      if (count >= config.count) {
        pw.println(kmer + "\\t" + count)
      } else {
        discard += 1
      }
    }
    pw.println("# Processed " + counter + " kmers")
    pw.println("# Discarded " + discard + " kmers")
    pw.println("# Done")
  } finally {
    pw.close()
  }
}
} | AbeelLab/genometools | scala/abeel/genometools/kmer/ReduceKmer.scala | Scala | gpl-3.0 | 2,249 |
package scrupal.admin
import play.api.routing.sird._
import scrupal.core._
class AdminModuleProvider extends {
  // Early-initializer block: presumably so `id` is assigned before the
  // Provider/Enablee trait constructors run — TODO confirm those traits read
  // `id` during initialization.
  val id = 'AdminModuleProvider
} with Provider with Enablee {
  // Routes GET /module/ to a Reactor that yields a successful "foo" response.
  def provide = {
    case GET(p"/module/") ⇒ Reactor.from { Response("foo", Successful) }
  }
}
| scrupal/scrupal-core | scrupal-server/src/main/scala/scrupal/admin/AdminModuleProvider.scala | Scala | apache-2.0 | 276 |
package org.hqjpa.generator
/**
* Companion object for related class.<br/>
* <br/>
* Static members are thread safe, instance members are not.
*/
object HqjpaMetadataGenerator {
  /** Name of the generated meta-data file (used by callers to decide where to write run()'s output). */
  val fileName = "HqjpaMetadata.scala";
}
/**
* Generates text of HQJPA meta-data object and trait used for imports of entity
* meta-data artifacts into scopes.<br/>
* <br/>
* Static members are thread safe, instance members are not.
*
* @param packageName Name of package to place generate artifacts in.
* @param entityClassNames Class names for entities to generate meta-data for.
*/
class HqjpaMetadataGenerator(var packageName : String, var entityClassNames : Seq[String]) {
import HqjpaMetadataGenerator._
/**
* Runs the generator.
* @return Text of meta-data file.
*/
/**
 * Runs the generator. The produced text contains a package declaration,
 * imports, and two containers named HqjpaMetadata — an object and a trait —
 * carrying identical entity proxy fields. (The original emitted both but a
 * stale "TODO: generate meta-data trait" comment suggested otherwise; the two
 * duplicated generation sections are now a single helper.)
 * @return Text of meta-data file.
 */
def run() : String = {
  val out = new StringBuilder()

  //package and imports
  out ++= s"package ${packageName};\\n\\n"
  out ++= "import javax.annotation.Generated;\\n"
  out ++= "\\n"

  //object and trait carry the same members; the trait allows mixing the
  //entity meta-data into other scopes
  out ++= generateContainer("object")
  out ++= generateContainer("trait")

  //
  out.toString()
}

/**
 * Generates one meta-data container holding the entity fields.
 * @param keyword Scala keyword introducing the container: "object" or "trait".
 * @return Text of the container, including the trailing blank line.
 */
private def generateContainer(keyword : String) : String = {
  val out = new StringBuilder()

  //mark the container as generated code
  out ++= "@Generated(Array(\\"org.hqjpa.generator.HqjpaMetadataGenerator\\"))\\n"
  out ++= s"${keyword} HqjpaMetadata {\\n"

  //add entity fields
  out ++= generateEntityFields(entityClassNames)

  //close the container
  out ++= "}\\n"
  out ++= "\\n"

  //
  out.toString()
}
/**
* Generates entity fields for inclusion in meta-data object and trait.
* @param entityClassNames Class names for entities to generate meta-data for.
* @return Text of entity fields block.
*/
/**
 * Generates entity fields for inclusion in the meta-data object and trait.
 * Each entity yields one proxy field line of the form
 * "\t val <Entity> = new <Entity><suffix>[Null](None);\n".
 * @param entityClassNames Class names for entities to generate meta-data for.
 * @return Text of entity fields block.
 */
private def generateEntityFields(entityClassNames : Seq[String]) : String = {
  entityClassNames
    .map { entityClassName =>
      s"\\t val ${entityClassName} = new ${entityClassName}${EntityProxyGenerator.classNameSuffix}[Null](None);\\n"
    }
    .mkString("")
}
} | vejobrolis/hqjpa | hqjpa/src/org/hqjpa/generator/HqjpaMetadataGenerator.scala | Scala | lgpl-3.0 | 2,290 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.math.RoundingMode
import java.util.Locale
import com.google.common.math.{DoubleMath, IntMath, LongMath}
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, CodeGenerator, ExprCode}
import org.apache.spark.sql.catalyst.util.IntervalUtils
import org.apache.spark.sql.catalyst.util.IntervalUtils._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.CalendarInterval
/**
 * Base class for expressions extracting one component (years, months, days,
 * hours, minutes, seconds) from a CalendarInterval.
 *
 * @param child    the interval expression to extract from
 * @param dataType result type of the extracted component
 * @param func     interpreted-mode extraction function
 * @param funcName name of the IntervalUtils method invoked from generated
 *                 code; must behave identically to `func`
 */
abstract class ExtractIntervalPart(
    child: Expression,
    val dataType: DataType,
    func: CalendarInterval => Any,
    funcName: String)
  extends UnaryExpression with ExpectsInputTypes with NullIntolerant with Serializable {

  override def inputTypes: Seq[AbstractDataType] = Seq(CalendarIntervalType)

  override protected def nullSafeEval(interval: Any): Any = {
    func(interval.asInstanceOf[CalendarInterval])
  }

  override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    // Strip the trailing "$" from the Scala object's class name so the
    // generated Java calls the method on the IntervalUtils singleton class.
    val iu = IntervalUtils.getClass.getName.stripSuffix("$")
    defineCodeGen(ctx, ev, c => s"$iu.$funcName($c)")
  }
}
// Concrete extractors. Result types differ per component — e.g. months and
// minutes use ByteType, seconds carries microsecond precision as
// Decimal(8, 6). withNewChildInternal is the tree-transform hook that
// rebuilds each node with a replaced child.
case class ExtractIntervalYears(child: Expression)
  extends ExtractIntervalPart(child, IntegerType, getYears, "getYears") {
  override protected def withNewChildInternal(newChild: Expression): ExtractIntervalYears =
    copy(child = newChild)
}

case class ExtractIntervalMonths(child: Expression)
  extends ExtractIntervalPart(child, ByteType, getMonths, "getMonths") {
  override protected def withNewChildInternal(newChild: Expression): ExtractIntervalMonths =
    copy(child = newChild)
}

case class ExtractIntervalDays(child: Expression)
  extends ExtractIntervalPart(child, IntegerType, getDays, "getDays") {
  override protected def withNewChildInternal(newChild: Expression): ExtractIntervalDays =
    copy(child = newChild)
}

case class ExtractIntervalHours(child: Expression)
  extends ExtractIntervalPart(child, LongType, getHours, "getHours") {
  override protected def withNewChildInternal(newChild: Expression): ExtractIntervalHours =
    copy(child = newChild)
}

case class ExtractIntervalMinutes(child: Expression)
  extends ExtractIntervalPart(child, ByteType, getMinutes, "getMinutes") {
  override protected def withNewChildInternal(newChild: Expression): ExtractIntervalMinutes =
    copy(child = newChild)
}

case class ExtractIntervalSeconds(child: Expression)
  extends ExtractIntervalPart(child, DecimalType(8, 6), getSeconds, "getSeconds") {
  override protected def withNewChildInternal(newChild: Expression): ExtractIntervalSeconds =
    copy(child = newChild)
}
object ExtractIntervalPart {
  /**
   * Maps the field name of EXTRACT(field FROM interval) onto the matching
   * extractor expression. Matching is case-insensitive and accepts common
   * abbreviations ("Y", "MON", "HRS", ...). Unrecognized fields delegate to
   * `errorHandleFunc`, whose Nothing return type forces it to abort (throw).
   */
  def parseExtractField(
      extractField: String,
      source: Expression,
      errorHandleFunc: => Nothing): Expression = extractField.toUpperCase(Locale.ROOT) match {
    case "YEAR" | "Y" | "YEARS" | "YR" | "YRS" => ExtractIntervalYears(source)
    case "MONTH" | "MON" | "MONS" | "MONTHS" => ExtractIntervalMonths(source)
    case "DAY" | "D" | "DAYS" => ExtractIntervalDays(source)
    case "HOUR" | "H" | "HOURS" | "HR" | "HRS" => ExtractIntervalHours(source)
    case "MINUTE" | "M" | "MIN" | "MINS" | "MINUTES" => ExtractIntervalMinutes(source)
    case "SECOND" | "S" | "SEC" | "SECONDS" | "SECS" => ExtractIntervalSeconds(source)
    case _ => errorHandleFunc
  }
}
/**
 * Base class for interval-by-number arithmetic. The numeric operand is
 * implicitly cast to Double (see inputTypes); subclasses supply the concrete
 * IntervalUtils operation plus its method name for code generation.
 */
abstract class IntervalNumOperation(
    interval: Expression,
    num: Expression)
  extends BinaryExpression with ImplicitCastInputTypes with NullIntolerant with Serializable {
  override def left: Expression = interval
  override def right: Expression = num

  // Interpreted-mode operation; operationName must name the equivalent
  // IntervalUtils method for the generated-code path.
  protected val operation: (CalendarInterval, Double) => CalendarInterval
  protected def operationName: String

  override def inputTypes: Seq[AbstractDataType] = Seq(CalendarIntervalType, DoubleType)
  override def dataType: DataType = CalendarIntervalType

  override def nullable: Boolean = true

  override def nullSafeEval(interval: Any, num: Any): Any = {
    operation(interval.asInstanceOf[CalendarInterval], num.asInstanceOf[Double])
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val iu = IntervalUtils.getClass.getName.stripSuffix("$")
    defineCodeGen(ctx, ev, (interval, num) => s"$iu.$operationName($interval, $num)")
  }

  // Both the exact and non-exact variants present the same user-facing name,
  // e.g. "multiplyExact" -> "multiply_interval".
  override def prettyName: String = operationName.stripSuffix("Exact") + "_interval"
}
// interval * num. Under ANSI mode (failOnError) the "Exact" variant of the
// IntervalUtils operation is used; otherwise the plain variant.
case class MultiplyInterval(
    interval: Expression,
    num: Expression,
    failOnError: Boolean = SQLConf.get.ansiEnabled)
  extends IntervalNumOperation(interval, num) {
  override protected val operation: (CalendarInterval, Double) => CalendarInterval =
    if (failOnError) multiplyExact else multiply
  override protected def operationName: String = if (failOnError) "multiplyExact" else "multiply"
  override protected def withNewChildrenInternal(
      newLeft: Expression, newRight: Expression): MultiplyInterval =
    copy(interval = newLeft, num = newRight)
}

// interval / num, with the same ANSI-mode exact/non-exact selection as above.
case class DivideInterval(
    interval: Expression,
    num: Expression,
    failOnError: Boolean = SQLConf.get.ansiEnabled)
  extends IntervalNumOperation(interval, num) {
  override protected val operation: (CalendarInterval, Double) => CalendarInterval =
    if (failOnError) divideExact else divide
  override protected def operationName: String = if (failOnError) "divideExact" else "divide"
  override protected def withNewChildrenInternal(
      newLeft: Expression, newRight: Expression): DivideInterval =
    copy(interval = newLeft, num = newRight)
}
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(years, months, weeks, days, hours, mins, secs) - Make interval from years, months, weeks, days, hours, mins and secs.",
arguments = """
Arguments:
* years - the number of years, positive or negative
* months - the number of months, positive or negative
* weeks - the number of weeks, positive or negative
* days - the number of days, positive or negative
* hours - the number of hours, positive or negative
* mins - the number of minutes, positive or negative
* secs - the number of seconds with the fractional part in microsecond precision.
""",
examples = """
Examples:
> SELECT _FUNC_(100, 11, 1, 1, 12, 30, 01.001001);
100 years 11 months 8 days 12 hours 30 minutes 1.001001 seconds
> SELECT _FUNC_(100, null, 3);
NULL
> SELECT _FUNC_(0, 1, 0, 1, 0, 0, 100.000001);
1 months 1 days 1 minutes 40.000001 seconds
""",
since = "3.0.0",
group = "datetime_funcs")
// scalastyle:on line.size.limit
/**
 * Builds a CalendarInterval from its components. Under ANSI mode
 * (failOnError) arithmetic overflow raises; otherwise it yields null.
 */
case class MakeInterval(
    years: Expression,
    months: Expression,
    weeks: Expression,
    days: Expression,
    hours: Expression,
    mins: Expression,
    secs: Expression,
    failOnError: Boolean = SQLConf.get.ansiEnabled)
  extends SeptenaryExpression with ImplicitCastInputTypes with NullIntolerant {

  // Auxiliary constructors let SQL call the function with fewer arguments;
  // omitted trailing components default to zero (seconds as Decimal zero).
  def this(
      years: Expression,
      months: Expression,
      weeks: Expression,
      days: Expression,
      hours: Expression,
      mins: Expression,
      sec: Expression) = {
    this(years, months, weeks, days, hours, mins, sec, SQLConf.get.ansiEnabled)
  }
  def this(
      years: Expression,
      months: Expression,
      weeks: Expression,
      days: Expression,
      hours: Expression,
      mins: Expression) = {
    this(years, months, weeks, days, hours, mins, Literal(Decimal(0, Decimal.MAX_LONG_DIGITS, 6)),
      SQLConf.get.ansiEnabled)
  }
  def this(
      years: Expression,
      months: Expression,
      weeks: Expression,
      days: Expression,
      hours: Expression) = {
    this(years, months, weeks, days, hours, Literal(0))
  }
  def this(years: Expression, months: Expression, weeks: Expression, days: Expression) =
    this(years, months, weeks, days, Literal(0))
  def this(years: Expression, months: Expression, weeks: Expression) =
    this(years, months, weeks, Literal(0))
  def this(years: Expression, months: Expression) = this(years, months, Literal(0))
  def this(years: Expression) = this(years, Literal(0))
  def this() = this(Literal(0))

  override def children: Seq[Expression] = Seq(years, months, weeks, days, hours, mins, secs)
  // Accept `secs` as DecimalType to avoid losing precision of microseconds while converting
  // them to the fractional part of `secs`.
  override def inputTypes: Seq[AbstractDataType] = Seq(IntegerType, IntegerType, IntegerType,
    IntegerType, IntegerType, IntegerType, DecimalType(Decimal.MAX_LONG_DIGITS, 6))
  override def dataType: DataType = CalendarIntervalType

  // In non-ANSI mode overflow produces null (see the catch below), so the
  // expression is always nullable; in ANSI mode it is nullable only if a child is.
  override def nullable: Boolean = if (failOnError) children.exists(_.nullable) else true

  // `sec` arrives as an Option because it is the optional seventh argument of
  // SeptenaryExpression; absence means zero seconds.
  override def nullSafeEval(
      year: Any,
      month: Any,
      week: Any,
      day: Any,
      hour: Any,
      min: Any,
      sec: Option[Any]): Any = {
    try {
      IntervalUtils.makeInterval(
        year.asInstanceOf[Int],
        month.asInstanceOf[Int],
        week.asInstanceOf[Int],
        day.asInstanceOf[Int],
        hour.asInstanceOf[Int],
        min.asInstanceOf[Int],
        sec.map(_.asInstanceOf[Decimal]).getOrElse(Decimal(0, Decimal.MAX_LONG_DIGITS, 6)))
    } catch {
      case _: ArithmeticException if !failOnError => null
    }
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    nullSafeCodeGen(ctx, ev, (year, month, week, day, hour, min, sec) => {
      val iu = IntervalUtils.getClass.getName.stripSuffix("$")
      val secFrac = sec.getOrElse("0")
      // Mirror the interpreted path: rethrow under ANSI mode, null otherwise.
      val failOnErrorBranch = if (failOnError) "throw e;" else s"${ev.isNull} = true;"
      s"""
      try {
        ${ev.value} = $iu.makeInterval($year, $month, $week, $day, $hour, $min, $secFrac);
      } catch (java.lang.ArithmeticException e) {
        $failOnErrorBranch
      }
      """
    })
  }

  override def prettyName: String = "make_interval"

  // Seq(years, months, weeks, days, hours, mins, secs)
  override protected def withNewChildrenInternal(
      newChildren: IndexedSeq[Expression]): MakeInterval =
    copy(
      years = newChildren(0),
      months = newChildren(1),
      weeks = newChildren(2),
      days = newChildren(3),
      hours = newChildren(4),
      mins = newChildren(5),
      secs = newChildren(6)
    )
}
// Multiply a year-month interval by a numeric. The interval's internal
// representation is an Int number of months.
case class MultiplyYMInterval(
    interval: Expression,
    num: Expression)
  extends BinaryExpression with ImplicitCastInputTypes with NullIntolerant with Serializable {
  override def left: Expression = interval
  override def right: Expression = num

  override def inputTypes: Seq[AbstractDataType] = Seq(YearMonthIntervalType, NumericType)
  override def dataType: DataType = YearMonthIntervalType

  // Per-numeric-type multiplication; integral paths overflow-check, fractional
  // and decimal paths round HALF_UP to whole months.
  @transient
  private lazy val evalFunc: (Int, Any) => Any = right.dataType match {
    case ByteType | ShortType | IntegerType => (months: Int, num) =>
      Math.multiplyExact(months, num.asInstanceOf[Number].intValue())
    case LongType => (months: Int, num) =>
      Math.toIntExact(Math.multiplyExact(months, num.asInstanceOf[Long]))
    case FloatType | DoubleType => (months: Int, num) =>
      DoubleMath.roundToInt(months * num.asInstanceOf[Number].doubleValue(), RoundingMode.HALF_UP)
    case _: DecimalType => (months: Int, num) =>
      val decimalRes = ((new Decimal).set(months) * num.asInstanceOf[Decimal]).toJavaBigDecimal
      decimalRes.setScale(0, java.math.RoundingMode.HALF_UP).intValueExact()
  }

  override def nullSafeEval(interval: Any, num: Any): Any = {
    evalFunc(interval.asInstanceOf[Int], num)
  }

  // Code generation mirrors evalFunc branch by branch.
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = right.dataType match {
    case ByteType | ShortType | IntegerType =>
      defineCodeGen(ctx, ev, (m, n) => s"java.lang.Math.multiplyExact($m, $n)")
    case LongType =>
      val jlm = classOf[Math].getName
      defineCodeGen(ctx, ev, (m, n) => s"$jlm.toIntExact($jlm.multiplyExact($m, $n))")
    case FloatType | DoubleType =>
      val dm = classOf[DoubleMath].getName
      defineCodeGen(ctx, ev, (m, n) =>
        s"$dm.roundToInt($m * (double)$n, java.math.RoundingMode.HALF_UP)")
    case _: DecimalType =>
      defineCodeGen(ctx, ev, (m, n) =>
        s"((new Decimal()).set($m).$$times($n)).toJavaBigDecimal()" +
        ".setScale(0, java.math.RoundingMode.HALF_UP).intValueExact()")
  }

  override def toString: String = s"($left * $right)"
  override def sql: String = s"(${left.sql} * ${right.sql})"

  override protected def withNewChildrenInternal(
      newLeft: Expression, newRight: Expression): MultiplyYMInterval =
    copy(interval = newLeft, num = newRight)
}
// Multiply a day-time interval by a numeric. The interval's internal
// representation is a Long number of microseconds.
case class MultiplyDTInterval(
    interval: Expression,
    num: Expression)
  extends BinaryExpression with ImplicitCastInputTypes with NullIntolerant with Serializable {
  override def left: Expression = interval
  override def right: Expression = num

  override def inputTypes: Seq[AbstractDataType] = Seq(DayTimeIntervalType, NumericType)
  override def dataType: DataType = DayTimeIntervalType

  // Per-numeric-type multiplication; note the DecimalType case must precede
  // the FractionalType case, since Decimal is also fractional.
  @transient
  private lazy val evalFunc: (Long, Any) => Any = right.dataType match {
    case _: IntegralType => (micros: Long, num) =>
      Math.multiplyExact(micros, num.asInstanceOf[Number].longValue())
    case _: DecimalType => (micros: Long, num) =>
      val decimalRes = ((new Decimal).set(micros) * num.asInstanceOf[Decimal]).toJavaBigDecimal
      decimalRes.setScale(0, RoundingMode.HALF_UP).longValueExact()
    case _: FractionalType => (micros: Long, num) =>
      DoubleMath.roundToLong(micros * num.asInstanceOf[Number].doubleValue(), RoundingMode.HALF_UP)
  }

  override def nullSafeEval(interval: Any, num: Any): Any = {
    evalFunc(interval.asInstanceOf[Long], num)
  }

  // Code generation mirrors evalFunc branch by branch.
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = right.dataType match {
    case _: IntegralType =>
      defineCodeGen(ctx, ev, (m, n) => s"java.lang.Math.multiplyExact($m, $n)")
    case _: DecimalType =>
      defineCodeGen(ctx, ev, (m, n) =>
        s"((new Decimal()).set($m).$$times($n)).toJavaBigDecimal()" +
        ".setScale(0, java.math.RoundingMode.HALF_UP).longValueExact()")
    case _: FractionalType =>
      val dm = classOf[DoubleMath].getName
      defineCodeGen(ctx, ev, (m, n) =>
        s"$dm.roundToLong($m * (double)$n, java.math.RoundingMode.HALF_UP)")
  }

  override def toString: String = s"($left * $right)"
  override def sql: String = s"(${left.sql} * ${right.sql})"

  override protected def withNewChildrenInternal(
      newLeft: Expression, newRight: Expression): MultiplyDTInterval =
    copy(interval = newLeft, num = newRight)
}
// Divide a year-month interval by a numeric. The interval's internal
// representation is an Int number of months; all paths round HALF_UP.
case class DivideYMInterval(
    interval: Expression,
    num: Expression)
  extends BinaryExpression with ImplicitCastInputTypes with NullIntolerant with Serializable {
  override def left: Expression = interval
  override def right: Expression = num

  override def inputTypes: Seq[AbstractDataType] = Seq(YearMonthIntervalType, NumericType)
  override def dataType: DataType = YearMonthIntervalType

  @transient
  private lazy val evalFunc: (Int, Any) => Any = right.dataType match {
    case LongType => (months: Int, num) =>
      // Year-month interval has `Int` as the internal type. The result of the divide operation
      // of `Int` by `Long` must fit to `Int`. So, the casting to `Int` cannot cause overflow.
      LongMath.divide(months, num.asInstanceOf[Long], RoundingMode.HALF_UP).toInt
    case _: IntegralType => (months: Int, num) =>
      IntMath.divide(months, num.asInstanceOf[Number].intValue(), RoundingMode.HALF_UP)
    case _: DecimalType => (months: Int, num) =>
      val decimalRes = ((new Decimal).set(months) / num.asInstanceOf[Decimal]).toJavaBigDecimal
      decimalRes.setScale(0, java.math.RoundingMode.HALF_UP).intValueExact()
    case _: FractionalType => (months: Int, num) =>
      DoubleMath.roundToInt(months / num.asInstanceOf[Number].doubleValue(), RoundingMode.HALF_UP)
  }

  override def nullSafeEval(interval: Any, num: Any): Any = {
    evalFunc(interval.asInstanceOf[Int], num)
  }

  // Code generation mirrors evalFunc branch by branch.
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = right.dataType match {
    case LongType =>
      val math = classOf[LongMath].getName
      val javaType = CodeGenerator.javaType(dataType)
      defineCodeGen(ctx, ev, (m, n) =>
        // Similarly to non-codegen code. The result of `divide(Int, Long, ...)` must fit to `Int`.
        // Casting to `Int` is safe here.
        s"($javaType)($math.divide($m, $n, java.math.RoundingMode.HALF_UP))")
    case _: IntegralType =>
      val math = classOf[IntMath].getName
      defineCodeGen(ctx, ev, (m, n) => s"$math.divide($m, $n, java.math.RoundingMode.HALF_UP)")
    case _: DecimalType =>
      defineCodeGen(ctx, ev, (m, n) =>
        s"((new Decimal()).set($m).$$div($n)).toJavaBigDecimal()" +
        ".setScale(0, java.math.RoundingMode.HALF_UP).intValueExact()")
    case _: FractionalType =>
      val math = classOf[DoubleMath].getName
      defineCodeGen(ctx, ev, (m, n) =>
        s"$math.roundToInt($m / (double)$n, java.math.RoundingMode.HALF_UP)")
  }

  override def toString: String = s"($left / $right)"
  override def sql: String = s"(${left.sql} / ${right.sql})"

  override protected def withNewChildrenInternal(
      newLeft: Expression, newRight: Expression): DivideYMInterval =
    copy(interval = newLeft, num = newRight)
}
// Divide a day-time interval by a numeric. The interval's internal
// representation is a Long number of microseconds; all paths round HALF_UP.
case class DivideDTInterval(
    interval: Expression,
    num: Expression)
  extends BinaryExpression with ImplicitCastInputTypes with NullIntolerant with Serializable {
  override def left: Expression = interval
  override def right: Expression = num

  override def inputTypes: Seq[AbstractDataType] = Seq(DayTimeIntervalType, NumericType)
  override def dataType: DataType = DayTimeIntervalType

  // DecimalType must precede FractionalType, since Decimal is also fractional.
  @transient
  private lazy val evalFunc: (Long, Any) => Any = right.dataType match {
    case _: IntegralType => (micros: Long, num) =>
      LongMath.divide(micros, num.asInstanceOf[Number].longValue(), RoundingMode.HALF_UP)
    case _: DecimalType => (micros: Long, num) =>
      val decimalRes = ((new Decimal).set(micros) / num.asInstanceOf[Decimal]).toJavaBigDecimal
      decimalRes.setScale(0, java.math.RoundingMode.HALF_UP).longValueExact()
    case _: FractionalType => (micros: Long, num) =>
      DoubleMath.roundToLong(micros / num.asInstanceOf[Number].doubleValue(), RoundingMode.HALF_UP)
  }

  override def nullSafeEval(interval: Any, num: Any): Any = {
    evalFunc(interval.asInstanceOf[Long], num)
  }

  // Code generation mirrors evalFunc branch by branch.
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = right.dataType match {
    case _: IntegralType =>
      val math = classOf[LongMath].getName
      defineCodeGen(ctx, ev, (m, n) => s"$math.divide($m, $n, java.math.RoundingMode.HALF_UP)")
    case _: DecimalType =>
      defineCodeGen(ctx, ev, (m, n) =>
        s"((new Decimal()).set($m).$$div($n)).toJavaBigDecimal()" +
        ".setScale(0, java.math.RoundingMode.HALF_UP).longValueExact()")
    case _: FractionalType =>
      val math = classOf[DoubleMath].getName
      defineCodeGen(ctx, ev, (m, n) =>
        s"$math.roundToLong($m / (double)$n, java.math.RoundingMode.HALF_UP)")
  }

  override def toString: String = s"($left / $right)"
  override def sql: String = s"(${left.sql} / ${right.sql})"

  override protected def withNewChildrenInternal(
      newLeft: Expression, newRight: Expression): DivideDTInterval =
    copy(interval = newLeft, num = newRight)
}
| BryanCutler/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/intervalExpressions.scala | Scala | apache-2.0 | 20,362 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.rdd
import scala.reflect.ClassTag
import scala.util.Random
import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.mllib.linalg.{DenseVector, Vector}
import org.apache.spark.mllib.random.RandomDataGenerator
import org.apache.spark.rdd.RDD
import org.apache.spark.util.Utils
/**
 * A partition holding `size` lazily generated random values. Carries its own
 * copy of the generator plus a per-partition seed so the data is reproducible.
 */
private[mllib] class RandomRDDPartition[T](override val index: Int,
    val size: Int,
    val generator: RandomDataGenerator[T],
    val seed: Long) extends Partition {

  require(size >= 0, "Non-negative partition size required.")
}
// These two classes are necessary since Range objects in Scala cannot have size > Int.MaxValue
private[mllib] class RandomRDD[T: ClassTag](sc: SparkContext,
size: Long,
numPartitions: Int,
@transient private val rng: RandomDataGenerator[T],
@transient private val seed: Long = Utils.random.nextLong) extends RDD[T](sc, Nil) {
require(size > 0, "Positive RDD size required.")
require(numPartitions > 0, "Positive number of partitions required")
require(math.ceil(size.toDouble / numPartitions) <= Int.MaxValue,
"Partition size cannot exceed Int.MaxValue")
override def compute(splitIn: Partition, context: TaskContext): Iterator[T] = {
val split = splitIn.asInstanceOf[RandomRDDPartition[T]]
RandomRDD.getPointIterator[T](split)
}
override def getPartitions: Array[Partition] = {
RandomRDD.getPartitions(size, numPartitions, rng, seed)
}
}
/**
 * RDD of random DenseVectors; like RandomRDD but each element is a vector of
 * `vectorSize` draws from the same generator.
 */
private[mllib] class RandomVectorRDD(sc: SparkContext,
    size: Long,
    vectorSize: Int,
    numPartitions: Int,
    @transient private val rng: RandomDataGenerator[Double],
    @transient private val seed: Long = Utils.random.nextLong) extends RDD[Vector](sc, Nil) {

  require(size > 0, "Positive RDD size required.")
  require(numPartitions > 0, "Positive number of partitions required")
  require(vectorSize > 0, "Positive vector size required.")
  // Per-partition size is stored as an Int in RandomRDDPartition.
  require(math.ceil(size.toDouble / numPartitions) <= Int.MaxValue,
    "Partition size cannot exceed Int.MaxValue")

  override def compute(splitIn: Partition, context: TaskContext): Iterator[Vector] = {
    val split = splitIn.asInstanceOf[RandomRDDPartition[Double]]
    RandomRDD.getVectorIterator(split, vectorSize)
  }

  override protected def getPartitions: Array[Partition] = {
    RandomRDD.getPartitions(size, numPartitions, rng, seed)
  }
}
private[mllib] object RandomRDD {

  /**
   * Splits `size` elements as evenly as possible over `numPartitions`
   * partitions, deriving each partition's RNG seed from `seed` so the
   * partitioning is deterministic.
   */
  def getPartitions[T](size: Long,
      numPartitions: Int,
      rng: RandomDataGenerator[T],
      seed: Long): Array[Partition] = {
    val random = new Random(seed)
    val partitions = new Array[Partition](numPartitions)
    var start = 0L
    for (i <- 0 until numPartitions) {
      // Cumulative boundary keeps the sizes within one element of each other.
      val end = ((i + 1) * size) / numPartitions
      partitions(i) = new RandomRDDPartition(i, (end - start).toInt, rng, random.nextLong())
      start = end
    }
    partitions
  }

  // The RNG has to be reset every time the iterator is requested to guarantee same data
  // every time the content of the RDD is examined.
  def getPointIterator[T: ClassTag](partition: RandomRDDPartition[T]): Iterator[T] = {
    val generator = partition.generator.copy()
    generator.setSeed(partition.seed)
    Iterator.tabulate(partition.size)(_ => generator.nextValue())
  }

  // The RNG has to be reset every time the iterator is requested to guarantee same data
  // every time the content of the RDD is examined.
  def getVectorIterator(
      partition: RandomRDDPartition[Double],
      vectorSize: Int): Iterator[Vector] = {
    val generator = partition.generator.copy()
    generator.setSeed(partition.seed)
    Iterator.fill(partition.size) {
      val values = Array.fill(vectorSize)(generator.nextValue())
      new DenseVector(values)
    }
  }
}
| wangyixiaohuihui/spark2-annotation | mllib/src/main/scala/org/apache/spark/mllib/rdd/RandomRDD.scala | Scala | apache-2.0 | 4,708 |
package com.optrak.testakka.utils
import java.time.Duration
import java.util.UUID
import play.api.data.validation.ValidationError
import play.api.libs.json._
import scala.util.Try
object JsonFormats {

  /**
   * Reads for an Enumeration value serialized as its string name. Unknown
   * names yield a JsError mentioning the enumeration class; non-string JSON
   * yields a generic JsError.
   */
  def enumReads[E <: Enumeration](enum: E): Reads[E#Value] = Reads {
    case JsString(s) =>
      try {
        JsSuccess(enum.withName(s).asInstanceOf[E#Value])
      } catch {
        case _: NoSuchElementException =>
          JsError(s"Enumeration expected of type: '${enum.getClass}', but it does not contain '$s'")
      }
    case _ => JsError("String value expected")
  }

  /** Writes an Enumeration value as its string name. */
  def enumWrites[E <: Enumeration]: Writes[E#Value] = Writes(v => JsString(v.toString))

  /** Round-trip Format combining enumReads and enumWrites. */
  def enumFormat[E <: Enumeration](enum: E): Format[E#Value] = {
    Format(enumReads(enum), enumWrites)
  }

  /**
   * Reads for a singleton encoded as {"value": "<SimpleName>"}; succeeds only
   * when the name matches the given singleton's class simple name.
   */
  def singletonReads[O](singleton: O): Reads[O] = {
    (__ \ "value").read[String].collect(
      ValidationError(s"Expected a JSON object with a single field with key 'value' and value '${singleton.getClass.getSimpleName}'")
    ) {
      case s if s == singleton.getClass.getSimpleName => singleton
    }
  }

  /** Writes a singleton as {"value": "<SimpleName>"}. */
  def singletonWrites[O]: Writes[O] = Writes { singleton =>
    Json.obj("value" -> singleton.getClass.getSimpleName)
  }

  /** Round-trip Format for a singleton object. */
  def singletonFormat[O](singleton: O): Format[O] = {
    Format(singletonReads(singleton), singletonWrites)
  }

  // UUIDs travel as their canonical string form; unparsable strings fail validation.
  implicit val uuidReads: Reads[UUID] = implicitly[Reads[String]]
    .collect(ValidationError("Invalid UUID"))(Function.unlift { str =>
      Try(UUID.fromString(str)).toOption
    })
  implicit val uuidWrites: Writes[UUID] = Writes { uuid =>
    JsString(uuid.toString)
  }

  // java.time.Duration travels in ISO-8601 form via Duration.parse/toString.
  implicit val durationReads: Reads[Duration] = implicitly[Reads[String]]
    .collect(ValidationError("Invalid duration"))(Function.unlift { str =>
      Try(Duration.parse(str)).toOption
    })
  implicit val durationWrites: Writes[Duration] = Writes { duration =>
    JsString(duration.toString)
  }
}
| Optrak/lagom-testbed | test-akka-integration/utils/src/main/scala/com/optrak/testakka/utils/JsonFormats.scala | Scala | apache-2.0 | 1,919 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.controller
import collection._
import collection.Set
import com.yammer.metrics.core.Gauge
import java.lang.{IllegalStateException, Object}
import java.util.concurrent.TimeUnit
import kafka.admin.AdminUtils
import kafka.admin.PreferredReplicaLeaderElectionCommand
import kafka.api._
import kafka.cluster.Broker
import kafka.common._
import kafka.log.LogConfig
import kafka.metrics.{KafkaTimer, KafkaMetricsGroup}
import kafka.utils.ZkUtils._
import kafka.utils._
import kafka.utils.Utils._
import org.apache.zookeeper.Watcher.Event.KeeperState
import org.I0Itec.zkclient.{IZkDataListener, IZkStateListener, ZkClient}
import org.I0Itec.zkclient.exception.{ZkNodeExistsException, ZkNoNodeException}
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.locks.ReentrantLock
import scala.None
import kafka.server._
import scala.Some
import kafka.common.TopicAndPartition
class ControllerContext(val zkClient: ZkClient,
                        val zkSessionTimeout: Int) {
  // Channel manager to brokers; null until initialized elsewhere — TODO confirm
  // it is assigned when this broker becomes controller.
  var controllerChannelManager: ControllerChannelManager = null
  val controllerLock: ReentrantLock = new ReentrantLock()
  // Brokers undergoing controlled shutdown; filtered out of the "live" views below.
  var shuttingDownBrokerIds: mutable.Set[Int] = mutable.Set.empty
  val brokerShutdownLock: Object = new Object
  // Start one below the initial constants; presumably bumped on controller
  // election — confirm against KafkaController.
  var epoch: Int = KafkaController.InitialControllerEpoch - 1
  var epochZkVersion: Int = KafkaController.InitialControllerEpochZkVersion - 1
  val correlationId: AtomicInteger = new AtomicInteger(0)
  var allTopics: Set[String] = Set.empty
  // topic-partition -> ordered list of assigned replica broker ids
  var partitionReplicaAssignment: mutable.Map[TopicAndPartition, Seq[Int]] = mutable.Map.empty
  var partitionLeadershipInfo: mutable.Map[TopicAndPartition, LeaderIsrAndControllerEpoch] = mutable.Map.empty
  var partitionsBeingReassigned: mutable.Map[TopicAndPartition, ReassignedPartitionsContext] = new mutable.HashMap
  var partitionsUndergoingPreferredReplicaElection: mutable.Set[TopicAndPartition] = new mutable.HashSet

  // Backing state for the liveBrokers setter/getters; the id set is kept in
  // sync on write so it never needs recomputing on read.
  private var liveBrokersUnderlying: Set[Broker] = Set.empty
  private var liveBrokerIdsUnderlying: Set[Int] = Set.empty

  // setter: updates the broker set and the derived id set together
  def liveBrokers_=(brokers: Set[Broker]) {
    liveBrokersUnderlying = brokers
    liveBrokerIdsUnderlying = liveBrokersUnderlying.map(_.id)
  }

  // getter: "live" excludes brokers currently in controlled shutdown
  def liveBrokers = liveBrokersUnderlying.filter(broker => !shuttingDownBrokerIds.contains(broker.id))
  def liveBrokerIds = liveBrokerIdsUnderlying.filter(brokerId => !shuttingDownBrokerIds.contains(brokerId))

  // Unfiltered views that also include shutting-down brokers.
  def liveOrShuttingDownBrokerIds = liveBrokerIdsUnderlying
  def liveOrShuttingDownBrokers = liveBrokersUnderlying
def partitionsOnBroker(brokerId: Int): Set[TopicAndPartition] = {
partitionReplicaAssignment
.filter { case(topicAndPartition, replicas) => replicas.contains(brokerId) }
.map { case(topicAndPartition, replicas) => topicAndPartition }
.toSet
}
def replicasOnBrokers(brokerIds: Set[Int]): Set[PartitionAndReplica] = {
brokerIds.map { brokerId =>
partitionReplicaAssignment
.filter { case(topicAndPartition, replicas) => replicas.contains(brokerId) }
.map { case(topicAndPartition, replicas) =>
new PartitionAndReplica(topicAndPartition.topic, topicAndPartition.partition, brokerId) }
}.flatten.toSet
}
def replicasForTopic(topic: String): Set[PartitionAndReplica] = {
partitionReplicaAssignment
.filter { case(topicAndPartition, replicas) => topicAndPartition.topic.equals(topic) }
.map { case(topicAndPartition, replicas) =>
replicas.map { r =>
new PartitionAndReplica(topicAndPartition.topic, topicAndPartition.partition, r)
}
}.flatten.toSet
}
def partitionsForTopic(topic: String): collection.Set[TopicAndPartition] = {
partitionReplicaAssignment
.filter { case(topicAndPartition, replicas) => topicAndPartition.topic.equals(topic) }.keySet
}
def allLiveReplicas(): Set[PartitionAndReplica] = {
replicasOnBrokers(liveBrokerIds)
}
def replicasForPartition(partitions: collection.Set[TopicAndPartition]): collection.Set[PartitionAndReplica] = {
partitions.map { p =>
val replicas = partitionReplicaAssignment(p)
replicas.map(r => new PartitionAndReplica(p.topic, p.partition, r))
}.flatten
}
def removeTopic(topic: String) = {
partitionLeadershipInfo = partitionLeadershipInfo.dropWhile(p => p._1.topic.equals(topic))
partitionReplicaAssignment = partitionReplicaAssignment.dropWhile(p => p._1.topic.equals(topic))
allTopics -= topic
}
}
object KafkaController extends Logging {
  val stateChangeLogger = new StateChangeLogger("state.change.logger")
  val InitialControllerEpoch = 1
  val InitialControllerEpochZkVersion = 1

  // Dedicated logger for partition/replica state transitions.
  case class StateChangeLogger(override val loggerName: String) extends Logging

  /**
   * Extract the broker id of the current controller from the content of the
   * /controller znode. Newer brokers store a JSON map with a "brokerid" field;
   * older brokers stored the bare integer, which is parsed as a fallback.
   *
   * Note: removed the non-idiomatic `return` statements; each branch's last
   * expression is now the result. Behavior is unchanged — in particular, the
   * KafkaException thrown for unparseable JSON is still caught by the outer
   * catch and routed to the legacy-format fallback, exactly as before.
   *
   * @throws KafkaException if the data matches neither the new nor the old format
   */
  def parseControllerId(controllerInfoString: String): Int = {
    try {
      Json.parseFull(controllerInfoString) match {
        case Some(m) =>
          val controllerInfo = m.asInstanceOf[Map[String, Any]]
          controllerInfo.get("brokerid").get.asInstanceOf[Int]
        case None => throw new KafkaException("Failed to parse the controller info json [%s].".format(controllerInfoString))
      }
    } catch {
      case t: Throwable =>
        // It may be due to an incompatible controller register version
        warn("Failed to parse the controller info as json. "
          + "Probably this controller is still using the old format [%s] to store the broker id in zookeeper".format(controllerInfoString))
        try {
          controllerInfoString.toInt
        } catch {
          case t: Throwable => throw new KafkaException("Failed to parse the controller info: " + controllerInfoString + ". This is neither the new or the old format.", t)
        }
    }
  }
}
// The controller elected among brokers: drives partition/replica state machines,
// partition reassignment, preferred replica election, and topic deletion.
// (Class body continues beyond this field/metrics section.)
class KafkaController(val config : KafkaConfig, zkClient: ZkClient, val brokerState: BrokerState) extends Logging with KafkaMetricsGroup {
  this.logIdent = "[Controller " + config.brokerId + "]: "
  // Set true in startup(), false in shutdown(); checked by onControllerFailover
  // so a late election win after shutdown does not start the controller.
  private var isRunning = true
  private val stateChangeLogger = KafkaController.stateChangeLogger
  val controllerContext = new ControllerContext(zkClient, config.zkSessionTimeoutMs)
  val partitionStateMachine = new PartitionStateMachine(this)
  val replicaStateMachine = new ReplicaStateMachine(this)
  // ZK leader elector on the controller path; invokes the failover/resignation
  // callbacks when this broker wins or loses the election.
  private val controllerElector = new ZookeeperLeaderElector(controllerContext, ZkUtils.ControllerPath, onControllerFailover,
    onControllerResignation, config.brokerId)
  // have a separate scheduler for the controller to be able to start and stop independently of the
  // kafka server
  private val autoRebalanceScheduler = new KafkaScheduler(1)
  // null until assigned elsewhere — presumably during controller context
  // initialization (see initializeTopicDeletion). TODO confirm against full file.
  var deleteTopicManager: TopicDeletionManager = null
  // Leader selectors for the different election triggers.
  val offlinePartitionSelector = new OfflinePartitionLeaderSelector(controllerContext, config)
  private val reassignedPartitionLeaderSelector = new ReassignedPartitionLeaderSelector(controllerContext)
  private val preferredReplicaPartitionLeaderSelector = new PreferredReplicaPartitionLeaderSelector(controllerContext)
  private val controlledShutdownPartitionLeaderSelector = new ControlledShutdownLeaderSelector(controllerContext)
  private val brokerRequestBatch = new ControllerBrokerRequestBatch(this)

  // 1 on the active controller, 0 elsewhere.
  newGauge(
    "ActiveControllerCount",
    new Gauge[Int] {
      def value() = if (isActive) 1 else 0
    }
  )
  // Partitions whose leader is not among live-or-shutting-down brokers
  // (reported only by the active controller; 0 otherwise).
  newGauge(
    "OfflinePartitionsCount",
    new Gauge[Int] {
      def value(): Int = {
        inLock(controllerContext.controllerLock) {
          if (!isActive())
            0
          else
            controllerContext.partitionLeadershipInfo.count(p => !controllerContext.liveOrShuttingDownBrokerIds.contains(p._2.leaderAndIsr.leader))
        }
      }
    }
  )
  // Partitions whose current leader differs from the preferred (first assigned) replica.
  newGauge(
    "PreferredReplicaImbalanceCount",
    new Gauge[Int] {
      def value(): Int = {
        inLock(controllerContext.controllerLock) {
          if (!isActive())
            0
          else
            controllerContext.partitionReplicaAssignment.count {
              case (topicPartition, replicas) => controllerContext.partitionLeadershipInfo(topicPartition).leaderAndIsr.leader != replicas.head
            }
        }
      }
    }
  )

  // Current controller epoch, cached from the controller context.
  def epoch = controllerContext.epoch

  // Client id used when this controller sends requests to brokers.
  def clientId = "id_%d-host_%s-port_%d".format(config.brokerId, config.hostName, config.port)
/**
 * On clean shutdown, the controller first determines the partitions that the
 * shutting down broker leads, and moves leadership of those partitions to another broker
 * that is in that partition's ISR.
 *
 * Only callable on the active controller. The controller lock is acquired and
 * released once per partition (see comment below) so other controller work can
 * interleave with a long controlled shutdown.
 *
 * @param id Id of the broker to shutdown.
 * @return The set of partitions (with replication factor > 1) that the broker still leads.
 * @throws ControllerMovedException if this broker is no longer the controller
 * @throws BrokerNotAvailableException if the broker id is unknown
 */
def shutdownBroker(id: Int) : Set[TopicAndPartition] = {
  if (!isActive()) {
    throw new ControllerMovedException("Controller moved to another broker. Aborting controlled shutdown")
  }
  // Serialize concurrent controlled shutdowns of different brokers.
  controllerContext.brokerShutdownLock synchronized {
    info("Shutting down broker " + id)
    inLock(controllerContext.controllerLock) {
      if (!controllerContext.liveOrShuttingDownBrokerIds.contains(id))
        throw new BrokerNotAvailableException("Broker id %d does not exist.".format(id))
      // Marking the broker shutting-down removes it from liveBrokers/liveBrokerIds.
      controllerContext.shuttingDownBrokerIds.add(id)
      debug("All shutting down brokers: " + controllerContext.shuttingDownBrokerIds.mkString(","))
      debug("Live brokers: " + controllerContext.liveBrokerIds.mkString(","))
    }
    // Snapshot (partition, replication factor) for everything hosted on this broker.
    val allPartitionsAndReplicationFactorOnBroker: Set[(TopicAndPartition, Int)] =
      inLock(controllerContext.controllerLock) {
        controllerContext.partitionsOnBroker(id)
          .map(topicAndPartition => (topicAndPartition, controllerContext.partitionReplicaAssignment(topicAndPartition).size))
      }
    allPartitionsAndReplicationFactorOnBroker.foreach {
      case(topicAndPartition, replicationFactor) =>
        // Move leadership serially to relinquish lock.
        inLock(controllerContext.controllerLock) {
          controllerContext.partitionLeadershipInfo.get(topicAndPartition).foreach { currLeaderIsrAndControllerEpoch =>
            // Singly-replicated partitions are skipped: there is nowhere to move them.
            if (replicationFactor > 1) {
              if (currLeaderIsrAndControllerEpoch.leaderAndIsr.leader == id) {
                // If the broker leads the topic partition, transition the leader and update isr. Updates zk and
                // notifies all affected brokers
                partitionStateMachine.handleStateChanges(Set(topicAndPartition), OnlinePartition,
                  controlledShutdownPartitionLeaderSelector)
              } else {
                // Stop the replica first. The state change below initiates ZK changes which should take some time
                // before which the stop replica request should be completed (in most cases)
                brokerRequestBatch.newBatch()
                brokerRequestBatch.addStopReplicaRequestForBrokers(Seq(id), topicAndPartition.topic,
                  topicAndPartition.partition, deletePartition = false)
                brokerRequestBatch.sendRequestsToBrokers(epoch, controllerContext.correlationId.getAndIncrement)
                // If the broker is a follower, updates the isr in ZK and notifies the current leader
                replicaStateMachine.handleStateChanges(Set(PartitionAndReplica(topicAndPartition.topic,
                  topicAndPartition.partition, id)), OfflineReplica)
              }
            }
          }
        }
    }
    // Partitions (with RF > 1) that broker `id` still leads after the moves above;
    // the caller uses a non-empty result to retry controlled shutdown.
    def replicatedPartitionsBrokerLeads() = inLock(controllerContext.controllerLock) {
      trace("All leaders = " + controllerContext.partitionLeadershipInfo.mkString(","))
      controllerContext.partitionLeadershipInfo.filter {
        case (topicAndPartition, leaderIsrAndControllerEpoch) =>
          leaderIsrAndControllerEpoch.leaderAndIsr.leader == id && controllerContext.partitionReplicaAssignment(topicAndPartition).size > 1
      }.map(_._1)
    }
    replicatedPartitionsBrokerLeads().toSet
  }
}
/**
 * This callback is invoked by the zookeeper leader elector on electing the current broker as the new controller.
 * It does the following things on the become-controller state change -
 * 1. Register controller epoch changed listener
 * 2. Increments the controller epoch
 * 3. Initializes the controller's context object that holds cache objects for current topics, live brokers and
 *    leaders for all existing partitions.
 * 4. Starts the controller's channel manager
 * 5. Starts the replica state machine
 * 6. Starts the partition state machine
 * If it encounters any unexpected exception/error while becoming controller, it resigns as the current controller.
 * This ensures another controller election will be triggered and there will always be an actively serving controller
 *
 * NOTE(review): the ordering below is deliberate — listeners are registered
 * before the context is read from ZK so no change is missed, and the state
 * machines are started only after the context is initialized.
 */
def onControllerFailover() {
  if(isRunning) {
    info("Broker %d starting become controller state transition".format(config.brokerId))
    //read controller epoch from zk
    readControllerEpochFromZookeeper()
    // increment the controller epoch
    incrementControllerEpoch(zkClient)
    // before reading source of truth from zookeeper, register the listeners to get broker/topic callbacks
    registerReassignedPartitionsListener()
    registerPreferredReplicaElectionListener()
    partitionStateMachine.registerListeners()
    replicaStateMachine.registerListeners()
    initializeControllerContext()
    replicaStateMachine.startup()
    partitionStateMachine.startup()
    // register the partition change listeners for all existing topics on failover
    controllerContext.allTopics.foreach(topic => partitionStateMachine.registerPartitionChangeListener(topic))
    info("Broker %d is ready to serve as the new controller with epoch %d".format(config.brokerId, epoch))
    brokerState.newState(RunningAsController)
    maybeTriggerPartitionReassignment()
    maybeTriggerPreferredReplicaElection()
    /* send partition leadership info to all live brokers */
    sendUpdateMetadataRequest(controllerContext.liveOrShuttingDownBrokerIds.toSeq)
    if (config.autoLeaderRebalanceEnable) {
      info("starting the partition rebalance scheduler")
      autoRebalanceScheduler.startup()
      autoRebalanceScheduler.schedule("partition-rebalance-thread", checkAndTriggerPartitionRebalance,
        5, config.leaderImbalanceCheckIntervalSeconds, TimeUnit.SECONDS)
    }
    // deleteTopicManager is presumably created during initializeControllerContext
    // (via initializeTopicDeletion) — TODO confirm; it is null at construction.
    deleteTopicManager.start()
  }
  else
    info("Controller has been shut down, aborting startup/failover")
}
/**
 * This callback is invoked by the zookeeper leader elector when the current broker resigns as the controller. This is
 * required to clean up internal controller data structures
 *
 * Teardown mirrors onControllerFailover in reverse: topic deletion manager,
 * rebalance scheduler, state machines, then the channel manager. Nulling the
 * channel manager is what makes isActive() return false afterwards.
 */
def onControllerResignation() {
  // Shut down topic deletion outside the controller lock (it may be null if
  // this broker never completed a failover).
  if (deleteTopicManager != null)
    deleteTopicManager.shutdown()
  inLock(controllerContext.controllerLock) {
    if (config.autoLeaderRebalanceEnable)
      autoRebalanceScheduler.shutdown()
    partitionStateMachine.shutdown()
    replicaStateMachine.shutdown()
    if(controllerContext.controllerChannelManager != null) {
      controllerContext.controllerChannelManager.shutdown()
      controllerContext.controllerChannelManager = null
    }
    // Reset cached epoch; the next elected controller re-reads it from ZK.
    controllerContext.epoch=0
    controllerContext.epochZkVersion=0
    brokerState.newState(RunningAsBroker)
  }
}
/**
 * Whether this broker currently holds the controller role. The controller
 * channel manager exists exactly while this broker is the active controller,
 * so its presence is used as the activity flag.
 */
def isActive(): Boolean = inLock(controllerContext.controllerLock) {
  null != controllerContext.controllerChannelManager
}
/**
 * This callback is invoked by the replica state machine's broker change listener, with the list of newly started
 * brokers as input. It does the following -
 * 1. Triggers the OnlinePartition state change for all new/offline partitions
 * 2. It checks whether there are reassigned replicas assigned to any newly started brokers. If
 *    so, it performs the reassignment logic for each topic/partition.
 *
 * Note that we don't need to refresh the leader/isr cache for all topic/partitions at this point for two reasons:
 * 1. The partition state machine, when triggering online state change, will refresh leader and ISR for only those
 *    partitions currently new or offline (rather than every partition this controller is aware of)
 * 2. Even if we do refresh the cache, there is no guarantee that by the time the leader and ISR request reaches
 *    every broker that it is still valid. Brokers check the leader epoch to determine validity of the request.
 *
 * @param newBrokers ids of the brokers that just came online
 */
def onBrokerStartup(newBrokers: Seq[Int]) {
  info("New broker startup callback for %s".format(newBrokers.mkString(",")))
  val newBrokersSet = newBrokers.toSet
  // send update metadata request for all partitions to the newly restarted brokers. In cases of controlled shutdown
  // leaders will not be elected when a new broker comes up. So at least in the common controlled shutdown case, the
  // metadata will reach the new brokers faster
  sendUpdateMetadataRequest(newBrokers)
  // the very first thing to do when a new broker comes up is send it the entire list of partitions that it is
  // supposed to host. Based on that the broker starts the high watermark threads for the input list of partitions
  val allReplicasOnNewBrokers = controllerContext.replicasOnBrokers(newBrokersSet)
  replicaStateMachine.handleStateChanges(allReplicasOnNewBrokers, OnlineReplica)
  // when a new broker comes up, the controller needs to trigger leader election for all new and offline partitions
  // to see if these brokers can become leaders for some/all of those
  partitionStateMachine.triggerOnlinePartitionStateChange()
  // check if reassignment of some partitions need to be restarted
  val partitionsWithReplicasOnNewBrokers = controllerContext.partitionsBeingReassigned.filter {
    case (topicAndPartition, reassignmentContext) => reassignmentContext.newReplicas.exists(newBrokersSet.contains(_))
  }
  partitionsWithReplicasOnNewBrokers.foreach(p => onPartitionReassignment(p._1, p._2))
  // check if topic deletion needs to be resumed. If at least one replica that belongs to the topic being deleted exists
  // on the newly restarted brokers, there is a chance that topic deletion can resume
  val replicasForTopicsToBeDeleted = allReplicasOnNewBrokers.filter(p => deleteTopicManager.isTopicQueuedUpForDeletion(p.topic))
  if(replicasForTopicsToBeDeleted.size > 0) {
    info(("Some replicas %s for topics scheduled for deletion %s are on the newly restarted brokers %s. " +
      "Signaling restart of topic deletion for these topics").format(replicasForTopicsToBeDeleted.mkString(","),
      deleteTopicManager.topicsToBeDeleted.mkString(","), newBrokers.mkString(",")))
    deleteTopicManager.resumeDeletionForTopics(replicasForTopicsToBeDeleted.map(_.topic))
  }
}
/**
 * This callback is invoked by the replica state machine's broker change listener with the list of failed brokers
 * as input. It does the following -
 * 1. Mark partitions with dead leaders as offline
 * 2. Triggers the OnlinePartition state change for all new/offline partitions
 * 3. Invokes the OfflineReplica state change on the input list of newly started brokers
 *
 * Note that we don't need to refresh the leader/isr cache for all topic/partitions at this point. This is because
 * the partition state machine will refresh our cache for us when performing leader election for all new/offline
 * partitions coming online.
 *
 * @param deadBrokers ids of the brokers that went down
 */
def onBrokerFailure(deadBrokers: Seq[Int]) {
  info("Broker failure callback for %s".format(deadBrokers.mkString(",")))
  val deadBrokersThatWereShuttingDown =
    deadBrokers.filter(id => controllerContext.shuttingDownBrokerIds.remove(id))
  info("Removed %s from list of shutting down brokers.".format(deadBrokersThatWereShuttingDown))
  val deadBrokersSet = deadBrokers.toSet
  // trigger OfflinePartition state for all partitions whose current leader is one amongst the dead brokers
  val partitionsWithoutLeader = controllerContext.partitionLeadershipInfo.filter(partitionAndLeader =>
    deadBrokersSet.contains(partitionAndLeader._2.leaderAndIsr.leader) &&
      !deleteTopicManager.isTopicQueuedUpForDeletion(partitionAndLeader._1.topic)).keySet
  partitionStateMachine.handleStateChanges(partitionsWithoutLeader, OfflinePartition)
  // trigger OnlinePartition state changes for offline or new partitions
  partitionStateMachine.triggerOnlinePartitionStateChange()
  // filter out the replicas that belong to topics that are being deleted
  // (val, not var: this reference is never reassigned)
  val allReplicasOnDeadBrokers = controllerContext.replicasOnBrokers(deadBrokersSet)
  val activeReplicasOnDeadBrokers = allReplicasOnDeadBrokers.filterNot(p => deleteTopicManager.isTopicQueuedUpForDeletion(p.topic))
  // handle dead replicas
  replicaStateMachine.handleStateChanges(activeReplicasOnDeadBrokers, OfflineReplica)
  // check if topic deletion state for the dead replicas needs to be updated
  val replicasForTopicsToBeDeleted = allReplicasOnDeadBrokers.filter(p => deleteTopicManager.isTopicQueuedUpForDeletion(p.topic))
  if(replicasForTopicsToBeDeleted.size > 0) {
    // it is required to mark the respective replicas in TopicDeletionFailed state since the replica cannot be
    // deleted when the broker is down. This will prevent the replica from being in TopicDeletionStarted state indefinitely
    // since topic deletion cannot be retried until at least one replica is in TopicDeletionStarted state
    deleteTopicManager.failReplicaDeletion(replicasForTopicsToBeDeleted)
  }
}
/**
 * This callback is invoked by the partition state machine's topic change listener with the list of new topics
 * and partitions as input. It does the following -
 * 1. Registers partition change listener. This is not required until KAFKA-347
 * 2. Invokes the new partition callback
 * 3. Send metadata request with the new topic to all brokers so they allow requests for that topic to be served
 */
def onNewTopicCreation(topics: Set[String], newPartitions: Set[TopicAndPartition]) {
  info("New topic creation callback for %s".format(newPartitions.mkString(",")))
  // Watch each new topic for future partition-count changes before bringing
  // its initial partitions online.
  for (topic <- topics)
    partitionStateMachine.registerPartitionChangeListener(topic)
  onNewPartitionCreation(newPartitions)
}
/**
 * This callback is invoked by the topic change callback with the list of failed brokers as input.
 * It does the following -
 * 1. Move the newly created partitions to the NewPartition state
 * 2. Move the newly created partitions from NewPartition->OnlinePartition state
 */
def onNewPartitionCreation(newPartitions: Set[TopicAndPartition]) {
  info("New partition creation callback for %s".format(newPartitions.mkString(",")))
  // First register the partitions and their replicas as new.
  partitionStateMachine.handleStateChanges(newPartitions, NewPartition)
  val replicasOfNewPartitions = controllerContext.replicasForPartition(newPartitions)
  replicaStateMachine.handleStateChanges(replicasOfNewPartitions, NewReplica)
  // Then elect leaders and bring everything online. The replica set is re-read
  // from the context at the same point the original sequence read it.
  partitionStateMachine.handleStateChanges(newPartitions, OnlinePartition, offlinePartitionSelector)
  val replicasToBringOnline = controllerContext.replicasForPartition(newPartitions)
  replicaStateMachine.handleStateChanges(replicasToBringOnline, OnlineReplica)
}
/**
 * This callback is invoked by the reassigned partitions listener. When an admin command initiates a partition
 * reassignment, it creates the /admin/reassign_partitions path that triggers the zookeeper listener.
 * Reassigning replicas for a partition goes through a few steps listed in the code.
 * RAR = Reassigned replicas
 * OAR = Original list of replicas for partition
 * AR = current assigned replicas
 *
 * 1. Update AR in ZK with OAR + RAR.
 * 2. Send LeaderAndIsr request to every replica in OAR + RAR (with AR as OAR + RAR). We do this by forcing an update
 *    of the leader epoch in zookeeper.
 * 3. Start new replicas RAR - OAR by moving replicas in RAR - OAR to NewReplica state.
 * 4. Wait until all replicas in RAR are in sync with the leader.
 * 5. Move all replicas in RAR to OnlineReplica state.
 * 6. Set AR to RAR in memory.
 * 7. If the leader is not in RAR, elect a new leader from RAR. If new leader needs to be elected from RAR, a LeaderAndIsr
 *    will be sent. If not, then leader epoch will be incremented in zookeeper and a LeaderAndIsr request will be sent.
 *    In any case, the LeaderAndIsr request will have AR = RAR. This will prevent the leader from adding any replica in
 *    RAR - OAR back in the isr.
 * 8. Move all replicas in OAR - RAR to OfflineReplica state. As part of OfflineReplica state change, we shrink the
 *    isr to remove OAR - RAR in zookeeper and sent a LeaderAndIsr ONLY to the Leader to notify it of the shrunk isr.
 *    After that, we send a StopReplica (delete = false) to the replicas in OAR - RAR.
 * 9. Move all replicas in OAR - RAR to NonExistentReplica state. This will send a StopReplica (delete = false) to
 *    the replicas in OAR - RAR to physically delete the replicas on disk.
 * 10. Update AR in ZK with RAR.
 * 11. Update the /admin/reassign_partitions path in ZK to remove this partition.
 * 12. After electing leader, the replicas and isr information changes. So resend the update metadata request to every broker.
 *
 * For example, if OAR = {1, 2, 3} and RAR = {4,5,6}, the values in the assigned replica (AR) and leader/isr path in ZK
 * may go through the following transition.
 * AR                 leader/isr
 * {1,2,3}            1/{1,2,3}           (initial state)
 * {1,2,3,4,5,6}      1/{1,2,3}           (step 2)
 * {1,2,3,4,5,6}      1/{1,2,3,4,5,6}     (step 4)
 * {1,2,3,4,5,6}      4/{1,2,3,4,5,6}     (step 7)
 * {1,2,3,4,5,6}      4/{4,5,6}           (step 8)
 * {4,5,6}            4/{4,5,6}           (step 10)
 *
 * Note that we have to update AR in ZK with RAR last since it's the only place where we store OAR persistently.
 * This way, if the controller crashes before that step, we can still recover.
 */
def onPartitionReassignment(topicAndPartition: TopicAndPartition, reassignedPartitionContext: ReassignedPartitionsContext) {
  val reassignedReplicas = reassignedPartitionContext.newReplicas
  // Idiom fix: the original pattern-matched on a Boolean (`match { case false / case true }`);
  // a plain if/else expresses the same two phases directly. Behavior is unchanged.
  if (!areReplicasInIsr(topicAndPartition.topic, topicAndPartition.partition, reassignedReplicas)) {
    // Phase one (steps 1-3): RAR is not yet fully in the ISR.
    info("New replicas %s for partition %s being ".format(reassignedReplicas.mkString(","), topicAndPartition) +
      "reassigned not yet caught up with the leader")
    val newReplicasNotInOldReplicaList = reassignedReplicas.toSet -- controllerContext.partitionReplicaAssignment(topicAndPartition).toSet
    val newAndOldReplicas = (reassignedPartitionContext.newReplicas ++ controllerContext.partitionReplicaAssignment(topicAndPartition)).toSet
    //1. Update AR in ZK with OAR + RAR.
    updateAssignedReplicasForPartition(topicAndPartition, newAndOldReplicas.toSeq)
    //2. Send LeaderAndIsr request to every replica in OAR + RAR (with AR as OAR + RAR).
    updateLeaderEpochAndSendRequest(topicAndPartition, controllerContext.partitionReplicaAssignment(topicAndPartition),
      newAndOldReplicas.toSeq)
    //3. replicas in RAR - OAR -> NewReplica
    startNewReplicasForReassignedPartition(topicAndPartition, reassignedPartitionContext, newReplicasNotInOldReplicaList)
    info("Waiting for new replicas %s for partition %s being ".format(reassignedReplicas.mkString(","), topicAndPartition) +
      "reassigned to catch up with the leader")
  } else {
    // Phase two (steps 4-12): every reassigned replica has joined the ISR.
    //4. Wait until all replicas in RAR are in sync with the leader.
    val oldReplicas = controllerContext.partitionReplicaAssignment(topicAndPartition).toSet -- reassignedReplicas.toSet
    //5. replicas in RAR -> OnlineReplica
    reassignedReplicas.foreach { replica =>
      replicaStateMachine.handleStateChanges(Set(new PartitionAndReplica(topicAndPartition.topic, topicAndPartition.partition,
        replica)), OnlineReplica)
    }
    //6. Set AR to RAR in memory.
    //7. Send LeaderAndIsr request with a potential new leader (if current leader not in RAR) and
    //   a new AR (using RAR) and same isr to every broker in RAR
    moveReassignedPartitionLeaderIfRequired(topicAndPartition, reassignedPartitionContext)
    //8. replicas in OAR - RAR -> Offline (force those replicas out of isr)
    //9. replicas in OAR - RAR -> NonExistentReplica (force those replicas to be deleted)
    stopOldReplicasOfReassignedPartition(topicAndPartition, reassignedPartitionContext, oldReplicas)
    //10. Update AR in ZK with RAR.
    updateAssignedReplicasForPartition(topicAndPartition, reassignedReplicas)
    //11. Update the /admin/reassign_partitions path in ZK to remove this partition.
    removePartitionFromReassignedPartitions(topicAndPartition)
    info("Removed partition %s from the list of reassigned partitions in zookeeper".format(topicAndPartition))
    controllerContext.partitionsBeingReassigned.remove(topicAndPartition)
    //12. After electing leader, the replicas and isr information changes, so resend the update metadata request to every broker
    sendUpdateMetadataRequest(controllerContext.liveOrShuttingDownBrokerIds.toSeq, Set(topicAndPartition))
    // signal delete topic thread if reassignment for some partitions belonging to topics being deleted just completed
    deleteTopicManager.resumeDeletionForTopics(Set(topicAndPartition.topic))
  }
}
/**
 * Register a ZK data listener on the partition's leader-and-isr path so the
 * controller is notified when the reassigned replicas catch up (join the ISR).
 * The listener is also stashed on the reassignment context so it can be found later.
 */
private def watchIsrChangesForReassignedPartition(topic: String,
                                                  partition: Int,
                                                  reassignedPartitionContext: ReassignedPartitionsContext) {
  val listener = new ReassignedPartitionsIsrChangeListener(this, topic, partition,
    reassignedPartitionContext.newReplicas.toSet)
  reassignedPartitionContext.isrChangeListener = listener
  // register listener on the leader and isr path to wait until they catch up with the current leader
  zkClient.subscribeDataChanges(ZkUtils.getTopicPartitionLeaderAndIsrPath(topic, partition), listener)
}
/**
 * Validate and kick off reassignment of one partition to a new replica set.
 * Rejects the request if the partition doesn't exist, is already assigned to
 * exactly the requested replicas, or some requested replica is not alive.
 * On any failure the partition is removed from the admin path so the admin
 * client is unblocked.
 */
def initiateReassignReplicasForTopicPartition(topicAndPartition: TopicAndPartition,
                                              reassignedPartitionContext: ReassignedPartitionsContext) {
  val newReplicas = reassignedPartitionContext.newReplicas
  val topic = topicAndPartition.topic
  val partition = topicAndPartition.partition
  val aliveNewReplicas = newReplicas.filter(r => controllerContext.liveBrokerIds.contains(r))
  try {
    val assignedReplicasOpt = controllerContext.partitionReplicaAssignment.get(topicAndPartition)
    assignedReplicasOpt match {
      case Some(assignedReplicas) =>
        // No-op reassignment: requested replicas equal the current assignment.
        if(assignedReplicas == newReplicas) {
          throw new KafkaException("Partition %s to be reassigned is already assigned to replicas".format(topicAndPartition) +
            " %s. Ignoring request for partition reassignment".format(newReplicas.mkString(",")))
        } else {
          if(aliveNewReplicas == newReplicas) {
            info("Handling reassignment of partition %s to new replicas %s".format(topicAndPartition, newReplicas.mkString(",")))
            // first register ISR change listener
            watchIsrChangesForReassignedPartition(topic, partition, reassignedPartitionContext)
            controllerContext.partitionsBeingReassigned.put(topicAndPartition, reassignedPartitionContext)
            // mark topic ineligible for deletion for the partitions being reassigned
            deleteTopicManager.markTopicIneligibleForDeletion(Set(topic))
            onPartitionReassignment(topicAndPartition, reassignedPartitionContext)
          } else {
            // some replica in RAR is not alive. Fail partition reassignment
            throw new KafkaException("Only %s replicas out of the new set of replicas".format(aliveNewReplicas.mkString(",")) +
              " %s for partition %s to be reassigned are alive. ".format(newReplicas.mkString(","), topicAndPartition) +
              "Failing partition reassignment")
          }
        }
      case None => throw new KafkaException("Attempt to reassign partition %s that doesn't exist"
        .format(topicAndPartition))
    }
  } catch {
    // NOTE(review): deliberately catches Throwable so any failure (including the
    // KafkaExceptions thrown above) is logged and the admin path is cleaned up
    // rather than propagated.
    case e: Throwable => error("Error completing reassignment of partition %s".format(topicAndPartition), e)
      // remove the partition from the admin path to unblock the admin client
      removePartitionFromReassignedPartitions(topicAndPartition)
  }
}
/**
 * Attempt to move leadership of the given partitions back to their preferred
 * replica. Topics are marked ineligible for deletion for the duration; the
 * finally-block always clears the election bookkeeping and resumes deletion,
 * whether or not the election succeeded.
 *
 * @param partitions partitions to elect the preferred replica for
 * @param isTriggeredByAutoRebalance true when invoked by the auto-rebalance
 *        scheduler rather than an admin command
 */
def onPreferredReplicaElection(partitions: Set[TopicAndPartition], isTriggeredByAutoRebalance: Boolean = false) {
  info("Starting preferred replica leader election for partitions %s".format(partitions.mkString(",")))
  try {
    controllerContext.partitionsUndergoingPreferredReplicaElection ++= partitions
    deleteTopicManager.markTopicIneligibleForDeletion(partitions.map(_.topic))
    partitionStateMachine.handleStateChanges(partitions, OnlinePartition, preferredReplicaPartitionLeaderSelector)
  } catch {
    // Log-and-continue: a failed election leaves cleanup to the finally block.
    case e: Throwable => error("Error completing preferred replica leader election for partitions %s".format(partitions.mkString(",")), e)
  } finally {
    removePartitionsFromPreferredReplicaElection(partitions, isTriggeredByAutoRebalance)
    deleteTopicManager.resumeDeletionForTopics(partitions.map(_.topic))
  }
}
/**
 * Invoked when the controller module of a Kafka server is started up. This does not assume that the current broker
 * is the controller. It merely registers the session expiration listener and starts the controller leader
 * elector
 */
def startup() = {
  inLock(controllerContext.controllerLock) {
    // (removed stray trailing semicolon — not idiomatic Scala)
    info("Controller starting up")
    registerSessionExpirationListener()
    // Must be set before the elector may win and invoke onControllerFailover,
    // which checks this flag.
    isRunning = true
    controllerElector.startup
    info("Controller startup complete")
  }
}
/**
 * Invoked when the controller module of a Kafka server is shutting down. If the broker was the current controller,
 * it shuts down the partition and replica state machines. If not, those are a no-op. In addition to that, it also
 * shuts down the controller channel manager, if one exists (i.e. if it was the current controller)
 */
def shutdown() = {
  // Flip the flag under the lock so a concurrent election win sees it...
  inLock(controllerContext.controllerLock) {
    isRunning = false
  }
  // ...then resign outside this lock scope (onControllerResignation takes the
  // lock itself for the parts that need it).
  onControllerResignation()
}
/**
 * Forward a request to a broker through the controller channel manager.
 * NOTE(review): controllerChannelManager is null when this broker is not the
 * active controller — calling this then would NPE; presumably callers only
 * invoke it on the active controller. TODO confirm.
 *
 * @param callback optional handler for the broker's response (null = fire and forget)
 */
def sendRequest(brokerId : Int, request : RequestOrResponse, callback: (RequestOrResponse) => Unit = null) = {
  controllerContext.controllerChannelManager.sendRequest(brokerId, request, callback)
}
/**
 * Atomically bump the controller epoch in ZooKeeper via a conditional (versioned)
 * update, then cache the new epoch and znode version locally. If the epoch path
 * does not exist yet, this is the first controller: create it with the initial
 * epoch. A conditional-update failure or a lost creation race means another
 * broker won the election.
 *
 * @throws ControllerMovedException if another broker became controller concurrently
 */
def incrementControllerEpoch(zkClient: ZkClient) = {
  try {
    // val, not var: the candidate epoch is never reassigned.
    val newControllerEpoch = controllerContext.epoch + 1
    val (updateSucceeded, newVersion) = ZkUtils.conditionalUpdatePersistentPathIfExists(zkClient,
      ZkUtils.ControllerEpochPath, newControllerEpoch.toString, controllerContext.epochZkVersion)
    if(!updateSucceeded)
      throw new ControllerMovedException("Controller moved to another broker. Aborting controller startup procedure")
    else {
      controllerContext.epochZkVersion = newVersion
      controllerContext.epoch = newControllerEpoch
    }
  } catch {
    case nne: ZkNoNodeException =>
      // if path doesn't exist, this is the first controller whose epoch should be 1
      // the following call can still fail if another controller gets elected between checking if the path exists and
      // trying to create the controller epoch path
      try {
        zkClient.createPersistent(ZkUtils.ControllerEpochPath, KafkaController.InitialControllerEpoch.toString)
        controllerContext.epoch = KafkaController.InitialControllerEpoch
        controllerContext.epochZkVersion = KafkaController.InitialControllerEpochZkVersion
      } catch {
        case e: ZkNodeExistsException => throw new ControllerMovedException("Controller moved to another broker. " +
          "Aborting controller startup procedure")
        // NOTE(review): other errors here are logged and swallowed (preserved behavior).
        case oe: Throwable => error("Error while incrementing controller epoch", oe)
      }
    case oe: Throwable => error("Error while incrementing controller epoch", oe)
  }
  info("Controller %d incremented epoch to %d".format(config.brokerId, controllerContext.epoch))
}
// Subscribe to ZK client state changes so a session expiration can be handled
// (re-election) by the SessionExpirationListener.
private def registerSessionExpirationListener() = {
  zkClient.subscribeStateChanges(new SessionExpirationListener())
}
/**
 * Populate the controller context from ZooKeeper after winning the election:
 * brokers, topics, replica assignments, then leader/ISR state; finally start the
 * channel manager and reconstruct in-flight admin operations. The order matters —
 * e.g. the leader/ISR cache is built from the topic list read just above, and the
 * channel manager must exist before requests can be sent to brokers.
 */
private def initializeControllerContext() {
  // update controller cache with delete topic information
  controllerContext.liveBrokers = ZkUtils.getAllBrokersInCluster(zkClient).toSet
  controllerContext.allTopics = ZkUtils.getAllTopics(zkClient).toSet
  controllerContext.partitionReplicaAssignment = ZkUtils.getReplicaAssignmentForTopics(zkClient, controllerContext.allTopics.toSeq)
  controllerContext.partitionLeadershipInfo = new mutable.HashMap[TopicAndPartition, LeaderIsrAndControllerEpoch]
  controllerContext.shuttingDownBrokerIds = mutable.Set.empty[Int]
  // update the leader and isr cache for all existing partitions from Zookeeper
  updateLeaderAndIsrCache()
  // start the channel manager
  startChannelManager()
  // Rebuild in-flight preferred-replica elections, reassignments, and topic
  // deletion state from their ZK admin paths.
  initializePreferredReplicaElection()
  initializePartitionReassignment()
  initializeTopicDeletion()
  info("Currently active brokers in the cluster: %s".format(controllerContext.liveBrokerIds))
  info("Currently shutting brokers in the cluster: %s".format(controllerContext.shuttingDownBrokerIds))
  info("Current list of topics in the cluster: %s".format(controllerContext.allTopics))
}
// Seeds the preferred-replica-election state from ZK, dropping entries that
// are already done (current leader == preferred/head replica) or whose topic
// has been deleted since the election was requested.
private def initializePreferredReplicaElection() {
  val partitionsUndergoingPreferredReplicaElection = ZkUtils.getPartitionsUndergoingPreferredReplicaElection(zkClient)
  val partitionsThatCompletedPreferredReplicaElection = partitionsUndergoingPreferredReplicaElection.filter { partition =>
    controllerContext.partitionReplicaAssignment.get(partition) match {
      case None =>
        // topic deleted: nothing left to elect for this partition
        true
      case Some(assignedReplicas) =>
        // done iff the current leader is already the preferred (head) replica
        controllerContext.partitionLeadershipInfo(partition).leaderAndIsr.leader == assignedReplicas.head
    }
  }
  controllerContext.partitionsUndergoingPreferredReplicaElection ++= partitionsUndergoingPreferredReplicaElection
  controllerContext.partitionsUndergoingPreferredReplicaElection --= partitionsThatCompletedPreferredReplicaElection
  info("Partitions undergoing preferred replica election: %s".format(partitionsUndergoingPreferredReplicaElection.mkString(",")))
  info("Partitions that completed preferred replica election: %s".format(partitionsThatCompletedPreferredReplicaElection.mkString(",")))
  info("Resuming preferred replica election for partitions: %s".format(controllerContext.partitionsUndergoingPreferredReplicaElection.mkString(",")))
}
// Seeds the in-flight partition reassignment state from
// /admin/reassign_partitions, purging entries that already completed
// (assignment equals the target replicas) or whose topic was deleted.
private def initializePartitionReassignment() {
  // read the partitions being reassigned from zookeeper path /admin/reassign_partitions
  val partitionsBeingReassigned = ZkUtils.getPartitionsBeingReassigned(zkClient)
  // check if they are already completed or topic was deleted
  val reassignedPartitions = partitionsBeingReassigned.filter { partition =>
    val replicasOpt = controllerContext.partitionReplicaAssignment.get(partition._1)
    val topicDeleted = replicasOpt.isEmpty
    val successful = if(!topicDeleted) replicasOpt.get == partition._2.newReplicas else false
    topicDeleted || successful
  }.map(_._1)
  reassignedPartitions.foreach(p => removePartitionFromReassignedPartitions(p))
  // val (was var): the HashMap itself is mutable; the reference never changes
  val partitionsToReassign: mutable.Map[TopicAndPartition, ReassignedPartitionsContext] = new mutable.HashMap
  partitionsToReassign ++= partitionsBeingReassigned
  partitionsToReassign --= reassignedPartitions
  controllerContext.partitionsBeingReassigned ++= partitionsToReassign
  info("Partitions being reassigned: %s".format(partitionsBeingReassigned.toString()))
  info("Partitions already reassigned: %s".format(reassignedPartitions.toString()))
  info("Resuming reassignment of partitions: %s".format(partitionsToReassign.toString()))
}
// Boots the topic deletion manager with the deletion queue from ZK and the set
// of topics that may not be deleted yet (dead replicas, in-flight reassignment
// or preferred replica election).
private def initializeTopicDeletion() {
  val topicsQueuedForDeletion = ZkUtils.getChildrenParentMayNotExist(zkClient, ZkUtils.DeleteTopicsPath).toSet
  val topicsWithReplicasOnDeadBrokers = controllerContext.partitionReplicaAssignment.filter { case(partition, replicas) =>
    replicas.exists(r => !controllerContext.liveBrokerIds.contains(r)) }.keySet.map(_.topic)
  // FIX: these two names were swapped in the original — each local is now
  // named after the value it actually holds. The resulting union (and thus
  // behavior) is unchanged.
  val topicsForWhichPreferredReplicaElectionIsInProgress = controllerContext.partitionsUndergoingPreferredReplicaElection.map(_.topic)
  val topicsForWhichPartitionReassignmentIsInProgress = controllerContext.partitionsBeingReassigned.keySet.map(_.topic)
  val topicsIneligibleForDeletion = topicsWithReplicasOnDeadBrokers | topicsForWhichPreferredReplicaElectionIsInProgress |
    topicsForWhichPartitionReassignmentIsInProgress
  info("List of topics to be deleted: %s".format(topicsQueuedForDeletion.mkString(",")))
  info("List of topics ineligible for deletion: %s".format(topicsIneligibleForDeletion.mkString(",")))
  // initialize the topic deletion manager
  deleteTopicManager = new TopicDeletionManager(this, topicsQueuedForDeletion, topicsIneligibleForDeletion)
}
// Resumes every reassignment that was in flight when this broker took over as
// controller.
private def maybeTriggerPartitionReassignment() {
  controllerContext.partitionsBeingReassigned.foreach { case (topicAndPartition, reassignedContext) =>
    initiateReassignReplicasForTopicPartition(topicAndPartition, reassignedContext)
  }
}
// Resumes any preferred replica election that was pending at controller
// failover.
private def maybeTriggerPreferredReplicaElection() {
  onPreferredReplicaElection(controllerContext.partitionsUndergoingPreferredReplicaElection.toSet)
}
// Creates and starts the manager that owns the controller-to-broker
// connections; stored in the context so other components can send requests.
private def startChannelManager() {
  controllerContext.controllerChannelManager = new ControllerChannelManager(controllerContext, config)
  controllerContext.controllerChannelManager.startup()
}
// Bulk-reads the leader/ISR for every partition with a known replica
// assignment and refreshes the leadership cache entry by entry.
private def updateLeaderAndIsrCache() {
  val leaderAndIsrInfo = ZkUtils.getPartitionLeaderAndIsrForTopics(zkClient, controllerContext.partitionReplicaAssignment.keySet)
  leaderAndIsrInfo.foreach { case (topicPartition, leaderIsrAndControllerEpoch) =>
    controllerContext.partitionLeadershipInfo.put(topicPartition, leaderIsrAndControllerEpoch)
  }
}
// True iff ZK has leader/ISR info for the partition AND every replica in
// `replicas` is currently in the ISR; false when the leader path is absent.
private def areReplicasInIsr(topic: String, partition: Int, replicas: Seq[Int]): Boolean = {
  getLeaderAndIsrForPartition(zkClient, topic, partition).exists { leaderAndIsr =>
    replicas.forall(r => leaderAndIsr.isr.contains(r))
  }
}
// After a reassignment's new replicas have caught up, makes sure the partition
// leader lives in the new replica set: re-elects if the current leader is not
// among the new replicas (or is dead); otherwise only bumps the leader epoch.
// Replaces the non-idiomatic `match` on a Boolean with a plain if/else chain.
private def moveReassignedPartitionLeaderIfRequired(topicAndPartition: TopicAndPartition,
                                                    reassignedPartitionContext: ReassignedPartitionsContext) {
  val reassignedReplicas = reassignedPartitionContext.newReplicas
  val currentLeader = controllerContext.partitionLeadershipInfo(topicAndPartition).leaderAndIsr.leader
  // change the assigned replica list to just the reassigned replicas in the cache so it gets sent out on the LeaderAndIsr
  // request to the current or new leader. This will prevent it from adding the old replicas to the ISR
  val oldAndNewReplicas = controllerContext.partitionReplicaAssignment(topicAndPartition)
  controllerContext.partitionReplicaAssignment.put(topicAndPartition, reassignedReplicas)
  if(!reassignedPartitionContext.newReplicas.contains(currentLeader)) {
    info("Leader %s for partition %s being reassigned, ".format(currentLeader, topicAndPartition) +
      "is not in the new list of replicas %s. Re-electing leader".format(reassignedReplicas.mkString(",")))
    // move the leader to one of the alive and caught up new replicas
    partitionStateMachine.handleStateChanges(Set(topicAndPartition), OnlinePartition, reassignedPartitionLeaderSelector)
  } else if (controllerContext.liveBrokerIds.contains(currentLeader)) {
    // leader is already a member of the new replica set and alive
    info("Leader %s for partition %s being reassigned, ".format(currentLeader, topicAndPartition) +
      "is already in the new list of replicas %s and is alive".format(reassignedReplicas.mkString(",")))
    // shrink replication factor and update the leader epoch in zookeeper to use on the next LeaderAndIsrRequest
    updateLeaderEpochAndSendRequest(topicAndPartition, oldAndNewReplicas, reassignedReplicas)
  } else {
    // leader is in the new replica set but the broker is dead: re-elect
    info("Leader %s for partition %s being reassigned, ".format(currentLeader, topicAndPartition) +
      "is already in the new list of replicas %s but is dead".format(reassignedReplicas.mkString(",")))
    partitionStateMachine.handleStateChanges(Set(topicAndPartition), OnlinePartition, reassignedPartitionLeaderSelector)
  }
}
// Retires the replicas that are no longer assigned after a reassignment by
// walking them through the replica state machine in a fixed order:
// Offline -> DeletionStarted -> DeletionSuccessful -> NonExistent.
// The state transitions must happen in exactly this sequence.
private def stopOldReplicasOfReassignedPartition(topicAndPartition: TopicAndPartition,
                                                 reassignedPartitionContext: ReassignedPartitionsContext,
                                                 oldReplicas: Set[Int]) {
  val topic = topicAndPartition.topic
  val partition = topicAndPartition.partition
  // first move the replica to offline state (the controller removes it from the ISR)
  val replicasToBeDeleted = oldReplicas.map(r => PartitionAndReplica(topic, partition, r))
  replicaStateMachine.handleStateChanges(replicasToBeDeleted, OfflineReplica)
  // send stop replica command to the old replicas
  replicaStateMachine.handleStateChanges(replicasToBeDeleted, ReplicaDeletionStarted)
  // TODO: Eventually partition reassignment could use a callback that does retries if deletion failed
  replicaStateMachine.handleStateChanges(replicasToBeDeleted, ReplicaDeletionSuccessful)
  replicaStateMachine.handleStateChanges(replicasToBeDeleted, NonExistentReplica)
}
// Persists a new replica assignment for one partition: builds the topic's full
// assignment map with this partition overridden, writes it to ZK via the
// overload below, and only then updates the in-memory cache.
private def updateAssignedReplicasForPartition(topicAndPartition: TopicAndPartition,
                                               replicas: Seq[Int]) {
  // filter on the mutable map yields a fresh map for this topic only, so the
  // put below does not touch the shared cache
  val partitionsAndReplicasForThisTopic = controllerContext.partitionReplicaAssignment.filter(_._1.topic.equals(topicAndPartition.topic))
  partitionsAndReplicasForThisTopic.put(topicAndPartition, replicas)
  updateAssignedReplicasForPartition(topicAndPartition, partitionsAndReplicasForThisTopic)
  info("Updated assigned replicas for partition %s being reassigned to %s ".format(topicAndPartition, replicas.mkString(",")))
  // update the assigned replica list after a successful zookeeper write
  controllerContext.partitionReplicaAssignment.put(topicAndPartition, replicas)
}
// Brings up the replicas that appear in the reassigned replica list but not in
// the current assignment by moving each of them into the NewReplica state
// (one state-machine call per replica, as before).
private def startNewReplicasForReassignedPartition(topicAndPartition: TopicAndPartition,
                                                   reassignedPartitionContext: ReassignedPartitionsContext,
                                                   newReplicas: Set[Int]) {
  for (replica <- newReplicas) {
    val replicaToStart = new PartitionAndReplica(topicAndPartition.topic, topicAndPartition.partition, replica)
    replicaStateMachine.handleStateChanges(Set(replicaToStart), NewReplica)
  }
}
// Bumps the partition's leader epoch in ZK (leader/ISR unchanged) and, if that
// succeeds, batches a LeaderAndIsr request carrying the new assigned replica
// list to `replicasToReceiveRequest`. On failure only logs — the reassignment
// is considered failed for this partition.
private def updateLeaderEpochAndSendRequest(topicAndPartition: TopicAndPartition, replicasToReceiveRequest: Seq[Int], newAssignedReplicas: Seq[Int]) {
  brokerRequestBatch.newBatch()
  updateLeaderEpoch(topicAndPartition.topic, topicAndPartition.partition) match {
    case Some(updatedLeaderIsrAndControllerEpoch) =>
      brokerRequestBatch.addLeaderAndIsrRequestForBrokers(replicasToReceiveRequest, topicAndPartition.topic,
        topicAndPartition.partition, updatedLeaderIsrAndControllerEpoch, newAssignedReplicas)
      brokerRequestBatch.sendRequestsToBrokers(controllerContext.epoch, controllerContext.correlationId.getAndIncrement)
      stateChangeLogger.trace(("Controller %d epoch %d sent LeaderAndIsr request %s with new assigned replica list %s " +
        "to leader %d for partition being reassigned %s").format(config.brokerId, controllerContext.epoch, updatedLeaderIsrAndControllerEpoch,
        newAssignedReplicas.mkString(","), updatedLeaderIsrAndControllerEpoch.leaderAndIsr.leader, topicAndPartition))
    case None => // fail the reassignment
      stateChangeLogger.error(("Controller %d epoch %d failed to send LeaderAndIsr request with new assigned replica list %s " +
        "to leader for partition being reassigned %s").format(config.brokerId, controllerContext.epoch,
        newAssignedReplicas.mkString(","), topicAndPartition))
  }
}
// Watches /admin/reassign_partitions so admin-initiated reassignments reach
// this controller (see PartitionsReassignedListener).
private def registerReassignedPartitionsListener() = {
  zkClient.subscribeDataChanges(ZkUtils.ReassignPartitionsPath, new PartitionsReassignedListener(this))
}
// Watches /admin/preferred_replica_election for admin-triggered elections
// (see PreferredReplicaElectionListener).
private def registerPreferredReplicaElectionListener() {
  zkClient.subscribeDataChanges(ZkUtils.PreferredReplicaLeaderElectionPath, new PreferredReplicaElectionListener(this))
}
// Initializes the cached controller epoch and its zk version from the
// persistent epoch path, when that path exists (otherwise leaves defaults).
private def readControllerEpochFromZookeeper() {
  if (ZkUtils.pathExists(controllerContext.zkClient, ZkUtils.ControllerEpochPath)) {
    // readData returns (data, stat); destructure instead of ._1/._2
    val (epochString, epochStat) = ZkUtils.readData(controllerContext.zkClient, ZkUtils.ControllerEpochPath)
    controllerContext.epoch = epochString.toInt
    controllerContext.epochZkVersion = epochStat.getVersion
    info("Initialized controller epoch to %d and zk version %d".format(controllerContext.epoch, controllerContext.epochZkVersion))
  }
}
// Finalizes (or abandons) a reassignment for one partition: removes the ISR
// watch if one was registered, rewrites /admin/reassign_partitions without the
// partition, and drops it from the in-memory map.
def removePartitionFromReassignedPartitions(topicAndPartition: TopicAndPartition) {
  // single map lookup (was: isDefined followed by a second apply lookup)
  controllerContext.partitionsBeingReassigned.get(topicAndPartition).foreach { reassignedContext =>
    // stop watching the ISR changes for this partition
    zkClient.unsubscribeDataChanges(ZkUtils.getTopicPartitionLeaderAndIsrPath(topicAndPartition.topic, topicAndPartition.partition),
      reassignedContext.isrChangeListener)
  }
  // read the current list of reassigned partitions from zookeeper
  val partitionsBeingReassigned = ZkUtils.getPartitionsBeingReassigned(zkClient)
  // remove this partition from that list
  val updatedPartitionsBeingReassigned = partitionsBeingReassigned - topicAndPartition
  // write the new list to zookeeper
  ZkUtils.updatePartitionReassignmentData(zkClient, updatedPartitionsBeingReassigned.mapValues(_.newReplicas))
  // update the cache. NO-OP if the partition's reassignment was never started
  controllerContext.partitionsBeingReassigned.remove(topicAndPartition)
}
// Writes the topic's full replica assignment (with this partition's new
// replicas) to the topic's ZK path.
// NOTE(review): the Throwable catch rewraps with e2.toString and loses the
// original stack trace/cause — preserved here as legacy behavior, but worth
// revisiting.
def updateAssignedReplicasForPartition(topicAndPartition: TopicAndPartition,
                                       newReplicaAssignmentForTopic: Map[TopicAndPartition, Seq[Int]]) {
  try {
    val zkPath = ZkUtils.getTopicPath(topicAndPartition.topic)
    val jsonPartitionMap = ZkUtils.replicaAssignmentZkData(newReplicaAssignmentForTopic.map(e => (e._1.partition.toString -> e._2)))
    ZkUtils.updatePersistentPath(zkClient, zkPath, jsonPartitionMap)
    debug("Updated path %s with %s for replica assignment".format(zkPath, jsonPartitionMap))
  } catch {
    case e: ZkNoNodeException => throw new IllegalStateException("Topic %s doesn't exist".format(topicAndPartition.topic))
    case e2: Throwable => throw new KafkaException(e2.toString)
  }
}
// Logs whether each finished preferred-replica election actually moved the
// leader onto the preferred (head) replica, deletes the admin trigger path for
// manually requested elections, and clears the in-memory election set.
def removePartitionsFromPreferredReplicaElection(partitionsToBeRemoved: Set[TopicAndPartition],
                                                 isTriggeredByAutoRebalance : Boolean) {
  partitionsToBeRemoved.foreach { partition =>
    // check the status
    val currentLeader = controllerContext.partitionLeadershipInfo(partition).leaderAndIsr.leader
    val preferredReplica = controllerContext.partitionReplicaAssignment(partition).head
    if (currentLeader == preferredReplica)
      info("Partition %s completed preferred replica leader election. New leader is %d".format(partition, preferredReplica))
    else
      warn("Partition %s failed to complete preferred replica leader election. Leader is %d".format(partition, currentLeader))
  }
  // auto-rebalance elections never create the admin path, so only delete it
  // for manually triggered elections
  if (!isTriggeredByAutoRebalance)
    ZkUtils.deletePath(zkClient, ZkUtils.PreferredReplicaLeaderElectionPath)
  controllerContext.partitionsUndergoingPreferredReplicaElection --= partitionsToBeRemoved
}
/**
 * Send the leader information for selected partitions to selected brokers so that they can correctly respond to
 * metadata requests
 *
 * @param brokers    The brokers that the update metadata request should be sent to
 * @param partitions The partitions whose metadata to include; an empty set
 *                   means all partitions (per the batch's semantics — confirm
 *                   in ControllerBrokerRequestBatch).
 */
def sendUpdateMetadataRequest(brokers: Seq[Int], partitions: Set[TopicAndPartition] = Set.empty[TopicAndPartition]) {
  brokerRequestBatch.newBatch()
  brokerRequestBatch.addUpdateMetadataRequestForBrokers(brokers, partitions)
  brokerRequestBatch.sendRequestsToBrokers(epoch, controllerContext.correlationId.getAndIncrement)
}
/**
 * Removes a given partition replica from the ISR; if it is not the current
 * leader and there are sufficient remaining replicas in ISR.
 *
 * Retries the ZK write in a loop: each iteration re-reads the leader/ISR from
 * ZK and attempts a versioned update, so a concurrent writer only forces
 * another pass rather than a lost update.
 *
 * @param topic topic
 * @param partition partition
 * @param replicaId replica Id
 * @return the new leaderAndIsr (with the replica removed if it was present),
 *         or None if leaderAndIsr is empty.
 */
def removeReplicaFromIsr(topic: String, partition: Int, replicaId: Int): Option[LeaderIsrAndControllerEpoch] = {
  val topicAndPartition = TopicAndPartition(topic, partition)
  debug("Removing replica %d from ISR %s for partition %s.".format(replicaId,
    controllerContext.partitionLeadershipInfo(topicAndPartition).leaderAndIsr.isr.mkString(","), topicAndPartition))
  var finalLeaderIsrAndControllerEpoch: Option[LeaderIsrAndControllerEpoch] = None
  var zkWriteCompleteOrUnnecessary = false
  while (!zkWriteCompleteOrUnnecessary) {
    // refresh leader and isr from zookeeper again
    val leaderIsrAndEpochOpt = ReplicationUtils.getLeaderIsrAndEpochForPartition(zkClient, topic, partition)
    zkWriteCompleteOrUnnecessary = leaderIsrAndEpochOpt match {
      case Some(leaderIsrAndEpoch) => // increment the leader epoch even if the ISR changes
        val leaderAndIsr = leaderIsrAndEpoch.leaderAndIsr
        val controllerEpoch = leaderIsrAndEpoch.controllerEpoch
        // a higher controller epoch in ZK means this controller is stale
        if(controllerEpoch > epoch)
          throw new StateChangeFailedException("Leader and isr path written by another controller. This probably" +
            "means the current controller with epoch %d went through a soft failure and another ".format(epoch) +
            "controller was elected with epoch %d. Aborting state change by this controller".format(controllerEpoch))
        if (leaderAndIsr.isr.contains(replicaId)) {
          // if the replica to be removed from the ISR is also the leader, set the new leader value to -1
          val newLeader = if (replicaId == leaderAndIsr.leader) LeaderAndIsr.NoLeader else leaderAndIsr.leader
          var newIsr = leaderAndIsr.isr.filter(b => b != replicaId)
          // if the replica to be removed from the ISR is the last surviving member of the ISR and unclean leader election
          // is disallowed for the corresponding topic, then we must preserve the ISR membership so that the replica can
          // eventually be restored as the leader.
          if (newIsr.isEmpty && !LogConfig.fromProps(config.props.props, AdminUtils.fetchTopicConfig(zkClient,
            topicAndPartition.topic)).uncleanLeaderElectionEnable) {
            info("Retaining last ISR %d of partition %s since unclean leader election is disabled".format(replicaId, topicAndPartition))
            newIsr = leaderAndIsr.isr
          }
          val newLeaderAndIsr = new LeaderAndIsr(newLeader, leaderAndIsr.leaderEpoch + 1,
            newIsr, leaderAndIsr.zkVersion + 1)
          // update the new leadership decision in zookeeper or retry
          val (updateSucceeded, newVersion) = ReplicationUtils.updateLeaderAndIsr(zkClient, topic, partition,
            newLeaderAndIsr, epoch, leaderAndIsr.zkVersion)
          newLeaderAndIsr.zkVersion = newVersion
          // NOTE(review): the cache is updated even when the ZK write failed;
          // the loop then retries with freshly read state.
          finalLeaderIsrAndControllerEpoch = Some(LeaderIsrAndControllerEpoch(newLeaderAndIsr, epoch))
          controllerContext.partitionLeadershipInfo.put(topicAndPartition, finalLeaderIsrAndControllerEpoch.get)
          if (updateSucceeded)
            info("New leader and ISR for partition %s is %s".format(topicAndPartition, newLeaderAndIsr.toString()))
          updateSucceeded
        } else {
          warn("Cannot remove replica %d from ISR of partition %s since it is not in the ISR. Leader = %d ; ISR = %s"
            .format(replicaId, topicAndPartition, leaderAndIsr.leader, leaderAndIsr.isr))
          finalLeaderIsrAndControllerEpoch = Some(LeaderIsrAndControllerEpoch(leaderAndIsr, epoch))
          controllerContext.partitionLeadershipInfo.put(topicAndPartition, finalLeaderIsrAndControllerEpoch.get)
          true
        }
      case None =>
        warn("Cannot remove replica %d from ISR of %s - leaderAndIsr is empty.".format(replicaId, topicAndPartition))
        true
    }
  }
  finalLeaderIsrAndControllerEpoch
}
/**
 * Does not change leader or isr, but just increments the leader epoch
 * (and zk version) so the leader re-reads its expanded assigned replica list.
 * Retries the versioned ZK write in a loop until it succeeds.
 *
 * @param topic topic
 * @param partition partition
 * @return the new leaderAndIsr with an incremented leader epoch, or None if leaderAndIsr is empty.
 * @throws IllegalStateException if the leaderAndIsr path does not exist
 * @throws StateChangeFailedException if a newer controller epoch is found in ZK
 */
private def updateLeaderEpoch(topic: String, partition: Int): Option[LeaderIsrAndControllerEpoch] = {
  val topicAndPartition = TopicAndPartition(topic, partition)
  debug("Updating leader epoch for partition %s.".format(topicAndPartition))
  var finalLeaderIsrAndControllerEpoch: Option[LeaderIsrAndControllerEpoch] = None
  var zkWriteCompleteOrUnnecessary = false
  while (!zkWriteCompleteOrUnnecessary) {
    // refresh leader and isr from zookeeper again
    val leaderIsrAndEpochOpt = ReplicationUtils.getLeaderIsrAndEpochForPartition(zkClient, topic, partition)
    zkWriteCompleteOrUnnecessary = leaderIsrAndEpochOpt match {
      case Some(leaderIsrAndEpoch) =>
        val leaderAndIsr = leaderIsrAndEpoch.leaderAndIsr
        val controllerEpoch = leaderIsrAndEpoch.controllerEpoch
        if(controllerEpoch > epoch)
          throw new StateChangeFailedException("Leader and isr path written by another controller. This probably" +
            "means the current controller with epoch %d went through a soft failure and another ".format(epoch) +
            "controller was elected with epoch %d. Aborting state change by this controller".format(controllerEpoch))
        // increment the leader epoch even if there are no leader or isr changes to allow the leader to cache the expanded
        // assigned replica list
        val newLeaderAndIsr = new LeaderAndIsr(leaderAndIsr.leader, leaderAndIsr.leaderEpoch + 1,
          leaderAndIsr.isr, leaderAndIsr.zkVersion + 1)
        // update the new leadership decision in zookeeper or retry
        val (updateSucceeded, newVersion) = ReplicationUtils.updateLeaderAndIsr(zkClient, topic,
          partition, newLeaderAndIsr, epoch, leaderAndIsr.zkVersion)
        newLeaderAndIsr.zkVersion = newVersion
        finalLeaderIsrAndControllerEpoch = Some(LeaderIsrAndControllerEpoch(newLeaderAndIsr, epoch))
        if (updateSucceeded)
          info("Updated leader epoch for partition %s to %d".format(topicAndPartition, newLeaderAndIsr.leaderEpoch))
        updateSucceeded
      case None =>
        // the throw exits the loop; the unreachable `true` that followed it in
        // the original has been removed (dead code)
        throw new IllegalStateException(("Cannot update leader epoch for partition %s as leaderAndIsr path is empty. " +
          "This could mean we somehow tried to reassign a partition that doesn't exist").format(topicAndPartition))
    }
  }
  finalLeaderIsrAndControllerEpoch
}
/**
 * Reacts to ZK session events: state changes are ignored (zkclient reconnects
 * automatically), while a new session after expiry forces this broker to
 * resign as controller and re-enter the election.
 */
class SessionExpirationListener() extends IZkStateListener with Logging {
  this.logIdent = "[SessionExpirationListener on " + config.brokerId + "], "
  @throws(classOf[Exception])
  def handleStateChanged(state: KeeperState) {
    // do nothing, since zkclient will do reconnect for us.
  }
  /**
   * Called after the zookeeper session has expired and a new session has been created. You would have to re-create
   * any ephemeral nodes here.
   *
   * @throws Exception
   *             On any error.
   */
  @throws(classOf[Exception])
  def handleNewSession() {
    info("ZK expired; shut down all controller components and try to re-elect")
    inLock(controllerContext.controllerLock) {
      // resign first (cleans up controller state), then attempt re-election
      onControllerResignation()
      controllerElector.elect
    }
  }
}
// Periodic auto-rebalance check: for each broker that is the preferred
// (head-of-assignment) leader of some partitions, computes the fraction of
// those partitions it does NOT currently lead, and when that imbalance ratio
// exceeds config.leaderImbalancePerBrokerPercentage triggers a preferred
// replica election per partition. Note the controller lock is deliberately
// acquired and released in several short sections rather than held throughout.
private def checkAndTriggerPartitionRebalance(): Unit = {
  if (isActive()) {
    trace("checking need to trigger partition rebalance")
    // get all the active brokers
    var preferredReplicasForTopicsByBrokers: Map[Int, Map[TopicAndPartition, Seq[Int]]] = null
    inLock(controllerContext.controllerLock) {
      // group partitions by their preferred leader (head replica), skipping
      // topics queued for deletion
      preferredReplicasForTopicsByBrokers =
        controllerContext.partitionReplicaAssignment.filterNot(p => deleteTopicManager.isTopicQueuedUpForDeletion(p._1.topic)).groupBy {
          case(topicAndPartition, assignedReplicas) => assignedReplicas.head
        }
    }
    debug("preferred replicas by broker " + preferredReplicasForTopicsByBrokers)
    // for each broker, check if a preferred replica election needs to be triggered
    preferredReplicasForTopicsByBrokers.foreach {
      case(leaderBroker, topicAndPartitionsForBroker) => {
        var imbalanceRatio: Double = 0
        var topicsNotInPreferredReplica: Map[TopicAndPartition, Seq[Int]] = null
        inLock(controllerContext.controllerLock) {
          // partitions whose current leader differs from the preferred broker
          topicsNotInPreferredReplica =
            topicAndPartitionsForBroker.filter {
              case(topicPartition, replicas) => {
                controllerContext.partitionLeadershipInfo.contains(topicPartition) &&
                controllerContext.partitionLeadershipInfo(topicPartition).leaderAndIsr.leader != leaderBroker
              }
            }
          debug("topics not in preferred replica " + topicsNotInPreferredReplica)
          val totalTopicPartitionsForBroker = topicAndPartitionsForBroker.size
          val totalTopicPartitionsNotLedByBroker = topicsNotInPreferredReplica.size
          imbalanceRatio = totalTopicPartitionsNotLedByBroker.toDouble / totalTopicPartitionsForBroker
          trace("leader imbalance ratio for broker %d is %f".format(leaderBroker, imbalanceRatio))
        }
        // check ratio and if greater than desired ratio, trigger a rebalance for the topic partitions
        // that need to be on this broker
        if (imbalanceRatio > (config.leaderImbalancePerBrokerPercentage.toDouble / 100)) {
          topicsNotInPreferredReplica.foreach {
            case(topicPartition, replicas) => {
              inLock(controllerContext.controllerLock) {
                // do this check only if the broker is live and there are no partitions being reassigned currently
                // and preferred replica election is not in progress
                if (controllerContext.liveBrokerIds.contains(leaderBroker) &&
                    controllerContext.partitionsBeingReassigned.size == 0 &&
                    controllerContext.partitionsUndergoingPreferredReplicaElection.size == 0 &&
                    !deleteTopicManager.isTopicQueuedUpForDeletion(topicPartition.topic) &&
                    controllerContext.allTopics.contains(topicPartition.topic)) {
                  onPreferredReplicaElection(Set(topicPartition), true)
                }
              }
            }
          }
        }
      }
    }
  }
}
/**
 * Starts the partition reassignment process unless -
 * 1. Partition previously existed
 * 2. New replicas are the same as existing replicas
 * 3. Any replica in the new set of replicas are dead
 * If any of the above conditions are satisfied, it logs an error and removes the partition from list of reassigned
 * partitions.
 */
class PartitionsReassignedListener(controller: KafkaController) extends IZkDataListener with Logging {
  this.logIdent = "[PartitionsReassignedListener on " + controller.config.brokerId + "]: "
  val zkClient = controller.controllerContext.zkClient
  val controllerContext = controller.controllerContext
  /**
   * Invoked when some partitions are reassigned by the admin command
   * @throws Exception On any error.
   */
  @throws(classOf[Exception])
  def handleDataChange(dataPath: String, data: Object) {
    debug("Partitions reassigned listener fired for path %s. Record partitions to be reassigned %s"
      .format(dataPath, data))
    val partitionsReassignmentData = ZkUtils.parsePartitionReassignmentData(data.toString)
    // only process partitions not already being reassigned; the lock is
    // re-taken per partition below so long reassignments don't starve others
    val partitionsToBeReassigned = inLock(controllerContext.controllerLock) {
      partitionsReassignmentData.filterNot(p => controllerContext.partitionsBeingReassigned.contains(p._1))
    }
    partitionsToBeReassigned.foreach { partitionToBeReassigned =>
      inLock(controllerContext.controllerLock) {
        if(controller.deleteTopicManager.isTopicQueuedUpForDeletion(partitionToBeReassigned._1.topic)) {
          // topic is being deleted: drop the reassignment request entirely
          error("Skipping reassignment of partition %s for topic %s since it is currently being deleted"
            .format(partitionToBeReassigned._1, partitionToBeReassigned._1.topic))
          controller.removePartitionFromReassignedPartitions(partitionToBeReassigned._1)
        } else {
          val context = new ReassignedPartitionsContext(partitionToBeReassigned._2)
          controller.initiateReassignReplicasForTopicPartition(partitionToBeReassigned._1, context)
        }
      }
    }
  }
  /**
   * Called when the leader information stored in zookeeper has been delete. Try to elect as the leader
   * @throws Exception
   *             On any error.
   */
  @throws(classOf[Exception])
  def handleDataDeleted(dataPath: String) {
    // intentionally a no-op: deletion of the reassignment path needs no action
  }
}
/**
 * Watches the leader/ISR path of a partition under reassignment. When every
 * reassigned replica has joined the ISR, resumes the reassignment via
 * onPartitionReassignment; otherwise just logs which replicas still lag.
 */
class ReassignedPartitionsIsrChangeListener(controller: KafkaController, topic: String, partition: Int,
                                            reassignedReplicas: Set[Int])
  extends IZkDataListener with Logging {
  this.logIdent = "[ReassignedPartitionsIsrChangeListener on controller " + controller.config.brokerId + "]: "
  val zkClient = controller.controllerContext.zkClient
  val controllerContext = controller.controllerContext
  /**
   * Invoked when some partitions need to move leader to preferred replica
   * @throws Exception On any error.
   */
  @throws(classOf[Exception])
  def handleDataChange(dataPath: String, data: Object) {
    inLock(controllerContext.controllerLock) {
      debug("Reassigned partitions isr change listener fired for path %s with children %s".format(dataPath, data))
      val topicAndPartition = TopicAndPartition(topic, partition)
      try {
        // check if this partition is still being reassigned or not
        controllerContext.partitionsBeingReassigned.get(topicAndPartition) match {
          case Some(reassignedPartitionContext) =>
            // need to re-read leader and isr from zookeeper since the zkclient callback doesn't return the Stat object
            val newLeaderAndIsrOpt = ZkUtils.getLeaderAndIsrForPartition(zkClient, topic, partition)
            newLeaderAndIsrOpt match {
              case Some(leaderAndIsr) => // check if new replicas have joined ISR
                val caughtUpReplicas = reassignedReplicas & leaderAndIsr.isr.toSet
                if(caughtUpReplicas == reassignedReplicas) {
                  // resume the partition reassignment process
                  info("%d/%d replicas have caught up with the leader for partition %s being reassigned."
                    .format(caughtUpReplicas.size, reassignedReplicas.size, topicAndPartition) +
                    "Resuming partition reassignment")
                  controller.onPartitionReassignment(topicAndPartition, reassignedPartitionContext)
                }
                else {
                  info("%d/%d replicas have caught up with the leader for partition %s being reassigned."
                    .format(caughtUpReplicas.size, reassignedReplicas.size, topicAndPartition) +
                    "Replica(s) %s still need to catch up".format((reassignedReplicas -- leaderAndIsr.isr.toSet).mkString(",")))
                }
              case None => error("Error handling reassignment of partition %s to replicas %s as it was never created"
                .format(topicAndPartition, reassignedReplicas.mkString(",")))
            }
          case None =>
            // reassignment already finished or was cancelled: nothing to do
        }
      } catch {
        case e: Throwable => error("Error while handling partition reassignment", e)
      }
    }
  }
  /**
   * @throws Exception
   *             On any error.
   */
  @throws(classOf[Exception])
  def handleDataDeleted(dataPath: String) {
    // intentionally a no-op
  }
}
/**
 * Starts the preferred replica leader election for the list of partitions specified under
 * /admin/preferred_replica_election -
 */
class PreferredReplicaElectionListener(controller: KafkaController) extends IZkDataListener with Logging {
  this.logIdent = "[PreferredReplicaElectionListener on " + controller.config.brokerId + "]: "
  val zkClient = controller.controllerContext.zkClient
  val controllerContext = controller.controllerContext
  /**
   * Invoked when some partitions are reassigned by the admin command
   * @throws Exception On any error.
   */
  @throws(classOf[Exception])
  def handleDataChange(dataPath: String, data: Object) {
    debug("Preferred replica election listener fired for path %s. Record partitions to undergo preferred replica election %s"
      .format(dataPath, data.toString))
    inLock(controllerContext.controllerLock) {
      val partitionsForPreferredReplicaElection = PreferredReplicaLeaderElectionCommand.parsePreferredReplicaElectionData(data.toString)
      if(controllerContext.partitionsUndergoingPreferredReplicaElection.size > 0)
        info("These partitions are already undergoing preferred replica election: %s"
          .format(controllerContext.partitionsUndergoingPreferredReplicaElection.mkString(",")))
      // skip partitions whose election is already in progress, then filter out
      // partitions of topics queued for deletion
      val partitions = partitionsForPreferredReplicaElection -- controllerContext.partitionsUndergoingPreferredReplicaElection
      val partitionsForTopicsToBeDeleted = partitions.filter(p => controller.deleteTopicManager.isTopicQueuedUpForDeletion(p.topic))
      if(partitionsForTopicsToBeDeleted.size > 0) {
        error("Skipping preferred replica election for partitions %s since the respective topics are being deleted"
          .format(partitionsForTopicsToBeDeleted))
      }
      else
        controller.onPreferredReplicaElection(partitions -- partitionsForTopicsToBeDeleted)
    }
  }
  /**
   * @throws Exception
   *             On any error.
   */
  @throws(classOf[Exception])
  def handleDataDeleted(dataPath: String) {
    // intentionally a no-op
  }
}
// Mutable per-partition reassignment state: the target replica list and the
// ISR-change listener registered for the partition (null until installed).
case class ReassignedPartitionsContext(var newReplicas: Seq[Int] = Seq.empty,
                                       var isrChangeListener: ReassignedPartitionsIsrChangeListener = null)
// Identifies a single replica of a topic partition; toString is used in
// controller log lines.
case class PartitionAndReplica(topic: String, partition: Int, replica: Int) {
  override def toString(): String =
    s"[Topic=$topic,Partition=$partition,Replica=$replica]"
}
// Pairs a partition's leader/ISR state with the epoch of the controller that
// decided it; toString renders the exact same "(Leader:...,ISR:...,
// LeaderEpoch:...,ControllerEpoch:...)" form as before, via interpolation
// instead of a StringBuilder.
case class LeaderIsrAndControllerEpoch(val leaderAndIsr: LeaderAndIsr, controllerEpoch: Int) {
  override def toString(): String =
    s"(Leader:${leaderAndIsr.leader}" +
      s",ISR:${leaderAndIsr.isr.mkString(",")}" +
      s",LeaderEpoch:${leaderAndIsr.leaderEpoch}" +
      s",ControllerEpoch:$controllerEpoch)"
}
// Controller-wide metrics: unclean-election rate meter and a timer around
// leader election latency.
object ControllerStats extends KafkaMetricsGroup {
  val uncleanLeaderElectionRate = newMeter("UncleanLeaderElectionsPerSec", "elections", TimeUnit.SECONDS)
  val leaderElectionTimer = new KafkaTimer(newTimer("LeaderElectionRateAndTimeMs", TimeUnit.MILLISECONDS, TimeUnit.SECONDS))
}
| jkreps/kafka | core/src/main/scala/kafka/controller/KafkaController.scala | Scala | apache-2.0 | 73,490 |
package controllers
import play.api._
import play.api.mvc._
import play.api.data._
import play.api.data.Forms._
import models.User
/**
 * Main application controller: landing page, user sign-up and
 * session-based authentication (login/logout).
 */
object Application extends Controller with Secured {

  // Form model for the sign-up page; passwordConfirm must equal password.
  case class UserSignup(email: String, password: String, passwordConfirm: String)

  // Sign-up form: validates email format, non-empty passwords, matching
  // passwords, and that the email is not already registered.
  val userSignUpForm = Form(
    mapping(
      "email" -> email,
      "password" -> nonEmptyText,
      "passwordConfirm" -> nonEmptyText)(UserSignup.apply)(UserSignup.unapply)
      verifying ("Passwords must match", f => f.password == f.passwordConfirm)
      verifying ("Email already exist", f => User.getByEmail(f.email) match {
        case None => true
        case Some(u) => false
      }))

  // Landing page; passes the logged-in user (if any) to the view.
  def index = Action {
    implicit request =>
      Ok(views.html.index("Welcome to Open Innovation.", GetLogedUser(request)))
  }

  // Renders the sign-up page with an empty form.
  def signup = Action {
    Ok(views.html.signup(userSignUpForm))
  }

  // Handles sign-up submission: re-renders the form on validation errors,
  // otherwise creates the user and logs them in via the session.
  def newUser = Action { implicit request =>
    userSignUpForm.bindFromRequest.fold(
      errors => BadRequest(views.html.signup(errors)),
      userSubmit => {
        models.User.add(userSubmit.email, userSubmit.password) match {
          case None => Ok(views.html.index("Error: User not added", None))
          case Some(user) => Redirect(routes.Application.index).withSession("email" -> user.email)
        }
      })
  }

  // -- Authentication

  // Login form: (email, password) tuple checked against the user store.
  val loginForm = Form(
    tuple(
      "email" -> text,
      "password" -> text) verifying ("Invalid email or password", result => result match {
        case (email, password) => User.authenticate(email, password).isDefined
      }))

  /**
   * Login page.
   */
  def login = Action { implicit request =>
    Ok(views.html.login(loginForm))
  }

  /**
   * Handle login form submission.
   */
  def authenticate = Action { implicit request =>
    loginForm.bindFromRequest.fold(
      formWithErrors => BadRequest(views.html.login(formWithErrors)),
      user => Redirect(routes.Application.index).withSession("email" -> user._1))
  }

  /**
   * Logout and clean the session.
   */
  def logout = Action {
    Redirect(routes.Application.login).withNewSession.flashing(
      "success" -> "You've been logged out")
  }
}
/**
 * Mix-in providing session-based authentication helpers for controllers.
 * The session stores the authenticated user's email under the "email" key.
 */
trait Secured {

  /**
   * Retrieve the connected user email from the session, if any.
   */
  private def username(request: RequestHeader) = request.session.get("email")

  /**
   * Redirect to login if the user is not authorized.
   */
  private def onUnauthorized(request: RequestHeader) = Results.Redirect(routes.Application.login)

  // --

  /**
   * Action for authenticated users: invokes `f` with the session email,
   * or redirects to the login page when no user is connected.
   */
  def IsAuthenticated(f: => String => Request[AnyContent] => Result) = Security.Authenticated(username, onUnauthorized) { user =>
    Action(request => f(user)(request))
  }

  /**
   * Resolve the currently logged-in user from the session email.
   * Returns None when there is no session email or no matching user.
   */
  def GetLogedUser(request: Request[AnyContent]): Option[User] =
    // map(...).getOrElse(None) collapsed into the equivalent idiomatic flatMap.
    request.session.get("email").flatMap(User.getByEmail)
}
| fbozoglilanian/openinnovation | app/controllers/Application.scala | Scala | mit | 2,904 |
/*
* Copyright 2016 Michal Harish, michal.harish@gmail.com
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.amient.affinity.core.serde
import akka.actor.{Actor, ActorSystem, Props}
import akka.serialization.SerializationExtension
import com.typesafe.config.{ConfigFactory, ConfigValueFactory}
import io.amient.affinity.avro.MemorySchemaRegistry
import io.amient.affinity.avro.record.AvroRecord
import io.amient.affinity.core.actor.{CreateKeyValueMediator, KeyValueMediatorCreated, RegisterMediatorSubscriber, Routed}
import io.amient.affinity.core.serde.collection.SeqSerde
import io.amient.affinity.core.serde.primitive.OptionSerde
import io.amient.affinity.core.util.{Reply, Scatter}
import io.amient.affinity.{AffinityActorSystem, Conf}
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import scala.collection.immutable.Seq
// Avro-serializable routed key used throughout the serialization round-trip tests.
case class Key(key: Int) extends AvroRecord with Routed with Reply[Option[TestValue]]
// Plain case class (no AvroRecord) exercising the Java-serialization fallback path.
case class NonAvroCase(key: Int)
/** Avro record holding a list of ints, with immutable add/remove helpers. */
case class TestValue(items: List[Int]) extends AvroRecord {
  /** Returns a copy with `item` appended. */
  def withAddedItem(item: Int) = TestValue(items :+ item)
  /** Returns a copy with every occurrence of `item` removed. */
  def withRemovedItem(item: Int) = TestValue(items.filterNot(_ == item))
}
// Parameterless Avro scatter message; gather sums the partial counts.
case class CountMsgAvro() extends AvroRecord with Scatter[Long] {
  override def gather(r1: Long, r2: Long): Long = r1 + r2
}
/** Sink actor that ignores every message; exists only to supply a live ActorRef. */
class RefActor extends Actor {
  override def receive: Receive = { case _ => () }
}
/**
 * Verifies Akka's configured serialization for the message types used by
 * Affinity: Avro records, strings, tuples, options, collections and internal
 * actor messages.
 *
 * NOTE(review): several tests pin exact byte sequences, so any change to a
 * serializer's wire format will (intentionally) fail here.
 */
class AkkaSerializationSpec extends WordSpecLike with BeforeAndAfterAll with Matchers {

  // Reference Avro serializer; Akka serialization must match its byte output.
  val registry = new MemorySchemaRegistry()

  val system: ActorSystem = AffinityActorSystem.create(ConfigFactory.load("akkaserializationspec"))

  // Live actor reference for tests that serialize messages carrying an ActorRef.
  val ref = system.actorOf(Props[RefActor])

  override protected def afterAll(): Unit = system.terminate()

  // Byte-for-byte parity with the Avro serde matters because the bytes feed
  // the murmur2 hash partitioner.
  "Akka-serialized AvroRecord bytes" must {
    "be identical to AvroSerde bytes - this is important for murmur2 hash partitioner" in {
      val in = Key(1)
      val bytes = SerializationExtension(system).serialize(in).get
      val bytes2 = registry.toBytes(in)
      bytes.mkString(".") should be(bytes2.mkString("."))
    }
  }

  "Akka-serialized String bytes" must {
    "be identical to AvroSerde bytes - this is important for murmur2 hash partitioner" in {
      val in = "test-string"
      val bytes = SerializationExtension(system).serialize(in).get
      val bytes2 = registry.toBytes(in)
      bytes.mkString(".") should be(bytes2.mkString("."))
    }
  }

  // Round-trip of a Tuple3 through the tuple serde.
  "TupleSerde" must {
    "work with wrapped tuple3" in {
      val in = (1000, 1.1, "graph")
      val bytes = SerializationExtension(system).serialize(in).get
      val out = SerializationExtension(system).deserialize(bytes, classOf[Tuple3[Int, Double, String]]).get
      out should be(in)
    }
  }

  // Non-Avro case classes fall back to Java serialization.
  "Java serializer" must {
    "work with internal messages" in {
      val in = NonAvroCase(123)
      val bytes = SerializationExtension(system).serialize(in).get
      SerializationExtension(system).deserialize(bytes, classOf[NonAvroCase]).get should be (in)
    }
  }

  "OptionSerde" must {
    val serde = SerializationExtension(system).serializerOf(classOf[OptionSerde].getName).get
    // None encodes as zero bytes.
    "work with with None" in {
      val bytes = serde.toBinary(None)
      bytes.length should equal(0)
      serde.fromBinary(bytes) should be(None)
    }
    "work with with wrapped string" in {
      val stringSerde = SerializationExtension(system).serializerFor(classOf[String])
      val string = stringSerde.toBinary("XYZ")
      string.mkString(".") should equal("0.0.0.0.6.6.88.89.90")
      stringSerde.fromBinary(string) should be("XYZ")
      val bytes = serde.toBinary(Some("XYZ"))
      bytes.mkString(".") should equal("0.0.0.-56.0.0.0.0.6.6.88.89.90")
      serde.fromBinary(bytes) should be(Some("XYZ"))
    }
    "work with wrapped unit" in {
      val bytes = serde.toBinary(Some(()))
      bytes.mkString(".") should equal("0.0.0.100")
      serde.fromBinary(bytes) should be(Some(()))
    }
    "work with wrapped tuple" in {
      val bytes = serde.toBinary(Some(("XYZ", 10)))
      bytes.mkString(".") should equal("0.0.0.-124.0.0.0.2.0.0.0.13.0.0.0.-56.0.0.0.0.6.6.88.89.90.0.0.0.10.0.0.0.-56.0.0.0.0.2.20")
      serde.fromBinary(bytes) should be(Some(("XYZ", 10)))
    }
  }

  "List" must {
    "serialize correctly when elements are AvroRecords" in {
      val x: Seq[Key] = List(Key(1), Key(2), Key(3))
      val y: Array[Byte] = SerializationExtension(system).serialize(x).get
      val z: Seq[Key] = SerializationExtension(system).deserialize(y, classOf[List[Key]]).get
      z should be(x)
    }
    // SeqSerde must be constructible from plain config, outside any ActorSystem.
    "can be constructed without actor system context" in {
      Serde.of[List[Long]](ConfigFactory.empty.withValue(
        Conf.Affi.Avro.Class.path, ConfigValueFactory.fromAnyRef(classOf[MemorySchemaRegistry].getName)))
        .isInstanceOf[SeqSerde] should be(true)
    }
    "serialize correctly when elements are simple case classes" in {
      assert(classOf[java.io.Serializable].isAssignableFrom(classOf[NonAvroCase]))
      val x: List[NonAvroCase] = NonAvroCase(3) :: List(NonAvroCase(1), NonAvroCase(2))
      val y: Array[Byte] = SerializationExtension(system).serialize(x).get
      val z: Seq[Key] = SerializationExtension(system).deserialize(y, classOf[List[Key]]).get
      z should be(x)
    }
  }

  "Set" must {
    "serialize correctly when elements are AvroRecords" in {
      val x: Set[Key] = Set(Key(1), Key(2), Key(3))
      val y: Array[Byte] = SerializationExtension(system).serialize(x).get
      val z: Set[Key] = SerializationExtension(system).deserialize(y, classOf[Set[Key]]).get
      z should be(x)
    }
  }

  "Parameterless Scatter message with AvroRecord" must {
    "be serializable" in {
      val x = CountMsgAvro()
      val y: Array[Byte] = SerializationExtension(system).serialize(x).get
      val z = SerializationExtension(system).deserialize(y, classOf[CountMsgAvro]).get
      z should be(x)
    }
  }

  // Internal control messages use compact hand-rolled encodings; the leading
  // byte is the message-type tag.
  "Internal message" must {
    "serialize efficiently CreateKeyValueMediator message" in {
      val x = CreateKeyValueMediator("hello", 1000)
      val y: Array[Byte] = SerializationExtension(system).serialize(x).get
      y.mkString(".") should be ("1.0.5.104.101.108.108.111.0.11.0.0.0.-56.0.0.0.0.2.-48.15")
      val z = SerializationExtension(system).deserialize(y, classOf[CreateKeyValueMediator]).get
      z should be(x)
    }
    "serialize efficiently KeyValueMediatorCreated message" in {
      val x = KeyValueMediatorCreated(ref)
      val y: Array[Byte] = SerializationExtension(system).serialize(x).get
      y.mkString(".") should startWith ("2.")
      val z = SerializationExtension(system).deserialize(y, classOf[KeyValueMediatorCreated]).get
      z should be(x)
    }
    "serialize efficiently RegisterMediatorSubscriber message" in {
      val x = RegisterMediatorSubscriber(ref)
      val y: Array[Byte] = SerializationExtension(system).serialize(x).get
      y.mkString(".") should startWith ("3.")
      val z = SerializationExtension(system).deserialize(y, classOf[RegisterMediatorSubscriber]).get
      z should be(x)
    }
  }
}
| amient/affinity | core/src/test/scala/io/amient/affinity/core/serde/AkkaSerializationSpec.scala | Scala | apache-2.0 | 7,803 |
package ui.user
import javax.swing.JFrame
import main.MuseCharRenderer
import utilities.{MuseCharMapLoader, RNG}
/**
 * Created by weijiayi on 3/10/16.
 *
 * Manual visual check: renders a sample text with the Muse character map
 * and shows the animated result in a Swing frame.
 */
object RenderTest {

  def main(args: Array[String]) {
    val rendered = renderText()
    val dotsPerUnit = 50.0
    val pixelPerUnit = 14
    val screenFactor = 2
    val paintable = new PaintableResult(
      rendered,
      dotsPerUnit,
      pixelPerUnit,
      edgeSpace = EdgeSpace(2, 2, 4, 2),
      screenPixelFactor = screenFactor,
      thicknessScale = 1.8)
    val panel = paintable.showInAnimation(penSpeed = 40, frameRate = 60, shouldRun = true)
    // val panel = paintable.showInScrollPane()
    val frame = new JFrame("Rendering Result")
    frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE)
    frame.setContentPane(panel)
    frame.pack()
    frame.setVisible(true)
  }

  /** Renders the sample text with the default character map and returns the result. */
  def renderText() = {
    val renderer = new MuseCharRenderer(letterSpacing = 0.0, spaceWidth = 0.8, symbolFrontSpace = 0.2)
    val letterMap = MuseCharMapLoader.loadDefaultCharMap()
    // val text = "None of this had even a hope of any practical application in my life. But ten years later, when we were designing the first Macintosh computer, it all came back to me. And we designed it all into the Mac. It was the first computer with beautiful typography. If I had never dropped in on that single course in college, the Mac would have never had multiple typefaces or proportionally spaced fonts. And since Windows just copied the Mac, its likely that no personal computer would have them. If I had never dropped out, I would have never dropped in on this calligraphy class, and personal computers might not have the wonderful typography that they do. Of course it was impossible to connect the dots looking forward when I was in college. But it was very, very clear looking backwards ten years later."
    val text = "Thousands cities from home, wander into the unknown. Chances are here I was told, Crossing the footsteps of new and of old"
    renderer.renderTextInParallel(letterMap, lean = 0.3, maxLineWidth = 30, breakWordThreshold = 5,
      lineSpacing = 4, randomness = 0.04, lineRandomness = 0.1)(text)(RNG(1))._1
  }
}
| MrVPlusOne/Muse-CGH | src/ui/user/RenderTest.scala | Scala | mit | 2,126 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import io.gatling.core.Predef._
import io.gatling.http.Predef._
class SimulationStructure extends Simulation {
  // Documentation sample: the paired "//#name" marker comments below delimit
  // snippets that are included verbatim in the Sphinx docs — do not edit,
  // move, or put comments between a marker pair.
  val httpConf = http
  //#headers
  val headers_10 = Map("Content-Type" -> """application/x-www-form-urlencoded""")
  //#headers
  //#scenario-definition
  val scn = scenario("ScenarioName") // etc...
  //#scenario-definition
  //#http-request-sample
  // Here's an example of a POST request
  http("request_10")
    .post("/computers")
    .headers(headers_10)
    .formParam("name", "Beautiful Computer")
    .formParam("introduced", "2012-05-30")
    .formParam("discontinued", "")
    .formParam("company", "37")
  //#http-request-sample
  //#setUp
  setUp(
    scn.inject(atOnceUsers(1)) // (1)
      .protocols(httpConf) // (2)
  )
  //#setUp
  //#hooks
  before {
    println("Simulation is about to start!")
  }
  after {
    println("Simulation is finished!")
  }
  //#hooks
}
| timve/gatling | src/sphinx/general/code/SimulationStructure.scala | Scala | apache-2.0 | 1,524 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.stream.sql
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.functions.timestamps.AscendingTimestampExtractor
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.api.config.ExecutionConfigOptions
import org.apache.flink.table.planner.factories.TestValuesTableFactory
import org.apache.flink.table.planner.runtime.utils.StreamingWithMiniBatchTestBase.{MiniBatchMode, MiniBatchOn}
import org.apache.flink.table.planner.runtime.utils.StreamingWithStateTestBase.StateBackendMode
import org.apache.flink.table.planner.runtime.utils._
import org.apache.flink.table.utils.LegacyRowResource
import org.apache.flink.types.Row
import org.junit.Assert._
import org.junit.{Assume, Rule, Test}
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import scala.collection.mutable
import scala.collection.JavaConversions._
/**
 * Integration tests for streaming deduplication expressed as a
 * ROW_NUMBER() = 1 query, covering first-row/last-row semantics over
 * processing time and event time, with and without mini-batch compaction.
 *
 * Runs parameterized over mini-batch mode and state backend.
 */
@RunWith(classOf[Parameterized])
class DeduplicateITCase(miniBatch: MiniBatchMode, mode: StateBackendMode)
  extends StreamingWithMiniBatchTestBase(miniBatch, mode) {

  @Rule
  def usesLegacyRows: LegacyRowResource = LegacyRowResource.INSTANCE

  // Event-time test data: (key-ish int, event timestamp millis, payload).
  // Note that some rows arrive out of timestamp order on purpose.
  lazy val rowtimeTestData = new mutable.MutableList[(Int, Long, String)]
  rowtimeTestData.+=((1, 1L, "Hi"))
  rowtimeTestData.+=((1, 3L, "Hello"))
  rowtimeTestData.+=((1, 2L, "Hello world"))
  rowtimeTestData.+=((2, 3L, "I am fine."))
  rowtimeTestData.+=((2, 6L, "Comment#1"))
  rowtimeTestData.+=((3, 5L, "Comment#2"))
  rowtimeTestData.+=((3, 4L, "Comment#2"))
  rowtimeTestData.+=((4, 4L, "Comment#3"))

  // Keep the first row per key, ordered by an explicit proctime attribute.
  @Test
  def testFirstRowOnProctime(): Unit = {
    val t = failingDataSource(TestData.tupleData3)
      .toTable(tEnv, 'a, 'b, 'c, 'proctime.proctime)
    tEnv.registerTable("T", t)
    val sql =
      """
        |SELECT a, b, c
        |FROM (
        |  SELECT *,
        |    ROW_NUMBER() OVER (PARTITION BY b ORDER BY proctime) as rowNum
        |  FROM T
        |)
        |WHERE rowNum = 1
      """.stripMargin
    val sink = new TestingRetractSink
    tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink)
    env.execute()
    val expected = List("1,1,Hi", "2,2,Hello", "4,3,Hello world, how are you?",
      "7,4,Comment#1", "11,5,Comment#5", "16,6,Comment#10")
    assertEquals(expected.sorted, sink.getRetractResults.sorted)
  }

  // Same as above but ordering by the builtin proctime() function.
  @Test
  def testFirstRowOnBuiltinProctime(): Unit = {
    val t = failingDataSource(TestData.tupleData3).toTable(tEnv, 'a, 'b, 'c)
    tEnv.registerTable("T", t)
    val sql =
      """
        |SELECT a, b, c
        |FROM (
        |  SELECT *,
        |    ROW_NUMBER() OVER (PARTITION BY b ORDER BY proctime()) as rowNum
        |  FROM T
        |)
        |WHERE rowNum = 1
      """.stripMargin
    val sink = new TestingRetractSink
    tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink)
    env.execute()
    val expected = List("1,1,Hi", "2,2,Hello", "4,3,Hello world, how are you?",
      "7,4,Comment#1", "11,5,Comment#5", "16,6,Comment#10")
    assertEquals(expected.sorted, sink.getRetractResults.sorted)
  }

  // Keep the last row per key (ORDER BY proctime DESC).
  @Test
  def testLastRowOnProctime(): Unit = {
    val t = failingDataSource(TestData.tupleData3)
      .toTable(tEnv, 'a, 'b, 'c, 'proctime.proctime)
    tEnv.registerTable("T", t)
    val sql =
      """
        |SELECT a, b, c
        |FROM (
        |  SELECT *,
        |    ROW_NUMBER() OVER (PARTITION BY b ORDER BY proctime DESC) as rowNum
        |  FROM T
        |)
        |WHERE rowNum = 1
      """.stripMargin
    val sink = new TestingRetractSink
    tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink)
    env.execute()
    val expected = List("1,1,Hi", "3,2,Hello world", "6,3,Luke Skywalker",
      "10,4,Comment#4", "15,5,Comment#9", "21,6,Comment#15")
    assertEquals(expected.sorted, sink.getRetractResults.sorted)
  }

  @Test
  def testLastRowOnBuiltinProctime(): Unit = {
    val t = failingDataSource(TestData.tupleData3).toTable(tEnv, 'a, 'b, 'c)
    tEnv.registerTable("T", t)
    val sql =
      """
        |SELECT a, b, c
        |FROM (
        |  SELECT *,
        |    ROW_NUMBER() OVER (PARTITION BY b ORDER BY proctime() DESC) as rowNum
        |  FROM T
        |)
        |WHERE rowNum = 1
      """.stripMargin
    val sink = new TestingRetractSink
    tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink)
    env.execute()
    val expected = List("1,1,Hi", "3,2,Hello world", "6,3,Luke Skywalker",
      "10,4,Comment#4", "15,5,Comment#9", "21,6,Comment#15")
    assertEquals(expected.sorted, sink.getRetractResults.sorted)
  }

  // Event-time first-row: the raw changelog includes retractions (-U/+U)
  // when an earlier-timestamped row arrives late.
  @Test
  def testFirstRowOnRowtime(): Unit = {
    val t = env.fromCollection(rowtimeTestData)
      .assignTimestampsAndWatermarks(new RowtimeExtractor)
      .toTable(tEnv, 'a, 'b, 'c, 'rowtime.rowtime())
    tEnv.registerTable("T", t)
    createSinkTable("rowtime_sink")
    val sql =
      """
        |INSERT INTO rowtime_sink
        |  SELECT a, b, c, rowtime
        |  FROM (
        |    SELECT *,
        |      ROW_NUMBER() OVER (PARTITION BY a ORDER BY rowtime) as rowNum
        |    FROM T
        |  )
        |  WHERE rowNum = 1
      """.stripMargin
    tEnv.executeSql(sql).await()
    val rawResult = TestValuesTableFactory.getRawResults("rowtime_sink")
    val expected = List(
      "+I(1,1,Hi,1970-01-01T00:00:00.001)",
      "+I(2,3,I am fine.,1970-01-01T00:00:00.003)",
      "+I(3,5,Comment#2,1970-01-01T00:00:00.005)",
      "-U(3,5,Comment#2,1970-01-01T00:00:00.005)",
      "+U(3,4,Comment#2,1970-01-01T00:00:00.004)",
      "+I(4,4,Comment#3,1970-01-01T00:00:00.004)")
    assertEquals(expected.sorted, rawResult.sorted)
  }

  // With mini-batch change compaction enabled, intermediate -U/+U pairs are
  // folded away and only final +I records remain.
  @Test
  def testFirstRowWithoutAllChangelogOnRowtime(): Unit = {
    Assume.assumeTrue("Without all change log only for minibatch.", miniBatch == MiniBatchOn)
    tEnv.getConfig.set(
      ExecutionConfigOptions.TABLE_EXEC_DEDUPLICATE_MINIBATCH_COMPACT_CHANGES_ENABLED,
      Boolean.box(true))
    val t = env.fromCollection(rowtimeTestData)
      .assignTimestampsAndWatermarks(new RowtimeExtractor)
      .toTable(tEnv, 'a, 'b, 'c, 'rowtime.rowtime())
    tEnv.registerTable("T", t)
    createSinkTable("rowtime_sink")
    val sql =
      """
        |INSERT INTO rowtime_sink
        |  SELECT a, b, c, rowtime
        |  FROM (
        |    SELECT *,
        |      ROW_NUMBER() OVER (PARTITION BY a ORDER BY rowtime) as rowNum
        |    FROM T
        |  )
        |  WHERE rowNum = 1
      """.stripMargin
    tEnv.executeSql(sql).await()
    val rawResult = TestValuesTableFactory.getRawResults("rowtime_sink")
    val expected = List(
      "+I(1,1,Hi,1970-01-01T00:00:00.001)",
      "+I(2,3,I am fine.,1970-01-01T00:00:00.003)",
      "+I(3,4,Comment#2,1970-01-01T00:00:00.004)",
      "+I(4,4,Comment#3,1970-01-01T00:00:00.004)")
    assertEquals(expected.sorted, rawResult.sorted)
  }

  // First-row dedup feeding a downstream unbounded aggregation.
  @Test
  def testFirstRowOnRowTimeFollowedByUnboundedAgg(): Unit = {
    val t = env.fromCollection(rowtimeTestData)
      .assignTimestampsAndWatermarks(new RowtimeExtractor)
      .toTable(tEnv, 'a, 'b, 'c, 'rowtime.rowtime())
    tEnv.registerTable("T", t)
    tEnv.executeSql(
      s"""
         |CREATE TABLE rowtime_sink (
         |    cnt BIGINT
         |) WITH (
         |  'connector' = 'values',
         |  'sink-insert-only' = 'false',
         |  'changelog-mode' = 'I,UA,D'
         |)
         |""".stripMargin)
    val sql =
      """
        |INSERT INTO rowtime_sink
        |SELECT COUNT(b) FROM (
        | SELECT a, b, c, rowtime
        | FROM (
        |   SELECT *,
        |     ROW_NUMBER() OVER (PARTITION BY b ORDER BY rowtime) as rowNum
        |   FROM T
        | )
        | WHERE rowNum = 1
        | )
      """.stripMargin
    tEnv.executeSql(sql).await()
    val rawResult = TestValuesTableFactory.getResults("rowtime_sink")
    val expected = List("6")
    assertEquals(expected.sorted, rawResult.sorted)
  }

  // Event-time last-row: later-timestamped rows replace earlier ones per key.
  @Test
  def testLastRowOnRowtime(): Unit = {
    val t = env.fromCollection(rowtimeTestData)
      .assignTimestampsAndWatermarks(new RowtimeExtractor)
      .toTable(tEnv, 'a, 'b, 'c, 'rowtime.rowtime())
    tEnv.registerTable("T", t)
    createSinkTable("rowtime_sink")
    val sql =
      """
        |INSERT INTO rowtime_sink
        |  SELECT a, b, c, rowtime
        |  FROM (
        |    SELECT *,
        |      ROW_NUMBER() OVER (PARTITION BY b ORDER BY rowtime DESC) as rowNum
        |    FROM T
        |  )
        |  WHERE rowNum = 1
      """.stripMargin
    tEnv.executeSql(sql).await()
    val rawResult = TestValuesTableFactory.getRawResults("rowtime_sink")
    val expected = List(
      "+I(1,1,Hi,1970-01-01T00:00:00.001)",
      "+I(1,3,Hello,1970-01-01T00:00:00.003)",
      "+I(1,2,Hello world,1970-01-01T00:00:00.002)",
      "-U(1,3,Hello,1970-01-01T00:00:00.003)",
      "+U(2,3,I am fine.,1970-01-01T00:00:00.003)",
      "+I(2,6,Comment#1,1970-01-01T00:00:00.006)",
      "+I(3,5,Comment#2,1970-01-01T00:00:00.005)",
      "+I(3,4,Comment#2,1970-01-01T00:00:00.004)",
      "-U(3,4,Comment#2,1970-01-01T00:00:00.004)",
      "+U(4,4,Comment#3,1970-01-01T00:00:00.004)")
    assertEquals(expected.sorted, rawResult.sorted)
  }

  @Test
  def testLastRowWithoutAllChangelogOnRowtime(): Unit = {
    Assume.assumeTrue("Without all change log only for minibatch.", miniBatch == MiniBatchOn)
    tEnv.getConfig.set(
      ExecutionConfigOptions.TABLE_EXEC_DEDUPLICATE_MINIBATCH_COMPACT_CHANGES_ENABLED,
      Boolean.box(true))
    val t = env.fromCollection(rowtimeTestData)
      .assignTimestampsAndWatermarks(new RowtimeExtractor)
      .toTable(tEnv, 'a, 'b, 'c, 'rowtime.rowtime())
    tEnv.registerTable("T", t)
    createSinkTable("rowtime_sink")
    val sql =
      """
        |INSERT INTO rowtime_sink
        |  SELECT a, b, c, rowtime
        |  FROM (
        |    SELECT *,
        |      ROW_NUMBER() OVER (PARTITION BY b ORDER BY rowtime DESC) as rowNum
        |    FROM T
        |  )
        |  WHERE rowNum = 1
      """.stripMargin
    tEnv.executeSql(sql).await()
    val rawResult = TestValuesTableFactory.getRawResults("rowtime_sink")
    val expected = List(
      "+I(1,1,Hi,1970-01-01T00:00:00.001)",
      "+I(1,2,Hello world,1970-01-01T00:00:00.002)",
      "+I(2,3,I am fine.,1970-01-01T00:00:00.003)",
      "+I(2,6,Comment#1,1970-01-01T00:00:00.006)",
      "+I(3,5,Comment#2,1970-01-01T00:00:00.005)",
      "+I(4,4,Comment#3,1970-01-01T00:00:00.004)")
    assertEquals(expected.sorted, rawResult.sorted)
  }

  @Test
  def testLastRowOnRowTimeFollowedByUnboundedAgg(): Unit = {
    val t = env.fromCollection(rowtimeTestData)
      .assignTimestampsAndWatermarks(new RowtimeExtractor)
      .toTable(tEnv, 'a, 'b, 'c, 'rowtime.rowtime())
    tEnv.registerTable("T", t)
    tEnv.executeSql(
      s"""
         |CREATE TABLE rowtime_sink (
         |    cnt BIGINT
         |) WITH (
         |  'connector' = 'values',
         |  'sink-insert-only' = 'false',
         |  'changelog-mode' = 'I,UA,D'
         |)
         |""".stripMargin)
    val sql =
      """
        |INSERT INTO rowtime_sink
        |SELECT COUNT(b) FROM (
        | SELECT a, b, c, rowtime
        | FROM (
        |   SELECT *,
        |     ROW_NUMBER() OVER (PARTITION BY b ORDER BY rowtime DESC) as rowNum
        |   FROM T
        | )
        | WHERE rowNum = 1
        | )
      """.stripMargin
    tEnv.executeSql(sql).await()
    val rawResult = TestValuesTableFactory.getResults("rowtime_sink")
    val expected = List("6")
    assertEquals(expected.sorted, rawResult.sorted)
  }

  // Creates a retract-capable "values" sink table used by the rowtime tests.
  def createSinkTable(tableName: String): Unit = {
    tEnv.executeSql(
      s"""
         |CREATE TABLE $tableName (
         |    a INT,
         |    b BIGINT,
         |    c STRING,
         |    rowtime TIMESTAMP(3)
         |) WITH (
         |  'connector' = 'values',
         |  'sink-insert-only' = 'false',
         |  'changelog-mode' = 'I,UA,D'
         |)
         |""".stripMargin)
  }
}
/** Uses the tuple's second field (millis) as the ascending event timestamp. */
class RowtimeExtractor extends AscendingTimestampExtractor[(Int, Long, String)] {
  override def extractAscendingTimestamp(element: (Int, Long, String)): Long = {
    val (_, timestampMillis, _) = element
    timestampMillis
  }
}
| xccui/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/runtime/stream/sql/DeduplicateITCase.scala | Scala | apache-2.0 | 12,890 |
package com.wavesplatform.protobuf.utils
import cats.syntax.applicativeError._
import com.google.protobuf.CodedOutputStream
import com.wavesplatform.common.state.ByteStr
import scalapb.{GeneratedMessage, GeneratedMessageCompanion}
import scala.util.control.NonFatal
/** Helpers for (de)serializing protobuf messages with deterministic output. */
object PBUtils {

  /**
   * Serializes `msg` using protobuf's deterministic mode so that equal
   * messages always produce identical bytes.
   *
   * Throws a RuntimeException (wrapping the original cause) if the written
   * size does not match the pre-computed serialized size.
   */
  def encodeDeterministic(msg: GeneratedMessage): Array[Byte] = {
    val buffer = new Array[Byte](msg.serializedSize)
    val stream = CodedOutputStream.newInstance(buffer)
    // Deterministic mode must be enabled before anything is written.
    stream.useDeterministicSerialization()
    msg.writeTo(stream)
    try stream.checkNoSpaceLeft()
    catch {
      case NonFatal(cause) =>
        throw new RuntimeException(s"Error serializing PB message: $msg (bytes = ${ByteStr(msg.toByteArray)})", cause)
    }
    buffer
  }

  /**
   * Decodes `msg` with the given companion, mapping any failure to a
   * RuntimeException that includes the raw bytes for diagnostics.
   */
  def decode[A <: GeneratedMessage](msg: Array[Byte], cmp: GeneratedMessageCompanion[A]): Either[Throwable, A] = {
    val validated = cmp.validate(msg)
    validated.toEither.adaptErr {
      case err => new RuntimeException(s"Error deserializing PB message: $cmp (bytes = ${ByteStr(msg)})", err)
    }
  }
}
| wavesplatform/Waves | node/src/main/scala/com/wavesplatform/protobuf/utils/PBUtils.scala | Scala | mit | 1,085 |
package com.softwaremill.mqperf
import java.util.concurrent.atomic.{AtomicInteger, AtomicLong}
import java.util.concurrent.{ExecutorService, Executors}
import akka.dispatch.ExecutionContexts
import com.softwaremill.mqperf.mq.Mq
import com.softwaremill.mqperf.util.FakeClock
import io.prometheus.client.{Counter, Gauge, Histogram}
import org.joda.time.DateTime
import org.scalatest.concurrent.Eventually
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers, Suite}
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
/**
 * Tests the idle-timeout behaviour of ReceiverRunnable using a fake clock:
 * the receiver must only stop after it has received at least one message
 * and then been idle for the full timeout.
 */
class ReceiverRunnableTest extends FlatSpec with ReceiverTestKit {

  behavior of "ReceiverRunnable"

  it should "wait for idle timeout before stop" in new ReceiverTestCase {
    receiverShouldBeRunning
    queueShouldBeEmpty
    sendMsgAndWaitForReceive()
    receiverShouldBeRunning
    // Almost max idle time
    addClockTicks(receiverTimeout - 1.nano)
    receiverShouldBeRunning
    // Exceed the idle timeout
    addClockTicks(1.nano)
    receiverShouldBeStopped
  }

  it should "reset idle time after receiving any message" in new ReceiverTestCase {
    receiverShouldBeRunning
    queueShouldBeEmpty
    sendMsgAndWaitForReceive()
    receiverShouldBeRunning
    // Almost max idle time
    addClockTicks(receiverTimeout - 1.nano)
    receiverShouldBeRunning
    // A new message resets the idle countdown.
    sendMsgAndWaitForReceive()
    receiverShouldBeRunning
    // Almost max idle time
    addClockTicks(receiverTimeout - 1.nano)
    receiverShouldBeRunning
    // Exceed the idle timeout
    addClockTicks(1.nano)
    receiverShouldBeStopped
  }

  it should "not timeout before receiving any message" in new ReceiverTestCase {
    receiverShouldBeRunning
    queueShouldBeEmpty
    // Exceed the idle timeout: must NOT stop, since nothing was received yet.
    addClockTicks(receiverTimeout * 10)
    receiverShouldBeRunning
    queueShouldBeEmpty
    sendMsgAndWaitForReceive()
    receiverShouldBeRunning
    // Exceed the idle timeout
    addClockTicks(receiverTimeout)
    receiverShouldBeStopped
  }
}
/**
 * Test harness: runs a ReceiverRunnable against a FakeMq on a cached thread
 * pool, with a FakeClock so idle time can be advanced deterministically.
 */
trait ReceiverTestKit extends Matchers with Eventually with BeforeAndAfterAll {
  self: Suite =>

  val executor: ExecutorService = Executors.newCachedThreadPool()
  implicit val ec: ExecutionContext = ExecutionContexts.fromExecutor(executor)

  override protected def afterAll(): Unit = {
    super.afterAll()
    executor.shutdownNow()
  }

  // One receiver-under-test per test case; instantiated via `new ReceiverTestCase`.
  trait ReceiverTestCase {
    private val fakeMq = new FakeMq
    private val fakeClock = new FakeClock
    private val receiverRunnable = new ReceiverRunnable(
      fakeMq,
      "fakeMq",
      1,
      new DateTime(),
      // Dummy Prometheus collectors; values are not asserted in these tests.
      Counter.build("x", "x").create().labels(),
      Histogram.build("x", "x").create().labels(),
      Gauge.build("x", "x").create().labels(),
      fakeClock
    )

    // Lazily started so the receiver only begins once a test touches it.
    protected lazy val receiverFut: Future[Unit] =
      Future {
        receiverRunnable.run()
      }

    protected val receiverTimeout: FiniteDuration = receiverRunnable.timeout

    def addClockTicks(duration: FiniteDuration): Unit = {
      fakeClock.add(duration)
    }

    def queueShouldBeEmpty: Unit = eventually {
      fakeMq.isEmpty shouldBe true
    }

    // Immediate (non-eventually) check: the future must not have completed yet.
    def receiverShouldBeRunning: Unit = receiverFut.isCompleted shouldBe false

    def receiverShouldBeStopped: Unit = eventually {
      receiverFut.isCompleted shouldBe true
    }

    def sendMsgAndWaitForReceive(): Unit = {
      val numberOfMsgs = 1
      val totalAckBefore = fakeMq.totalAcknowledged
      fakeMq.put(numberOfMsgs)
      // Be sure that message has been processed and lastReceived won't be updated AFTER we add clock ticks.
      eventually {
        fakeMq.totalAcknowledged shouldBe >=(totalAckBefore + numberOfMsgs)
      }
    }
  }
}
/**
 * In-memory Mq backed by atomic counters: `queueSize` models pending messages
 * and `acknowledged` counts acked ones. Message bodies are current-time
 * millis rendered as strings (they must parse as Long downstream).
 */
class FakeMq extends Mq {

  import scala.compat.java8.FunctionConverters._

  override type MsgId = Long

  private val queueSize = new AtomicInteger(0)
  private val acknowledged = new AtomicLong(0)

  override def createSender(): MqSender = new FakeMqSender

  override def createReceiver(): MqReceiver = new FakeMqReceiver

  /** Enqueues `n` messages. */
  def put(n: Int): Unit = queueSize.accumulateAndGet(n, asJavaIntBinaryOperator(_ + _))

  def isEmpty: Boolean = queueSize.get() == 0

  def totalAcknowledged: Long = acknowledged.get()

  class FakeMqReceiver extends MqReceiver {
    override def receive(maxMsgCount: Int): List[(Long, String)] = {
      // Atomically drain up to maxMsgCount, clamping the counter at zero;
      // `before` is the size prior to draining.
      val before = queueSize.getAndAccumulate(maxMsgCount, asJavaIntBinaryOperator {
        (current, requested) => Math.max(current - requested, 0)
      })
      val received = Math.min(before, maxMsgCount)
      List.fill(received) {
        // Has to be parsable to a Long number
        (1L, System.currentTimeMillis().toString)
      }
    }

    override def ack(ids: List[Long]): Unit =
      acknowledged.accumulateAndGet(ids.size, asJavaLongBinaryOperator(_ + _))
  }

  class FakeMqSender extends MqSender {
    override def send(msgs: List[String]): Unit =
      queueSize.accumulateAndGet(msgs.size, asJavaIntBinaryOperator(_ + _))
  }
}
| softwaremill/mqperf | src/test/scala/com/softwaremill/mqperf/ReceiverRunnableTest.scala | Scala | apache-2.0 | 5,055 |
package com.github.libsml.model.rdd
import org.apache.spark.HashPartitioner
import org.apache.spark.rdd.RDD
import scala.reflect.ClassTag
/**
* Created by huangyu on 15/8/28.
*/
/**
 * Created by huangyu on 15/8/28.
 *
 * Extra operations pimped onto RDDs via the implicit conversion in the
 * companion object.
 */
class RDDFunctions[T: ClassTag](self: RDD[T]) {

  /**
   * Reduces the RDD with a multi-level (tree-like) aggregation: partitions
   * are repeatedly folded into fewer reduce keys until few enough remain,
   * then a final reduce is performed.
   *
   * If numSlaves > 0, the RDD is first coalesced to numSlaves partitions.
   */
  def slaveReduce(f: (T, T) => T, numSlaves: Int): T = {
    val depth = 2
    var rdd = self
    var numPartitions = rdd.partitions.size
    if (numSlaves > 0) {
      rdd = rdd.coalesce(numSlaves, false)
    }
    // NOTE(review): numPartitions is overwritten with numSlaves even when
    // numSlaves <= 0 (no coalesce performed); in that case the loop below is
    // skipped and a plain reduce runs — confirm this is the intended behavior.
    numPartitions = numSlaves
    val scale = math.max(math.ceil(math.pow(numPartitions, 1.0 / depth)).toInt, 2)
    while (numPartitions > scale + numPartitions / scale) {
      numPartitions /= scale
      val curNumPartitions = numPartitions
      // Map each partition's elements to one of curNumPartitions keys, then
      // reduce per key to shrink the number of partial results.
      rdd = rdd.mapPartitionsWithIndex { (i, iter) =>
        iter.map((i % curNumPartitions, _))
      }.reduceByKey(new HashPartitioner(curNumPartitions), f).values
    }
    rdd.reduce(f)
  }
}
object RDDFunctions {

  /** Implicit conversion from an RDD to RDDFunctions. */
  implicit def fromRDD[T: ClassTag](rdd: RDD[T]): RDDFunctions[T] =
    new RDDFunctions[T](rdd)
}
| libsml/libsml | model/src/main/scala/com/github/libsml/model/rdd/RDDFunctions.scala | Scala | apache-2.0 | 1,047 |
package org.jetbrains.jps.incremental.scala
package local
import java.io.File
import java.net.URLClassLoader
import org.jetbrains.jps.incremental.scala.data.{CompilerData, CompilerJars, SbtData}
import org.jetbrains.jps.incremental.scala.local.CompilerFactoryImpl._
import org.jetbrains.jps.incremental.scala.model.IncrementalityType
import sbt.compiler.{AggressiveCompile, AnalyzingCompiler, IC}
import sbt.inc.AnalysisStore
import sbt.{ClasspathOptions, Path, ScalaInstance}
import xsbti.{F0, Logger}
/**
* @author Pavel Fatin
*/
class CompilerFactoryImpl(sbtData: SbtData) extends CompilerFactory {

  /**
   * Creates a [[Compiler]] for the given compilation request.
   *
   * Dotty projects short-circuit to a [[DottyCompiler]]; otherwise either an
   * sbt-style incremental compiler (with javac support) or IDEA's own
   * incremental compiler is produced, depending on the configured
   * incrementality type.
   */
  def createCompiler(compilerData: CompilerData, client: Client, fileToStore: File => AnalysisStore): Compiler = {
    compilerData.compilerJars match {
      case Some(jars) if jars.dotty.isDefined =>
        return new DottyCompiler(createScalaInstance(jars), jars)
      case _ =>
    }
    // Resolved only after the Dotty shortcut above: getScalac may trigger an
    // expensive compilation of the compiler-interface jar, which the Dotty
    // path never needs. (Previously it was computed unconditionally.)
    val scalac: Option[AnalyzingCompiler] = getScalac(sbtData, compilerData.compilerJars, client)
    compilerData.incrementalType match {
      case IncrementalityType.SBT =>
        val javac = {
          // Fall back to a stub ScalaInstance when no compiler jars are available,
          // so plain-Java compilation still works.
          val scala = getScalaInstance(compilerData.compilerJars)
            .getOrElse(new ScalaInstance("stub", null, new File(""), new File(""), Seq.empty, None))
          val classpathOptions = ClasspathOptions.javac(compiler = false)
          AggressiveCompile.directOrFork(scala, classpathOptions, compilerData.javaHome)
        }
        new SbtCompiler(javac, scalac, fileToStore)
      case IncrementalityType.IDEA =>
        if (scalac.isDefined) new IdeaIncrementalCompiler(scalac.get)
        else throw new IllegalStateException("Could not create scalac instance")
    }
  }

  /**
   * Builds an [[AnalyzingCompiler]] for the given compiler jars, compiling the
   * matching compiler-interface jar on demand (a potentially slow, one-time
   * side effect reported through `client`).
   */
  def getScalac(sbtData: SbtData, compilerJars: Option[CompilerJars], client: Client): Option[AnalyzingCompiler] = {
    getScalaInstance(compilerJars).map { scala =>
      // typo fix: was "compiledIntefaceJar"
      val compiledInterfaceJar = getOrCompileInterfaceJar(sbtData.interfacesHome, sbtData.sourceJar,
        sbtData.interfaceJar, scala, sbtData.javaClassVersion, Option(client))
      IC.newScalaCompiler(scala, compiledInterfaceJar, ClasspathOptions.javac(compiler = false))
    }
  }

  private def getScalaInstance(compilerJars: Option[CompilerJars]): Option[ScalaInstance] =
    compilerJars.map(createScalaInstance)
}
object CompilerFactoryImpl {

  // ScalaInstance construction loads a fresh classloader over the compiler jars,
  // so keep a small cache (capacity 3) keyed by the jar set.
  private val scalaInstanceCache = new Cache[CompilerJars, ScalaInstance](3)

  /**
   * Returns a (possibly cached) [[ScalaInstance]] for the given compiler jars.
   * The instance's classloader spans library + compiler + extra jars, rooted at
   * sbt's root loader so application classes do not leak in.
   */
  def createScalaInstance(jars: CompilerJars): ScalaInstance = {
    scalaInstanceCache.getOrUpdate(jars) {
      val classLoader = {
        val urls = Path.toURLs(jars.library +: jars.compiler +: jars.extra)
        new URLClassLoader(urls, sbt.classpath.ClasspathUtilities.rootLoader)
      }
      // "unknown" is used when compiler.properties cannot be read from the jars.
      val version = readScalaVersionIn(classLoader)
      new ScalaInstance(version.getOrElse("unknown"), classLoader, jars.library, jars.compiler, jars.extra, version)
    }
  }

  /** Reads the Scala version from `compiler.properties` on the given classloader, if present. */
  def readScalaVersionIn(classLoader: ClassLoader): Option[String] =
    readProperty(classLoader, "compiler.properties", "version.number")

  /**
   * Returns the compiler-interface jar for the given Scala/Java version pair,
   * compiling it from `sourceJar` into `home` on first use (slow, one-time;
   * progress is reported through `client` when provided).
   */
  def getOrCompileInterfaceJar(home: File,
                               sourceJar: File,
                               interfaceJar: File,
                               scalaInstance: ScalaInstance,
                               javaClassVersion: String,
                               client: Option[Client]): File = {
    val scalaVersion = scalaInstance.actualVersion
    // The jar name encodes both the Scala and the Java class version, so
    // interfaces for different toolchains can coexist in `home`.
    val interfaceId = "compiler-interface-" + scalaVersion + "-" + javaClassVersion
    val targetJar = new File(home, interfaceId + ".jar")
    if (!targetJar.exists) {
      client.foreach(_.progress("Compiling Scalac " + scalaVersion + " interface"))
      home.mkdirs()
      IC.compileInterfaceJar(interfaceId, sourceJar, targetJar, interfaceJar, scalaInstance, NullLogger)
    }
    targetJar
  }
}
/** An [[xsbti.Logger]] that silently discards all messages (used for the
  * compiler-interface bootstrap compilation, whose output is not interesting). */
object NullLogger extends Logger {
  // Procedure syntax (`def f(...) {}`) is deprecated; use explicit `: Unit = ()`.
  def error(p1: F0[String]): Unit = ()
  def warn(p1: F0[String]): Unit = ()
  def info(p1: F0[String]): Unit = ()
  def debug(p1: F0[String]): Unit = ()
  def trace(p1: F0[Throwable]): Unit = ()
}
package org.unisonweb.util
/**
 * A growable, indexable buffer abstraction over some array-like backing storage.
 * Update operations may either mutate in place and return `this`, or allocate
 * grown storage and return a new `Block`; callers must always use the returned
 * value rather than assume the receiver was updated.
 */
abstract class Block[A] {

  /** Create a copy of `len` elements of this `Block`, starting from index `from`. */
  def copy(from: Int, len: Int): Block[A]

  /** Set the `i` index of this `Block`, extending the backing storage if needed. */
  def :+(i: Int, e: A): Block[A]

  /** Copy `len` elements of es to the `i` index of this `Block`. */
  def :++(i: Int, es: Array[A], len: Int): Block[A]

  /** Copy the first `len` elements of this `Block` to the `destIndex` position of `dest`. */
  def copyTo(destIndex: Int, dest: Block[A], len: Int): Block[A]

  /** The element at the provided index. */
  def apply(i: Int): A

  /** Create an empty version of this same `Block`, using the same kind of backing storage. */
  def empty: Block[A]

  /** Convert the first `size` elements of this `Block` to an array. */
  def toArray(size: Int): Array[A]
}
object Block {

  /**
   * Wraps `arr` directly (no defensive copy): updates that fit write into `arr`
   * itself and return `this`; updates that require growth allocate a larger
   * array and return a new view over it, leaving `arr` untouched. The caller
   * therefore shares mutable state with the returned `Block`.
   */
  def viewArray[A](arr: Array[A])(implicit newArray: NewArray[A]): Block[A] = new Block[A] {
    def copy(from: Int, len: Int) = {
      val arr2 = newArray(len)
      Array.copy(arr, from, arr2, 0, arr2.length)
      fromArray(arr2)
    }
    // Delegate to the destination's bulk write so the destination controls growth.
    def copyTo(i: Int, b2: Block[A], len: Int) = b2 :++ (i, arr, len)
    def apply(i: Int) = arr(i)
    def :++(i: Int, src: Array[A], len: Int) = {
      // Grow to at least double the current capacity, or exactly enough to
      // hold the write, whichever is larger.
      val arr2 = if (i + len >= arr.length) {
        val arr2 = newArray((arr.length * 2) max (len + i))
        Array.copy(arr, 0, arr2, 0, i)
        arr2
      } else arr
      Array.copy(src, 0, arr2, i, len)
      // Reference-equality check: if we wrote in place, keep this view;
      // otherwise expose the newly allocated storage.
      if (arr2 eq arr) this else viewArray(arr2)
    }
    def :+(i: Int, a: A) = {
      val arr2 =
        if (i >= arr.length) {
          val arr2 = newArray((arr.length * 2) max (i + 1))
          Array.copy(arr, 0, arr2, 0, arr.length)
          arr2
        }
        else arr
      arr2(i) = a
      if (arr2 eq arr) this else viewArray(arr2)
    }
    def empty = fromArray(newArray(16))
    // NOTE: throws if size > the backing array's length, since it copies `r.length` elements.
    def toArray(size: Int) = { val r = newArray(size); Array.copy(arr, 0, r, 0, r.length); r }
  }

  /** Like `viewArray`, but defensively clones `arr` so the caller's array is never mutated. */
  def fromArray[A](arr: Array[A])(implicit newArray: NewArray[A]): Block[A] =
    viewArray(arr.clone)

  /** An empty `Block` with an initial capacity of 16. */
  def empty[A](implicit newArray: NewArray[A]): Block[A] =
    viewArray(newArray(16))

  /** Type class providing array allocation, so primitive element types get
    * unboxed primitive arrays instead of boxed `Array[AnyRef]`. */
  abstract class NewArray[A] { def apply(size: Int): Array[A] }

  object NewArray extends LowPriorityNewArrays {
    implicit val Byte = new NewArray[Byte] { def apply(size: Int) = new Array[Byte](size) }
    implicit val Double = new NewArray[Double] { def apply(size: Int) = new Array[Double](size) }
    implicit val Long = new NewArray[Long] { def apply(size: Int) = new Array[Long](size) }
    implicit val Int = new NewArray[Int] { def apply(size: Int) = new Array[Int](size) }
  }

  trait LowPriorityNewArrays {
    // Fallback for reference types; the cast relies on the array only ever
    // being accessed through the Block abstraction.
    val AnyRef = new NewArray[AnyRef] { def apply(size: Int) = new Array[AnyRef](size) }
    implicit def Polymorphic[A]: NewArray[A] = AnyRef.asInstanceOf[NewArray[A]]
  }
}
| paulp/unison | runtime-jvm/main/src/main/scala/util/Block.scala | Scala | mit | 2,889 |
/*
* Copyright 2021 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s.node.serverless
import fs2.io.Writable
import scala.annotation.nowarn
import scala.scalajs.js
/**
 * Scala.js facade for Node.js `http.ServerResponse`, exposing only the pieces
 * this module needs: the fs2 `Writable` body sink plus `writeHead`.
 * Facade members must mirror the native API exactly; bodies are `js.native`.
 */
@js.native
@nowarn
trait ServerResponse extends js.Object with Writable {
  /** Sends the response status line and headers; returns this response for chaining. */
  def writeHead(
      statusCode: Int,
      statusMessage: String,
      headers: js.Dictionary[String],
  ): ServerResponse = js.native
}
| http4s/http4s | node-serverless/src/main/scala/org/http4s/node/serverless/ServerResponse.scala | Scala | apache-2.0 | 926 |
package spark.examples
import java.util.Random
import scala.math.exp
import spark.util.Vector
import spark._
/**
 * Logistic regression via batch gradient descent, implemented directly on
 * Spark RDDs. Generates a synthetic two-class dataset and runs a fixed number
 * of full-batch gradient steps.
 */
object SparkLR {
  val N = 10000  // Number of data points
  val D = 10   // Number of dimensions (typo fix: was "Numer")
  val R = 0.7  // Scaling factor
  val ITERATIONS = 5
  val rand = new Random(42)  // fixed seed for reproducible runs

  case class DataPoint(x: Vector, y: Double)

  /** Generates N labeled points: label alternates -1/+1, features Gaussian shifted by y * R. */
  def generateData = {
    def generatePoint(i: Int) = {
      val y = if(i % 2 == 0) -1 else 1
      val x = Vector(D, _ => rand.nextGaussian + y * R)
      DataPoint(x, y)
    }
    Array.tabulate(N)(generatePoint)
  }

  def main(args: Array[String]) {
    if (args.length == 0) {
      System.err.println("Usage: SparkLR <host> [<slices>]")
      System.exit(1)
    }
    val sc = new SparkContext(args(0), "SparkLR")
    val numSlices = if (args.length > 1) args(1).toInt else 2
    // Cache the points: they are reused on every iteration.
    val points = sc.parallelize(generateData, numSlices).cache()

    // Initialize w to a random value
    var w = Vector(D, _ => 2 * rand.nextDouble - 1)
    println("Initial w: " + w)

    for (i <- 1 to ITERATIONS) {
      println("On iteration " + i)
      // Full-batch gradient of the logistic loss, summed over all points.
      val gradient = points.map { p =>
        (1 / (1 + exp(-p.y * (w dot p.x))) - 1) * p.y * p.x
      }.reduce(_ + _)
      w -= gradient
    }

    println("Final w: " + w)
    sc.stop()  // BUGFIX: release cluster resources before terminating the JVM
    System.exit(0)
  }
}
| ankurdave/arthur | examples/src/main/scala/spark/examples/SparkLR.scala | Scala | bsd-3-clause | 1,279 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package expr
import com.intellij.openapi.project.DumbService
import com.intellij.psi.{PsiAnnotation, ResolveResult}
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScConstructor, ScPrimaryConstructor}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTypeDefinition
import org.jetbrains.plugins.scala.lang.psi.impl.base.ScStableCodeReferenceElementImpl
import org.jetbrains.plugins.scala.lang.psi.stubs.elements.ScTemplateDefinitionElementType
import org.jetbrains.plugins.scala.lang.resolve.processor.ResolveProcessor
/**
* @author Alexander Podkhalyuzin
* Date: 07.03.2008
*/
trait ScAnnotation extends ScalaPsiElement with PsiAnnotation {
  /**
   * Return full annotation only without @ token.
   *
   * @return annotation expression
   */
  def annotationExpr: ScAnnotationExpr = findChildByClassScala(classOf[ScAnnotationExpr])

  /**
   * Return constructor element of annotation expression. For example
   * if annotation is <code>@Nullable</code> then method returns <code>
   * Nullable</code> psiElement.
   *
   * @return constructor element
   */
  def constructor: ScConstructor = annotationExpr.constr

  def typeElement: ScTypeElement

  /**
   * Whether this annotation resolves to a meta-annotation implementation,
   * i.e. whether the resolved primary constructor's class or type definition
   * reports itself as a meta-annotation impl.
   */
  def isMetaAnnotation: Boolean = {
    def hasMetaAnnotation(results: Array[ResolveResult]) = results.map(_.getElement).exists {
      case c: ScPrimaryConstructor => c.containingClass.isMetaAnnotatationImpl
      case o: ScTypeDefinition => o.isMetaAnnotatationImpl
      case _ => false
    }
    // do not resolve anything while the stubs are building to avoid deadlocks;
    // likewise bail out during IDE "dumb" (re-indexing) mode where resolve is unavailable
    if (ScTemplateDefinitionElementType.isStubBuilding.get() || DumbService.isDumb(getProject))
      return false
    constructor.reference.exists {
      case stRef: ScStableCodeReferenceElementImpl =>
        val processor = new ResolveProcessor(stRef.getKinds(incomplete = false), stRef, stRef.refName)
        hasMetaAnnotation(stRef.doResolve(processor))
      case _ => false
    }
  }

  // Mutable flag shared across threads, hence @volatile.
  // NOTE(review): the semantics of `strip` are not visible from this file — confirm at usage sites.
  @volatile
  var strip = false
}
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.jms.client
import java.util.concurrent.ConcurrentHashMap
import javax.jms.{ Connection, Destination }
import io.gatling.commons.model.Credentials
import io.gatling.commons.util.Clock
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.session._
import io.gatling.core.stats.StatsEngine
import io.gatling.jms.protocol.JmsMessageMatcher
import io.gatling.jms.request._
import akka.actor.ActorSystem
/**
 * Wraps a single JMS [[javax.jms.Connection]] together with pooled sessions,
 * producers and message trackers used by the Gatling JMS protocol support.
 */
class JmsConnection(
    connection: Connection,
    val credentials: Option[Credentials],
    system: ActorSystem,
    statsEngine: StatsEngine,
    clock: Clock,
    configuration: GatlingConfiguration
) {

  private val sessionPool = new JmsSessionPool(connection)

  // Named (static) destinations are resolved at most once per name and cached.
  private val staticQueues = new ConcurrentHashMap[String, Destination]
  private val staticTopics = new ConcurrentHashMap[String, Destination]

  /**
   * Resolves a [[JmsDestination]] to an expression producing a concrete JMS
   * `Destination`. Temporary queues/topics are created once, at the time this
   * method is invoked, and that same destination is reused by the resulting
   * (always-successful) expression; named queues/topics are looked up lazily
   * per evaluated name and cached.
   */
  def destination(jmsDestination: JmsDestination): Expression[Destination] = {
    val jmsSession = sessionPool.jmsSession()
    jmsDestination match {
      case JmsDestination.TemporaryQueue => jmsSession.createTemporaryQueue().expressionSuccess
      case JmsDestination.TemporaryTopic => jmsSession.createTemporaryTopic().expressionSuccess
      case JmsDestination.Queue(name) => name.map(n => staticQueues.computeIfAbsent(n, jmsSession.createQueue _))
      case JmsDestination.Topic(name) => name.map(n => staticTopics.computeIfAbsent(n, jmsSession.createTopic _))
    }
  }

  private val producerPool = new JmsProducerPool(sessionPool)

  /** Returns a pooled producer for the given destination and JMS delivery mode. */
  def producer(destination: Destination, deliveryMode: Int): JmsProducer =
    producerPool.producer(destination, deliveryMode)

  private val trackerPool = new JmsTrackerPool(sessionPool, system, statsEngine, clock, configuration)

  /** Returns a pooled tracker consuming from `destination` with the given selector and matcher. */
  def tracker(destination: Destination, selector: Option[String], listenerThreadCount: Int, messageMatcher: JmsMessageMatcher): JmsTracker =
    trackerPool.tracker(destination, selector, listenerThreadCount, messageMatcher)

  // NOTE(review): trackerPool is not explicitly closed here — confirm its
  // resources are released via sessionPool.close()/connection.close() and do not leak.
  def close(): Unit = {
    producerPool.close()
    sessionPool.close()
    connection.close()
  }
}
| gatling/gatling | gatling-jms/src/main/scala/io/gatling/jms/client/JmsConnection.scala | Scala | apache-2.0 | 2,688 |
package scredis
import akka.actor._
import com.typesafe.config.Config
import scredis.commands._
import scredis.io.AkkaNonBlockingConnection
import scredis.util.UniqueNameGenerator
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps
/**
* Defines a `Redis` [[scredis.Client]] supporting all non-blocking commands along with a lazily
* initialized [[scredis.BlockingClient]] and [[scredis.SubscriberClient]].
*
* @define e [[scredis.exceptions.RedisErrorResponseException]]
* @define redis [[scredis.Redis]]
* @define tc com.typesafe.Config
*/
class Redis private[scredis] (
  systemOrName: Either[ActorSystem, String],
  host: String,
  port: Int,
  passwordOpt: Option[String],
  database: Int,
  nameOpt: Option[String],
  connectTimeout: FiniteDuration,
  receiveTimeoutOpt: Option[FiniteDuration],
  maxWriteBatchSize: Int,
  tcpSendBufferSizeHint: Int,
  tcpReceiveBufferSizeHint: Int,
  akkaListenerDispatcherPath: String,
  akkaIODispatcherPath: String,
  akkaDecoderDispatcherPath: String
) extends AkkaNonBlockingConnection(
  system = systemOrName match {
    case Left(system) => system
    case Right(name) => ActorSystem(UniqueNameGenerator.getUniqueName(name))
  },
  host = host,
  port = port,
  passwordOpt = passwordOpt,
  database = database,
  nameOpt = nameOpt,
  connectTimeout = connectTimeout,
  receiveTimeoutOpt = receiveTimeoutOpt,
  maxWriteBatchSize = maxWriteBatchSize,
  tcpSendBufferSizeHint = tcpSendBufferSizeHint,
  // BUGFIX: previously forwarded tcpSendBufferSizeHint here, silently
  // discarding the configured receive buffer size hint.
  tcpReceiveBufferSizeHint = tcpReceiveBufferSizeHint,
  akkaListenerDispatcherPath = akkaListenerDispatcherPath,
  akkaIODispatcherPath = akkaIODispatcherPath,
  akkaDecoderDispatcherPath = akkaDecoderDispatcherPath,
  decodersCount = 2
) with ConnectionCommands
  with ServerCommands
  with KeyCommands
  with StringCommands
  with HashCommands
  with ListCommands
  with SetCommands
  with SortedSetCommands
  with ScriptingCommands
  with HyperLogLogCommands
  with PubSubCommands
  with TransactionCommands {

  // Track whether the lazily-created side clients were ever initialized, so
  // that auth/clientSetName/quit only touch clients that actually exist.
  private var shouldShutdownBlockingClient = false
  private var shouldShutdownSubscriberClient = false

  /**
   * Lazily initialized [[scredis.BlockingClient]].
   */
  lazy val blocking = {
    shouldShutdownBlockingClient = true
    BlockingClient(
      host = host,
      port = port,
      passwordOpt = getPasswordOpt,
      database = getDatabase,
      nameOpt = getNameOpt,
      connectTimeout = connectTimeout,
      maxWriteBatchSize = maxWriteBatchSize,
      tcpSendBufferSizeHint = tcpSendBufferSizeHint,
      tcpReceiveBufferSizeHint = tcpReceiveBufferSizeHint,
      akkaListenerDispatcherPath = akkaListenerDispatcherPath,
      akkaIODispatcherPath = akkaIODispatcherPath,
      akkaDecoderDispatcherPath = akkaDecoderDispatcherPath
    )(system)
  }

  /**
   * Lazily initialized [[scredis.SubscriberClient]].
   */
  lazy val subscriber = {
    shouldShutdownSubscriberClient = true
    SubscriberClient(
      host = host,
      port = port,
      passwordOpt = getPasswordOpt,
      nameOpt = getNameOpt,
      connectTimeout = connectTimeout,
      receiveTimeoutOpt = receiveTimeoutOpt,
      maxWriteBatchSize = maxWriteBatchSize,
      tcpSendBufferSizeHint = tcpSendBufferSizeHint,
      tcpReceiveBufferSizeHint = tcpReceiveBufferSizeHint,
      akkaListenerDispatcherPath = akkaListenerDispatcherPath,
      akkaIODispatcherPath = akkaIODispatcherPath,
      akkaDecoderDispatcherPath = akkaDecoderDispatcherPath
    )(system)
  }

  /**
   * Constructs a $redis instance using provided parameters.
   *
   * @param host server address
   * @param port server port
   * @param passwordOpt optional server password
   * @param database database index to select
   * @param nameOpt optional client name (available since 2.6.9)
   * @param connectTimeout connection timeout
   * @param receiveTimeoutOpt optional batch receive timeout
   * @param maxWriteBatchSize max number of bytes to send as part of a batch
   * @param tcpSendBufferSizeHint size hint of the tcp send buffer, in bytes
   * @param tcpReceiveBufferSizeHint size hint of the tcp receive buffer, in bytes
   * @param actorSystemName name of the actor system
   * @param akkaListenerDispatcherPath path to listener dispatcher definition
   * @param akkaIODispatcherPath path to io dispatcher definition
   * @param akkaDecoderDispatcherPath path to decoder dispatcher definition
   * @return the constructed $redis
   */
  def this(
    host: String = RedisConfigDefaults.Redis.Host,
    port: Int = RedisConfigDefaults.Redis.Port,
    passwordOpt: Option[String] = RedisConfigDefaults.Redis.PasswordOpt,
    database: Int = RedisConfigDefaults.Redis.Database,
    nameOpt: Option[String] = RedisConfigDefaults.Redis.NameOpt,
    connectTimeout: FiniteDuration = RedisConfigDefaults.IO.ConnectTimeout,
    receiveTimeoutOpt: Option[FiniteDuration] = RedisConfigDefaults.IO.ReceiveTimeoutOpt,
    maxWriteBatchSize: Int = RedisConfigDefaults.IO.MaxWriteBatchSize,
    tcpSendBufferSizeHint: Int = RedisConfigDefaults.IO.TCPSendBufferSizeHint,
    tcpReceiveBufferSizeHint: Int = RedisConfigDefaults.IO.TCPReceiveBufferSizeHint,
    actorSystemName: String = RedisConfigDefaults.IO.Akka.ActorSystemName,
    akkaListenerDispatcherPath: String = RedisConfigDefaults.IO.Akka.ListenerDispatcherPath,
    akkaIODispatcherPath: String = RedisConfigDefaults.IO.Akka.IODispatcherPath,
    akkaDecoderDispatcherPath: String = RedisConfigDefaults.IO.Akka.DecoderDispatcherPath
  ) = this(
    systemOrName = Right(actorSystemName),
    host = host,
    port = port,
    passwordOpt = passwordOpt,
    database = database,
    nameOpt = nameOpt,
    connectTimeout = connectTimeout,
    receiveTimeoutOpt = receiveTimeoutOpt,
    maxWriteBatchSize = maxWriteBatchSize,
    tcpSendBufferSizeHint = tcpSendBufferSizeHint,
    tcpReceiveBufferSizeHint = tcpReceiveBufferSizeHint,
    akkaListenerDispatcherPath = akkaListenerDispatcherPath,
    akkaIODispatcherPath = akkaIODispatcherPath,
    akkaDecoderDispatcherPath = akkaDecoderDispatcherPath
  )

  /**
   * Constructs a $redis instance from a [[scredis.RedisConfig]].
   *
   * @return the constructed $redis
   */
  def this(config: RedisConfig) = this(
    host = config.Redis.Host,
    port = config.Redis.Port,
    passwordOpt = config.Redis.PasswordOpt,
    database = config.Redis.Database,
    nameOpt = config.Redis.NameOpt,
    connectTimeout = config.IO.ConnectTimeout,
    receiveTimeoutOpt = config.IO.ReceiveTimeoutOpt,
    maxWriteBatchSize = config.IO.MaxWriteBatchSize,
    tcpSendBufferSizeHint = config.IO.TCPSendBufferSizeHint,
    tcpReceiveBufferSizeHint = config.IO.TCPReceiveBufferSizeHint,
    actorSystemName = config.IO.Akka.ActorSystemName,
    akkaListenerDispatcherPath = config.IO.Akka.ListenerDispatcherPath,
    akkaIODispatcherPath = config.IO.Akka.IODispatcherPath,
    akkaDecoderDispatcherPath = config.IO.Akka.DecoderDispatcherPath
  )

  /**
   * Constructs a $redis instance using the default config.
   *
   * @return the constructed $redis
   */
  def this() = this(RedisConfig())

  /**
   * Constructs a $redis instance from a $tc.
   *
   * @note The config must contain the scredis object at its root.
   *
   * @param config $tc
   * @return the constructed $redis
   */
  def this(config: Config) = this(RedisConfig(config))

  /**
   * Constructs a $redis instance from a config file.
   *
   * @note The config file must contain the scredis object at its root.
   * This constructor is equivalent to {{{
   * new Redis(configName, "scredis")
   * }}}
   *
   * @param configName config filename
   * @return the constructed $redis
   */
  def this(configName: String) = this(RedisConfig(configName))

  /**
   * Constructs a $redis instance from a config file and using the provided path.
   *
   * @note The path must include to the scredis object, e.g. x.y.scredis.
   *
   * @param configName config filename
   * @param path path pointing to the scredis config object
   * @return the constructed $redis
   */
  def this(configName: String, path: String) = this(RedisConfig(configName, path))

  /**
   * Authenticates to the server.
   *
   * @note Use the empty string to re-authenticate with no password.
   *
   * @param password the server password
   * @throws $e if authentication failed
   *
   * @since 1.0.0
   */
  override def auth(password: String): Future[Unit] = {
    // Propagate authentication to the side clients (if ever created) first,
    // logging — but not failing on — their errors.
    if (shouldShutdownBlockingClient) {
      try {
        blocking.auth(password)(5 seconds)
      } catch {
        case e: Throwable => logger.error("Could not authenticate blocking client", e)
      }
    }
    val future = if (shouldShutdownSubscriberClient) {
      subscriber.auth(password)
    } else {
      Future.successful(())
    }
    future.recover {
      case e: Throwable => logger.error("Could not authenticate subscriber client", e)
    }.flatMap { _ =>
      super.auth(password)
    }
  }

  /**
   * Sets the current client name. If the empty string is provided, the name will be unset.
   *
   * @param name name to associate the client to, if empty, unsets the client name
   *
   * @since 2.6.9
   */
  override def clientSetName(name: String): Future[Unit] = {
    if (shouldShutdownBlockingClient) {
      try {
        blocking.clientSetName(name)(5 seconds)
      } catch {
        case e: Throwable => logger.error("Could not set client name on blocking client", e)
      }
    }
    val future = if (shouldShutdownSubscriberClient) {
      subscriber.clientSetName(name)
    } else {
      Future.successful(())
    }
    future.recover {
      case e: Throwable => logger.error("Could not set client name on subscriber client", e)
    }.flatMap { _ =>
      super.clientSetName(name)
    }
  }

  /**
   * Closes the connection.
   *
   * @since 1.0.0
   */
  override def quit(): Future[Unit] = {
    if (shouldShutdownBlockingClient) {
      try {
        blocking.quit()(5 seconds)
        blocking.awaitTermination(3 seconds)
      } catch {
        case e: Throwable => logger.error("Could not shutdown blocking client", e)
      }
    }
    val future = if (shouldShutdownSubscriberClient) {
      subscriber.quit().map { _ =>
        subscriber.awaitTermination(3 seconds)
      }
    } else {
      Future.successful(())
    }
    future.recover {
      case e: Throwable => logger.error("Could not shutdown subscriber client", e)
    }.flatMap { _ =>
      super.quit()
    }.map { _ =>
      awaitTermination(3 seconds)
      systemOrName match {
        case Left(system) => // Do not shutdown provided ActorSystem
        case Right(name) => system.shutdown()
      }
    }
  }

  /**
   * Changes the selected database on the current connection.
   *
   * @param database database index
   * @throws $e if the database index is invalid
   *
   * @since 1.0.0
   */
  override def select(database: Int): Future[Unit] = {
    if (shouldShutdownBlockingClient) {
      try {
        blocking.select(database)(5 seconds)
      } catch {
        case e: Throwable => Future.failed(e)
      }
    }
    super.select(database)
  }

  watchTermination()
}
/**
* The companion object provides additional friendly constructors.
*
* @define redis [[scredis.Redis]]
* @define tc com.typesafe.Config
*/
object Redis {

  /**
   * Constructs a $redis instance using provided parameters.
   *
   * @param host server address
   * @param port server port
   * @param passwordOpt optional server password
   * @param database database index to select
   * @param nameOpt optional client name (available since 2.6.9)
   * @param connectTimeout connection timeout
   * @param receiveTimeoutOpt optional batch receive timeout
   * @param maxWriteBatchSize max number of bytes to send as part of a batch
   * @param tcpSendBufferSizeHint size hint of the tcp send buffer, in bytes
   * @param tcpReceiveBufferSizeHint size hint of the tcp receive buffer, in bytes
   * @param actorSystemName name of the actor system
   * @param akkaListenerDispatcherPath path to listener dispatcher definition
   * @param akkaIODispatcherPath path to io dispatcher definition
   * @param akkaDecoderDispatcherPath path to decoder dispatcher definition
   * @return the constructed $redis
   */
  def apply(
    host: String = RedisConfigDefaults.Redis.Host,
    port: Int = RedisConfigDefaults.Redis.Port,
    passwordOpt: Option[String] = RedisConfigDefaults.Redis.PasswordOpt,
    database: Int = RedisConfigDefaults.Redis.Database,
    nameOpt: Option[String] = RedisConfigDefaults.Redis.NameOpt,
    connectTimeout: FiniteDuration = RedisConfigDefaults.IO.ConnectTimeout,
    receiveTimeoutOpt: Option[FiniteDuration] = RedisConfigDefaults.IO.ReceiveTimeoutOpt,
    maxWriteBatchSize: Int = RedisConfigDefaults.IO.MaxWriteBatchSize,
    tcpSendBufferSizeHint: Int = RedisConfigDefaults.IO.TCPSendBufferSizeHint,
    tcpReceiveBufferSizeHint: Int = RedisConfigDefaults.IO.TCPReceiveBufferSizeHint,
    actorSystemName: String = RedisConfigDefaults.IO.Akka.ActorSystemName,
    akkaListenerDispatcherPath: String = RedisConfigDefaults.IO.Akka.ListenerDispatcherPath,
    akkaIODispatcherPath: String = RedisConfigDefaults.IO.Akka.IODispatcherPath,
    akkaDecoderDispatcherPath: String = RedisConfigDefaults.IO.Akka.DecoderDispatcherPath
  ): Redis = new Redis(
    host = host,
    port = port,
    passwordOpt = passwordOpt,
    database = database,
    nameOpt = nameOpt,
    connectTimeout = connectTimeout,
    receiveTimeoutOpt = receiveTimeoutOpt,
    maxWriteBatchSize = maxWriteBatchSize,
    tcpSendBufferSizeHint = tcpSendBufferSizeHint,
    // BUGFIX: was `tcpSendBufferSizeHint`, silently discarding the configured
    // receive buffer size hint.
    tcpReceiveBufferSizeHint = tcpReceiveBufferSizeHint,
    actorSystemName = actorSystemName,
    akkaListenerDispatcherPath = akkaListenerDispatcherPath,
    akkaIODispatcherPath = akkaIODispatcherPath,
    akkaDecoderDispatcherPath = akkaDecoderDispatcherPath
  )

  /**
   * Constructs a $redis instance using the default config.
   *
   * @return the constructed $redis
   */
  def apply() = new Redis(RedisConfig())

  /**
   * Constructs a $redis instance from a [[scredis.RedisConfig]].
   *
   * @param config [[scredis.RedisConfig]]
   * @return the constructed $redis
   */
  def apply(config: RedisConfig): Redis = new Redis(config)

  /**
   * Constructs a $redis instance from a $tc.
   *
   * @note The config must contain the scredis object at its root.
   *
   * @param config $tc
   * @return the constructed $redis
   */
  def apply(config: Config): Redis = new Redis(config)

  /**
   * Constructs a $redis instance from a config file.
   *
   * @note The config file must contain the scredis object at its root.
   * This constructor is equivalent to {{{
   * Redis(configName, "scredis")
   * }}}
   *
   * @param configName config filename
   * @return the constructed $redis
   */
  def apply(configName: String): Redis = new Redis(configName)

  /**
   * Constructs a $redis instance from a config file and using the provided path.
   *
   * @note The path must include to the scredis object, e.g. x.y.scredis
   *
   * @param configName config filename
   * @param path path pointing to the scredis config object
   * @return the constructed $redis
   */
  def apply(configName: String, path: String): Redis = new Redis(configName, path)

  /**
   * Constructs a $redis instance using provided parameters.
   *
   * @note The provided `ActorSystem` will not be shutdown after invoking `quit`.
   *
   * @param host server address
   * @param port server port
   * @param passwordOpt optional server password
   * @param database database index to select
   * @param nameOpt optional client name (available since 2.6.9)
   * @param connectTimeout connection timeout
   * @param receiveTimeoutOpt optional batch receive timeout
   * @param maxWriteBatchSize max number of bytes to send as part of a batch
   * @param tcpSendBufferSizeHint size hint of the tcp send buffer, in bytes
   * @param tcpReceiveBufferSizeHint size hint of the tcp receive buffer, in bytes
   * @param akkaListenerDispatcherPath path to listener dispatcher definition
   * @param akkaIODispatcherPath path to io dispatcher definition
   * @param akkaDecoderDispatcherPath path to decoder dispatcher definition
   * @param system implicit `ActorSystem`
   * @return the constructed $redis
   */
  def withActorSystem(
    host: String = RedisConfigDefaults.Redis.Host,
    port: Int = RedisConfigDefaults.Redis.Port,
    passwordOpt: Option[String] = RedisConfigDefaults.Redis.PasswordOpt,
    database: Int = RedisConfigDefaults.Redis.Database,
    nameOpt: Option[String] = RedisConfigDefaults.Redis.NameOpt,
    connectTimeout: FiniteDuration = RedisConfigDefaults.IO.ConnectTimeout,
    receiveTimeoutOpt: Option[FiniteDuration] = RedisConfigDefaults.IO.ReceiveTimeoutOpt,
    maxWriteBatchSize: Int = RedisConfigDefaults.IO.MaxWriteBatchSize,
    tcpSendBufferSizeHint: Int = RedisConfigDefaults.IO.TCPSendBufferSizeHint,
    tcpReceiveBufferSizeHint: Int = RedisConfigDefaults.IO.TCPReceiveBufferSizeHint,
    akkaListenerDispatcherPath: String = RedisConfigDefaults.IO.Akka.ListenerDispatcherPath,
    akkaIODispatcherPath: String = RedisConfigDefaults.IO.Akka.IODispatcherPath,
    akkaDecoderDispatcherPath: String = RedisConfigDefaults.IO.Akka.DecoderDispatcherPath
  )(implicit system: ActorSystem): Redis = new Redis(
    systemOrName = Left(system),
    host = host,
    port = port,
    passwordOpt = passwordOpt,
    database = database,
    nameOpt = nameOpt,
    connectTimeout = connectTimeout,
    receiveTimeoutOpt = receiveTimeoutOpt,
    maxWriteBatchSize = maxWriteBatchSize,
    tcpSendBufferSizeHint = tcpSendBufferSizeHint,
    // BUGFIX: was `tcpSendBufferSizeHint`, silently discarding the configured
    // receive buffer size hint.
    tcpReceiveBufferSizeHint = tcpReceiveBufferSizeHint,
    akkaListenerDispatcherPath = akkaListenerDispatcherPath,
    akkaIODispatcherPath = akkaIODispatcherPath,
    akkaDecoderDispatcherPath = akkaDecoderDispatcherPath
  )

  /**
   * Constructs a $redis instance using the default config.
   *
   * @note The provided `ActorSystem` will not be shutdown after invoking `quit`.
   *
   * @param system implicit `ActorSystem`
   * @return the constructed $redis
   */
  def withActorSystem()(implicit system: ActorSystem): Redis = withActorSystem(RedisConfig())

  /**
   * Constructs a $redis instance from a [[scredis.RedisConfig]].
   *
   * @note The provided `ActorSystem` will not be shutdown after invoking `quit`.
   *
   * @param config [[scredis.RedisConfig]]
   * @param system implicit `ActorSystem`
   * @return the constructed $redis
   */
  def withActorSystem(config: RedisConfig)(implicit system: ActorSystem): Redis = new Redis(
    systemOrName = Left(system),
    host = config.Redis.Host,
    port = config.Redis.Port,
    passwordOpt = config.Redis.PasswordOpt,
    database = config.Redis.Database,
    nameOpt = config.Redis.NameOpt,
    connectTimeout = config.IO.ConnectTimeout,
    receiveTimeoutOpt = config.IO.ReceiveTimeoutOpt,
    maxWriteBatchSize = config.IO.MaxWriteBatchSize,
    tcpSendBufferSizeHint = config.IO.TCPSendBufferSizeHint,
    tcpReceiveBufferSizeHint = config.IO.TCPReceiveBufferSizeHint,
    akkaListenerDispatcherPath = config.IO.Akka.ListenerDispatcherPath,
    akkaIODispatcherPath = config.IO.Akka.IODispatcherPath,
    akkaDecoderDispatcherPath = config.IO.Akka.DecoderDispatcherPath
  )

  /**
   * Constructs a $redis instance from a $tc.
   *
   * @note The config must contain the scredis object at its root.
   * @note The provided `ActorSystem` will not be shutdown after invoking `quit`.
   *
   * @param config $tc
   * @param system implicit `ActorSystem`
   * @return the constructed $redis
   */
  def withActorSystem(config: Config)(implicit system: ActorSystem): Redis = withActorSystem(
    RedisConfig(config)
  )

  /**
   * Constructs a $redis instance from a config file.
   *
   * @note The config file must contain the scredis object at its root.
   * This constructor is equivalent to {{{
   * Redis(configName, "scredis")
   * }}}
   * @note The provided `ActorSystem` will not be shutdown after invoking `quit`.
   *
   * @param configName config filename
   * @param system implicit `ActorSystem`
   * @return the constructed $redis
   */
  def withActorSystem(configName: String)(implicit system: ActorSystem): Redis = withActorSystem(
    RedisConfig(configName)
  )

  /**
   * Constructs a $redis instance from a config file and using the provided path.
   *
   * @note The path must include to the scredis object, e.g. x.y.scredis
   * @note The provided `ActorSystem` will not be shutdown after invoking `quit`.
   *
   * @param configName config filename
   * @param path path pointing to the scredis config object
   * @param system implicit `ActorSystem`
   * @return the constructed $redis
   */
  def withActorSystem(configName: String, path: String)(
    implicit system: ActorSystem
  ): Redis = withActorSystem(RedisConfig(configName, path))
}
| 1and1/scredis | src/main/scala/scredis/Redis.scala | Scala | apache-2.0 | 20,932 |
/*
* Copyright 2012 Alexander Bertram
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.fhwedel.antscout
package antnet.pheromoneMatrix
import akka.actor.ActorRef
import antnet.AntWay
import map.Node
/**
 * Base class for computing the initial pheromone concentrations.
 *
 * @param nodes all nodes.
 * @param sources source nodes.
 * @param destinations destination nodes.
 */
abstract class PheromoneMatrixInitializer(nodes: collection.Set[Node], sources: Set[Node], destinations: Set[Node]) {

  /**
   * The initial pheromone concentrations.
   *
   * Declared `lazy` so that the abstract `initPheromones` — implemented by a
   * subclass — is not invoked while this superclass is still being
   * constructed. With a strict `val`, the subclass implementation would run
   * before the subclass's own fields are initialized (the classic Scala
   * initialization-order pitfall) and could observe nulls/zeros. Deferring
   * the computation until first access avoids this without changing the
   * accessor's signature.
   */
  lazy val pheromones: Map[ActorRef, Map[ActorRef, Option[Map[AntWay, Double]]]] = initPheromones

  /**
   * Initializes the pheromones.
   *
   * @return A map whose keys are the source nodes. The values are again maps
   *         whose keys are the destination nodes. The values of those maps
   *         are `Option`s, so that the absence of any route from source to
   *         destination can be represented. The content of each `Option` is
   *         a map from outgoing ways to pheromone concentrations.
   *
   *         Map[Source, Map[Destination, Option[Map[Outgoing way, Pheromone concentration]]]]
   */
  def initPheromones: Map[ActorRef, Map[ActorRef, Option[Map[AntWay, Double]]]]
}
| abertram/AntScout | src/main/scala/de.fhwedel.antscout/antnet/pheromoneMatrix/PheromoneMatrixInitializer.scala | Scala | apache-2.0 | 1,732 |
// This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
package ducttape.workflow.builder
import ducttape.workflow.Branch
import ducttape.workflow.BranchPoint
import ducttape.workflow.SpecTypes.SpecPair
import scala.collection.mutable
/** Stores information from TaskTemplateBuilder to be passed back to WorkflowBuilder
 * so that we know which specs and grafts should be used given some realization.
 * This small tree snippet is rooted at a single task and records the inputs and
 * parameters for that task. It is then used to create the full HyperDAG
 * structure by the WorkflowBuilder in the traverse() and getHyperedges()
 * methods.
 *
 * Alternates with BranchInfoTree. */
private[builder] class BranchPointTree(val branchPoint: BranchPoint) {

  val children = new mutable.ArrayBuffer[BranchTree]

  /** Returns the child for branch `br`, creating and registering it first if absent. */
  def getOrAdd(br: Branch): BranchTree =
    children.find(_.branch == br).getOrElse {
      val added = new BranchTree(br)
      children += added
      added
    }

  /** Recursively enumerates all specs in this tree. */
  def specs: Iterable[SpecPair] =
    children.flatMap { child =>
      val direct = child.terminalData.flatMap(_.specs)
      val nested = child.children.flatMap(_.tree.specs)
      direct ++ nested
    }

  override def toString() = "(BP=" + branchPoint + ": " + children.mkString + ")"
}
| jhclark/ducttape | src/main/scala/ducttape/workflow/builder/BranchPointTree.scala | Scala | mpl-2.0 | 1,598 |
/*
* Copyright (c) 2021. StulSoft
*/
package com.stulsoft.areas
/**
 * An immutable rectangle described by its width and length.
 *
 * @param width the rectangle's width
 * @param length the rectangle's length
 * @author Yuriy Stul
 */
case class Rectangle(width: Double, length: Double)
| ysden123/poc | cats-examples/first-steps/src/main/scala/com/stulsoft/areas/Rectangle.scala | Scala | mit | 148 |
package cephui.elements
import com.payalabs.scalajs.react.bridge.{ReactBridgeComponent, ComponentNamespace}
import scala.scalajs.js
import japgolly.scalajs.react._
/**
 * Common class for all [ReactBootstrap](http://react-bootstrap.github.io/)'s components.
 *
 * The `@ComponentNamespace` annotation directs scalajs-react-bridge to resolve
 * the underlying JavaScript component under the global `ReactBootstrap` object.
 */
@ComponentNamespace("ReactBootstrap")
abstract class ReactBootstrapComponent extends ReactBridgeComponent
/**
 * Bridge to [ReactBootstrap](http://react-bootstrap.github.io/)'s Button component.
 *
 * Every prop defaults to `js.undefined`, i.e. "not set".
 */
case class Button(
  id: js.UndefOr[String] = js.undefined, className: js.UndefOr[String] = js.undefined,
  ref: js.UndefOr[String] = js.undefined, key: js.UndefOr[Any] = js.undefined,
  bsStyle: js.UndefOr[String] = js.undefined, // one of: "success", "warning", "danger", "info", "default", "primary", "link"
  bsSize: js.UndefOr[String] = js.undefined, // one of: "lg", "large", "sm", "small", "xs", "xsmall"
  active: js.UndefOr[Boolean] = js.undefined,
  block: js.UndefOr[Boolean] = js.undefined,
  componentClass: js.UndefOr[String] = js.undefined, // You can use a custom element type for this component.
  disabled: js.UndefOr[Boolean] = js.undefined,
  href: js.UndefOr[String] = js.undefined,
  onClick: js.UndefOr[() => Unit] = js.undefined,
  `type`: js.UndefOr[String] = js.undefined // one of: 'button', 'reset', 'submit'
) extends ReactBootstrapComponent
/** Bridge to [ReactBootstrap](http://react-bootstrap.github.io/)'s Nav component. */
case class Nav(
  id: js.UndefOr[String] = js.undefined,
  className: js.UndefOr[String] = js.undefined,
  ref: js.UndefOr[String] = js.undefined, key: js.UndefOr[Any] = js.undefined,
  activeKey: js.UndefOr[Any] = js.undefined,
  bsStyle: js.UndefOr[String] = js.undefined)
  extends ReactBootstrapComponent
/** Bridge to [ReactBootstrap](http://react-bootstrap.github.io/)'s NavItem component. */
case class NavItem(
  id: js.UndefOr[String] = js.undefined,
  className: js.UndefOr[String] = js.undefined,
  ref: js.UndefOr[String] = js.undefined,
  key: js.UndefOr[Any] = js.undefined,
  eventKey: js.UndefOr[Any] = js.undefined,
  href: js.UndefOr[String] = js.undefined,
  title: js.UndefOr[String] = js.undefined,
  onClick: js.UndefOr[() => Unit] = js.undefined,
  onSelect: js.UndefOr[() => Unit] = js.undefined
)
  extends ReactBootstrapComponent
/** Bridge to [ReactBootstrap](http://react-bootstrap.github.io/)'s Grid component. */
case class Grid(
  id: js.UndefOr[String] = js.undefined,
  className: js.UndefOr[String] = js.undefined,
  ref: js.UndefOr[String] = js.undefined,
  key: js.UndefOr[Any] = js.undefined,
  bsClass: js.UndefOr[String] = js.undefined,
  componentClass: js.UndefOr[String] = js.undefined,
  fluid: js.UndefOr[Boolean] = js.undefined)
  extends ReactBootstrapComponent
/** Bridge to [ReactBootstrap](http://react-bootstrap.github.io/)'s Row component. */
case class Row(
  id: js.UndefOr[String] = js.undefined,
  className: js.UndefOr[String] = js.undefined,
  ref: js.UndefOr[String] = js.undefined,
  key: js.UndefOr[Any] = js.undefined,
  bsClass: js.UndefOr[String] = js.undefined)
  extends ReactBootstrapComponent
/**
 * Bridge to [ReactBootstrap](http://react-bootstrap.github.io/)'s Col (grid column)
 * component. The lg/md/sm/xs prop families map to Bootstrap's responsive
 * column classes, as documented on each prop below.
 */
case class Col(
  id: js.UndefOr[String] = js.undefined,
  className: js.UndefOr[String] = js.undefined,
  ref: js.UndefOr[String] = js.undefined, key: js.UndefOr[Any] = js.undefined,
  // 'col' Base CSS class and prefix for the component. Generally one should only change bsClass to provide new, non-Bootstrap, CSS styles for a component.
  bsClass: js.UndefOr[String] = js.undefined,
  //'div' You can use a custom element type for this component.
  componentClass: js.UndefOr[String] = js.undefined,
  // The number of columns you wish to span for Large devices Desktops (≥1200px) class-prefix col-lg-
  lg: js.UndefOr[Int] = js.undefined,
  /*Hide column on Large devices Desktops adds class hidden-lg*/
  lgHidden: js.UndefOr[Boolean] = js.undefined,
  /*Move columns to the right for Large devices Desktops class-prefix col-lg-offset-*/
  lgOffset: js.UndefOr[Int] = js.undefined,
  /*Change the order of grid columns to the left for Large devices Desktops class-prefix col-lg-pull-*/
  lgPull: js.UndefOr[Int] = js.undefined,
  /* Change the order of grid columns to the right for Large devices Desktops class-prefix col-lg-push- */
  lgPush: js.UndefOr[Int] = js.undefined,
  /*The number of columns you wish to span for Medium devices Desktops (≥992px) class-prefix col-md-*/
  md: js.UndefOr[Int] = js.undefined,
  /*Hide column on Medium devices Desktops adds class hidden-md*/
  mdHidden: js.UndefOr[Boolean] = js.undefined,
  /*Move columns to the right for Medium devices Desktops class-prefix col-md-offset-*/
  mdOffset: js.UndefOr[Int] = js.undefined,
  /*Change the order of grid columns to the left for Medium devices Desktops class-prefix col-md-pull-*/
  mdPull: js.UndefOr[Int] = js.undefined,
  /*Change the order of grid columns to the right for Medium devices Desktops class-prefix col-md-push-*/
  mdPush: js.UndefOr[Int] = js.undefined,
  /*The number of columns you wish to span for Small devices Tablets (≥768px) class-prefix col-sm-*/
  sm: js.UndefOr[Int] = js.undefined,
  /*Hide column on Small devices Tablets adds class hidden-sm*/
  smHidden: js.UndefOr[Boolean] = js.undefined,
  /*Move columns to the right for Small devices Tablets class-prefix col-sm-offset-*/
  smOffset: js.UndefOr[Int] = js.undefined,
  /*Change the order of grid columns to the left for Small devices Tablets class-prefix col-sm-pull-*/
  smPull: js.UndefOr[Int] = js.undefined,
  /* Change the order of grid columns to the right for Small devices Tablets class-prefix col-sm-push-*/
  smPush: js.UndefOr[Int] = js.undefined,
  /*The number of columns you wish to span for Extra small devices Phones (<768px) class-prefix col-xs-*/
  xs: js.UndefOr[Int] = js.undefined,
  /*Hide column on Extra small devices Phones adds class hidden-xs*/
  xsHidden: js.UndefOr[Boolean] = js.undefined,
  /*Move columns to the right for Extra small devices Phones class-prefix col-xs-offset- */
  xsOffset: js.UndefOr[Int] = js.undefined,
  // Change the order of grid columns to the left for Extra small devices Phones class-prefix col-xs-pull-
  xsPull: js.UndefOr[Int] = js.undefined,
  // Change the order of grid columns to the right for Extra small devices Phones class-prefix col-xs-push-
  xsPush: js.UndefOr[Int] = js.undefined)
  extends ReactBootstrapComponent
/** Bridge to [ReactBootstrap](http://react-bootstrap.github.io/)'s Clearfix component. */
case class Clearfix(
  id: js.UndefOr[String] = js.undefined,
  className: js.UndefOr[String] = js.undefined,
  ref: js.UndefOr[String] = js.undefined, key: js.UndefOr[Any] = js.undefined,
  /*'clearfix'Base CSS class and prefix for the component. Generally one should only change bsClass to provide new, non-Bootstrap, CSS styles for a component. */
  bsClass: js.UndefOr[ String] = js.undefined,
  /*'div'You can use a custom element type for this component.*/
  componentClass: js.UndefOr[String] = js.undefined,
  /* Apply clearfix on Large devices Desktops adds class visible-lg-block */
  visibleLgBlock: js.UndefOr[Boolean] = js.undefined,
  /* Apply clearfix on Medium devices Desktops adds class visible-md-block */
  visibleMdBlock: js.UndefOr[Boolean] = js.undefined,
  /* Apply clearfix on Small devices Tablets adds class visible-sm-block */
  visibleSmBlock: js.UndefOr[Boolean] = js.undefined,
  /* Apply clearfix on Extra small devices Phones adds class visible-xs-block */
  visibleXsBlock: js.UndefOr[Boolean] = js.undefined)
  extends ReactBootstrapComponent
/**
 * Bridge to [ReactBootstrap](http://react-bootstrap.github.io/)'s Panel component.
 *
 * Unlike most components in this file, `bsClass`/`bsStyle`/`defaultExpanded`
 * carry concrete defaults rather than `js.undefined`.
 */
case class Panel(
  id: js.UndefOr[String] = js.undefined,
  className: js.UndefOr[String] = js.undefined,
  ref: js.UndefOr[String] = js.undefined, key: js.UndefOr[Any] = js.undefined,
  bsClass: js.UndefOr[String] = "panel",
  /*'panel' Base CSS class and prefix for the component. Generally one should only change bsClass to provide new, non-Bootstrap, CSS styles for a component.*/
  bsStyle: js.UndefOr[String] = "default",
  /* one of: "success", "warning", "danger", "info", "default", "primary" 'default' - Component visual or contextual style variants.*/
  collapsible: js.UndefOr[Boolean] = js.undefined,
  defaultExpanded: js.UndefOr[Boolean] = false,
  eventKey: js.UndefOr[Any] = js.undefined,
  expanded: js.UndefOr[Boolean] = js.undefined,
  footer: js.UndefOr[Any] = js.undefined, // Node
  header: js.UndefOr[js.|[ReactNode, String]] = js.undefined, // Node
  headerRole: js.UndefOr[String] = js.undefined,
  onEnter: js.UndefOr[() => Unit] = js.undefined,
  onEntered: js.UndefOr[() => Unit] = js.undefined,
  onEntering: js.UndefOr[() => Unit] = js.undefined,
  onExit: js.UndefOr[() => Unit] = js.undefined,
  onExited: js.UndefOr[() => Unit] = js.undefined,
  onExiting: js.UndefOr[() => Unit] = js.undefined,
  onSelect: js.UndefOr[() => Unit] = js.undefined,
  panelRole: js.UndefOr[String] = js.undefined)
  extends ReactBootstrapComponent
/** Bridge to [ReactBootstrap](http://react-bootstrap.github.io/)'s Accordion component. */
case class Accordion(
  id: js.UndefOr[String] = js.undefined,
  className: js.UndefOr[String] = js.undefined,
  ref: js.UndefOr[String] = js.undefined, key: js.UndefOr[Any] = js.undefined,
  bsClass: js.UndefOr[String] = "panel",
  /*'panel' Base CSS class and prefix for the component. Generally one should only change bsClass to provide new, non-Bootstrap, CSS styles for a component.*/
  bsStyle: js.UndefOr[String] = "default",
  /* one of: "success", "warning", "danger", "info", "default", "primary" 'default' - Component visual or contextual style variants.*/
  collapsible: js.UndefOr[Boolean] = js.undefined,
  eventKey: js.UndefOr[Any] = js.undefined,
  expanded: js.UndefOr[Boolean] = js.undefined,
  footer: js.UndefOr[Any] = js.undefined, // Node
  header: js.UndefOr[Any] = js.undefined, // Node
  headerRole: js.UndefOr[String] = js.undefined,
  onEnter: js.UndefOr[() => Unit] = js.undefined,
  onEntered: js.UndefOr[() => Unit] = js.undefined,
  onEntering: js.UndefOr[() => Unit] = js.undefined,
  onExit: js.UndefOr[() => Unit] = js.undefined,
  onExited: js.UndefOr[() => Unit] = js.undefined,
  onExiting: js.UndefOr[() => Unit] = js.undefined,
  onSelect: js.UndefOr[() => Unit] = js.undefined,
  panelRole: js.UndefOr[String] = js.undefined)
  extends ReactBootstrapComponent
/**
 * Bridge to [ReactBootstrap](http://react-bootstrap.github.io/)'s OverlayTrigger component.
 * `overlay` (the element to show, e.g. a [[Tooltip]]) is the only required prop.
 */
case class OverlayTrigger(
  id: js.UndefOr[String] = js.undefined,
  className: js.UndefOr[String] = js.undefined,
  ref: js.UndefOr[String] = js.undefined, key: js.UndefOr[Any] = js.undefined,
  placement: js.UndefOr[String] = "left",
  overlay: ReactNode)
  extends ReactBootstrapComponent
/**
 * Bridge to [ReactBootstrap](http://react-bootstrap.github.io/)'s Tooltip component.
 * `id` is required (no default) because it is necessary for accessibility.
 */
case class Tooltip(
  id: js.UndefOr[String], // An html id attribute, necessary for accessibility
  className: js.UndefOr[String] = js.undefined,
  ref: js.UndefOr[String] = js.undefined, key: js.UndefOr[Any] = js.undefined,
  arrowOffsetLeft: js.UndefOr[js.|[Int, String]] = js.undefined, // The "left" position value for the Tooltip arrow.
  arrowOffsetTop: js.UndefOr[js.|[Int, String]] = js.undefined, // The "top" position value for the Tooltip arrow.
  bsClass: String = "tooltip", // Base CSS class and prefix for the component. Generally one should only change bsClass to provide new, non-Bootstrap, CSS styles for a component.
  placement: String = "right", // one of: 'top', 'right', 'bottom', 'left'; Sets the direction the Tooltip is positioned towards.
  positionLeft: js.UndefOr[js.|[Int, String]] = js.undefined, // The "left" position value for the Tooltip.
  positionTop: js.UndefOr[js.|[Int, String]] = js.undefined // The "top" position value for the Tooltip.
) extends ReactBootstrapComponent
/** Bridge to [ReactBootstrap](http://react-bootstrap.github.io/)'s Table component. */
case class Table(
  id: js.UndefOr[String] = js.undefined, // An html id attribute, necessary for accessibility
  className: js.UndefOr[String] = js.undefined,
  ref: js.UndefOr[String] = js.undefined, key: js.UndefOr[Any] = js.undefined,
  bordered: Boolean = false,
  bsClass: String = "table", // Base CSS class and prefix for the component. Generally one should only change bsClass to provide new, non-Bootstrap, CSS styles for a component.
  condensed: Boolean = false,
  hover: Boolean = false,
  responsive: Boolean = false,
  striped: Boolean = false
) extends ReactBootstrapComponent
/** Bridge to [ReactBootstrap](http://react-bootstrap.github.io/)'s Collapse component. */
case class Collapse(
  id: js.UndefOr[String] = js.undefined, // An html id attribute, necessary for accessibility
  className: js.UndefOr[String] = js.undefined,
  ref: js.UndefOr[String] = js.undefined, key: js.UndefOr[Any] = js.undefined,
  dimension: String = "width",
  getDimensionValue: js.UndefOr[() => Unit] = js.undefined,
  in: Boolean = false, // Show the component; triggers the fade in or fade out animation
  onEnter: js.UndefOr[() => Unit] = js.undefined, // Callback fired before the component fades in
  onEntered: js.UndefOr[() => Unit] = js.undefined, // Callback fired after the has component faded in
  onEntering: js.UndefOr[() => Unit] = js.undefined, // Callback fired after the component starts to fade in
  onExit: js.UndefOr[() => Unit] = js.undefined, // Callback fired before the component fades out
  onExited: js.UndefOr[() => Unit] = js.undefined, // Callback fired after the component has faded out
  onExiting: js.UndefOr[() => Unit] = js.undefined, // Callback fired after the component starts to fade out
  timeout: Int = 300, // Duration of the fade animation in milliseconds, to ensure that finishing callbacks are fired even if the original browser transition end events are canceled
  transitionAppear: Boolean = false, // Run the fade in animation when the component mounts, if it is initially shown
  unmountOnExit: Boolean = false // Unmount the component (remove it from the DOM) when it is faded out
) extends ReactBootstrapComponent
/** Bridge to [ReactBootstrap](http://react-bootstrap.github.io/)'s Alert component. */
case class Alert(
  id: js.UndefOr[String] = js.undefined,
  className: js.UndefOr[String] = js.undefined,
  ref: js.UndefOr[String] = js.undefined, key: js.UndefOr[Any] = js.undefined,
  /** Base CSS class and prefix for the component. Generally one should only change bsClass to provide new,
   * non-Bootstrap, CSS styles for a component.*/
  bsClass: js.UndefOr[String] = "alert",
  /**one of: "success", "warning", "danger", "info"; Component visual or contextual style variants. */
  bsStyle: js.UndefOr[String] = "info",
  /**'Close alert' */
  closeLabel: js.UndefOr[String] = js.undefined,
  onDismiss: js.UndefOr[() => Unit] = js.undefined
) extends ReactBootstrapComponent
/**
 * Bridge to [ReactBootstrap](http://react-bootstrap.github.io/)'s Glyphicon component.
 * `glyph` (the icon name) is the only required prop.
 */
case class Glyphicon(
  glyph: String,
  id: js.UndefOr[String] = js.undefined,
  className: js.UndefOr[String] = js.undefined,
  ref: js.UndefOr[String] = js.undefined, key: js.UndefOr[Any] = js.undefined,
  /**Base CSS class and prefix for the component. Generally one should only change bsClass to provide new,
   *non-Bootstrap, CSS styles for a component.*/
  bsClass: js.UndefOr[String] = "glyphicon"
) extends ReactBootstrapComponent
| vivint-smarthome/ceph-on-mesos | ui/src/main/scala/cephui/elements/ReactBootstrapComponent.scala | Scala | apache-2.0 | 14,341 |
package com.sksamuel.elastic4s.admin
import com.sksamuel.elastic4s.ElasticDsl
import org.scalatest.FunSuite
class RefreshIndexDslTest extends FunSuite with ElasticDsl {

  // Compile-time smoke test: constructing the refresh-index DSL expression is
  // enough to verify that the DSL's implicits still resolve; no assertion needed.
  test("refresh index dsl compiles") {
    refresh index "myindex"
  }
}
| alexander-svendsen/elastic4s | elastic4s-core/src/test/scala/com/sksamuel/elastic4s/admin/RefreshIndexDslTest.scala | Scala | apache-2.0 | 245 |
package org.scalatra
import auth.strategy.{BasicAuthStrategy, BasicAuthSupport}
import auth.{ScentrySupport, ScentryConfig}
import org.scalatra.BasicAuthExample.AuthenticationSupport
object BasicAuthExample {

  /** Minimal user model used by this example: just an identifier. */
  case class MyUser(id: String)

  /**
   * HTTP Basic auth strategy that accepts a single hard-coded credential pair.
   */
  class OurBasicAuthStrategy(protected override val app: ScalatraKernel, realm: String)
    extends BasicAuthStrategy[MyUser](app, realm) {

    // Example-only check: a real application would look the user up in a
    // credential store (and compare secrets in constant time).
    protected def validate(userName: String, password: String): Option[MyUser] = {
      if(userName == "scalatra" && password == "scalatra") Some(MyUser("scalatra"))
      else None
    }

    // Stable identifier for the authenticated user.
    protected def getUserId(user: MyUser): String = user.id
  }

  /**
   * Mixin that wires Scentry up with [[OurBasicAuthStrategy]] for any
   * ScalatraKernel it is stacked onto.
   */
  trait AuthenticationSupport extends ScentrySupport[MyUser] with BasicAuthSupport[MyUser] { self: ScalatraKernel =>

    val realm = "Scalatra Basic Auth Example"

    protected def contextPath = request.getContextPath

    // How the authenticated user is (de)serialized to and from the session.
    protected def fromSession = { case id: String => MyUser(id) }
    protected def toSession = { case usr: MyUser => usr.id }

    protected val scentryConfig = (new ScentryConfig {}).asInstanceOf[ScentryConfiguration]

    // Run the Basic strategy's unauthenticated handler (i.e. send the
    // WWW-Authenticate challenge) when a request is not authenticated.
    override protected def configureScentry = {
      scentry.unauthenticated {
        scentry.strategies('Basic).unauthenticated()
      }
    }

    override protected def registerAuthStrategies = {
      scentry.registerStrategy('Basic, app => new OurBasicAuthStrategy(app, realm))
    }
  }
}
/**
 * Servlet demonstrating HTTP Basic authentication: each route calls
 * `basicAuth` before rendering its page.
 */
class BasicAuthExample extends ScalatraServlet with AuthenticationSupport {

  get("/?") {
    basicAuth
    <html>
      <body>
        <h1>Hello from Scalatra</h1>
        <p><a href="/auth/linked" >click</a></p>
      </body>
    </html>
  }

  get("/linked") {
    basicAuth
    <html>
      <body>
        <h1>Hello again from Scalatra</h1>
        <p><a href="/" >back</a></p>
      </body>
    </html>
  }
}
| kuochaoyi/scalatra | example/src/main/scala/org/scalatra/BasicAuthExample.scala | Scala | bsd-2-clause | 1,801 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.datastream
import org.apache.calcite.plan._
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.{JoinInfo, JoinRelType}
import org.apache.calcite.rel.{BiRel, RelNode, RelWriter}
import org.apache.calcite.rex.RexNode
import org.apache.flink.api.common.functions.{FlatMapFunction, MapFunction}
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.functions.NullByteKeySelector
import org.apache.flink.api.java.operators.join.JoinType
import org.apache.flink.api.java.tuple.Tuple
import org.apache.flink.api.java.typeutils.ResultTypeQueryable
import org.apache.flink.streaming.api.datastream.DataStream
import org.apache.flink.table.api.{StreamQueryConfig, TableException}
import org.apache.flink.table.plan.nodes.CommonJoin
import org.apache.flink.table.plan.schema.RowSchema
import org.apache.flink.table.plan.util.UpdatingPlanChecker
import org.apache.flink.table.planner.StreamPlanner
import org.apache.flink.table.runtime.CRowKeySelector
import org.apache.flink.table.runtime.join.{OuterJoinPaddingUtil, ProcTimeBoundedStreamJoin, RowTimeBoundedStreamJoin, WindowJoinUtil}
import org.apache.flink.table.runtime.operators.KeyedCoProcessOperatorWithWatermarkDelay
import org.apache.flink.table.runtime.types.{CRow, CRowTypeInfo}
import org.apache.flink.table.util.Logging
import org.apache.flink.util.Collector
/**
 * RelNode for a time windowed stream join.
 *
 * Joins two append-only streams on their equi-keys plus a time-window
 * condition. Depending on `isRowTime` the join runs on row time or
 * processing time; a negative relative window size (upper bound below the
 * lower bound) degenerates into a join that can never match.
 */
class DataStreamWindowJoin(
    cluster: RelOptCluster,
    traitSet: RelTraitSet,
    leftNode: RelNode,
    rightNode: RelNode,
    joinCondition: RexNode,
    joinType: JoinRelType,
    leftSchema: RowSchema,
    rightSchema: RowSchema,
    schema: RowSchema,
    isRowTime: Boolean,
    leftLowerBound: Long,
    leftUpperBound: Long,
    leftTimeIdx: Int,
    rightTimeIdx: Int,
    remainCondition: Option[RexNode],
    ruleDescription: String)
  extends BiRel(cluster, traitSet, leftNode, rightNode)
  with CommonJoin
  with DataStreamRel
  with Logging {

  override def deriveRowType(): RelDataType = schema.relDataType

  /** Creates a copy of this node with new inputs; all other attributes are kept. */
  override def copy(traitSet: RelTraitSet, inputs: java.util.List[RelNode]): RelNode = {
    new DataStreamWindowJoin(
      cluster,
      traitSet,
      inputs.get(0),
      inputs.get(1),
      joinCondition,
      joinType,
      leftSchema,
      rightSchema,
      schema,
      isRowTime,
      leftLowerBound,
      leftUpperBound,
      leftTimeIdx,
      rightTimeIdx,
      remainCondition,
      ruleDescription)
  }

  override def toString: String = {
    joinToString(
      schema.relDataType,
      joinCondition,
      joinType,
      getExpressionString)
  }

  override def explainTerms(pw: RelWriter): RelWriter = {
    joinExplainTerms(
      super.explainTerms(pw),
      schema.relDataType,
      joinCondition,
      joinType,
      getExpressionString)
  }

  /**
   * Translates this node into a DataStream program: validates that both
   * inputs are append-only, extracts the equi-keys, code-generates the join
   * function, and dispatches to the row-time, processing-time, or
   * negative-window variant.
   */
  override def translateToPlan(
      planner: StreamPlanner,
      queryConfig: StreamQueryConfig): DataStream[CRow] = {

    val config = planner.getConfig

    // Windowed joins only support append-only inputs (no updates/retractions).
    val isLeftAppendOnly = UpdatingPlanChecker.isAppendOnly(left)
    val isRightAppendOnly = UpdatingPlanChecker.isAppendOnly(right)
    if (!isLeftAppendOnly || !isRightAppendOnly) {
      throw new TableException(
        "Windowed stream join does not support updates.")
    }

    val leftDataStream = left.asInstanceOf[DataStreamRel].translateToPlan(planner, queryConfig)
    val rightDataStream = right.asInstanceOf[DataStreamRel].translateToPlan(planner, queryConfig)

    // get the equi-keys and other conditions
    val joinInfo = JoinInfo.of(leftNode, rightNode, joinCondition)
    val leftKeys = joinInfo.leftKeys.toIntArray
    val rightKeys = joinInfo.rightKeys.toIntArray

    val relativeWindowSize = leftUpperBound - leftLowerBound
    val returnTypeInfo = CRowTypeInfo(schema.typeInfo)

    // generate join function
    val joinFunction =
      WindowJoinUtil.generateJoinFunction(
        config,
        joinType,
        leftSchema.typeInfo,
        rightSchema.typeInfo,
        schema,
        remainCondition,
        ruleDescription)

    val joinOpName =
      s"where: (" +
        s"${joinConditionToString(schema.relDataType, joinCondition, getExpressionString)}), " +
        s"join: (${joinSelectionToString(schema.relDataType)})"

    // Map Calcite's join type onto Flink's; SEMI/ANTI etc. are rejected.
    val flinkJoinType = joinType match {
      case JoinRelType.INNER => JoinType.INNER
      case JoinRelType.FULL => JoinType.FULL_OUTER
      case JoinRelType.LEFT => JoinType.LEFT_OUTER
      case JoinRelType.RIGHT => JoinType.RIGHT_OUTER
      case _ => throw new TableException(s"$joinType is not supported.")
    }

    if (relativeWindowSize < 0) {
      LOG.warn(s"The relative window size $relativeWindowSize is negative," +
        " please check the join conditions.")
      createNegativeWindowSizeJoin(
        flinkJoinType,
        leftDataStream,
        rightDataStream,
        leftSchema.arity,
        rightSchema.arity,
        returnTypeInfo)
    } else {
      if (isRowTime) {
        createRowTimeJoin(
          flinkJoinType,
          leftDataStream,
          rightDataStream,
          returnTypeInfo,
          joinOpName,
          joinFunction.name,
          joinFunction.code,
          leftKeys,
          rightKeys
        )
      } else {
        createProcTimeJoin(
          flinkJoinType,
          leftDataStream,
          rightDataStream,
          returnTypeInfo,
          joinOpName,
          joinFunction.name,
          joinFunction.code,
          leftKeys,
          rightKeys
        )
      }
    }
  }

  /**
   * Builds the degenerate plan for a negative window size: no pair of rows
   * can ever satisfy the window condition, so matching output is empty.
   * Inner sides are filtered out entirely, while outer sides are padded with
   * nulls on the opposite side; filtering (rather than swapping in an empty
   * source) keeps the inputs' watermarks flowing.
   */
  def createNegativeWindowSizeJoin(
      joinType: JoinType,
      leftDataStream: DataStream[CRow],
      rightDataStream: DataStream[CRow],
      leftArity: Int,
      rightArity: Int,
      returnTypeInfo: TypeInformation[CRow]): DataStream[CRow] = {

    // We filter all records instead of adding an empty source to preserve the watermarks.
    val allFilter = new FlatMapFunction[CRow, CRow] with ResultTypeQueryable[CRow] {
      override def flatMap(value: CRow, out: Collector[CRow]): Unit = { }

      override def getProducedType: TypeInformation[CRow] = returnTypeInfo
    }

    // Pads a left row with nulls on the right side (for LEFT/FULL outer joins).
    val leftPadder = new MapFunction[CRow, CRow] with ResultTypeQueryable[CRow] {
      val paddingUtil = new OuterJoinPaddingUtil(leftArity, rightArity)

      override def map(value: CRow): CRow = new CRow(paddingUtil.padLeft(value.row), true)

      override def getProducedType: TypeInformation[CRow] = returnTypeInfo
    }

    // Pads a right row with nulls on the left side (for RIGHT/FULL outer joins).
    val rightPadder = new MapFunction[CRow, CRow] with ResultTypeQueryable[CRow] {
      val paddingUtil = new OuterJoinPaddingUtil(leftArity, rightArity)

      override def map(value: CRow): CRow = new CRow(paddingUtil.padRight(value.row), true)

      override def getProducedType: TypeInformation[CRow] = returnTypeInfo
    }

    val leftP = leftDataStream.getParallelism
    val rightP = rightDataStream.getParallelism

    joinType match {
      case JoinType.INNER =>
        leftDataStream.flatMap(allFilter).name("Empty Inner Join").setParallelism(leftP)
          .union(rightDataStream.flatMap(allFilter).name("Empty Inner Join").setParallelism(rightP))
      case JoinType.LEFT_OUTER =>
        leftDataStream.map(leftPadder).name("Left Outer Join").setParallelism(leftP)
          .union(rightDataStream.flatMap(allFilter).name("Left Outer Join").setParallelism(rightP))
      case JoinType.RIGHT_OUTER =>
        leftDataStream.flatMap(allFilter).name("Right Outer Join").setParallelism(leftP)
          .union(rightDataStream.map(rightPadder).name("Right Outer Join").setParallelism(rightP))
      case JoinType.FULL_OUTER =>
        leftDataStream.map(leftPadder).name("Full Outer Join").setParallelism(leftP)
          .union(rightDataStream.map(rightPadder).name("Full Outer Join").setParallelism(rightP))
      case _ => throw new TableException(s"$joinType is not supported.")
    }
  }

  /**
   * Creates the processing-time windowed join. With equi-keys, the two
   * streams are keyed on them; without, everything is keyed on a constant
   * null byte and the join runs in a single (max) parallel instance.
   */
  def createProcTimeJoin(
      joinType: JoinType,
      leftDataStream: DataStream[CRow],
      rightDataStream: DataStream[CRow],
      returnTypeInfo: TypeInformation[CRow],
      operatorName: String,
      joinFunctionName: String,
      joinFunctionCode: String,
      leftKeys: Array[Int],
      rightKeys: Array[Int]): DataStream[CRow] = {
    val procJoinFunc = new ProcTimeBoundedStreamJoin(
      joinType,
      leftLowerBound,
      leftUpperBound,
      leftSchema.typeInfo,
      rightSchema.typeInfo,
      joinFunctionName,
      joinFunctionCode)

    if (!leftKeys.isEmpty) {
      leftDataStream.connect(rightDataStream)
        .keyBy(
          new CRowKeySelector(leftKeys, leftSchema.projectedTypeInfo(leftKeys)),
          new CRowKeySelector(rightKeys, rightSchema.projectedTypeInfo(rightKeys)))
        .process(procJoinFunc)
        .name(operatorName)
        .returns(returnTypeInfo)
    } else {
      leftDataStream.connect(rightDataStream)
        .keyBy(new NullByteKeySelector[CRow](), new NullByteKeySelector[CRow]())
        .process(procJoinFunc)
        .setParallelism(1)
        .setMaxParallelism(1)
        .name(operatorName)
        .returns(returnTypeInfo)
    }
  }

  /**
   * Creates the row-time windowed join. The join function is wrapped in a
   * [[KeyedCoProcessOperatorWithWatermarkDelay]] that holds watermarks back
   * by the function's maximum output delay. As with the processing-time
   * variant, the absence of equi-keys forces a single parallel instance
   * keyed on a constant null byte.
   */
  def createRowTimeJoin(
      joinType: JoinType,
      leftDataStream: DataStream[CRow],
      rightDataStream: DataStream[CRow],
      returnTypeInfo: TypeInformation[CRow],
      operatorName: String,
      joinFunctionName: String,
      joinFunctionCode: String,
      leftKeys: Array[Int],
      rightKeys: Array[Int]): DataStream[CRow] = {
    val rowTimeJoinFunc = new RowTimeBoundedStreamJoin(
      joinType,
      leftLowerBound,
      leftUpperBound,
      allowedLateness = 0L,
      leftSchema.typeInfo,
      rightSchema.typeInfo,
      joinFunctionName,
      joinFunctionCode,
      leftTimeIdx,
      rightTimeIdx)

    if (!leftKeys.isEmpty) {
      leftDataStream
        .connect(rightDataStream)
        .keyBy(
          new CRowKeySelector(leftKeys, leftSchema.projectedTypeInfo(leftKeys)),
          new CRowKeySelector(rightKeys, rightSchema.projectedTypeInfo(rightKeys)))
        .transform(
          operatorName,
          returnTypeInfo,
          new KeyedCoProcessOperatorWithWatermarkDelay[Tuple, CRow, CRow, CRow](
            rowTimeJoinFunc,
            rowTimeJoinFunc.getMaxOutputDelay)
        )
    } else {
      leftDataStream.connect(rightDataStream)
        .keyBy(new NullByteKeySelector[CRow](), new NullByteKeySelector[CRow])
        .transform(
          operatorName,
          returnTypeInfo,
          new KeyedCoProcessOperatorWithWatermarkDelay[java.lang.Byte, CRow, CRow, CRow](
            rowTimeJoinFunc,
            rowTimeJoinFunc.getMaxOutputDelay)
        )
        .setParallelism(1)
        .setMaxParallelism(1)
    }
  }
}
| fhueske/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/plan/nodes/datastream/DataStreamWindowJoin.scala | Scala | apache-2.0 | 11,503 |
package com.sothr.imagetools.engine.dao
import com.sothr.imagetools.engine.util.{PropertiesService, PropertyEnum}
import grizzled.slf4j.Logging
import org.hibernate.SessionFactory
import org.hibernate.boot.registry.StandardServiceRegistryBuilder
import org.hibernate.cfg.Configuration
import org.hibernate.service.ServiceRegistry
/**
* Utility class to interface with hibernate
*
* Created by drew on 2/8/14.
*/
/**
 * Utility class to interface with hibernate
 *
 * Created by drew on 2/8/14.
 */
object HibernateUtil extends Logging {

  // NOTE: `serviceRegistry` must be declared *before* `sessionFactory`.
  // Members of a Scala object initialize in declaration order: the original
  // code declared this var (with its explicit `= null` initializer) *after*
  // the `sessionFactory` val, so `buildSessionFactory()` first assigned the
  // registry and then the `= null` initializer ran, silently resetting the
  // field back to null.
  private var serviceRegistry: ServiceRegistry = null
  private val sessionFactory: SessionFactory = buildSessionFactory()

  /** Returns the process-wide Hibernate SessionFactory built at first access of this object. */
  def getSessionFactory: SessionFactory = {
    sessionFactory
  }

  /**
   * Builds the SessionFactory from hibernate.cfg.xml, overriding the JDBC
   * connection URL with the configured database location.
   *
   * @throws ExceptionInInitializerError if Hibernate configuration fails
   */
  private def buildSessionFactory(): SessionFactory = {
    try {
      // Create the SessionFactory from hibernate.cfg.xml
      val configuration = new Configuration().configure("hibernate.cfg.xml")
      //set the database location
      info(s"Connecting to database at: \\'${PropertiesService.get(PropertyEnum.DatabaseConnectionURL.toString)}\\'")
      configuration.setProperty("hibernate.connection.url", PropertiesService.get(PropertyEnum.DatabaseConnectionURL.toString))
      serviceRegistry = new StandardServiceRegistryBuilder().applySettings(configuration.getProperties).build
      configuration.buildSessionFactory(serviceRegistry)
    } catch {
      case ex: Throwable =>
        // Make sure you log the exception, as it might be swallowed
        error("Initial SessionFactory creation failed.", ex)
        throw new ExceptionInInitializerError(ex)
    }
  }
}
| warricksothr/ImageTools | engine/src/main/scala/com/sothr/imagetools/engine/dao/HibernateUtil.scala | Scala | mit | 1,532 |
package com.twitter.finagle.http.exp.routing
import com.twitter.finagle.http.Method
/** An HTTP [[Method]] paired with a [[Path]]. */
private[http] final case class Schema(
  method: Method,
  path: Path)
| twitter/finagle | finagle-http/src/main/scala/com/twitter/finagle/http/exp/routing/Schema.scala | Scala | apache-2.0 | 157 |
package fpgatidbits.streams
import Chisel._
/**
 * Combinational stream-transforming module: applies `filterFxn` to each
 * element flowing from `in` to `out`. Despite the name this does not drop
 * elements — it maps every input beat to an output beat (handshake signals
 * are passed straight through).
 *
 * @param genI      prototype for the input element type
 * @param genO      prototype for the output element type
 * @param filterFxn hardware mapping from input bits to output bits
 */
class StreamFilter[Tin <: Data, Tout <: Data]
(genI: Tin, genO: Tout, filterFxn: Tin => Tout ) extends Module {
  val io = new Bundle {
    val in = Decoupled(genI).flip
    val out = Decoupled(genO)
  }
  // Pure wiring: valid/ready are forwarded unchanged, so the module adds
  // no latency or backpressure of its own.
  io.out.valid := io.in.valid
  io.out.bits := filterFxn(io.in.bits)
  io.in.ready := io.out.ready
}
object StreamFilter {
  /**
   * Convenience factory: instantiate a [[StreamFilter]] for `in`, wire the
   * input stream into it, and return the transformed output stream.
   */
  def apply[Tin <: Data, Tout <: Data]
  (in: DecoupledIO[Tin], outGen: Tout, filterFxn: Tin => Tout ) = {
    val filterIO = Module(new StreamFilter[Tin, Tout](in.bits.clone, outGen.clone, filterFxn)).io
    filterIO.in <> in
    filterIO.out
  }
}
import fpgatidbits.dma._
object ReadRespFilter {
  /** Narrows a memory-response stream down to just its read-data payload. */
  def apply(in: DecoupledIO[GenericMemoryResponse]) =
    StreamFilter(in, in.bits.readData, (resp: GenericMemoryResponse) => resp.readData)
}
| maltanar/fpga-tidbits | src/main/scala/fpgatidbits/streams/StreamFilter.scala | Scala | bsd-2-clause | 855 |
package models.gitolite
import org.specs2.mutable._
import scalikejdbc._
import scalikejdbc.specs2.mutable.AutoRollback
/**
 * CRUD round-trip tests for the generated `Tags` ScalikeJDBC mapper.
 * Each example runs inside [[AutoRollback]], so database mutations are
 * rolled back after every test. All lookups assume a row with id 123
 * exists in the test fixture — TODO confirm the fixture seeds it.
 */
class TagsSpec extends Specification {

  "Tags" should {

    // Syntax provider used to reference columns (t.id etc.) in where clauses.
    val t = Tags.syntax("t")

    "find by primary keys" in new AutoRollback {
      val maybeFound = Tags.find(123)
      maybeFound.isDefined should beTrue
    }
    "find by where clauses" in new AutoRollback {
      val maybeFound = Tags.findBy(sqls.eq(t.id, 123))
      maybeFound.isDefined should beTrue
    }
    "find all records" in new AutoRollback {
      val allResults = Tags.findAll()
      allResults.size should be_>(0)
    }
    "count all records" in new AutoRollback {
      val count = Tags.countAll()
      count should be_>(0L)
    }
    "find all by where clauses" in new AutoRollback {
      val results = Tags.findAllBy(sqls.eq(t.id, 123))
      results.size should be_>(0)
    }
    "count by where clauses" in new AutoRollback {
      val count = Tags.countBy(sqls.eq(t.id, 123))
      count should be_>(0L)
    }
    "create new record" in new AutoRollback {
      val created = Tags.create()
      created should not beNull
    }
    "save a record" in new AutoRollback {
      val entity = Tags.findAll().head
      // TODO modify something
      // NOTE(review): `modified` is currently identical to `entity`, so the
      // `not equalTo(entity)` expectation below cannot pass until the TODO
      // above actually changes a field — confirm and fix the fixture.
      val modified = entity
      val updated = Tags.save(modified)
      updated should not equalTo(entity)
    }
    "destroy a record" in new AutoRollback {
      val entity = Tags.findAll().head
      Tags.destroy(entity)
      val shouldBeNone = Tags.find(123)
      shouldBeNone.isDefined should beFalse
    }
  }

}
| thomaschoo/gitolite-to-gitbucket | src/test/scala/models/gitolite/TagsSpec.scala | Scala | mit | 1,576 |
package jp.co.dwango.s99
import org.scalatest.{DiagrammedAssertions, FunSpec}
/** Specs for P12: run-length decoding of (count, element) pairs. */
class P12Spec extends FunSpec with DiagrammedAssertions {

  /** Assert that decoding each encoded input yields the paired expectation. */
  private def checkAll(cases: Seq[(List[(Int, Int)], List[Int])]): Unit =
    cases.foreach { case (encoded, expected) =>
      assert(P12.decode(encoded) == expected)
    }

  describe("P12") {
    it("for empty list, decode(s) is s") {
      assert(P12.decode(List.empty[(Int, Int)]) == List.empty[(Int, Int)])
    }
    it("for list has one element") {
      checkAll(
        Seq(
          List((1, 1)) -> List(1),
          List((2, 1)) -> List(1, 1),
          List((3, 1)) -> List(1, 1, 1)
        )
      )
    }
    it("otherwise") {
      checkAll(
        Seq(
          List((1, 1), (1, 2)) -> List(1, 2),
          List((2, 1), (1, 2), (1, 3)) -> List(1, 1, 2, 3),
          List((2, 1), (2, 2), (1, 3)) -> List(1, 1, 2, 2, 3)
        )
      )
    }
  }
}
| dwango/S99 | src/test/scala/jp/co/dwango/s99/P12Spec.scala | Scala | mit | 732 |
import io.gatling.core.Predef._
import io.gatling.http.Predef._
/** Gatling scenario fragments for authenticating against the application. */
object Login {

  // The default login chain is just the clinician flow with the demo
  // credentials; previously this duplicated the request chain verbatim
  // (same "Login page"/"Do login" request names and form params).
  val login = clinical("helleAndersen", "helleAndersen1")

  /** Ends the authenticated session. */
  def logout = exec(http("Logout").get("/logout/index"))

  /**
   * Clinician login: fetch the landing page, pause, then POST the Spring
   * Security form with the given credentials.
   */
  def clinical(userName : String, passWord : String) = exec(http("Login page")
    .get("/"))
    .pause(2)
    .exec(http("Do login")
    .post("""/j_spring_security_check""")
    .formParamMap(Map("j_username" -> userName, "j_password" -> passWord)))

  /**
   * Patient login: single GET with HTTP basic auth and the client-version
   * header expected by the server; redirects are not followed.
   */
  def patient(userName : String, passWord : String) = exec(http("Patient login page")
    .get("/patient")
    .basicAuth(userName, passWord)
    .header("Client-version", "1.29.0")
    .disableFollowRedirect
  )
}
/*
Copyright (c) 2017-2021, Robby, Kansas State University
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.sireum.$internal
/**
 * Placeholder conditional-execution construct: `###(cond) { ... }`.
 * The implementation is intentionally `???` (throws NotImplementedError
 * if invoked); the real behavior is presumably supplied elsewhere by the
 * Sireum toolchain — TODO confirm.
 */
object ### {
  def apply(cond: Boolean)(f: => Unit): Unit = ???
}
| sireum/v3-logika-runtime | macros/shared/src/main/scala/org/sireum/$internal/CC.scala | Scala | bsd-2-clause | 1,444 |
// Copyright 2014 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package au.com.cba.omnia.maestro.core
package hdfs
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import au.com.cba.omnia.permafrost.hdfs.Hdfs
/** Predicate over an HDFS path, used to guard which directories get processed. */
case class GuardFilter(filter: (FileSystem, Path) => Boolean) {
  /** Conjunction: the combined filter passes only when both operands pass. */
  def &&&(that: GuardFilter): GuardFilter =
    GuardFilter { (fileSystem, path) =>
      this.filter(fileSystem, path) && that.filter(fileSystem, path)
    }
}
/**
* (DEPRECATED) Utility functions that operate on the Hadoop filesystem
*
* ''Use [[MaestroHdfs]] instead (same method names, but return type is Hdfs).''
*/
object Guard {
  /** Filter out any directories that HAVE a _PROCESSED file. */
  val NotProcessed = GuardFilter((fs, p) => !fs.exists(new Path(p, "_PROCESSED")))
  /** Filter out any directories that DO NOT HAVE a _INGESTION_COMPLETE file. */
  val IngestionComplete = GuardFilter((fs, p) => fs.exists(new Path(p, "_INGESTION_COMPLETE")))

  // Each public method below is a thin synchronous wrapper that delegates to
  // the equivalent Hdfs-returning operation on MaestroHdfs and runs it
  // immediately via runHdfs (throwing on any failure).

  /** Expands the globs in the provided path and only keeps those directories that pass the filter. */
  def expandPaths(path: String, filter: GuardFilter = NotProcessed): List[String] = runHdfs(MaestroHdfs.expandPaths(path, filter))

  /** Expand the complete file paths from the expandPaths, filtering out directories and 0 byte files */
  def listNonEmptyFiles(paths: List[String]): List[String] = runHdfs(MaestroHdfs.listNonEmptyFiles(paths))

  /** As `expandPath` but the filter is `NotProcessed` and `IngestionComplete`. */
  def expandTransferredPaths(path: String): List[String] = runHdfs(MaestroHdfs.expandTransferredPaths(path))

  /** Creates the _PROCESSED flag to indicate completion of processing in given list of paths */
  def createFlagFile(directoryPath : List[String]): Unit = runHdfs(MaestroHdfs.createFlagFile(directoryPath))

  // Shared Hadoop configuration; lazy so it is only built if a method is used.
  lazy val conf = new Configuration

  // Runs an Hdfs action against `conf`, unwrapping the result: success
  // returns the value, any failure variant is rethrown as an exception.
  def runHdfs[A](hdfs: Hdfs[A]): A =
    hdfs.run(conf).foldAll(
      a => a,
      msg => throw new Exception(msg),
      ex => throw ex,
      (_, ex) => throw ex
    )
}
| CommBank/maestro | maestro-core/src/main/scala/au/com/cba/omnia/maestro/core/hdfs/Guard.scala | Scala | apache-2.0 | 2,575 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.