| code (string) | repo_name (string) | path (string) | language (string) | license (string) | size (int64) |
|---|---|---|---|---|---|
/**
* Swaggy Jenkins
* Jenkins API clients generated from Swagger / Open API specification
*
* The version of the OpenAPI document: 1.1.2-pre.0
* Contact: blah@cliffano.com
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
package org.openapitools.client.model
import org.openapitools.client.core.ApiModel
case class GithubOrganizationlinks (
repositories: Option[Link] = None,
self: Option[Link] = None,
`class`: Option[String] = None
) extends ApiModel
|
cliffano/swaggy-jenkins
|
clients/scala-akka/generated/src/main/scala/org/openapitools/client/model/GithubOrganizationlinks.scala
|
Scala
|
mit
| 588
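The generated model above is a plain case class with all-optional fields, so a client constructs it directly. A minimal usage sketch (the discriminator value passed to `class` is purely illustrative, and the `Link` model referenced by the other two fields is generated elsewhere in the same client, so it is left as `None` here):
import org.openapitools.client.model.GithubOrganizationlinks
object GithubOrganizationlinksExample extends App {
  // All three fields default to None, so partial construction is fine.
  val links = GithubOrganizationlinks(`class` = Some("GithubOrganizationlinks")) // illustrative value
  println(links) // GithubOrganizationlinks(None,None,Some(GithubOrganizationlinks))
}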
|
package io.netflow
import io.netty.handler.logging.LoggingHandler
import io.wasted.util._
object Node extends App with Logger {
def start() {
// OS Checking
val os = System.getProperty("os.name").toLowerCase
if (!(os.contains("nix") || os.contains("nux") || os.contains("bsd") || os.contains("mac") || os.contains("sunos")) || os.contains("win")) {
warn("netflow.io has not been tested on %s", System.getProperty("os.name"))
}
// JVM Checking
val jvmVersion = System.getProperty("java.runtime.version")
if (!jvmVersion.matches("^1.[78].*$")) {
error("Java Runtime %s is not supported", jvmVersion)
return
}
// Create this dummy here to initialize Netty's logging before Akka's
new LoggingHandler()
Server.start()
}
def stop(): Unit = Server.stop()
start()
}
|
ayscb/netflow
|
netflow1/netflow-master/src/main/scala/io/netflow/Node.scala
|
Scala
|
apache-2.0
| 835
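The version gate above matches `java.runtime.version` against the regex `^1.[78].*$`, so Java 7/8 style version strings pass and newer `11.x`-style strings are rejected. A tiny standalone sketch of the same check (the sample version strings are illustrative, not taken from the original project):
object JvmCheckExample extends App {
  private val supported = "^1.[78].*$"
  // Real values would come from System.getProperty("java.runtime.version").
  Seq("1.7.0_80-b15", "1.8.0_252-b09", "11.0.7+10").foreach { v =>
    println(s"$v supported: ${v.matches(supported)}")
  }
}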
|
package org.jetbrains.plugins.scala
package codeInsight
package template
import com.intellij.codeInsight.template.{ExpressionContext, Result}
import com.intellij.psi.PsiDocumentManager
import com.intellij.util.IncorrectOperationException
import org.jetbrains.plugins.scala.lang.completion.lookups.ScalaLookupItem
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTypeDefinition
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.typedef.ScTypeDefinitionImpl
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.psi.types.api.{JavaArrayType, ParameterizedType}
import scala.util._
package object macros {
private[macros] def findElementAtOffset(implicit context: ExpressionContext) = {
val editor = context.getEditor
val manager = PsiDocumentManager.getInstance(editor.getProject)
val document = editor.getDocument
manager.commitDocument(document)
manager.getPsiFile(document) match {
case scalaFile: ScalaFile => Option(scalaFile.findElementAt(context.getStartOffset))
case _ => None
}
}
private[macros] def resultToScExpr(result: Result)
(implicit context: ExpressionContext): Option[ScType] =
Try {
findElementAtOffset.map(ScalaPsiElementFactory.createExpressionFromText(result.toString, _))
} match {
case Success(value) => value.flatMap(_.`type`().toOption)
case Failure(_: IncorrectOperationException) => None
}
private[macros] def arrayComponent(scType: ScType): Option[ScType] = scType match {
case JavaArrayType(argument) => Some(argument)
case paramType@ParameterizedType(_, Seq(head)) if paramType.canonicalText.startsWith("_root_.scala.Array") => Some(head)
case _ => None
}
private[macros] def createLookupItem(definition: ScTypeDefinition): ScalaLookupItem = {
import ScTypeDefinitionImpl._
val name = toQualifiedName(packageName(definition)(Nil, DefaultSeparator) :+ Right(definition))()
val lookupItem = new ScalaLookupItem(definition, name, Option(definition.getContainingClass))
lookupItem.shouldImport = true
lookupItem
}
}
|
jastice/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/codeInsight/template/macros/package.scala
|
Scala
|
apache-2.0
| 2,297
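The `resultToScExpr` helper above wraps a throwing PSI factory call in `scala.util.Try` and collapses the result to an `Option`, deliberately matching only the one exception type it expects. The IntelliJ PSI classes are not available outside the plugin, so here is a minimal standalone sketch of the same Try-to-Option pattern, with integer parsing standing in for the factory call (names and bounds are illustrative):
import scala.util.{Failure, Success, Try}
object TryToOptionExample extends App {
  // Mirrors the Try { ... } match { Success / Failure } shape of resultToScExpr,
  // with NumberFormatException standing in for IncorrectOperationException.
  def parsePort(raw: String): Option[Int] =
    Try(raw.trim.toInt) match {
      case Success(value) => Some(value).filter(p => p > 0 && p < 65536)
      case Failure(_: NumberFormatException) => None
    }
  println(parsePort("8080"))       // Some(8080)
  println(parsePort("not-a-port")) // None
}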
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.admin
import java.util
import java.util.Optional
import java.util.concurrent.ExecutionException
import kafka.common.AdminCommandFailedException
import kafka.log.LogConfig
import kafka.server.DynamicConfig
import kafka.utils.{CommandDefaultOptions, CommandLineUtils, CoreUtils, Exit, Json, Logging}
import kafka.utils.Implicits._
import kafka.utils.json.JsonValue
import org.apache.kafka.clients.admin.AlterConfigOp.OpType
import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, AlterConfigOp, ConfigEntry, NewPartitionReassignment, PartitionReassignment, TopicDescription}
import org.apache.kafka.common.config.ConfigResource
import org.apache.kafka.common.errors.{ReplicaNotAvailableException, UnknownTopicOrPartitionException}
import org.apache.kafka.common.utils.{Time, Utils}
import org.apache.kafka.common.{KafkaException, KafkaFuture, TopicPartition, TopicPartitionReplica}
import scala.jdk.CollectionConverters._
import scala.collection.{Map, Seq, mutable}
import scala.math.Ordered.orderingToOrdered
object ReassignPartitionsCommand extends Logging {
private[admin] val AnyLogDir = "any"
val helpText = "This tool helps to move topic partitions between replicas."
/**
* The earliest version of the partition reassignment JSON. We will default to this
* version if no other version number is given.
*/
private[admin] val EarliestVersion = 1
/**
* The earliest version of the JSON for each partition reassignment topic. We will
* default to this version if no other version number is given.
*/
private[admin] val EarliestTopicsJsonVersion = 1
// Throttles that are set at the level of an individual broker.
private[admin] val brokerLevelLeaderThrottle =
DynamicConfig.Broker.LeaderReplicationThrottledRateProp
private[admin] val brokerLevelFollowerThrottle =
DynamicConfig.Broker.FollowerReplicationThrottledRateProp
private[admin] val brokerLevelLogDirThrottle =
DynamicConfig.Broker.ReplicaAlterLogDirsIoMaxBytesPerSecondProp
private[admin] val brokerLevelThrottles = Seq(
brokerLevelLeaderThrottle,
brokerLevelFollowerThrottle,
brokerLevelLogDirThrottle
)
// Throttles that are set at the level of an individual topic.
private[admin] val topicLevelLeaderThrottle =
LogConfig.LeaderReplicationThrottledReplicasProp
private[admin] val topicLevelFollowerThrottle =
LogConfig.FollowerReplicationThrottledReplicasProp
private[admin] val topicLevelThrottles = Seq(
topicLevelLeaderThrottle,
topicLevelFollowerThrottle
)
private[admin] val cannotExecuteBecauseOfExistingMessage = "Cannot execute because " +
"there is an existing partition assignment. Use --additional to override this and " +
"create a new partition assignment in addition to the existing one. The --additional " +
"flag can also be used to change the throttle by resubmitting the current reassignment."
private[admin] val youMustRunVerifyPeriodicallyMessage = "Warning: You must run " +
"--verify periodically, until the reassignment completes, to ensure the throttle " +
"is removed."
/**
* A map from topic names to partition movements.
*/
type MoveMap = mutable.Map[String, mutable.Map[Int, PartitionMove]]
/**
* A partition movement. The source and destination brokers may overlap.
*
* @param sources The source brokers.
* @param destinations The destination brokers.
*/
sealed case class PartitionMove(sources: mutable.Set[Int],
destinations: mutable.Set[Int]) { }
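// Illustrative example (not part of the original source): if partition 0 of "topic-a" is
// being moved from brokers {1, 2, 3} to {1, 2, 4}, the corresponding MoveMap entry ends up
// roughly as moveMap("topic-a")(0) == PartitionMove(sources = Set(1, 2, 3), destinations = Set(4)),
// since destinations holds only the brokers that are newly receiving a replica.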
/**
* The state of a partition reassignment. The current replicas and target replicas
* may overlap.
*
* @param currentReplicas The current replicas.
* @param targetReplicas The target replicas.
* @param done True if the reassignment is done.
*/
sealed case class PartitionReassignmentState(currentReplicas: Seq[Int],
targetReplicas: Seq[Int],
done: Boolean) {}
/**
* The state of a replica log directory movement.
*/
sealed trait LogDirMoveState {
/**
* True if the move is done without errors.
*/
def done: Boolean
}
/**
* A replica log directory move state where the source log directory is missing.
*
* @param targetLogDir The log directory that we wanted the replica to move to.
*/
sealed case class MissingReplicaMoveState(targetLogDir: String)
extends LogDirMoveState {
override def done = false
}
/**
* A replica log directory move state where the source replica is missing.
*
* @param targetLogDir The log directory that we wanted the replica to move to.
*/
sealed case class MissingLogDirMoveState(targetLogDir: String)
extends LogDirMoveState {
override def done = false
}
/**
* A replica log directory move state where the move is in progress.
*
* @param currentLogDir The current log directory.
* @param futureLogDir The log directory that the replica is moving to.
* @param targetLogDir The log directory that we wanted the replica to move to.
*/
sealed case class ActiveMoveState(currentLogDir: String,
targetLogDir: String,
futureLogDir: String)
extends LogDirMoveState {
override def done = false
}
/**
* A replica log directory move state where there is no move in progress, but we did not
* reach the target log directory.
*
* @param currentLogDir The current log directory.
* @param targetLogDir The log directory that we wanted the replica to move to.
*/
sealed case class CancelledMoveState(currentLogDir: String,
targetLogDir: String)
extends LogDirMoveState {
override def done = true
}
/**
* The completed replica log directory move state.
*
* @param targetLogDir The log directory that we wanted the replica to move to.
*/
sealed case class CompletedMoveState(targetLogDir: String)
extends LogDirMoveState {
override def done = true
}
/**
* An exception thrown to indicate that the command has failed, but we don't want to
* print a stack trace.
*
* @param message The message to print out before exiting. A stack trace will not
* be printed.
*/
class TerseReassignmentFailureException(message: String) extends KafkaException(message) {
}
def main(args: Array[String]): Unit = {
val opts = validateAndParseArgs(args)
var failed = true
var adminClient: Admin = null
try {
val props = if (opts.options.has(opts.commandConfigOpt))
Utils.loadProps(opts.options.valueOf(opts.commandConfigOpt))
else
new util.Properties()
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, opts.options.valueOf(opts.bootstrapServerOpt))
props.putIfAbsent(AdminClientConfig.CLIENT_ID_CONFIG, "reassign-partitions-tool")
adminClient = Admin.create(props)
handleAction(adminClient, opts)
failed = false
} catch {
case e: TerseReassignmentFailureException =>
println(e.getMessage)
case e: Throwable =>
println("Error: " + e.getMessage)
println(Utils.stackTrace(e))
} finally {
// It's good to do this after printing any error stack trace.
if (adminClient != null) {
adminClient.close()
}
}
// If the command failed, exit with a non-zero exit code.
if (failed) {
Exit.exit(1)
}
}
private def handleAction(adminClient: Admin,
opts: ReassignPartitionsCommandOptions): Unit = {
if (opts.options.has(opts.verifyOpt)) {
verifyAssignment(adminClient,
Utils.readFileAsString(opts.options.valueOf(opts.reassignmentJsonFileOpt)),
opts.options.has(opts.preserveThrottlesOpt))
} else if (opts.options.has(opts.generateOpt)) {
generateAssignment(adminClient,
Utils.readFileAsString(opts.options.valueOf(opts.topicsToMoveJsonFileOpt)),
opts.options.valueOf(opts.brokerListOpt),
!opts.options.has(opts.disableRackAware))
} else if (opts.options.has(opts.executeOpt)) {
executeAssignment(adminClient,
opts.options.has(opts.additionalOpt),
Utils.readFileAsString(opts.options.valueOf(opts.reassignmentJsonFileOpt)),
opts.options.valueOf(opts.interBrokerThrottleOpt),
opts.options.valueOf(opts.replicaAlterLogDirsThrottleOpt),
opts.options.valueOf(opts.timeoutOpt))
} else if (opts.options.has(opts.cancelOpt)) {
cancelAssignment(adminClient,
Utils.readFileAsString(opts.options.valueOf(opts.reassignmentJsonFileOpt)),
opts.options.has(opts.preserveThrottlesOpt),
opts.options.valueOf(opts.timeoutOpt))
} else if (opts.options.has(opts.listOpt)) {
listReassignments(adminClient)
} else {
throw new RuntimeException("Unsupported action.")
}
}
/**
* A result returned from verifyAssignment.
*
* @param partStates A map from partitions to reassignment states.
* @param partsOngoing True if there are any ongoing partition reassignments.
* @param moveStates A map from log directories to movement states.
* @param movesOngoing True if there are any ongoing moves that we know about.
*/
case class VerifyAssignmentResult(partStates: Map[TopicPartition, PartitionReassignmentState],
partsOngoing: Boolean = false,
moveStates: Map[TopicPartitionReplica, LogDirMoveState] = Map.empty,
movesOngoing: Boolean = false)
/**
* The entry point for the --verify command.
*
* @param adminClient The AdminClient to use.
* @param jsonString The JSON string to use for the topics and partitions to verify.
* @param preserveThrottles True if we should avoid changing topic or broker throttles.
*
* @return A result that is useful for testing.
*/
def verifyAssignment(adminClient: Admin, jsonString: String, preserveThrottles: Boolean)
: VerifyAssignmentResult = {
val (targetParts, targetLogDirs) = parsePartitionReassignmentData(jsonString)
val (partStates, partsOngoing) = verifyPartitionAssignments(adminClient, targetParts)
val (moveStates, movesOngoing) = verifyReplicaMoves(adminClient, targetLogDirs)
if (!partsOngoing && !movesOngoing && !preserveThrottles) {
// If the partition assignments and replica assignments are done, clear any throttles
// that were set. We have to clear all throttles, because we don't have enough
// information to know all of the source brokers that might have been involved in the
// previous reassignments.
clearAllThrottles(adminClient, targetParts)
}
VerifyAssignmentResult(partStates, partsOngoing, moveStates, movesOngoing)
}
/**
* Verify the partition reassignments specified by the user.
*
* @param adminClient The AdminClient to use.
* @param targets The partition reassignments specified by the user.
*
* @return A tuple of the partition reassignment states, and a
* boolean which is true if there are no ongoing
* reassignments (including reassignments not described
* in the JSON file.)
*/
def verifyPartitionAssignments(adminClient: Admin,
targets: Seq[(TopicPartition, Seq[Int])])
: (Map[TopicPartition, PartitionReassignmentState], Boolean) = {
val (partStates, partsOngoing) = findPartitionReassignmentStates(adminClient, targets)
println(partitionReassignmentStatesToString(partStates))
(partStates, partsOngoing)
}
def compareTopicPartitions(a: TopicPartition, b: TopicPartition): Boolean = {
(a.topic(), a.partition()) < (b.topic(), b.partition())
}
def compareTopicPartitionReplicas(a: TopicPartitionReplica, b: TopicPartitionReplica): Boolean = {
(a.brokerId(), a.topic(), a.partition()) < (b.brokerId(), b.topic(), b.partition())
}
/**
* Convert partition reassignment states to a human-readable string.
*
* @param states A map from topic partitions to states.
* @return A string summarizing the partition reassignment states.
*/
def partitionReassignmentStatesToString(states: Map[TopicPartition, PartitionReassignmentState])
: String = {
val bld = new mutable.ArrayBuffer[String]()
bld.append("Status of partition reassignment:")
states.keySet.toBuffer.sortWith(compareTopicPartitions).foreach { topicPartition =>
val state = states(topicPartition)
if (state.done) {
if (state.currentReplicas.equals(state.targetReplicas)) {
bld.append("Reassignment of partition %s is complete.".
format(topicPartition.toString))
} else {
bld.append(s"There is no active reassignment of partition ${topicPartition}, " +
s"but replica set is ${state.currentReplicas.mkString(",")} rather than " +
s"${state.targetReplicas.mkString(",")}.")
}
} else {
bld.append("Reassignment of partition %s is still in progress.".format(topicPartition))
}
}
bld.mkString(System.lineSeparator())
}
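// Illustrative output (topic names are made up): for a finished reassignment of foo-0 and an
// in-flight one for foo-1, this prints:
//   Status of partition reassignment:
//   Reassignment of partition foo-0 is complete.
//   Reassignment of partition foo-1 is still in progress.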
/**
* Find the state of the specified partition reassignments.
*
* @param adminClient The Admin client to use.
* @param targetReassignments The reassignments we want to learn about.
*
* @return A tuple containing the reassignment states for each topic
* partition, plus whether there are any ongoing reassignments.
*/
def findPartitionReassignmentStates(adminClient: Admin,
targetReassignments: Seq[(TopicPartition, Seq[Int])])
: (Map[TopicPartition, PartitionReassignmentState], Boolean) = {
val currentReassignments = adminClient.
listPartitionReassignments.reassignments.get().asScala
val (foundReassignments, notFoundReassignments) = targetReassignments.partition {
case (part, _) => currentReassignments.contains(part)
}
val foundResults = foundReassignments.map {
case (part, targetReplicas) => (part,
PartitionReassignmentState(
currentReassignments(part).replicas.
asScala.map(i => i.asInstanceOf[Int]),
targetReplicas,
false))
}
val topicNamesToLookUp = new mutable.HashSet[String]()
notFoundReassignments.foreach { case (part, _) =>
if (!currentReassignments.contains(part))
topicNamesToLookUp.add(part.topic)
}
val topicDescriptions = adminClient.
describeTopics(topicNamesToLookUp.asJava).values().asScala
val notFoundResults = notFoundReassignments.map {
case (part, targetReplicas) =>
currentReassignments.get(part) match {
case Some(reassignment) => (part,
PartitionReassignmentState(
reassignment.replicas.asScala.map(_.asInstanceOf[Int]),
targetReplicas,
false))
case None =>
(part, topicDescriptionFutureToState(part.partition,
topicDescriptions(part.topic), targetReplicas))
}
}
val allResults = foundResults ++ notFoundResults
(allResults.toMap, currentReassignments.nonEmpty)
}
private def topicDescriptionFutureToState(partition: Int,
future: KafkaFuture[TopicDescription],
targetReplicas: Seq[Int]): PartitionReassignmentState = {
try {
val topicDescription = future.get()
if (topicDescription.partitions().size() < partition) {
throw new ExecutionException("Too few partitions found", new UnknownTopicOrPartitionException())
}
PartitionReassignmentState(
topicDescription.partitions.get(partition).replicas.asScala.map(_.id),
targetReplicas,
true)
} catch {
case t: ExecutionException if t.getCause.isInstanceOf[UnknownTopicOrPartitionException] =>
PartitionReassignmentState(Seq(), targetReplicas, true)
}
}
/**
* Verify the replica reassignments specified by the user.
*
* @param adminClient The AdminClient to use.
* @param targetReassignments The replica reassignments specified by the user.
*
* @return A tuple of the replica states, and a boolean which is true
* if there are any ongoing replica moves.
*
* Note: Unlike in verifyPartitionAssignments, we will
* return false here even if there are unrelated ongoing
* reassignments. (We don't have an efficient API that
* returns all ongoing replica reassignments.)
*/
def verifyReplicaMoves(adminClient: Admin,
targetReassignments: Map[TopicPartitionReplica, String])
: (Map[TopicPartitionReplica, LogDirMoveState], Boolean) = {
val moveStates = findLogDirMoveStates(adminClient, targetReassignments)
println(replicaMoveStatesToString(moveStates))
(moveStates, !moveStates.values.forall(_.done))
}
/**
* Find the state of the specified partition reassignments.
*
* @param adminClient The AdminClient to use.
* @param targetMoves The movements we want to learn about. The map is keyed
* by TopicPartitionReplica, and its values are target log
* directories.
*
* @return The states for each replica movement.
*/
def findLogDirMoveStates(adminClient: Admin,
targetMoves: Map[TopicPartitionReplica, String])
: Map[TopicPartitionReplica, LogDirMoveState] = {
val replicaLogDirInfos = adminClient.describeReplicaLogDirs(
targetMoves.keySet.asJava).all().get().asScala
targetMoves.map { case (replica, targetLogDir) =>
val moveState = replicaLogDirInfos.get(replica) match {
case None => MissingReplicaMoveState(targetLogDir)
case Some(info) => if (info.getCurrentReplicaLogDir == null) {
MissingLogDirMoveState(targetLogDir)
} else if (info.getFutureReplicaLogDir == null) {
if (info.getCurrentReplicaLogDir.equals(targetLogDir)) {
CompletedMoveState(targetLogDir)
} else {
CancelledMoveState(info.getCurrentReplicaLogDir, targetLogDir)
}
} else {
ActiveMoveState(info.getCurrentReplicaLogDir(),
targetLogDir,
info.getFutureReplicaLogDir)
}
}
(replica, moveState)
}
}
/**
* Convert replica move states to a human-readable string.
*
* @param states A map from topic partition replicas to states.
* @return A tuple of a summary string, and a boolean describing
* whether there are any active replica moves.
*/
def replicaMoveStatesToString(states: Map[TopicPartitionReplica, LogDirMoveState])
: String = {
val bld = new mutable.ArrayBuffer[String]
states.keySet.toBuffer.sortWith(compareTopicPartitionReplicas).foreach { replica =>
val state = states(replica)
state match {
case MissingLogDirMoveState(_) =>
bld.append(s"Partition ${replica.topic}-${replica.partition} is not found " +
s"in any live log dir on broker ${replica.brokerId}. There is likely an " +
s"offline log directory on the broker.")
case MissingReplicaMoveState(_) =>
bld.append(s"Partition ${replica.topic}-${replica.partition} cannot be found " +
s"in any live log directory on broker ${replica.brokerId}.")
case ActiveMoveState(_, targetLogDir, futureLogDir) =>
if (targetLogDir.equals(futureLogDir)) {
bld.append(s"Reassignment of replica $replica is still in progress.")
} else {
bld.append(s"Partition ${replica.topic}-${replica.partition} on broker " +
s"${replica.brokerId} is being moved to log dir $futureLogDir " +
s"instead of $targetLogDir.")
}
case CancelledMoveState(currentLogDir, targetLogDir) =>
bld.append(s"Partition ${replica.topic}-${replica.partition} on broker " +
s"${replica.brokerId} is not being moved from log dir $currentLogDir to " +
s"$targetLogDir.")
case CompletedMoveState(_) =>
bld.append(s"Reassignment of replica $replica completed successfully.")
}
}
bld.mkString(System.lineSeparator())
}
/**
* Clear all topic-level and broker-level throttles.
*
* @param adminClient The AdminClient to use.
* @param targetParts The target partitions loaded from the JSON file.
*/
def clearAllThrottles(adminClient: Admin,
targetParts: Seq[(TopicPartition, Seq[Int])]): Unit = {
val activeBrokers = adminClient.describeCluster().nodes().get().asScala.map(_.id()).toSet
val brokers = activeBrokers ++ targetParts.flatMap(_._2).toSet
println("Clearing broker-level throttles on broker%s %s".format(
if (brokers.size == 1) "" else "s", brokers.mkString(",")))
clearBrokerLevelThrottles(adminClient, brokers)
val topics = targetParts.map(_._1.topic()).toSet
println("Clearing topic-level throttles on topic%s %s".format(
if (topics.size == 1) "" else "s", topics.mkString(",")))
clearTopicLevelThrottles(adminClient, topics)
}
/**
* Clear all throttles which have been set at the broker level.
*
* @param adminClient The AdminClient to use.
* @param brokers The brokers to clear the throttles for.
*/
def clearBrokerLevelThrottles(adminClient: Admin, brokers: Set[Int]): Unit = {
val configOps = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]()
brokers.foreach { brokerId =>
configOps.put(
new ConfigResource(ConfigResource.Type.BROKER, brokerId.toString),
brokerLevelThrottles.map(throttle => new AlterConfigOp(
new ConfigEntry(throttle, null), OpType.DELETE)).asJava)
}
adminClient.incrementalAlterConfigs(configOps).all().get()
}
/**
* Clear the reassignment throttles for the specified topics.
*
* @param adminClient The AdminClient to use.
* @param topics The topics to clear the throttles for.
*/
def clearTopicLevelThrottles(adminClient: Admin, topics: Set[String]): Unit = {
val configOps = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]()
topics.foreach {
topicName => configOps.put(
new ConfigResource(ConfigResource.Type.TOPIC, topicName),
topicLevelThrottles.map(throttle => new AlterConfigOp(new ConfigEntry(throttle, null),
OpType.DELETE)).asJava)
}
adminClient.incrementalAlterConfigs(configOps).all().get()
}
/**
* The entry point for the --generate command.
*
* @param adminClient The AdminClient to use.
* @param reassignmentJson The JSON string to use for the topics to reassign.
* @param brokerListString The comma-separated string of broker IDs to use.
* @param enableRackAwareness True if rack-awareness should be enabled.
*
* @return A tuple containing the proposed assignment and the
* current assignment.
*/
def generateAssignment(adminClient: Admin,
reassignmentJson: String,
brokerListString: String,
enableRackAwareness: Boolean)
: (Map[TopicPartition, Seq[Int]], Map[TopicPartition, Seq[Int]]) = {
val (brokersToReassign, topicsToReassign) =
parseGenerateAssignmentArgs(reassignmentJson, brokerListString)
val currentAssignments = getReplicaAssignmentForTopics(adminClient, topicsToReassign)
val brokerMetadatas = getBrokerMetadata(adminClient, brokersToReassign, enableRackAwareness)
val proposedAssignments = calculateAssignment(currentAssignments, brokerMetadatas)
println("Current partition replica assignment\n%s\n".
format(formatAsReassignmentJson(currentAssignments, Map.empty)))
println("Proposed partition reassignment configuration\n%s".
format(formatAsReassignmentJson(proposedAssignments, Map.empty)))
(proposedAssignments, currentAssignments)
}
/**
* Calculate the new partition assignments to suggest in --generate.
*
* @param currentAssignment The current partition assignments.
* @param brokerMetadatas The rack information for each broker.
*
* @return A map from partitions to the proposed assignments for each.
*/
def calculateAssignment(currentAssignment: Map[TopicPartition, Seq[Int]],
brokerMetadatas: Seq[BrokerMetadata])
: Map[TopicPartition, Seq[Int]] = {
val groupedByTopic = currentAssignment.groupBy { case (tp, _) => tp.topic }
val proposedAssignments = mutable.Map[TopicPartition, Seq[Int]]()
groupedByTopic.forKeyValue { (topic, assignment) =>
val (_, replicas) = assignment.head
val assignedReplicas = AdminUtils.
assignReplicasToBrokers(brokerMetadatas, assignment.size, replicas.size)
proposedAssignments ++= assignedReplicas.map { case (partition, replicas) =>
new TopicPartition(topic, partition) -> replicas
}
}
proposedAssignments
}
private def describeTopics(adminClient: Admin,
topics: Set[String])
: Map[String, TopicDescription] = {
adminClient.describeTopics(topics.asJava).values.asScala.map { case (topicName, topicDescriptionFuture) =>
try topicName -> topicDescriptionFuture.get
catch {
case t: ExecutionException if t.getCause.isInstanceOf[UnknownTopicOrPartitionException] =>
throw new ExecutionException(
new UnknownTopicOrPartitionException(s"Topic $topicName not found."))
}
}
}
/**
* Get the current replica assignments for some topics.
*
* @param adminClient The AdminClient to use.
* @param topics The topics to get information about.
* @return A map from partitions to broker assignments.
* If any topic can't be found, an exception will be thrown.
*/
def getReplicaAssignmentForTopics(adminClient: Admin,
topics: Seq[String])
: Map[TopicPartition, Seq[Int]] = {
describeTopics(adminClient, topics.toSet).flatMap {
case (topicName, topicDescription) => topicDescription.partitions.asScala.map { info =>
(new TopicPartition(topicName, info.partition), info.replicas.asScala.map(_.id))
}
}
}
/**
* Get the current replica assignments for some partitions.
*
* @param adminClient The AdminClient to use.
* @param partitions The partitions to get information about.
* @return A map from partitions to broker assignments.
* If any topic can't be found, an exception will be thrown.
*/
def getReplicaAssignmentForPartitions(adminClient: Admin,
partitions: Set[TopicPartition])
: Map[TopicPartition, Seq[Int]] = {
describeTopics(adminClient, partitions.map(_.topic)).flatMap {
case (topicName, topicDescription) => topicDescription.partitions.asScala.flatMap { info =>
val tp = new TopicPartition(topicName, info.partition)
if (partitions.contains(tp)) {
Some(tp, info.replicas.asScala.map(_.id))
} else {
None
}
}
}
}
/**
* Find the rack information for some brokers.
*
* @param adminClient The AdminClient object.
* @param brokers The brokers to gather metadata about.
* @param enableRackAwareness True if we should return rack information, and throw an
* exception if it is inconsistent.
*
* @return The metadata for each broker that was found.
* Brokers that were not found will be omitted.
*/
def getBrokerMetadata(adminClient: Admin,
brokers: Seq[Int],
enableRackAwareness: Boolean): Seq[BrokerMetadata] = {
val brokerSet = brokers.toSet
val results = adminClient.describeCluster().nodes.get().asScala.
filter(node => brokerSet.contains(node.id)).
map {
node => if (enableRackAwareness && node.rack != null) {
BrokerMetadata(node.id, Some(node.rack))
} else {
BrokerMetadata(node.id, None)
}
}.toSeq
val numRackless = results.count(_.rack.isEmpty)
if (enableRackAwareness && numRackless != 0 && numRackless != results.size) {
throw new AdminOperationException("Not all brokers have rack information. Add " +
"--disable-rack-aware in command line to make replica assignment without rack " +
"information.")
}
results
}
/**
* Parse and validate data gathered from the command-line for --generate
* In particular, we parse the JSON and validate that duplicate brokers and
* topics don't appear.
*
* @param reassignmentJson The JSON passed to --generate .
* @param brokerList A list of brokers passed to --generate.
*
* @return A tuple of brokers to reassign, topics to reassign
*/
def parseGenerateAssignmentArgs(reassignmentJson: String,
brokerList: String): (Seq[Int], Seq[String]) = {
val brokerListToReassign = brokerList.split(',').map(_.toInt)
val duplicateReassignments = CoreUtils.duplicates(brokerListToReassign)
if (duplicateReassignments.nonEmpty)
throw new AdminCommandFailedException("Broker list contains duplicate entries: %s".
format(duplicateReassignments.mkString(",")))
val topicsToReassign = parseTopicsData(reassignmentJson)
val duplicateTopicsToReassign = CoreUtils.duplicates(topicsToReassign)
if (duplicateTopicsToReassign.nonEmpty)
throw new AdminCommandFailedException("List of topics to reassign contains duplicate entries: %s".
format(duplicateTopicsToReassign.mkString(",")))
(brokerListToReassign, topicsToReassign)
}
/**
* The entry point for the --execute and --execute-additional commands.
*
* @param adminClient The AdminClient to use.
* @param additional Whether --additional was passed.
* @param reassignmentJson The JSON string to use for the topics to reassign.
* @param interBrokerThrottle The inter-broker throttle to use, or a negative
* number to skip using a throttle.
* @param logDirThrottle The replica log directory throttle to use, or a
* negative number to skip using a throttle.
* @param timeoutMs The maximum time in ms to wait for log directory
* replica assignment to begin.
* @param time The Time object to use.
*/
def executeAssignment(adminClient: Admin,
additional: Boolean,
reassignmentJson: String,
interBrokerThrottle: Long = -1L,
logDirThrottle: Long = -1L,
timeoutMs: Long = 10000L,
time: Time = Time.SYSTEM): Unit = {
val (proposedParts, proposedReplicas) = parseExecuteAssignmentArgs(reassignmentJson)
val currentReassignments = adminClient.
listPartitionReassignments().reassignments().get().asScala
// If there is an existing assignment, check for --additional before proceeding.
// This helps avoid surprising users.
if (!additional && currentReassignments.nonEmpty) {
throw new TerseReassignmentFailureException(cannotExecuteBecauseOfExistingMessage)
}
verifyBrokerIds(adminClient, proposedParts.values.flatten.toSet)
val currentParts = getReplicaAssignmentForPartitions(adminClient, proposedParts.keySet.toSet)
println(currentPartitionReplicaAssignmentToString(proposedParts, currentParts))
if (interBrokerThrottle >= 0 || logDirThrottle >= 0) {
println(youMustRunVerifyPeriodicallyMessage)
if (interBrokerThrottle >= 0) {
val moveMap = calculateProposedMoveMap(currentReassignments, proposedParts, currentParts)
modifyReassignmentThrottle(adminClient, moveMap, interBrokerThrottle)
}
if (logDirThrottle >= 0) {
val movingBrokers = calculateMovingBrokers(proposedReplicas.keySet.toSet)
modifyLogDirThrottle(adminClient, movingBrokers, logDirThrottle)
}
}
// Execute the partition reassignments.
val errors = alterPartitionReassignments(adminClient, proposedParts)
if (errors.nonEmpty) {
throw new TerseReassignmentFailureException(
"Error reassigning partition(s):%n%s".format(
errors.keySet.toBuffer.sortWith(compareTopicPartitions).map { part =>
s"$part: ${errors(part).getMessage}"
}.mkString(System.lineSeparator())))
}
println("Successfully started partition reassignment%s for %s".format(
if (proposedParts.size == 1) "" else "s",
proposedParts.keySet.toBuffer.sortWith(compareTopicPartitions).mkString(",")))
if (proposedReplicas.nonEmpty) {
executeMoves(adminClient, proposedReplicas, timeoutMs, time)
}
}
/**
* Execute some partition log directory movements.
*
* @param adminClient The AdminClient to use.
* @param proposedReplicas A map from TopicPartitionReplicas to the
* directories to move them to.
* @param timeoutMs The maximum time in ms to wait for log directory
* replica assignment to begin.
* @param time The Time object to use.
*/
def executeMoves(adminClient: Admin,
proposedReplicas: Map[TopicPartitionReplica, String],
timeoutMs: Long,
time: Time): Unit = {
val startTimeMs = time.milliseconds()
val pendingReplicas = new mutable.HashMap[TopicPartitionReplica, String]()
pendingReplicas ++= proposedReplicas
var done = false
do {
val completed = alterReplicaLogDirs(adminClient, pendingReplicas)
if (completed.nonEmpty) {
println("Successfully started log directory move%s for: %s".format(
if (completed.size == 1) "" else "s",
completed.toBuffer.sortWith(compareTopicPartitionReplicas).mkString(",")))
}
pendingReplicas --= completed
if (pendingReplicas.isEmpty) {
done = true
} else if (time.milliseconds() >= startTimeMs + timeoutMs) {
throw new TerseReassignmentFailureException(
"Timed out before log directory move%s could be started for: %s".format(
if (pendingReplicas.size == 1) "" else "s",
pendingReplicas.keySet.toBuffer.sortWith(compareTopicPartitionReplicas).
mkString(",")))
} else {
// If a replica has been moved to a new host and we also specified a particular
// log directory, we will have to keep retrying the alterReplicaLogDirs
// call. It can't take effect until the replica is moved to that host.
time.sleep(100)
}
} while (!done)
}
/**
* Entry point for the --list command.
*
* @param adminClient The AdminClient to use.
*/
def listReassignments(adminClient: Admin): Unit = {
println(curReassignmentsToString(adminClient))
}
/**
* Convert the current partition reassignments to text.
*
* @param adminClient The AdminClient to use.
* @return A string describing the current partition reassignments.
*/
def curReassignmentsToString(adminClient: Admin): String = {
val currentReassignments = adminClient.
listPartitionReassignments().reassignments().get().asScala
val text = currentReassignments.keySet.toBuffer.sortWith(compareTopicPartitions).map { part =>
val reassignment = currentReassignments(part)
val replicas = reassignment.replicas.asScala
val addingReplicas = reassignment.addingReplicas.asScala
val removingReplicas = reassignment.removingReplicas.asScala
"%s: replicas: %s.%s%s".format(part, replicas.mkString(","),
if (addingReplicas.isEmpty) "" else
" adding: %s.".format(addingReplicas.mkString(",")),
if (removingReplicas.isEmpty) "" else
" removing: %s.".format(removingReplicas.mkString(",")))
}.mkString(System.lineSeparator())
if (text.isEmpty) {
"No partition reassignments found."
} else {
"Current partition reassignments:%n%s".format(text)
}
}
/**
* Verify that all the brokers in an assignment exist.
*
* @param adminClient The AdminClient to use.
* @param brokers The broker IDs to verify.
*/
def verifyBrokerIds(adminClient: Admin, brokers: Set[Int]): Unit = {
val allNodeIds = adminClient.describeCluster().nodes().get().asScala.map(_.id).toSet
brokers.find(!allNodeIds.contains(_)).map {
id => throw new AdminCommandFailedException(s"Unknown broker id ${id}")
}
}
/**
* Return the string which we want to print to describe the current partition assignment.
*
* @param proposedParts The proposed partition assignment.
* @param currentParts The current partition assignment.
*
* @return The string to print. We will only print information about
* partitions that appear in the proposed partition assignment.
*/
def currentPartitionReplicaAssignmentToString(proposedParts: Map[TopicPartition, Seq[Int]],
currentParts: Map[TopicPartition, Seq[Int]]): String = {
"Current partition replica assignment%n%n%s%n%nSave this to use as the %s".
format(formatAsReassignmentJson(currentParts.filter { case (k, _) => proposedParts.contains(k) }.toMap, Map.empty),
"--reassignment-json-file option during rollback")
}
/**
* Execute the given partition reassignments.
*
* @param adminClient The admin client object to use.
* @param reassignments A map from topic names to target replica assignments.
* @return A map from partition objects to error strings.
*/
def alterPartitionReassignments(adminClient: Admin,
reassignments: Map[TopicPartition, Seq[Int]]): Map[TopicPartition, Throwable] = {
val results = adminClient.alterPartitionReassignments(reassignments.map { case (part, replicas) =>
(part, Optional.of(new NewPartitionReassignment(replicas.map(Integer.valueOf).asJava)))
}.asJava).values().asScala
results.flatMap {
case (part, future) => {
try {
future.get()
None
} catch {
case t: ExecutionException => Some(part, t.getCause())
}
}
}
}
/**
* Cancel the given partition reassignments.
*
* @param adminClient The admin client object to use.
* @param reassignments The partition reassignments to cancel.
* @return A map from partition objects to error strings.
*/
def cancelPartitionReassignments(adminClient: Admin,
reassignments: Set[TopicPartition])
: Map[TopicPartition, Throwable] = {
val results = adminClient.alterPartitionReassignments(reassignments.map {
(_, Optional.empty[NewPartitionReassignment]())
}.toMap.asJava).values().asScala
results.flatMap { case (part, future) =>
try {
future.get()
None
} catch {
case t: ExecutionException => Some(part, t.getCause())
}
}
}
/**
* Compute the in progress partition move from the current reassignments.
* @param currentReassignments All replicas, adding replicas and removing replicas of target partitions
*/
private def calculateCurrentMoveMap(currentReassignments: Map[TopicPartition, PartitionReassignment]): MoveMap = {
val moveMap = new mutable.HashMap[String, mutable.Map[Int, PartitionMove]]()
// Add the current reassignments to the move map.
currentReassignments.forKeyValue { (part, reassignment) =>
val allReplicas = reassignment.replicas().asScala.map(Int.unbox)
val addingReplicas = reassignment.addingReplicas.asScala.map(Int.unbox)
// The addingReplicas is included in the replicas during reassignment
val sources = mutable.Set[Int]() ++ allReplicas.diff(addingReplicas)
val destinations = mutable.Set[Int]() ++ addingReplicas
val partMoves = moveMap.getOrElseUpdate(part.topic, new mutable.HashMap[Int, PartitionMove])
partMoves.put(part.partition, PartitionMove(sources, destinations))
}
moveMap
}
/**
* Calculate the global map of all partitions that are moving.
*
* @param currentReassignments The currently active reassignments.
* @param proposedParts The proposed location of the partitions (destinations replicas only).
* @param currentParts The current location of the partitions that we are
* proposing to move.
* @return A map from topic name to partition map.
* The partition map is keyed on partition index and contains
* the movements for that partition.
*/
def calculateProposedMoveMap(currentReassignments: Map[TopicPartition, PartitionReassignment],
proposedParts: Map[TopicPartition, Seq[Int]],
currentParts: Map[TopicPartition, Seq[Int]]): MoveMap = {
val moveMap = calculateCurrentMoveMap(currentReassignments)
proposedParts.forKeyValue { (part, replicas) =>
val partMoves = moveMap.getOrElseUpdate(part.topic, new mutable.HashMap[Int, PartitionMove])
// If there is a reassignment in progress, use the sources from moveMap, otherwise
// use the sources from currentParts
val sources = mutable.Set[Int]() ++ (partMoves.get(part.partition) match {
case Some(move) => move.sources.toSeq
case None => currentParts.getOrElse(part,
throw new RuntimeException(s"Trying to reassign a topic partition $part with 0 replicas"))
})
val destinations = mutable.Set[Int]() ++ replicas.diff(sources.toSeq)
partMoves.put(part.partition,
PartitionMove(sources, destinations))
}
moveMap
}
/**
* Calculate the leader throttle configurations to use.
*
* @param moveMap The movements.
* @return A map from topic names to leader throttle configurations.
*/
def calculateLeaderThrottles(moveMap: MoveMap): Map[String, String] = {
moveMap.map {
case (topicName, partMoveMap) => {
val components = new mutable.TreeSet[String]
partMoveMap.forKeyValue { (partId, move) =>
move.sources.foreach(source => components.add("%d:%d".format(partId, source)))
}
(topicName, components.mkString(","))
}
}
}
/**
* Calculate the follower throttle configurations to use.
*
* @param moveMap The movements.
* @return A map from topic names to follower throttle configurations.
*/
def calculateFollowerThrottles(moveMap: MoveMap): Map[String, String] = {
moveMap.map {
case (topicName, partMoveMap) => {
val components = new mutable.TreeSet[String]
partMoveMap.forKeyValue { (partId, move) =>
move.destinations.foreach(destination =>
if (!move.sources.contains(destination)) {
components.add("%d:%d".format(partId, destination))
})
}
(topicName, components.mkString(","))
}
}
}
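// Illustrative example (not part of the original source): if moveMap("topic-a")(0) has
// sources {1, 2} and destinations {3}, the resulting configs for "topic-a" are a leader
// throttle of "0:1,0:2" and a follower throttle of "0:3", i.e. comma-separated
// partitionId:brokerId pairs.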
/**
* Calculate all the brokers which are involved in the given partition reassignments.
*
* @param moveMap The partition movements.
* @return A set of all the brokers involved.
*/
def calculateReassigningBrokers(moveMap: MoveMap): Set[Int] = {
val reassigningBrokers = new mutable.TreeSet[Int]
moveMap.values.foreach {
_.values.foreach {
partMove =>
partMove.sources.foreach(reassigningBrokers.add)
partMove.destinations.foreach(reassigningBrokers.add)
}
}
reassigningBrokers.toSet
}
/**
* Calculate all the brokers which are involved in the given directory movements.
*
* @param replicaMoves The replica movements.
* @return A set of all the brokers involved.
*/
def calculateMovingBrokers(replicaMoves: Set[TopicPartitionReplica]): Set[Int] = {
replicaMoves.map(_.brokerId())
}
/**
* Modify the topic configurations that control inter-broker throttling.
*
* @param adminClient The adminClient object to use.
* @param leaderThrottles A map from topic names to leader throttle configurations.
* @param followerThrottles A map from topic names to follower throttle configurations.
*/
def modifyTopicThrottles(adminClient: Admin,
leaderThrottles: Map[String, String],
followerThrottles: Map[String, String]): Unit = {
val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]()
val topicNames = leaderThrottles.keySet ++ followerThrottles.keySet
topicNames.foreach { topicName =>
val ops = new util.ArrayList[AlterConfigOp]
leaderThrottles.get(topicName).foreach { value =>
ops.add(new AlterConfigOp(new ConfigEntry(topicLevelLeaderThrottle, value), OpType.SET))
}
followerThrottles.get(topicName).foreach { value =>
ops.add(new AlterConfigOp(new ConfigEntry(topicLevelFollowerThrottle, value), OpType.SET))
}
if (!ops.isEmpty) {
configs.put(new ConfigResource(ConfigResource.Type.TOPIC, topicName), ops)
}
}
adminClient.incrementalAlterConfigs(configs).all().get()
}
private def modifyReassignmentThrottle(admin: Admin, moveMap: MoveMap, interBrokerThrottle: Long): Unit = {
val leaderThrottles = calculateLeaderThrottles(moveMap)
val followerThrottles = calculateFollowerThrottles(moveMap)
modifyTopicThrottles(admin, leaderThrottles, followerThrottles)
val reassigningBrokers = calculateReassigningBrokers(moveMap)
modifyInterBrokerThrottle(admin, reassigningBrokers, interBrokerThrottle)
}
/**
* Modify the leader/follower replication throttles for a set of brokers.
*
* @param adminClient The Admin instance to use
* @param reassigningBrokers The set of brokers involved in the reassignment
* @param interBrokerThrottle The new throttle (ignored if less than 0)
*/
def modifyInterBrokerThrottle(adminClient: Admin,
reassigningBrokers: Set[Int],
interBrokerThrottle: Long): Unit = {
if (interBrokerThrottle >= 0) {
val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]()
reassigningBrokers.foreach { brokerId =>
val ops = new util.ArrayList[AlterConfigOp]
ops.add(new AlterConfigOp(new ConfigEntry(brokerLevelLeaderThrottle,
interBrokerThrottle.toString), OpType.SET))
ops.add(new AlterConfigOp(new ConfigEntry(brokerLevelFollowerThrottle,
interBrokerThrottle.toString), OpType.SET))
configs.put(new ConfigResource(ConfigResource.Type.BROKER, brokerId.toString), ops)
}
adminClient.incrementalAlterConfigs(configs).all().get()
println(s"The inter-broker throttle limit was set to $interBrokerThrottle B/s")
}
}
/**
* Modify the log dir reassignment throttle for a set of brokers.
*
* @param admin The Admin instance to use
* @param movingBrokers The set of brokers to alter the throttle of
* @param logDirThrottle The new throttle (ignored if less than 0)
*/
def modifyLogDirThrottle(admin: Admin,
movingBrokers: Set[Int],
logDirThrottle: Long): Unit = {
if (logDirThrottle >= 0) {
val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]()
movingBrokers.foreach { brokerId =>
val ops = new util.ArrayList[AlterConfigOp]
ops.add(new AlterConfigOp(new ConfigEntry(brokerLevelLogDirThrottle, logDirThrottle.toString), OpType.SET))
configs.put(new ConfigResource(ConfigResource.Type.BROKER, brokerId.toString), ops)
}
admin.incrementalAlterConfigs(configs).all().get()
println(s"The replica-alter-dir throttle limit was set to $logDirThrottle B/s")
}
}
/**
* Parse the reassignment JSON string passed to the --execute command.
*
* @param reassignmentJson The JSON string.
* @return A tuple of the partitions to be reassigned and the replicas
* to be reassigned.
*/
def parseExecuteAssignmentArgs(reassignmentJson: String)
: (Map[TopicPartition, Seq[Int]], Map[TopicPartitionReplica, String]) = {
val (partitionsToBeReassigned, replicaAssignment) = parsePartitionReassignmentData(reassignmentJson)
if (partitionsToBeReassigned.isEmpty)
throw new AdminCommandFailedException("Partition reassignment list cannot be empty")
if (partitionsToBeReassigned.exists(_._2.isEmpty)) {
throw new AdminCommandFailedException("Partition replica list cannot be empty")
}
val duplicateReassignedPartitions = CoreUtils.duplicates(partitionsToBeReassigned.map { case (tp, _) => tp })
if (duplicateReassignedPartitions.nonEmpty)
throw new AdminCommandFailedException("Partition reassignment contains duplicate topic partitions: %s".format(duplicateReassignedPartitions.mkString(",")))
val duplicateEntries = partitionsToBeReassigned
.map { case (tp, replicas) => (tp, CoreUtils.duplicates(replicas))}
.filter { case (_, duplicatedReplicas) => duplicatedReplicas.nonEmpty }
if (duplicateEntries.nonEmpty) {
val duplicatesMsg = duplicateEntries
.map { case (tp, duplicateReplicas) => "%s contains multiple entries for %s".format(tp, duplicateReplicas.mkString(",")) }
.mkString(". ")
throw new AdminCommandFailedException("Partition replica lists may not contain duplicate entries: %s".format(duplicatesMsg))
}
(partitionsToBeReassigned.toMap, replicaAssignment)
}
/**
* The entry point for the --cancel command.
*
* @param adminClient The AdminClient to use.
* @param jsonString The JSON string to use for the topics and partitions to cancel.
* @param preserveThrottles True if we should avoid changing topic or broker throttles.
* @param timeoutMs The maximum time in ms to wait for log directory
* replica assignment to begin.
* @param time The Time object to use.
*
* @return A tuple of the partition reassignments that were cancelled,
* and the replica movements that were cancelled.
*/
def cancelAssignment(adminClient: Admin,
jsonString: String,
preserveThrottles: Boolean,
timeoutMs: Long = 10000L,
time: Time = Time.SYSTEM)
: (Set[TopicPartition], Set[TopicPartitionReplica]) = {
val (targetParts, targetReplicas) = parsePartitionReassignmentData(jsonString)
val targetPartsSet = targetParts.map(_._1).toSet
val curReassigningParts = adminClient.listPartitionReassignments(targetPartsSet.asJava).
reassignments().get().asScala.flatMap {
case (part, reassignment) => if (!reassignment.addingReplicas().isEmpty ||
!reassignment.removingReplicas().isEmpty) {
Some(part)
} else {
None
}
}.toSet
if (curReassigningParts.nonEmpty) {
val errors = cancelPartitionReassignments(adminClient, curReassigningParts)
if (errors.nonEmpty) {
throw new TerseReassignmentFailureException(
"Error cancelling partition reassignment%s for:%n%s".format(
if (errors.size == 1) "" else "s",
errors.keySet.toBuffer.sortWith(compareTopicPartitions).map {
part => s"${part}: ${errors(part).getMessage}"
}.mkString(System.lineSeparator())))
}
println("Successfully cancelled partition reassignment%s for: %s".format(
if (curReassigningParts.size == 1) "" else "s",
s"${curReassigningParts.toBuffer.sortWith(compareTopicPartitions).mkString(",")}"))
} else {
println("None of the specified partition reassignments are active.")
}
val curMovingParts = findLogDirMoveStates(adminClient, targetReplicas).flatMap {
case (part, moveState) => moveState match {
case state: ActiveMoveState => Some(part, state.currentLogDir)
case _ => None
}
}.toMap
if (curMovingParts.isEmpty) {
println("None of the specified partition moves are active.")
} else {
executeMoves(adminClient, curMovingParts, timeoutMs, time)
}
if (!preserveThrottles) {
clearAllThrottles(adminClient, targetParts)
}
(curReassigningParts, curMovingParts.keySet)
}
def formatAsReassignmentJson(partitionsToBeReassigned: Map[TopicPartition, Seq[Int]],
replicaLogDirAssignment: Map[TopicPartitionReplica, String]): String = {
Json.encodeAsString(Map(
"version" -> 1,
"partitions" -> partitionsToBeReassigned.keySet.toBuffer.sortWith(compareTopicPartitions).map {
tp =>
val replicas = partitionsToBeReassigned(tp)
Map(
"topic" -> tp.topic,
"partition" -> tp.partition,
"replicas" -> replicas.asJava,
"log_dirs" -> replicas.map(r => replicaLogDirAssignment.getOrElse(new TopicPartitionReplica(tp.topic, tp.partition, r), AnyLogDir)).asJava
).asJava
}.asJava
).asJava)
}
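// Illustrative output for a single partition with three replicas and no explicit log dirs
// (matching the format documented for the --reassignment-json-file option below):
//   {"version":1,"partitions":[{"topic":"foo","partition":1,"replicas":[1,2,3],"log_dirs":["any","any","any"]}]}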
def parseTopicsData(jsonData: String): Seq[String] = {
Json.parseFull(jsonData) match {
case Some(js) =>
val version = js.asJsonObject.get("version") match {
case Some(jsonValue) => jsonValue.to[Int]
case None => EarliestTopicsJsonVersion
}
parseTopicsData(version, js)
case None => throw new AdminOperationException("The input string is not a valid JSON")
}
}
def parseTopicsData(version: Int, js: JsonValue): Seq[String] = {
version match {
case 1 =>
for {
partitionsSeq <- js.asJsonObject.get("topics").toSeq
p <- partitionsSeq.asJsonArray.iterator
} yield p.asJsonObject("topic").to[String]
case _ => throw new AdminOperationException(s"Not supported version field value $version")
}
}
def parsePartitionReassignmentData(jsonData: String): (Seq[(TopicPartition, Seq[Int])], Map[TopicPartitionReplica, String]) = {
Json.tryParseFull(jsonData) match {
case Right(js) =>
val version = js.asJsonObject.get("version") match {
case Some(jsonValue) => jsonValue.to[Int]
case None => EarliestVersion
}
parsePartitionReassignmentData(version, js)
case Left(f) =>
throw new AdminOperationException(f)
}
}
// Parses without deduplicating keys so the data can be checked before allowing reassignment to proceed
def parsePartitionReassignmentData(version:Int, jsonData: JsonValue): (Seq[(TopicPartition, Seq[Int])], Map[TopicPartitionReplica, String]) = {
version match {
case 1 =>
val partitionAssignment = mutable.ListBuffer.empty[(TopicPartition, Seq[Int])]
val replicaAssignment = mutable.Map.empty[TopicPartitionReplica, String]
for {
partitionsSeq <- jsonData.asJsonObject.get("partitions").toSeq
p <- partitionsSeq.asJsonArray.iterator
} {
val partitionFields = p.asJsonObject
val topic = partitionFields("topic").to[String]
val partition = partitionFields("partition").to[Int]
val newReplicas = partitionFields("replicas").to[Seq[Int]]
val newLogDirs = partitionFields.get("log_dirs") match {
case Some(jsonValue) => jsonValue.to[Seq[String]]
case None => newReplicas.map(_ => AnyLogDir)
}
if (newReplicas.size != newLogDirs.size)
throw new AdminCommandFailedException(s"Size of replicas list $newReplicas is different from " +
s"size of log dirs list $newLogDirs for partition ${new TopicPartition(topic, partition)}")
partitionAssignment += (new TopicPartition(topic, partition) -> newReplicas)
replicaAssignment ++= newReplicas.zip(newLogDirs).map { case (replica, logDir) =>
new TopicPartitionReplica(topic, partition, replica) -> logDir
}.filter(_._2 != AnyLogDir)
}
(partitionAssignment, replicaAssignment)
case _ => throw new AdminOperationException(s"Not supported version field value $version")
}
}
def validateAndParseArgs(args: Array[String]): ReassignPartitionsCommandOptions = {
val opts = new ReassignPartitionsCommandOptions(args)
CommandLineUtils.printHelpAndExitIfNeeded(opts, helpText)
// Determine which action we should perform.
val validActions = Seq(opts.generateOpt, opts.executeOpt, opts.verifyOpt,
opts.cancelOpt, opts.listOpt)
val allActions = validActions.filter(opts.options.has _)
if (allActions.size != 1) {
CommandLineUtils.printUsageAndDie(opts.parser, "Command must include exactly one action: %s".format(
validActions.map("--" + _.options().get(0)).mkString(", ")))
}
val action = allActions(0)
if (!opts.options.has(opts.bootstrapServerOpt))
CommandLineUtils.printUsageAndDie(opts.parser, "Please specify --bootstrap-server")
// Make sure that we have all the required arguments for our action.
val requiredArgs = Map(
opts.verifyOpt -> collection.immutable.Seq(
opts.reassignmentJsonFileOpt
),
opts.generateOpt -> collection.immutable.Seq(
opts.topicsToMoveJsonFileOpt,
opts.brokerListOpt
),
opts.executeOpt -> collection.immutable.Seq(
opts.reassignmentJsonFileOpt
),
opts.cancelOpt -> collection.immutable.Seq(
opts.reassignmentJsonFileOpt
),
opts.listOpt -> collection.immutable.Seq.empty
)
CommandLineUtils.checkRequiredArgs(opts.parser, opts.options, requiredArgs(action): _*)
// Make sure that we didn't specify any arguments that are incompatible with our chosen action.
val permittedArgs = Map(
opts.verifyOpt -> Seq(
opts.bootstrapServerOpt,
opts.commandConfigOpt,
opts.preserveThrottlesOpt,
),
opts.generateOpt -> Seq(
opts.bootstrapServerOpt,
opts.brokerListOpt,
opts.commandConfigOpt,
opts.disableRackAware,
),
opts.executeOpt -> Seq(
opts.additionalOpt,
opts.bootstrapServerOpt,
opts.commandConfigOpt,
opts.interBrokerThrottleOpt,
opts.replicaAlterLogDirsThrottleOpt,
opts.timeoutOpt,
),
opts.cancelOpt -> Seq(
opts.bootstrapServerOpt,
opts.commandConfigOpt,
opts.preserveThrottlesOpt,
opts.timeoutOpt
),
opts.listOpt -> Seq(
opts.bootstrapServerOpt,
opts.commandConfigOpt
)
)
opts.options.specs.forEach(opt => {
if (!opt.equals(action) &&
!requiredArgs(action).contains(opt) &&
!permittedArgs(action).contains(opt)) {
CommandLineUtils.printUsageAndDie(opts.parser,
"""Option "%s" can't be used with action "%s"""".format(opt, action))
}
})
opts
}
def alterReplicaLogDirs(adminClient: Admin,
assignment: Map[TopicPartitionReplica, String])
: Set[TopicPartitionReplica] = {
adminClient.alterReplicaLogDirs(assignment.asJava).values().asScala.flatMap {
case (replica, future) => {
try {
future.get()
Some(replica)
} catch {
case t: ExecutionException =>
t.getCause match {
// Ignore ReplicaNotAvailableException. It is OK if the replica is not
// available at this moment.
case _: ReplicaNotAvailableException => None
case e: Throwable =>
throw new AdminCommandFailedException(s"Failed to alter dir for $replica", e)
}
}
}
}.toSet
}
sealed class ReassignPartitionsCommandOptions(args: Array[String]) extends CommandDefaultOptions(args) {
// Actions
val verifyOpt = parser.accepts("verify", "Verify if the reassignment completed as specified by the " +
"--reassignment-json-file option. If there is a throttle engaged for the replicas specified, and the rebalance has completed, the throttle will be removed")
val generateOpt = parser.accepts("generate", "Generate a candidate partition reassignment configuration." +
" Note that this only generates a candidate assignment, it does not execute it.")
val executeOpt = parser.accepts("execute", "Kick off the reassignment as specified by the --reassignment-json-file option.")
val cancelOpt = parser.accepts("cancel", "Cancel an active reassignment.")
val listOpt = parser.accepts("list", "List all active partition reassignments.")
// Arguments
val bootstrapServerOpt = parser.accepts("bootstrap-server", "REQUIRED: the server(s) to use for bootstrapping.")
.withRequiredArg
.describedAs("Server(s) to use for bootstrapping")
.ofType(classOf[String])
val commandConfigOpt = parser.accepts("command-config", "Property file containing configs to be passed to Admin Client.")
.withRequiredArg
.describedAs("Admin client property file")
.ofType(classOf[String])
val reassignmentJsonFileOpt = parser.accepts("reassignment-json-file", "The JSON file with the partition reassignment configuration" +
"The format to use is - \n" +
"{\"partitions\":\n\t[{\"topic\": \"foo\",\n\t \"partition\": 1,\n\t \"replicas\": [1,2,3],\n\t \"log_dirs\": [\"dir1\",\"dir2\",\"dir3\"] }],\n\"version\":1\n}\n" +
"Note that \"log_dirs\" is optional. When it is specified, its length must equal the length of the replicas list. The value in this list " +
"can be either \"any\" or the absolution path of the log directory on the broker. If absolute log directory path is specified, the replica will be moved to the specified log directory on the broker.")
.withRequiredArg
.describedAs("manual assignment json file path")
.ofType(classOf[String])
val topicsToMoveJsonFileOpt = parser.accepts("topics-to-move-json-file", "Generate a reassignment configuration to move the partitions" +
" of the specified topics to the list of brokers specified by the --broker-list option. The format to use is - \n" +
"{\"topics\":\n\t[{\"topic\": \"foo\"},{\"topic\": \"foo1\"}],\n\"version\":1\n}")
.withRequiredArg
.describedAs("topics to reassign json file path")
.ofType(classOf[String])
val brokerListOpt = parser.accepts("broker-list", "The list of brokers to which the partitions need to be reassigned" +
" in the form \"0,1,2\". This is required if --topics-to-move-json-file is used to generate reassignment configuration")
.withRequiredArg
.describedAs("brokerlist")
.ofType(classOf[String])
val disableRackAware = parser.accepts("disable-rack-aware", "Disable rack aware replica assignment")
val interBrokerThrottleOpt = parser.accepts("throttle", "The movement of partitions between brokers will be throttled to this value (bytes/sec). " +
"This option can be included with --execute when a reassignment is started, and it can be altered by resubmitting the current reassignment " +
"along with the --additional flag. The throttle rate should be at least 1 KB/s.")
.withRequiredArg()
.describedAs("throttle")
.ofType(classOf[Long])
.defaultsTo(-1)
val replicaAlterLogDirsThrottleOpt = parser.accepts("replica-alter-log-dirs-throttle",
"The movement of replicas between log directories on the same broker will be throttled to this value (bytes/sec). " +
"This option can be included with --execute when a reassignment is started, and it can be altered by resubmitting the current reassignment " +
"along with the --additional flag. The throttle rate should be at least 1 KB/s.")
.withRequiredArg()
.describedAs("replicaAlterLogDirsThrottle")
.ofType(classOf[Long])
.defaultsTo(-1)
val timeoutOpt = parser.accepts("timeout", "The maximum time in ms to wait for log directory replica assignment to begin.")
.withRequiredArg()
.describedAs("timeout")
.ofType(classOf[Long])
.defaultsTo(10000)
val additionalOpt = parser.accepts("additional", "Execute this reassignment in addition to any " +
"other ongoing ones. This option can also be used to change the throttle of an ongoing reassignment.")
val preserveThrottlesOpt = parser.accepts("preserve-throttles", "Do not modify broker or topic throttles.")
options = parser.parse(args : _*)
}
}
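For reference, here is a minimal reassignment file matching the format documented by the --reassignment-json-file help text above; the topic name, partition, replica ids and "any" log dirs are illustrative values, not taken from a real cluster.
// A sketch of the documented JSON layout, kept as a Scala string for readability.
val exampleReassignmentJson: String =
  """{
    |  "version": 1,
    |  "partitions": [
    |    {"topic": "foo", "partition": 0, "replicas": [1, 2, 3], "log_dirs": ["any", "any", "any"]}
    |  ]
    |}""".stripMargin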
|
lindong28/kafka
|
core/src/main/scala/kafka/admin/ReassignPartitionsCommand.scala
|
Scala
|
apache-2.0
| 67,974
|
package de.htwg.zeta.common.format.project.gdsl.shape.geoModel
import de.htwg.zeta.common.models.project.gdsl.shape.geomodel.Position
import de.htwg.zeta.common.models.project.gdsl.shape.geomodel.Rectangle
import de.htwg.zeta.common.models.project.gdsl.shape.geomodel.Size
import de.htwg.zeta.common.models.project.gdsl.style.Style
import play.api.libs.json.JsSuccess
import play.api.libs.json.Json
import org.scalatest.freespec.AnyFreeSpec
import org.scalatest.matchers.should.Matchers
//noinspection ScalaStyle
class RectangleFormatTest extends AnyFreeSpec with Matchers {
"A RectangleFormat should" - {
"write an object" in {
val result = RectangleFormat(GeoModelFormat.geoModelFormatProvider)
.writes(Rectangle(
size = Size.default,
position = Position.default,
childGeoModels = List(),
style = Style.defaultStyle))
result.toString() shouldBe
"""{"type":"rectangle","size":{"width":1,"height":1},"position":{"x":0,"y":0},"childGeoElements":[],"style":{"name":"default","description":"default","background":{"color":{"r":0,"g":0,"b":0,"a":1,"rgb":"rgb(0,0,0)","rgba":"rgba(0,0,0,1.0)","hex":"#000000"}},"font":{"name":"Arial","bold":false,"color":{"r":0,"g":0,"b":0,"a":1,"rgb":"rgb(0,0,0)","rgba":"rgba(0,0,0,1.0)","hex":"#000000"},"italic":false,"size":10},"line":{"color":{"r":0,"g":0,"b":0,"a":1,"rgb":"rgb(0,0,0)","rgba":"rgba(0,0,0,1.0)","hex":"#000000"},"style":"solid","width":1},"transparency":1}}"""
}
"read an object" in {
val result = RectangleFormat(GeoModelFormat.geoModelFormatProvider)
.reads(Json.parse(
"""{"type":"rectangle",
|"size":{"width":1,"height":1},
|"position":{"x":0,"y":0},
|"childGeoElements":[],
|"style":{
| "name":"default",
| "description":"default",
| "background":{"color":"rgba(0,0,0,1.0)"},
| "font":{"name":"Arial","bold":false,"color":"rgba(0,0,0,1.0)","italic":false,"size":10},
| "line":{"color":"rgba(0,0,0,1.0)","style":"solid","width":1},
| "transparency":1}
|}""".stripMargin
))
result shouldBe JsSuccess(Rectangle(
size = Size.default,
position = Position.default,
childGeoModels = List(),
style = Style.defaultStyle))
}
"fail in reading an invalid input" in {
val result = RectangleFormat(GeoModelFormat.geoModelFormatProvider)
.reads(Json.parse(
"""{"invalid":{"r":23}}"""
))
result.isSuccess shouldBe false
}
}
}
|
Zeta-Project/zeta
|
api/common/src/test/scala/de/htwg/zeta/common/format/project/gdsl/shape/geoModel/RectangleFormatTest.scala
|
Scala
|
bsd-2-clause
| 2,602
|
package jmh
import org.openjdk.jmh.annotations._
import scala.offheap._
@State(Scope.Thread)
class PoolContention {
implicit val props = Region.Props(Pool())
@Benchmark
def contention = {
val r = Region.open
r.close
}
}
|
adamwy/scala-offheap
|
jmh/src/main/scala/Pool.scala
|
Scala
|
bsd-3-clause
| 239
|
package io.findify.s3mock.response
/**
* Created by shutty on 3/13/17.
*/
case class DeleteObjectsResponse(deleted: Seq[String], error: Seq[String]) {
def toXML = {
<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{ deleted.map(d => <Deleted><Key>{d}</Key></Deleted>) }
{ if (error.nonEmpty) {
<Error>
{ error.map(e => {
<Key>{e}</Key>
<Code>InternalError</Code>
<Message>Cannot delete</Message>
})}
</Error>
}}
</DeleteResult>
}
}
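A short usage sketch of the response above; the object keys are illustrative and the rendered XML is summarised rather than reproduced exactly.
// Builds the S3-style DeleteResult payload defined above.
val xml = DeleteObjectsResponse(deleted = Seq("a.txt"), error = Seq("b.txt")).toXML
// xml contains one <Deleted><Key>a.txt</Key></Deleted> entry and, because the error list
// is non-empty, an <Error> block with <Key>b.txt</Key>, an InternalError code and a
// "Cannot delete" message.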
|
findify/s3mock
|
src/main/scala/io/findify/s3mock/response/DeleteObjectsResponse.scala
|
Scala
|
mit
| 537
|
package com.sksamuel.elastic4s.searches.aggs
case class NestedAggregationDefinition(name: String,
path: String,
subaggs: Seq[AbstractAggregation] = Nil,
metadata: Map[String, AnyRef] = Map.empty)
extends AggregationDefinition {
type T = NestedAggregationDefinition
override def subAggregations(aggs: Iterable[AbstractAggregation]): T = copy(subaggs = aggs.toSeq)
  override def metadata(map: Map[String, AnyRef]): T = copy(metadata = map)
}
|
aroundus-inc/elastic4s
|
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/searches/aggs/NestedAggregationDefinition.scala
|
Scala
|
apache-2.0
| 573
|
package org.aprsdroid.app
import _root_.android.content.res.Configuration
import _root_.android.view.{Menu, MenuItem}
class StationHelper(title_id : Int) extends LoadingListActivity {
lazy val targetcall = getIntent().getDataString()
override def onResume() = {
super.onResume()
setLongTitle(title_id, targetcall)
}
override def onConfigurationChanged(c : Configuration) = {
super.onConfigurationChanged(c)
setLongTitle(title_id, targetcall)
}
override def onCreateOptionsMenu(menu : Menu) : Boolean = {
getMenuInflater().inflate(R.menu.context_call, menu);
true
}
override def onOptionsItemSelected(mi : MenuItem) : Boolean = {
callsignAction(mi.getItemId, targetcall)
}
}
|
ge0rg/aprsdroid
|
src/StationHelper.scala
|
Scala
|
gpl-2.0
| 704
|
package io.github.hamsters
import scala.annotation.implicitNotFound
import scala.concurrent.{ExecutionContext, Future}
@implicitNotFound("""Cannot create monad instance for type ${Box}.
If you are combining Future with another monad you might pass
an (implicit ec: ExecutionContext) parameter to your method
or import scala.concurrent.ExecutionContext.Implicits.global""")
trait Monad[Box[_]] extends Functor[Box] {
def pure[A](a: A): Box[A]
def flatMap[A, B](boxA: Box[A])(f: A => Box[B]): Box[B]
}
object Monad {
implicit val optionMonad = new Monad[Option] {
override def pure[A](x: A): Option[A] = Option(x)
override def flatMap[A, B](boxA: Option[A])(f: A => Option[B]) = boxA.flatMap(f)
override def map[A, B](boxA: Option[A])(f: A => B) = boxA.map(f)
}
implicit def futureMonad(implicit ec: ExecutionContext) = new Monad[Future] {
override def pure[A](x: A): Future[A] = Future.successful(x)
override def flatMap[A, B](boxA: Future[A])(f: A => Future[B]) = {
boxA.flatMap(f)
}
override def map[A, B](boxA: Future[A])(f: A => B) = {
boxA.map(f)
}
}
}
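A minimal usage sketch of the instances above: the helper works for any Box with a Monad in scope, so it picks up optionMonad for Option, while using it with Future would additionally require an ExecutionContext, exactly as the implicitNotFound message explains. The object and method names are illustrative.
object MonadUsageSketch {
  // Sequences two boxed values with flatMap/map from the type class above.
  def combine[Box[_]](a: Box[Int], b: Box[Int])(implicit m: Monad[Box]): Box[Int] =
    m.flatMap(a)(x => m.map(b)(y => x + y))
  // combine(Option(1), Option(2)) == Some(3)
  // combine(Option(1), None: Option[Int]) == None
}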
|
dgouyette/hamsters
|
shared/src/main/scala/io/github/hamsters/Monad.scala
|
Scala
|
apache-2.0
| 1,172
|
package sangria.util
import scala.collection.concurrent.TrieMap
class TrieMapCache[Key, Value] extends Cache[Key, Value] {
private val cache = TrieMap[Key, Value]()
def size = cache.size
def contains(key: Key) = cache.contains(key)
def apply(key: Key) = cache(key)
def get(key: Key) = cache.get(key)
def getOrElse(key: Key, default: => Value) = cache.getOrElse(key, default)
def update(key: Key, value: Value) = cache.update(key, value)
def remove(key: Key) = cache.remove(key)
def clear() = cache.clear()
def getOrElseUpdate(key: Key, fn: => Value) = cache.getOrElseUpdate(key, fn)
def find(fn: (Key, Value) => Boolean) = cache.find { case (key, value) => fn(key, value) }
def mapToSet[R](fn: (Key, Value) => R) = cache.map { case (key, value) => fn(key, value) }.toSet
def mapValues[R](fn: Value => R) = cache.map { case (k, v) => (k, fn(v)) }.toMap
def keyExists(fn: Key => Boolean) = cache.keySet.exists(fn)
def forEachValue(fn: Value => Unit) = cache.values.foreach(fn)
def removeKeys(fn: Key => Boolean) =
cache.keys.toVector.foreach(key => if (fn(key)) cache.remove(key))
def canEqual(other: Any): Boolean = other.isInstanceOf[TrieMapCache[_, _]]
override def equals(other: Any): Boolean = other match {
case that: TrieMapCache[_, _] => (that.canEqual(this)) && cache == that.cache
case _ => false
}
override def hashCode(): Int =
31 * cache.hashCode()
}
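A brief usage sketch of the cache above; keys and values are illustrative.
val cache = new TrieMapCache[String, Int]
cache.getOrElseUpdate("answer", 42)   // computes and stores 42
cache.get("answer")                   // Some(42)
cache.removeKeys(_ == "answer")
cache.contains("answer")              // false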
|
OlegIlyenko/sangria
|
modules/core/src/main/scala/sangria/util/TrieMapCache.scala
|
Scala
|
apache-2.0
| 1,426
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.util
import scala.reflect._
import org.apache.spark.annotation.Private
import org.apache.spark.sql.types.{DataType, DoubleType, FloatType}
import org.apache.spark.util.collection.OpenHashSet
// A wrapper around OpenHashSet that can handle null, Double.NaN and Float.NaN w.r.t. SQL semantics.
@Private
class SQLOpenHashSet[@specialized(Long, Int, Double, Float) T: ClassTag](
initialCapacity: Int,
loadFactor: Double) {
def this(initialCapacity: Int) = this(initialCapacity, 0.7)
def this() = this(64)
private val hashSet = new OpenHashSet[T](initialCapacity, loadFactor)
private var containNull = false
private var containNaN = false
def addNull(): Unit = {
containNull = true
}
def addNaN(): Unit = {
containNaN = true
}
def add(k: T): Unit = {
hashSet.add(k)
}
def contains(k: T): Boolean = {
hashSet.contains(k)
}
def containsNull(): Boolean = containNull
def containsNaN(): Boolean = containNaN
}
object SQLOpenHashSet {
def isNaN(dataType: DataType): Any => Boolean = {
dataType match {
case DoubleType =>
(value: Any) => java.lang.Double.isNaN(value.asInstanceOf[java.lang.Double])
case FloatType =>
(value: Any) => java.lang.Float.isNaN(value.asInstanceOf[java.lang.Float])
case _ => (_: Any) => false
}
}
def valueNaN(dataType: DataType): Any = {
dataType match {
case DoubleType => java.lang.Double.NaN
case FloatType => java.lang.Float.NaN
case _ => null
}
}
}
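A usage sketch of the NaN/null handling above, written as a hypothetical caller; in Spark this wrapper is normally driven by generated code, which is out of scope here. DoubleType is already imported in this file.
// Routes NaN through addNaN() so it is tracked separately from regular hash-set entries.
val set = new SQLOpenHashSet[Double](16)
val isNaN = SQLOpenHashSet.isNaN(DoubleType)
Seq(1.0, Double.NaN).foreach { v =>
  if (isNaN(v)) set.addNaN() else set.add(v)
}
set.contains(1.0)   // true
set.containsNaN()   // true
set.containsNull()  // false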
|
chuckchen/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/util/SQLOpenHashSet.scala
|
Scala
|
apache-2.0
| 2,343
|
package jp.ac.nagoya_u.dsmoq.sdk.request.json
private[request] case class SetMemberRoleJson(role: Int) extends Jsonable
|
nkawa/dsmoq
|
sdk/src/main/java/jp/ac/nagoya_u/dsmoq/sdk/request/json/SetMemberRoleJson.scala
|
Scala
|
apache-2.0
| 121
|
package com.twitter.querulous.database
import com.twitter.querulous.{FutureTimeout, TimeoutException}
import java.sql.{Connection, SQLException}
import concurrent.duration.Duration
class SqlDatabaseTimeoutException(msg: String, val timeout: Duration) extends SQLException(msg)
class TimingOutDatabaseFactory(
val databaseFactory :DatabaseFactory,
val poolSize :Int,
val queueSize :Int,
val openTimeout :Duration )
extends DatabaseFactory {
private def newTimeoutPool() = new FutureTimeout(poolSize, queueSize)
def apply(dbhosts: List[String], dbname: String, username: String, password: String,
urlOptions: Map[String, String], driverName: String) = {
new TimingOutDatabase(
databaseFactory(dbhosts, dbname, username, password, urlOptions, driverName),
newTimeoutPool(),
openTimeout
)
}
}
class TimingOutDatabase(
val database :Database,
timeout :FutureTimeout,
openTimeout :Duration)
extends Database with DatabaseProxy {
val label = database.name match {
case null => database.hosts.mkString(",") +"/ (null)"
case name => database.hosts.mkString(",") +"/"+ name
}
private def getConnection(wait: Duration) = {
try {
timeout(wait) {
database.open()
} { conn =>
database.close(conn)
}
} catch {
case e: TimeoutException =>
throw new SqlDatabaseTimeoutException(label, wait)
}
}
override def open() = getConnection(openTimeout)
def close(connection: Connection) { database.close(connection) }
}
|
kievbs/querulous210
|
src/main/scala/com/twitter/querulous/database/TimingOutDatabase.scala
|
Scala
|
apache-2.0
| 1,580
|
package mesosphere.marathon
package core.task.state
import mesosphere.UnitTest
import mesosphere.marathon.core.pod.{ BridgeNetwork, ContainerNetwork }
import mesosphere.marathon.state.Container.{ Docker, Mesos, PortMapping }
import mesosphere.marathon.state._
class NetworkInfoTest extends UnitTest {
val f = new Fixture
private def ipv4Address(addr: String) = org.apache.mesos.Protos.NetworkInfo.IPAddress.newBuilder()
.setIpAddress(addr)
.setProtocol(org.apache.mesos.Protos.NetworkInfo.Protocol.IPv4)
.build()
"NetworkInfo" when {
"computing PortAssignments from PortMappings (network mode BRIDGED)" should {
val app = AppDefinition(
id = PathId("test"),
networks = Seq(BridgeNetwork()), container = Some(Docker(
portMappings = Seq(
PortMapping(
containerPort = 8080,
hostPort = Some(0),
servicePort = 9000,
protocol = "tcp",
name = Some("http")
),
PortMapping(
containerPort = 8081,
hostPort = Some(123),
servicePort = 9001,
protocol = "udp",
name = Some("admin")
)
)
))
)
val networkInfo = NetworkInfo(
hostName = f.hostName,
hostPorts = Seq(20001, 123),
ipAddresses = Nil
)
"work without an IP address" in {
networkInfo.portAssignments(app, includeUnresolved = true) should be(
Seq(
PortAssignment(
portName = Some("http"),
effectiveIpAddress = Some(f.hostName),
effectivePort = 20001,
hostPort = Some(20001),
containerPort = Some(8080)),
PortAssignment(
portName = Some("admin"),
effectiveIpAddress = Some(f.hostName),
effectivePort = 123,
hostPort = Some(123),
containerPort = Some(8081))
)
)
}
"ignore the IP address when it's available" in {
val networkInfoWithIp = networkInfo.copy(f.hostName, ipAddresses = Seq(ipv4Address(f.containerIp)))
networkInfoWithIp.portAssignments(app, includeUnresolved = true) should be(
Seq(
PortAssignment(
portName = Some("http"),
effectiveIpAddress = Some(f.hostName),
effectivePort = 20001,
hostPort = Some(20001),
containerPort = Some(8080)),
PortAssignment(
portName = Some("admin"),
effectiveIpAddress = Some(f.hostName),
effectivePort = 123,
hostPort = Some(123),
containerPort = Some(8081))
)
)
}
}
"computing PortAssignments from PortMappings (network mode USER)" should {
val app = AppDefinition(
id = PathId("test"),
networks = Seq(ContainerNetwork("whatever")), container = Some(Docker(
portMappings = Seq(
PortMapping(containerPort = 0, hostPort = Some(31000), servicePort = 9000, protocol = "tcp"),
PortMapping(containerPort = 0, hostPort = None, servicePort = 9001, protocol = "tcp"),
PortMapping(containerPort = 0, hostPort = Some(31005), servicePort = 9002, protocol = "tcp")
)
))
)
val networkInfo = NetworkInfo(
hostName = f.hostName,
hostPorts = Seq(31000, 31005),
ipAddresses = Nil
)
"work without an IP address" in {
networkInfo.portAssignments(app, includeUnresolved = true) should be(
Seq(
PortAssignment(
portName = None,
effectiveIpAddress = Option(f.hostName),
effectivePort = 31000,
hostPort = Some(31000),
containerPort = Some(0)),
PortAssignment(
portName = None,
effectiveIpAddress = None,
effectivePort = PortAssignment.NoPort,
hostPort = None,
containerPort = Some(0)),
PortAssignment(
portName = None,
effectiveIpAddress = Option(f.hostName),
effectivePort = 31005,
hostPort = Some(31005),
containerPort = Some(0))
)
)
}
"use an IP address when it's available" in {
val networkInfoWithIp = networkInfo.copy(f.hostName, ipAddresses = Seq(ipv4Address(f.containerIp)))
networkInfoWithIp.portAssignments(app, includeUnresolved = true) should be(
Seq(
PortAssignment(
portName = None,
effectiveIpAddress = Some(f.hostName),
effectivePort = 31000,
hostPort = Some(31000),
containerPort = Some(0)),
PortAssignment(
portName = None,
effectiveIpAddress = Some(f.containerIp),
effectivePort = 0,
hostPort = None,
containerPort = Some(0)),
PortAssignment(
portName = None,
effectiveIpAddress = Some(f.hostName),
effectivePort = 31005,
hostPort = Some(31005),
containerPort = Some(0))
)
)
}
}
"computing PortAssignments from PortDefinitions" should {
"compute the correct values" in {
val app = AppDefinition(
id = PathId("test"),
container = Some(Mesos()),
portDefinitions = Seq(
PortDefinition(
port = 8080, // this will be a service port
protocol = "udp,tcp",
name = Some("http"),
labels = Map.empty
),
PortDefinition(
port = 9000, // this will be a service port
name = Some("admin"),
labels = Map.empty
)
)
)
val networkInfo = NetworkInfo(
hostName = f.hostName,
hostPorts = Seq(31000, 31005),
ipAddresses = Nil
)
networkInfo.portAssignments(app, includeUnresolved = true) should be(
Seq(
PortAssignment(
portName = Some("http"),
effectiveIpAddress = Some(f.hostName),
effectivePort = 31000,
hostPort = Some(31000),
containerPort = None),
PortAssignment(
portName = Some("admin"),
effectiveIpAddress = Some(f.hostName),
effectivePort = 31005,
hostPort = Some(31005),
containerPort = None)
)
)
}
}
}
class Fixture {
val hostName = "host.some"
val containerIp = "10.0.0.42"
}
}
|
guenter/marathon
|
src/test/scala/mesosphere/marathon/core/task/state/NetworkInfoTest.scala
|
Scala
|
apache-2.0
| 6,812
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.tensor.Tensor
import org.scalatest.{FlatSpec, Matchers}
import com.intel.analytics.bigdl.utils.RandomGenerator._
import com.intel.analytics.bigdl.utils.Table
import com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest
import scala.util.Random
@com.intel.analytics.bigdl.tags.Parallel
class CMulSpec extends FlatSpec with Matchers {
"A CMul with scaleW" should "work correctly" in {
val seed = 100
RNG.setSeed(seed)
val input = Tensor[Double](5, 4)
var i = 0
input.apply1(_ => {i += 1; i})
val gradOutput = Tensor[Double](5, 4)
i = 0
gradOutput.apply1(_ => {i += 1; i*0.1})
val layer1 = new CMul[Double](Array(5, 1))
val layer2 = layer1.cloneModule().asInstanceOf[CMul[Double]].setScaleW(0.5)
val output1 = layer1.forward(input)
val gradInput1 = layer1.backward(input, gradOutput)
val output2 = layer2.forward(input)
val gradInput2 = layer2.backward(input, gradOutput)
val cmul = CMul[Float](Array[Int](1, 4096, 1, 1))
val cmul2 = cmul.cloneModule().asInstanceOf[CMul[Float]]
val input1 = Tensor[Float](300, 4096).randn()
i = 0
input1.apply1(_ => {i += 1; i})
val gradOutput_1 = Tensor[Float](300, 4096)
i = 0
gradOutput_1.apply1(_ => {i += 1; i})
val output3 = cmul.forward(input1)
val gradInput3 = cmul.backward(input1, gradOutput_1)
val output4 = cmul2.forward(input1)
val gradInput4 = cmul2.backward(input1, gradOutput_1)
output1 should be (output2)
gradInput1 should be (gradInput2)
output3 should be (output4)
gradInput3 should be (gradInput4)
layer2.gradWeight should be (layer1.gradWeight.mul(0.5))
}
"CMUl" should "works well on batch input" in {
val model = nn.CMul[Float](Array(1, 64, 1, 1))
val model2 = model.cloneModule()
val batchInput = Tensor[Float](64, 64, 112, 112).rand()
val input = batchInput.select(1, 1).resize(Array(1, 64, 112, 112))
model.evaluate()
model2.evaluate()
val out1 = model.forward(batchInput)
val out2 = model2.forward(input).resize(Array(64, 112, 112))
out2 should be(out1.select(1, 1))
}
}
class CMulSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
val input = Tensor[Float](5, 1).apply1(e => Random.nextFloat())
val cmul = CMul[Float](Array(5, 1)).setName("cmul")
runSerializationTest(cmul, input)
}
}
|
wzhongyuan/BigDL
|
spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/CMulSpec.scala
|
Scala
|
apache-2.0
| 3,090
|
/*
* Copyright 2010-2011 Vilius Normantas <code@norma.lt>
*
* This file is part of Crossbow library.
*
* Crossbow is free software: you can redistribute it and/or modify it under the terms of the GNU
* General Public License as published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* Crossbow is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
* even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with Crossbow. If not,
* see <http://www.gnu.org/licenses/>.
*/
package lt.norma.crossbow.indicators
/** Stores the most recent historical value of the specified target indicator. */
class HistoryLast[Value](target: Indicator[Value] with History) extends HistoryAt(0, target) {
override def name = "HistoryLast(" + target.name + ")"
}
|
ViliusN/Crossbow
|
crossbow-core/src/lt/norma/crossbow/indicators/HistoryLast.scala
|
Scala
|
gpl-3.0
| 1,014
|
package redmine4s.api.resource
import play.api.libs.json._
import redmine4s.api.model.User
/**
* Users
* http://www.redmine.org/projects/redmine/wiki/Rest_Users
*/
trait UserResource extends BaseResource {
private def applyRedmineToUser: PartialFunction[User, User] = {
case p: User => p.copy(redmine = redmine, memberships = p.memberships.map(_.map(_.copy(redmine = redmine))))
}
/** Returns a list of users. */
def listUsers(params: Map[String, String] = Map.empty): Iterable[User] = {
import redmine4s.api.json.JsonHelper.userReads
list("/users.json", __ \\ 'users, params).map(applyRedmineToUser).toIterable
}
/** Creates a user. */
def createUser(login: String,
password: Option[String] = None,
firstname: String,
lastname: String,
mail: String,
authSourceId: Option[Long] = None,
mailNotification: Option[String] = None,
mustChangePasswd: Option[Boolean] = None,
customFields: Option[Seq[(Long, String)]] = None): User = {
import redmine4s.api.json.JsonHelper.{userCreateWrites, userReads}
applyRedmineToUser(create("/users.json", __ \\ 'user, (login, password, firstname, lastname, mail, authSourceId, mailNotification, mustChangePasswd, customFields)))
}
/** Returns the user details. */
def showUser(userId: Long): User = {
import redmine4s.api.json.JsonHelper.userReads
applyRedmineToUser(show(s"/users/$userId.json", __ \\ 'user, Map("include" -> "memberships,groups")))
}
/** Updates a user. */
def updateUser(userId: Long,
login: Option[String] = None,
password: Option[String] = None,
firstname: Option[String] = None,
lastname: Option[String] = None,
mail: Option[String] = None,
authSourceId: Option[Long] = None,
mailNotification: Option[String] = None,
mustChangePasswd: Option[Boolean] = None,
customFields: Option[Seq[(Long, String)]] = None): User = {
import redmine4s.api.json.JsonHelper.{userReads, userUpdateWrites}
create(s"/users/$userId.json", __ \\ 'user, (login, password, firstname, lastname, mail, authSourceId, mailNotification, mustChangePasswd, customFields))
showUser(userId)
}
/** Deletes a user. */
def deleteUser(userId: Long): Unit = delete(s"/users/$userId.json")
}
|
tomingtoming/redmine4s
|
src/main/scala/redmine4s/api/resource/UserResource.scala
|
Scala
|
apache-2.0
| 2,476
|
package io.bartholomews.spotify4s.core.entities
// https://developer.spotify.com/documentation/web-api/reference-beta/#object-externalidobject
sealed trait ExternalIds {
def value: String
}
// https://en.wikipedia.org/wiki/International_Standard_Recording_Code
case class ISRC(value: String) extends ExternalIds
// https://en.wikipedia.org/wiki/International_Article_Number
case class EAN(value: String) extends ExternalIds
// https://en.wikipedia.org/wiki/Universal_Product_Code
case class UPC(value: String) extends ExternalIds
|
bartholomews/spotify-scala-client
|
modules/core/src/main/scala/io/bartholomews/spotify4s/core/entities/ExternalIds.scala
|
Scala
|
mit
| 536
|
package org.jetbrains.sbt.project.template.activator
import java.io._
import com.intellij.openapi.progress.ProgressIndicator
import com.intellij.openapi.util.io.FileUtil
import com.intellij.util.io.HttpRequests
import com.intellij.util.net.NetUtils
/**
* User: Dmitry.Naydanov
* Date: 30.01.15.
*/
object ActivatorDownloadUtil {
private val CONTENT_LENGTH_TEMPLATE: String = "${content-length}"
  def downloadContentToFile(progress: ProgressIndicator, url: String, outputFile: File) {
val parentDirExists: Boolean = FileUtil.createParentDirs(outputFile)
if (!parentDirExists) throw new IOException("Parent dir of '" + outputFile.getAbsolutePath + "' can not be created!")
val out = new BufferedOutputStream(new FileOutputStream(outputFile))
try {
download(progress, url, out)
} finally out.close()
}
def download(progress: ProgressIndicator, location: String, output: OutputStream) {
val originalText: String = if (progress != null) progress.getText else null
substituteContentLength(progress, originalText, -1)
if (progress != null) progress.setText2("Downloading " + location)
try {
HttpRequests.request(location).productNameAsUserAgent.connect(new HttpRequests.RequestProcessor[Object]() {
def process(request: HttpRequests.Request): AnyRef = {
try {
val contentLength: Int = request.getConnection.getContentLength
substituteContentLength(progress, originalText, contentLength)
NetUtils.copyStreamContent(progress, request.getInputStream, output, contentLength)
}
catch {
case e: IOException =>
throw new IOException(HttpRequests.createErrorMessage(e, request, true), e)
}
null
}
})
} catch {
case e: IOException => throw new IOException("Cannot download " + location, e)
}
}
private def substituteContentLength(progress: ProgressIndicator, text: String, contentLengthInBytes: Int) {
if (progress == null || text == null) return
val ind = text indexOf CONTENT_LENGTH_TEMPLATE
if (ind != -1) {
val mes: String = formatContentLength(contentLengthInBytes)
val newText: String = text.substring(0, ind) + mes + text.substring(ind + CONTENT_LENGTH_TEMPLATE.length)
progress.setText(newText)
}
}
private def formatContentLength(contentLengthInBytes: Int): String = {
if (contentLengthInBytes < 0) return ""
val kilo: Int = 1024
if (contentLengthInBytes < kilo) return f", $contentLengthInBytes bytes"
if (contentLengthInBytes < kilo * kilo) return f", ${contentLengthInBytes / (1.0 * kilo)}%.1f KB"
f", ${contentLengthInBytes / (1.0 * kilo * kilo)}%.1f MB"
}
}
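For reference, the expected rendering of the private formatContentLength helper above, assuming a dot-decimal default locale (the f-interpolator is locale-sensitive); these are worked examples rather than calls that compile outside the object.
// formatContentLength(512)             == ", 512 bytes"
// formatContentLength(2048)            == ", 2.0 KB"   (2048 / 1024.0)
// formatContentLength(3 * 1024 * 1024) == ", 3.0 MB"   (bytes / (1024.0 * 1024))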
|
triggerNZ/intellij-scala
|
src/org/jetbrains/sbt/project/template/activator/ActivatorDownloadUtil.scala
|
Scala
|
apache-2.0
| 2,740
|
/*
* Copyright 2016-2017 original author or authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tap.analysis
import play.api.Logger
//import org.json4s.{DefaultFormats, Formats, Serialization, jackson}
//import org.json4s.jackson
/**
* Created by andrew@andrewresearch.net on 16/10/17.
*/
object Lexicons {
val logger: Logger = Logger(this.getClass)
def matchEpistemicVerbs(terms:Vector[String],useLemmas:Boolean = false):Vector[String] = terms
.intersect(if (useLemmas) epistemicVerbLemmas else epistemicVerbTerms)
type Lexicon = Vector[String]
val epistemicVerbTerms:Lexicon = Vector("think","thought","believe","believed","guess","guessed","suppose","supposed",
"sure","certain","confident","learnt","learned","imagine","imagined","wonder","wondered","consider","considered",
"realise","realised","realize","realized","understand","understood","assume","assumed","admit")
val epistemicVerbLemmas:Lexicon = Vector("think","believe","guess","suppose","sure","certain","confident",
"learnt","learn","imagine","wonder","consider","realise","realize","understand","assume","admit")
}
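A small usage sketch of the intersection helper above; the input tokens are illustrative and assume lower-cased tokenisation has already happened upstream.
// Only tokens present in the corresponding lexicon survive the intersection.
Lexicons.matchEpistemicVerbs(Vector("i", "think", "you", "know"))          // Vector("think")
Lexicons.matchEpistemicVerbs(Vector("learn", "learnt"), useLemmas = true)  // Vector("learn", "learnt")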
|
uts-cic/tap
|
src/main/scala/tap/analysis/Lexicons.scala
|
Scala
|
apache-2.0
| 1,646
|
package com.ponkotuy.data
import com.ponkotuy.data.master.MasterRemodel
import com.ponkotuy.tool.PostQueryParser
import org.scalatest.FunSuite
import org.json4s.native.JsonMethods._
/**
*
* @author ponkotuy
* Date: 15/02/05.
*/
class MasterRemodelSuite extends FunSuite {
test("normal pattern") {
val jsonValue = """{"api_result":1,"api_result_msg":"\u6210\u529f","api_data":{"api_req_buildkit":1,"api_req_remodelkit":1,"api_certain_buildkit":2,"api_certain_remodelkit":2,"api_req_slot_id":0,"api_req_slot_num":0,"api_change_flag":0}}"""
val postValue = """api%5Fverno=1&api%5Fid=101&api%5Ftoken=xxxx&api%5Fslot%5Fid=764"""
val value = MasterRemodel.fromJson(parse(jsonValue) \ "api_data", PostQueryParser.parse(postValue), 1)
val expected = MasterRemodel(1, 1, 2, 2, 0, 0, false, 764, 1)
assert(value.isDefined)
assert(value.get === expected)
}
}
|
ttdoda/MyFleetGirls
|
library/src/test/scala/com/ponkotuy/data/MasterRemodelSuite.scala
|
Scala
|
mit
| 882
|
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.javadsl.persistence.cassandra
import java.net.URI
import scala.concurrent.Future
import akka.actor.ActorSystem
import com.lightbend.lagom.internal.javadsl.persistence.cassandra._
import com.lightbend.lagom.internal.persistence.cassandra.{ CassandraOffsetStore, CassandraReadSideSettings, ServiceLocatorAdapter, ServiceLocatorHolder }
import com.lightbend.lagom.javadsl.api.ServiceLocator
import com.lightbend.lagom.javadsl.persistence.PersistentEntityRegistry
import com.lightbend.lagom.spi.persistence.OffsetStore
import javax.annotation.PostConstruct
import javax.inject.Inject
import play.api.{ Configuration, Environment }
import play.api.inject._
import scala.util.Try
/**
* Guice module for the Persistence API.
*/
class CassandraPersistenceModule extends Module {
override def bindings(environment: Environment, configuration: Configuration): Seq[Binding[_]] = Seq(
bind[CassandraPersistenceModule.InitServiceLocatorHolder].toSelf.eagerly(),
bind[PersistentEntityRegistry].to[CassandraPersistentEntityRegistry],
bind[CassandraSession].toSelf,
bind[CassandraReadSide].to[CassandraReadSideImpl],
bind[CassandraReadSideSettings].toSelf,
bind[CassandraOffsetStore].to[JavadslCassandraOffsetStore],
bind[OffsetStore].to(bind[CassandraOffsetStore])
)
}
private[lagom] object CassandraPersistenceModule {
class InitServiceLocatorHolder @Inject() (system: ActorSystem, injector: Injector) {
// Guice doesn't support this, but other DI frameworks do.
@PostConstruct
def init(): Unit = {
Try(injector.instanceOf[ServiceLocator]).foreach { locator =>
ServiceLocatorHolder(system).setServiceLocator(new ServiceLocatorAdapter {
override def locateAll(name: String): Future[List[URI]] = {
import system.dispatcher
import scala.compat.java8.FutureConverters._
import scala.collection.JavaConverters._
locator.locateAll(name).toScala.map(_.asScala.toList)
}
})
}
}
}
}
|
rstento/lagom
|
persistence-cassandra/javadsl/src/main/scala/com/lightbend/lagom/javadsl/persistence/cassandra/CassandraPersistenceModule.scala
|
Scala
|
apache-2.0
| 2,126
|
package com.blackboxsociety.http.routes
import com.blackboxsociety.http._
case class PathRoute(path: String) extends HttpRouteRule {
def route(request: HttpRequest): Option[HttpRequest] = {
if(request.resource.path == path)
Some(request)
else
None
}
}
|
blackboxsociety/blackbox-http
|
src/main/scala/com/blackboxsociety/http/routes/PathRoute.scala
|
Scala
|
mit
| 279
|
package im.actor.server.office
import java.util.concurrent.TimeUnit
import akka.actor.{ ActorLogging, Status }
import akka.contrib.pattern.ShardRegion.Passivate
import akka.pattern.pipe
import akka.persistence.PersistentActor
import org.joda.time.DateTime
import scala.collection.immutable
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.{ Failure, Success }
case object StopOffice
trait ProcessorState
trait Processor[State <: ProcessorState, Event <: AnyRef] extends PersistentActor with ActorLogging {
private val passivationIntervalMs = context.system.settings.config.getDuration("office.passivation-interval", TimeUnit.MILLISECONDS)
private implicit val ec = context.dispatcher
protected type ProcessorQuery
protected def updatedState(evt: Event, state: State): State
protected def workWith(e: Event, s: State): State = {
val updated = updatedState(e, s)
context become working(updated)
updated
}
protected def workWith(es: immutable.Seq[Event], state: State): State = {
val newState = es.foldLeft(state) {
case (s, e) ⇒
log.debug("Updating state: {} with event: {}", s, e)
updatedState(e, s)
}
context become working(newState)
newState
}
override def preRestart(reason: Throwable, message: Option[Any]): Unit = {
log.error(reason, "Failure while processing message {}", message)
super.preRestart(reason, message)
}
protected def handleInitCommand: Receive
protected def handleCommand(state: State): Receive
protected def handleQuery(state: State): Receive
final def receiveCommand = initializing
protected final def initializing: Receive = handleInitCommand orElse stashingBehavior()
protected final def working(state: State): Receive = handleCommand(state) orElse handleQuery(state) orElse {
case unmatched ⇒ log.warning("Unmatched message: {}, sender: {}", unmatched, sender())
}
private final def stashingBehavior(): Receive = {
case msg ⇒
log.debug("Stashing while initializing. Message: {}", msg)
stash()
}
private final def stashingBehavior(evt: Any): Receive = {
case msg ⇒
log.debug("Stashing while event processing. Message: {}, Event: {}", msg, evt)
stash()
}
protected final def stashing(evt: Any, state: State): Receive =
handleQuery(state) orElse stashingBehavior(evt)
final def persistReply[R](e: Event, state: State)(f: Event ⇒ Future[R]): Unit = {
log.debug("[persistReply] {}", e)
persist(e) { evt ⇒
f(evt) pipeTo sender() onComplete {
case Success(_) ⇒
case Failure(f) ⇒
log.error(f, "Failure while processing event {}", evt)
}
workWith(e, state)
}
}
final def persistStashing[R](e: Event, state: State)(f: Event ⇒ Future[R]): Unit = {
log.debug("[persistStashing], event {}", e)
context become stashing(e, state)
persistAsync(e) { evt ⇒
f(evt) andThen {
case Success(_) ⇒
workWith(e, state)
unstashAll()
case Failure(f) ⇒
log.error(f, "Failure while processing event {}", e)
workWith(e, state)
unstashAll()
}
}
}
final def persistStashingReply[R](e: Event, state: State)(f: Event ⇒ Future[R]): Unit = {
val replyTo = sender()
log.debug("[persistStashingReply], event {}", e)
context become stashing(e, state)
persistAsync(e) { evt ⇒
f(evt) pipeTo replyTo onComplete {
case Success(r) ⇒
workWith(e, state)
unstashAll()
case Failure(f) ⇒
log.error(f, "Failure while processing event {}", e)
replyTo ! Status.Failure(f)
workWith(e, state)
unstashAll()
}
}
}
final def persistStashingReply[R](es: immutable.Seq[Event], state: State)(f: immutable.Seq[Event] ⇒ Future[R]): Unit = {
val replyTo = sender()
log.debug("[persistStashingReply], events {}", es)
context become stashing(es, state)
persistAsync(es)(_ ⇒ ())
def updateBatch(es: immutable.Seq[Event], s: State): State =
      es.foldLeft(s) {
case (s, e) ⇒
updatedState(e, s)
}
defer(()) { _ ⇒
f(es) pipeTo replyTo onComplete {
case Success(_) ⇒
context become working(updateBatch(es, state))
unstashAll()
case Failure(e) ⇒
log.error(e, "Failure while processing event {}", e)
replyTo ! Status.Failure(e)
context become working(updateBatch(es, state))
unstashAll()
}
}
}
def now(): DateTime = new DateTime()
if (passivationIntervalMs > 0) {
log.warning("Passivating in {} ms", passivationIntervalMs)
val interval = passivationIntervalMs.milliseconds
context.system.scheduler.scheduleOnce(interval, context.parent, Passivate(stopMessage = StopOffice))
}
}
|
x303597316/actor-platform
|
actor-server/actor-core/src/main/scala/im/actor/server/office/Processor.scala
|
Scala
|
mit
| 4,962
|
package net.sansa_stack.inference.data
/**
* @author Lorenz Buehmann
*/
trait TripleOps[Rdf <: RDF] {
/**
* @return the subject
*/
def s: Rdf#Node
/**
* @return the predicate
*/
def p: Rdf#URI
/**
* @return the object
*/
def o: Rdf#Node
}
|
SANSA-Stack/SANSA-RDF
|
sansa-inference/sansa-inference-common/src/main/scala/net/sansa_stack/inference/data/TripleOps.scala
|
Scala
|
apache-2.0
| 282
|
package scaladex.core.util
case class Secret(private val value: String) extends AnyVal {
def decode: String = value
override def toString: String = "*****"
}
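A one-line sketch of the masking behaviour above: the wrapped value is only recoverable through decode, while toString hides it from logs.
val token = Secret("s3cr3t")   // illustrative value
token.toString                 // "*****"
token.decode                   // "s3cr3t"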
|
scalacenter/scaladex
|
modules/core/shared/src/main/scala/scaladex/core/util/Secret.scala
|
Scala
|
bsd-3-clause
| 164
|
package im.mange.backdoor
import im.mange.backdoor.server.kryo.Cryopreservation
import im.mange.backdoor.server.{BackdoorMessageHandler, JsonRequestHandler}
import net.liftweb.common.Box
import net.liftweb.http.rest.RestHelper
import net.liftweb.http.{PostRequest, Req, _}
import net.liftweb.json._
object BackdoorServer extends RestHelper {
private val missingHandler: Box[LiftResponse] = BackdoorMessageHandler.fail("Please configure a backdoor handler")
var handler: Option[BackdoorMessageHandler] = None
serve {
case Req("backdoor" :: "alive" :: Nil, "", GetRequest) => BackdoorMessageHandler.ok
case req@Req("backdoor" :: Nil, "", PostRequest) => {
try {
JsonRequestHandler.handle(req)((json, req) ⇒ {
val prettyJson = pretty(render(json))
if (BackdoorConfig.debug) println(s"### Received:[\n$prettyJson\n]")
val message: Any = Cryopreservation.thaw(prettyJson)
handler.fold(missingHandler)(_.handle(message))
})
} catch {
case e: Exception ⇒ BackdoorMessageHandler.fail(e.getMessage)
}
}
}
}
|
alltonp/backdoor-liftweb
|
src/main/scala/im/mange/backdoor/BackdoorServer.scala
|
Scala
|
apache-2.0
| 1,108
|
package models.dao.anorm
import anorm._
import anorm.SqlParser._
import play.api.db.DB
import play.api.Play.current
import models.dao.{Category, CategoryDAO}
object AnormCategoryDAO extends CategoryDAO {
val category = {
int("id") ~ str("display_name") ~ str("url_name") map {
case id~displayName~urlName => Category(id, displayName, urlName)
}
}
def create(displayName: String, urlName: String) = DB.withConnection { implicit c =>
SQL("INSERT INTO category(display_name, url_name) VALUES({displayName}, {urlName})").on(
'displayName -> displayName, 'urlName -> urlName).executeUpdate()
}
def findById(id: Int): Option[Category] = DB.withConnection { implicit c =>
SQL("SELECT * FROM category WHERE id = {id}").on('id -> id).as(category singleOpt)
}
def findByName(urlName: String): Option[Category] = DB.withConnection { implicit c =>
SQL("SELECT * FROM category WHERE url_name = {urlName}").on('urlName -> urlName).as(category singleOpt)
}
def all(): List[Category] = DB.withConnection { implicit c =>
SQL("SELECT * FROM category ORDER BY display_name").as(category *)
}
}
|
aspectcg15/play-app
|
app/models/dao/anorm/AnormCategoryDAO.scala
|
Scala
|
gpl-3.0
| 1,137
|
/*
* Copyright 2019 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.filters
import akka.stream.Materializer
import play.api.Play
import play.api.Play.current
import play.api.mvc.Filter
import play.mvc.Http.HeaderNames
object CommonHeaders {
val NoCacheHeader = HeaderNames.CACHE_CONTROL -> "no-cache,no-store,max-age=0"
}
trait MicroserviceFilterSupport {
implicit def mat: Materializer = Play.materializer
}
|
hmrc/play-filters
|
src/main/scala/uk/gov/hmrc/play/filters/Support.scala
|
Scala
|
apache-2.0
| 978
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.feature.common
import com.intel.analytics.bigdl.dllib.feature.dataset.Sample
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import scala.reflect.ClassTag
/**
* a Preprocessing that converts Tensor to Sample.
*/
class TensorToSample[T: ClassTag]()(implicit ev: TensorNumeric[T])
extends Preprocessing[Tensor[T], Sample[T]] {
override def apply(prev: Iterator[Tensor[T]]): Iterator[Sample[T]] = {
prev.map(Sample(_))
}
}
object TensorToSample {
def apply[F, T: ClassTag]()(implicit ev: TensorNumeric[T]): TensorToSample[T] =
new TensorToSample()
}
|
intel-analytics/BigDL
|
scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/feature/common/TensorToSample.scala
|
Scala
|
apache-2.0
| 1,296
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.util
import org.apache.spark.ml.feature.StringIndexer
import org.apache.spark.sql.Row
class MLTestSuite extends MLTest {
import testImplicits._
test("test transformer on stream data") {
val data = Seq((0, "a"), (1, "b"), (2, "c"), (3, "d"), (4, "e"), (5, "f"))
.toDF("id", "label")
val indexer = new StringIndexer().setStringOrderType("alphabetAsc")
.setInputCol("label").setOutputCol("indexed")
val indexerModel = indexer.fit(data)
testTransformer[(Int, String)](data, indexerModel, "id", "indexed") {
case Row(id: Int, indexed: Double) =>
assert(id === indexed.toInt)
}
testTransformerByGlobalCheckFunc[(Int, String)] (data, indexerModel, "id", "indexed") { rows =>
assert(rows.map(_.getDouble(1)).max === 5.0)
}
intercept[Exception] {
testTransformerOnStreamData[(Int, String)](data, indexerModel, "id", "indexed") {
case Row(id: Int, indexed: Double) =>
assert(id != indexed.toInt)
}
}
intercept[Exception] {
testTransformerOnStreamData[(Int, String)](data, indexerModel, "id", "indexed") {
rows: Seq[Row] =>
assert(rows.map(_.getDouble(1)).max === 1.0)
}
}
}
}
|
lvdongr/spark
|
mllib/src/test/scala/org/apache/spark/ml/util/MLTestSuite.scala
|
Scala
|
apache-2.0
| 2,044
|
//package io.eels.component.parquet
//
//import java.nio.file.Paths
//
//import io.eels.{SinkParser, SourceParser}
//import org.apache.hadoop.conf.Configuration
//import org.apache.hadoop.fs.{FileSystem, Path}
//import org.apache.hadoop.hive.conf.HiveConf
//
//object ParquetSourceParser extends SourceParser {
// val regex = "parquet:([^?].*?)(\\\\?.*)?".r
// override def apply(str: String): Option[Builder[ParquetSource]] = str match {
// case regex(path, params) =>
// Some(ParquetSourceBuilder(path, Option(params).map(UrlParamParser.apply).getOrElse(Map.empty)))
// case _ => None
// }
//}
//
//case class ParquetSourceBuilder(path: String, params: Map[String, List[String]]) extends Builder[ParquetSource] {
// require(path != null, "path cannot be null")
// override def apply(): ParquetSource = {
// implicit val fs = FileSystem.get(new Configuration)
// new ParquetSource(Paths.get(path))
// }
//}
//
//object ParquetSinkParser extends SinkParser {
// val Regex = "parquet:([^?].*?)(\\\\?.*)?".r
// override def apply(str: String): Option[Builder[ParquetSink]] = str match {
// case Regex(path, params) =>
// Some(ParquetSinkBuilder(new Path(path), Option(params).map(UrlParamParser.apply).getOrElse(Map.empty)))
// case _ => None
// }
//}
//
//case class ParquetSinkBuilder(path: Path, params: Map[String, List[String]]) extends Builder[ParquetSink] {
// require(path != null, "path name cannot be null")
// override def apply(): ParquetSink = {
// implicit val fs = FileSystem.get(new Configuration)
// implicit val conf = new HiveConf()
// new ParquetSink(path)
// }
//}
|
eel-lib/eel
|
eel-core/src/main/scala/io/eels/component/parquet/util/parser.scala
|
Scala
|
mit
| 1,628
|
package redis
package algebra
import scalaz.{Free, Functor, Inject, InjectFunctions, NonEmptyList}
import data.Status
sealed abstract class HashAlgebra[A]
final case class Hdel[A](key: ByteString, fields: NonEmptyList[ByteString], h: Long => A) extends HashAlgebra[A]
final case class Hexists[A](key: ByteString, field: ByteString, h: Boolean => A) extends HashAlgebra[A]
final case class Hget[A](key: ByteString, field: ByteString, h: Option[ByteString] => A) extends HashAlgebra[A]
final case class Hgetall[A](key: ByteString, h: Seq[(ByteString, ByteString)] => A) extends HashAlgebra[A]
final case class Hincrby[A](key: ByteString, field: ByteString, increment: Long, h: Long => A) extends HashAlgebra[A]
final case class Hincrbyfloat[A](key: ByteString, field: ByteString, increment: BigDecimal, h: BigDecimal => A) extends HashAlgebra[A]
final case class Hkeys[A](key: ByteString, h: Seq[ByteString] => A) extends HashAlgebra[A]
final case class Hlen[A](key: ByteString, h: Long => A) extends HashAlgebra[A]
final case class Hmget[A](key: ByteString, fields: NonEmptyList[ByteString], h: Seq[Option[ByteString]] => A) extends HashAlgebra[A]
final case class Hmset[A](key: ByteString, pairs: NonEmptyList[(ByteString, ByteString)], h: Status => A) extends HashAlgebra[A]
final case class Hset[A](key: ByteString, field: ByteString, value: ByteString, h: Boolean => A) extends HashAlgebra[A]
final case class Hsetnx[A](key: ByteString, field: ByteString, value: ByteString, h: Boolean => A) extends HashAlgebra[A]
final case class Hvals[A](key: ByteString, h: Seq[ByteString] => A) extends HashAlgebra[A]
trait HashInstances {
implicit val hashAlgebraFunctor: Functor[HashAlgebra] =
new Functor[HashAlgebra] {
def map[A, B](a: HashAlgebra[A])(f: A => B): HashAlgebra[B] =
a match {
case Hdel(k, s, h) => Hdel(k, s, x => f(h(x)))
case Hexists(k, s, h) => Hexists(k, s, x => f(h(x)))
case Hget(k, s, h) => Hget(k, s, x => f(h(x)))
case Hgetall(k, h) => Hgetall(k, x => f(h(x)))
case Hincrby(k, s, i, h) => Hincrby(k, s, i, x => f(h(x)))
case Hincrbyfloat(k, s, i, h) => Hincrbyfloat(k, s, i, x => f(h(x)))
case Hkeys(k, h) => Hkeys(k, x => f(h(x)))
case Hlen(k, h) => Hlen(k, x => f(h(x)))
case Hmget(k, s, h) => Hmget(k, s, x => f(h(x)))
case Hmset(k, p, h) => Hmset(k, p, x => f(h(x)))
case Hset(k, s, v, h) => Hset(k, s, v, x => f(h(x)))
case Hsetnx(k, s, v, h) => Hsetnx(k, s, v, x => f(h(x)))
case Hvals(k, h) => Hvals(k, x => f(h(x)))
}
}
}
trait HashFunctions extends InjectFunctions {
def hdel[F[_]: Functor](key: ByteString, fields: NonEmptyList[ByteString])(implicit I: Inject[HashAlgebra, F]): Free[F, Long] =
inject[F, HashAlgebra, Long](Hdel(key, fields, Free.point(_)))
def hexists[F[_]: Functor](key: ByteString, field: ByteString)(implicit I: Inject[HashAlgebra, F]): Free[F, Boolean] =
inject[F, HashAlgebra, Boolean](Hexists(key, field, Free.point(_)))
def hget[F[_]: Functor](key: ByteString, field: ByteString)(implicit I: Inject[HashAlgebra, F]): Free[F, Option[ByteString]] =
inject[F, HashAlgebra, Option[ByteString]](Hget(key, field, Free.point(_)))
def hgetall[F[_]: Functor](key: ByteString)(implicit I: Inject[HashAlgebra, F]): Free[F, Seq[(ByteString, ByteString)]] =
inject[F, HashAlgebra, Seq[(ByteString, ByteString)]](Hgetall(key, Free.point(_)))
def hincrby[F[_]: Functor](key: ByteString, field: ByteString, increment: Long)(implicit I: Inject[HashAlgebra, F]): Free[F, Long] =
inject[F, HashAlgebra, Long](Hincrby(key, field, increment, Free.point(_)))
def hincrbyfloat[F[_]: Functor](key: ByteString, field: ByteString, increment: BigDecimal)(implicit I: Inject[HashAlgebra, F]): Free[F, BigDecimal] =
inject[F, HashAlgebra, BigDecimal](Hincrbyfloat(key, field, increment, Free.point(_)))
def hkeys[F[_]: Functor](key: ByteString)(implicit I: Inject[HashAlgebra, F]): Free[F, Seq[ByteString]] =
inject[F, HashAlgebra, Seq[ByteString]](Hkeys(key, Free.point(_)))
def hlen[F[_]: Functor](key: ByteString)(implicit I: Inject[HashAlgebra, F]): Free[F, Long] =
inject[F, HashAlgebra, Long](Hlen(key, Free.point(_)))
def hmget[F[_]: Functor](key: ByteString, fields: NonEmptyList[ByteString])(implicit I: Inject[HashAlgebra, F]): Free[F, Seq[Option[ByteString]]] =
inject[F, HashAlgebra, Seq[Option[ByteString]]](Hmget(key, fields, Free.point(_)))
def hmset[F[_]: Functor](key: ByteString, pairs: NonEmptyList[(ByteString, ByteString)])(implicit I: Inject[HashAlgebra, F]): Free[F, Status] =
inject[F, HashAlgebra, Status](Hmset(key, pairs, Free.point(_)))
def hset[F[_]: Functor](key: ByteString, field: ByteString, value: ByteString)(implicit I: Inject[HashAlgebra, F]): Free[F, Boolean] =
inject[F, HashAlgebra, Boolean](Hset(key, field, value, Free.point(_)))
def hsetnx[F[_]: Functor](key: ByteString, field: ByteString, value: ByteString)(implicit I: Inject[HashAlgebra, F]): Free[F, Boolean] =
inject[F, HashAlgebra, Boolean](Hsetnx(key, field, value, Free.point(_)))
def hvals[F[_]: Functor](key: ByteString)(implicit I: Inject[HashAlgebra, F]): Free[F, Seq[ByteString]] =
inject[F, HashAlgebra, Seq[ByteString]](Hvals(key, Free.point(_)))
}
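A minimal composition sketch for the smart constructors above; it assumes it sits next to the algebra, so the scalaz imports and the package-level ByteString alias are in scope, and the interpreter that eventually runs the Free program is out of scope here. The object and method names are illustrative.
object HashUsageSketch extends HashFunctions {
  // Sequences a write and a read into one Free program over any coproduct F
  // that HashAlgebra injects into.
  def putThenGet[F[_]: Functor](key: ByteString, field: ByteString, value: ByteString)(
      implicit I: Inject[HashAlgebra, F]): Free[F, Option[ByteString]] =
    hset[F](key, field, value).flatMap(_ => hget[F](key, field))
}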
|
ethul/redis-algebra
|
src/main/scala/redis/algebra/hash.scala
|
Scala
|
mit
| 5,352
|
/*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. Camunda licenses this file to you under the Apache License,
* Version 2.0; you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.feel.valuemapper
import org.camunda.feel.syntaxtree.Val
abstract class JavaCustomValueMapper extends CustomValueMapper {
/**
* Transform the given object into a FEEL type - one of [[Val]] (e.g. [[Double]] to [[ValNumber]]).
    * If it can't be transformed, an empty [[java.util.Optional]] is returned instead and the object is passed to the next mapper in the chain.
*
* @param x the object to transform
* @param innerValueMapper the mapper function to transform inner values of a collection type
* @return the FEEL representation of the object
*/
def toValue(x: Any, innerValueMapper: java.util.function.Function[Any, Val])
: java.util.Optional[Val]
override def toVal(x: Any, innerValueMapper: Any => Val): Option[Val] = {
toValue(x, innerValue => innerValueMapper.apply(innerValue)) match {
case v if (v.isPresent) => Some(v.get)
case _ => None
}
}
/**
* Transform the given FEEL type into a base Scala/Java object (e.g. [[ValNumber]] to [[Double]]).
    * If it can't be transformed, an empty [[java.util.Optional]] is returned instead and the value is passed to the next mapper in the chain.
*
* @param value the FEEL type to transform
* @param innerValueMapper the mapper function to transform inner values of a collection type
* @return the base object of the FEEL type
*/
def unpackValue(value: Val,
innerValueMapper: java.util.function.Function[Val, Any])
: java.util.Optional[Any]
override def unpackVal(value: Val,
innerValueMapper: Val => Any): Option[Any] = {
unpackValue(value, innerValue => innerValueMapper.apply(innerValue)) match {
case x if (x.isPresent) => Some(x.get)
case _ => None
}
}
/**
* The priority of this mapper in the chain. The mappers are invoked in order of their priority,
* starting with the highest priority.
*/
override val priority: Int = 1
}
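A hypothetical subclass sketch showing the contract described in the scaladoc above: returning an empty Optional makes every value fall through to the next mapper in the chain. The object name is illustrative.
object PassThroughValueMapperSketch extends JavaCustomValueMapper {
  // Handles no types itself; both directions defer to the rest of the chain.
  override def toValue(x: Any, innerValueMapper: java.util.function.Function[Any, Val]): java.util.Optional[Val] =
    java.util.Optional.empty()
  override def unpackValue(value: Val, innerValueMapper: java.util.function.Function[Val, Any]): java.util.Optional[Any] =
    java.util.Optional.empty()
}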
|
camunda/feel-scala
|
src/main/scala/org/camunda/feel/valuemapper/JavaCustomValueMapper.scala
|
Scala
|
apache-2.0
| 2,810
|
package com.socrata.pg.store
import scala.language.reflectiveCalls
class CurrentVersionTest extends PGSecondaryTestBase with PGSecondaryUniverseTestBase with PGStoreTestBase {
import PGSecondaryUtil._
test("handle CurrentVersion") {
withPgu() { pgu =>
val f = columnsCreatedFixture
var version = f.dataVersion
f.events foreach { e =>
version = version + 1
f.pgs.doVersion(pgu, f.datasetInfo, version, version, None, Iterator(e))
}
f.pgs.doCurrentVersion(pgu, f.datasetInfo.internalName, None) shouldEqual version
}
}
test("handle CurrentVersion over a range") {
withPgu() { pgu =>
val f = columnsCreatedFixture
var version = f.dataVersion
f.events foreach { e =>
f.pgs.doVersion(pgu, f.datasetInfo, version + 1, version + 10, None, Iterator(e))
version = version + 10
}
f.pgs.doCurrentVersion(pgu, f.datasetInfo.internalName, None) shouldEqual version
}
}
}
|
socrata-platform/soql-postgres-adapter
|
store-pg/src/test/scala/com/socrata/pg/store/CurrentVersionTest.scala
|
Scala
|
apache-2.0
| 987
|
/*
* Copyright 2008-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala.bson
/**
* A companion helper for a `BsonElement` - the mapping from a name to a BsonValue.
*
 * Used by [[BsonMagnets]] and polices the valid key/value pair types for [[Document]].
*
* @since 1.0
*/
object BsonElement {
def apply(key: String, value: BsonValue): BsonElement = new BsonElement(key, value)
}
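A brief usage sketch (not from the driver sources); it assumes the BsonString alias and companion object provided by the same org.mongodb.scala.bson package.

// Build a name -> value pair by hand; Document normally does this for you via BsonMagnets,
// so constructing a BsonElement directly is rarely necessary.
val element: BsonElement = BsonElement("name", BsonString("Alice"))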
|
rozza/mongo-scala-driver
|
bson/src/main/scala/org/mongodb/scala/bson/BsonElement.scala
|
Scala
|
apache-2.0
| 946
|
package net.dinkla.lbnn.utils
trait Utilities {
def mkdir(dir: String): Unit
def deldir(dir: String): Unit
def delete(ps: String*): Unit = for (p <- ps) { deldir(p) }
def exists(file: String): Boolean
def merge(src: String, dst: String): Unit
def download(url: String, dest: String): Unit
def write(path: String, contents: String): Unit
}
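A minimal local-filesystem sketch of this trait (not part of the project), using only java.nio and java.net. The merge semantics are an assumption here (concatenate every regular file directly under src into the single file dst), since the trait itself does not pin them down.

import java.net.URL
import java.nio.file.{Files, Paths, StandardCopyOption, StandardOpenOption}
import java.util.Comparator
import scala.collection.JavaConverters._

// A local-filesystem implementation sketch; the merge semantics above are assumed.
class LocalUtilities extends Utilities {

  override def mkdir(dir: String): Unit =
    Files.createDirectories(Paths.get(dir))

  // Delete a directory tree, children first; a no-op if it does not exist.
  override def deldir(dir: String): Unit = {
    val root = Paths.get(dir)
    if (Files.exists(root))
      Files.walk(root).sorted(Comparator.reverseOrder[java.nio.file.Path]()).iterator().asScala
        .foreach(p => Files.delete(p))
  }

  override def exists(file: String): Boolean =
    Files.exists(Paths.get(file))

  // Assumed semantics: append every regular file found directly under src to dst.
  override def merge(src: String, dst: String): Unit = {
    val out = Paths.get(dst)
    Files.deleteIfExists(out)
    Files.createFile(out)
    Files.list(Paths.get(src)).iterator().asScala
      .filter(Files.isRegularFile(_))
      .toSeq.sortBy(_.toString)
      .foreach(p => Files.write(out, Files.readAllBytes(p), StandardOpenOption.APPEND))
  }

  override def download(url: String, dest: String): Unit = {
    val in = new URL(url).openStream()
    try Files.copy(in, Paths.get(dest), StandardCopyOption.REPLACE_EXISTING)
    finally in.close()
  }

  override def write(path: String, contents: String): Unit =
    Files.write(Paths.get(path), contents.getBytes("UTF-8"))
}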
|
jdinkla/location-based-nearest-neighbours
|
src/main/scala/net/dinkla/lbnn/utils/Utilities.scala
|
Scala
|
apache-2.0
| 366
|
package org.jetbrains.plugins.scala.lang.resolve
import org.jetbrains.plugins.scala.base.ScalaLightCodeInsightFixtureTestAdapter
import org.jetbrains.plugins.scala.project._
class PartialUnificationImplicitClassTest extends ScalaLightCodeInsightFixtureTestAdapter with SimpleResolveTestBase {
import SimpleResolveTestBase._
override def setUp(): Unit = {
super.setUp()
val profile = getModule.scalaCompilerSettingsProfile
val newSettings = profile.getSettings.copy(
additionalCompilerOptions = Seq("-Ypartial-unification")
)
profile.setSettings(newSettings)
}
def testSCL14548(): Unit = doResolveTest(
s"""
|implicit class FooOps[F[_], A](self: F[A]) {
| def f${REFTGT}oo: Int = 0
|}
|
|(null: Either[String, Int]).fo${REFSRC}o
""".stripMargin
)
}
|
JetBrains/intellij-scala
|
scala/scala-impl/test/org/jetbrains/plugins/scala/lang/resolve/PartialUnificationImplicitClassTest.scala
|
Scala
|
apache-2.0
| 833
|
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.parquet
import java.nio.file.Files
import java.time.Instant
import java.util.UUID
import com.vividsolutions.jts.geom.{Coordinate, Point}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.parquet.filter2.compat.FilterCompat
import org.apache.parquet.hadoop.ParquetReader
import org.apache.parquet.hadoop.metadata.CompressionCodecName
import org.geotools.geometry.jts.JTSFactoryFinder
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.fs.storage.common.jobs.StorageConfiguration
import org.locationtech.geomesa.parquet.jobs.SimpleFeatureReadSupport
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.simple.SimpleFeature
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class ListMapTest extends Specification {
sequential
//TODO test things other than lists of strings
"Parquet simple feature storage" should {
"do list stuff" >> {
val f = Files.createTempFile("geomesa", ".parquet")
val gf = JTSFactoryFinder.getGeometryFactory
val sft = SimpleFeatureTypes.createType("test", "foobar:List[String],dtg:Date,*geom:Point:srid=4326")
val sftConf = {
val c = new Configuration()
StorageConfiguration.setSft(c, sft)
c
}
"write" >> {
// Use GZIP in tests but snappy in prod due to license issues
val writer = SimpleFeatureParquetWriter.builder(new Path(f.toUri), sftConf)
.withCompressionCodec(CompressionCodecName.GZIP).build()
val d1 = java.util.Date.from(Instant.parse("2017-01-01T00:00:00Z"))
val d2 = java.util.Date.from(Instant.parse("2017-01-02T00:00:00Z"))
val d3 = java.util.Date.from(Instant.parse("2017-01-03T00:00:00Z"))
val sf = new ScalaSimpleFeature(sft, "1", Array(List("a", "b", "c"), d1, gf.createPoint(new Coordinate(25.236263, 27.436734))))
val sf2 = new ScalaSimpleFeature(sft, "2", Array(null, d2, gf.createPoint(new Coordinate(67.2363, 55.236))))
val sf3 = new ScalaSimpleFeature(sft, "3", Array(List.empty[String], d3, gf.createPoint(new Coordinate(73.0, 73.0))))
writer.write(sf)
writer.write(sf2)
writer.write(sf3)
writer.close()
Files.size(f) must be greaterThan 0
}
"read" >> {
val reader = ParquetReader.builder[SimpleFeature](new SimpleFeatureReadSupport, new Path(f.toUri))
.withFilter(FilterCompat.NOOP)
.withConf(sftConf)
.build()
import org.locationtech.geomesa.utils.geotools.Conversions._
import scala.collection.JavaConversions._
val sf = reader.read()
sf.getAttributeCount mustEqual 3
sf.getID must be equalTo "1"
sf.get[java.util.List[String]]("foobar").toList must containTheSameElementsAs(List("a", "b", "c"))
sf.getDefaultGeometry.asInstanceOf[Point].getX mustEqual 25.236263
sf.getDefaultGeometry.asInstanceOf[Point].getY mustEqual 27.436734
val sf2 = reader.read()
sf2.getAttributeCount mustEqual 3
sf2.getID must be equalTo "2"
sf2.get[java.util.List[String]]("foobar") must beNull
sf2.getDefaultGeometry.asInstanceOf[Point].getX mustEqual 67.2363
sf2.getDefaultGeometry.asInstanceOf[Point].getY mustEqual 55.236
val sf3 = reader.read()
sf3.getAttributeCount mustEqual 3
sf3.getID must be equalTo "3"
sf3.get[java.util.List[String]]("foobar").toList must beEmpty
sf3.getDefaultGeometry.asInstanceOf[Point].getX mustEqual 73.0
sf3.getDefaultGeometry.asInstanceOf[Point].getY mustEqual 73.0
}
step {
Files.deleteIfExists(f)
}
}
"do map stuff" >> {
val f = Files.createTempFile("geomesa", ".parquet")
val gf = JTSFactoryFinder.getGeometryFactory
val sft = SimpleFeatureTypes.createType("test", "foobar:Map[String,String],dtg:Date,*geom:Point:srid=4326")
val sftConf = {
val c = new Configuration()
StorageConfiguration.setSft(c, sft)
c
}
"write" >> {
// Use GZIP in tests but snappy in prod due to license issues
val writer = SimpleFeatureParquetWriter.builder(new Path(f.toUri), sftConf)
.withCompressionCodec(CompressionCodecName.GZIP).build()
val d1 = java.util.Date.from(Instant.parse("2017-01-01T00:00:00Z"))
val d2 = java.util.Date.from(Instant.parse("2017-01-02T00:00:00Z"))
val d3 = java.util.Date.from(Instant.parse("2017-01-03T00:00:00Z"))
val sf = new ScalaSimpleFeature(sft, "1", Array(Map("a" -> "1", "b" -> "2", "c" -> "3"), d1, gf.createPoint(new Coordinate(25.236263, 27.436734))))
val sf2 = new ScalaSimpleFeature(sft, "2", Array(null, d2, gf.createPoint(new Coordinate(67.2363, 55.236))))
val sf3 = new ScalaSimpleFeature(sft, "3", Array(Map.empty[String,String], d3, gf.createPoint(new Coordinate(73.0, 73.0))))
writer.write(sf)
writer.write(sf2)
writer.write(sf3)
writer.close()
Files.size(f) must be greaterThan 0
}
// TODO really need to test with more maps and values and stuff
"read" >> {
val reader = ParquetReader.builder[SimpleFeature](new SimpleFeatureReadSupport, new Path(f.toUri))
.withFilter(FilterCompat.NOOP)
.withConf(sftConf)
.build()
import org.locationtech.geomesa.utils.geotools.Conversions._
import scala.collection.JavaConversions._
val sf = reader.read()
sf.getAttributeCount mustEqual 3
sf.getID must be equalTo "1"
val m = sf.get[java.util.Map[String,String]]("foobar").toMap
m must containTheSameElementsAs(Seq("a" -> "1", "b" -> "2", "c" -> "3"))
sf.getDefaultGeometry.asInstanceOf[Point].getX mustEqual 25.236263
sf.getDefaultGeometry.asInstanceOf[Point].getY mustEqual 27.436734
val sf2 = reader.read()
sf2.getAttributeCount mustEqual 3
sf2.getID must be equalTo "2"
sf2.get[java.util.Map[String,String]]("foobar") must beNull
sf2.getDefaultGeometry.asInstanceOf[Point].getX mustEqual 67.2363
sf2.getDefaultGeometry.asInstanceOf[Point].getY mustEqual 55.236
val sf3 = reader.read()
sf3.getAttributeCount mustEqual 3
sf3.getID must be equalTo "3"
sf3.get[java.util.Map[String,String]]("foobar").toMap must beEmpty
sf3.getDefaultGeometry.asInstanceOf[Point].getX mustEqual 73.0
sf3.getDefaultGeometry.asInstanceOf[Point].getY mustEqual 73.0
}
step {
Files.deleteIfExists(f)
}
}
"do non string list map stuff" >> {
val f = Files.createTempFile("geomesa", ".parquet")
val gf = JTSFactoryFinder.getGeometryFactory
val sft = SimpleFeatureTypes.createType("test", "foo:List[UUID],bar:Map[Int,Double],dtg:Date,*geom:Point:srid=4326")
val sftConf = {
val c = new Configuration()
StorageConfiguration.setSft(c, sft)
c
}
"write" >> {
// Use GZIP in tests but snappy in prod due to license issues
val writer = SimpleFeatureParquetWriter.builder(new Path(f.toUri), sftConf)
.withCompressionCodec(CompressionCodecName.GZIP).build()
val d1 = java.util.Date.from(Instant.parse("2017-01-01T00:00:00Z"))
val d2 = java.util.Date.from(Instant.parse("2017-01-02T00:00:00Z"))
val d3 = java.util.Date.from(Instant.parse("2017-01-03T00:00:00Z"))
val u1 = UUID.fromString("00000000-0000-1111-0000-000000000000")
val u2 = UUID.fromString("00000000-0000-2222-0000-000000000000")
val u3 = UUID.fromString("00000000-0000-3333-0000-000000000000")
val sf = new ScalaSimpleFeature(sft, "1", Array(List(u1, u2),Map[Int, Double](1 -> 2.0, 3 -> 6.0), d1, gf.createPoint(new Coordinate(25.236263, 27.436734))))
val sf2 = new ScalaSimpleFeature(sft, "2", Array(null, null, d2, gf.createPoint(new Coordinate(67.2363, 55.236))))
val sf3 = new ScalaSimpleFeature(sft, "3", Array(List.empty[UUID],Map.empty[Int, Double], d3, gf.createPoint(new Coordinate(73.0, 73.0))))
writer.write(sf)
writer.write(sf2)
writer.write(sf3)
writer.close()
Files.size(f) must be greaterThan 0
}
// TODO really need to test with more maps and values and stuff
"read aruff" >> {
val reader = ParquetReader.builder[SimpleFeature](new SimpleFeatureReadSupport, new Path(f.toUri))
.withFilter(FilterCompat.NOOP)
.withConf(sftConf)
.build()
val u1 = "00000000-0000-1111-0000-000000000000"
val u2 = "00000000-0000-2222-0000-000000000000"
import org.locationtech.geomesa.utils.geotools.Conversions._
import scala.collection.JavaConversions._
val sf = reader.read()
sf.getAttributeCount mustEqual 4
sf.getID must be equalTo "1"
val u = sf.get[java.util.List[UUID]]("foo").toList.map(_.toString)
u must containTheSameElementsAs(Seq[String](u2, u1))
val m = sf.get[java.util.Map[Int, Double]]("bar").toMap
m must containTheSameElementsAs(Seq(1 -> 2.0, 3 -> 6.0))
sf.getDefaultGeometry.asInstanceOf[Point].getX mustEqual 25.236263
sf.getDefaultGeometry.asInstanceOf[Point].getY mustEqual 27.436734
val sf2 = reader.read()
sf2.getAttributeCount mustEqual 4
sf2.getID must be equalTo "2"
sf2.getAttribute("foo") must beNull
sf2.getAttribute("bar") must beNull
sf2.getDefaultGeometry.asInstanceOf[Point].getX mustEqual 67.2363
sf2.getDefaultGeometry.asInstanceOf[Point].getY mustEqual 55.236
val sf3 = reader.read()
sf3.getAttributeCount mustEqual 4
sf3.getID must be equalTo "3"
sf3.get[java.util.List[_]]("foo").toList must beEmpty
sf3.get[java.util.Map[_,_]]("bar").toMap must beEmpty
sf3.getDefaultGeometry.asInstanceOf[Point].getX mustEqual 73.0
sf3.getDefaultGeometry.asInstanceOf[Point].getY mustEqual 73.0
}
step {
Files.deleteIfExists(f)
}
}
}
}
|
ddseapy/geomesa
|
geomesa-fs/geomesa-fs-storage/geomesa-fs-storage-parquet/src/test/scala/org/locationtech/geomesa/parquet/ListMapTest.scala
|
Scala
|
apache-2.0
| 10,784
|
import com.lightbend.paradox.sbt.ParadoxPlugin
import com.lightbend.paradox.sbt.ParadoxPlugin.autoImport._
import com.typesafe.sbt.git.ConsoleGitRunner
import coursier.version.{Version, VersionParse}
import sbt.Keys._
import sbt._
import sjsonnew.support.scalajson.unsafe.Converter
import sjsonnew.support.scalajson.unsafe.PrettyPrinter
import sjsonnew.BasicJsonProtocol._
import java.nio.file.Files
object Docs extends AutoPlugin {
object autoImport {
val preprocessDocs = taskKey[File]("Prepare the documentation directory for Paradox")
    val checkScaladocLinks = taskKey[Unit]("Check the links in the generated Scaladoc")
val scaladocDirs = taskKey[Seq[(String, File)]]("Scaladoc directories to include with documentation")
val addDocsToDocRepo = taskKey[Boolean]("Pull doc repo and add generated documentation to it")
val deployDocs = taskKey[Unit]("Deploy docs to GitHub Pages")
val showParadoxProperties = taskKey[Unit]("Show a table of paradoxProperties")
}
import autoImport._
override def requires = ParadoxPlugin
def modifyFileLines(file: File)(f: String => String): Unit =
IO.writeLines(file, IO.readLines(file).map(f))
lazy val docRepoCheckoutDir = {
val dir = Files.createTempDirectory("slick-docs").toFile
dir.deleteOnExit()
dir
}
private def addDocsToDocRepoImpl(src: File, ver: String, log: Logger) = {
val dir = docRepoCheckoutDir
val repo = "git@github.com:slick/doc.git"
log.info(s"Cloning $repo into $dir")
if (dir.listFiles().isEmpty)
ConsoleGitRunner("clone", "--branch=gh-pages", "--depth=1", repo, ".")(dir, log)
else {
ConsoleGitRunner("reset", "--hard")(dir, log)
ConsoleGitRunner("clean", "-fd")(dir, log)
ConsoleGitRunner("pull")(dir, log)
}
val dest = dir / ver
val existed = dest.exists()
IO.delete(dest)
log.info("Copying docs")
IO.copyDirectory(src, dest)
val versionNumberParts = Version(ver).items.takeWhile(Version.isNumeric)
val versions =
IO.listFiles(dir)
.filter(_.isDirectory)
.flatMap(f => VersionParse.version(f.getName))
.toSeq
.filter { v =>
val (numberParts, otherParts) = v.items.span(Version.isNumeric)
otherParts.isEmpty || numberParts == versionNumberParts
}
.sorted
IO.write(
dir / "versions.json",
PrettyPrinter(Converter.toJson(versions.map(_.repr)).get)
)
existed
}
override def projectSettings = Seq(
homepage := None,
paradoxTheme := Some(builtinParadoxTheme("generic")),
Compile / paradoxProperties ++= {
val scaladocBaseUrl = s"https://scala-slick.org/doc/${version.value}"
val ref = Versioning.currentRef(baseDirectory.value)
Map(
"scaladoc.scala.base_url" -> s"https://www.scala-lang.org/api/${scalaVersion.value}",
"scaladoc.slick.base_url" -> s"$scaladocBaseUrl/api",
"scaladoc.slick.codegen.base_url" -> s"$scaladocBaseUrl/codegen-api",
"scaladoc.slick.jdbc.hikaricp.base_url" -> s"$scaladocBaseUrl/hikaricp-api",
"scaladoc.com.typesafe.slick.testkit.base_url" -> s"$scaladocBaseUrl/testkit-api",
"javadoc.javax.sql.base_url" -> "https://docs.oracle.com/javase/8/docs/api/",
"github.base_url" -> (scmInfo.value.get.browseUrl.toString + "/blob/main"),
"extref.SI.base_url" -> "https://issues.scala-lang.org/browse/SI-%s",
"extref.about-pool-sizing.base_url" -> "https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing",
"extref.activator.base_url" -> "https://typesafe.com/activator",
"extref.akka-sphinx.base_url" -> "https://doc.akka.io/docs/akka/2.4.0/dev/documentation.html",
"extref.akka-streams.base_url" -> "https://akka.io/docs/",
"extref.akka.base_url" -> "https://akka.io/",
"extref.db2.base_url" -> "https://www.ibm.com/analytics/db2",
"extref.derby.base_url" -> "https://db.apache.org/derby/",
"extref.h2.base_url" -> "https://www.h2database.com/",
"extref.hikaricp-monitoring.base_url" ->
"https://github.com/brettwooldridge/HikariCP/wiki/MBean-(JMX)-Monitoring-and-Management",
"extref.hikaricp.base_url" -> "https://github.com/brettwooldridge/HikariCP",
"extref.hsqldb.base_url" -> "http://hsqldb.org/",
"extref.javaapi.base_url" -> "https://docs.oracle.com/javase/7/docs/api/%s.html",
"extref.javadb.base_url" -> "https://www.oracle.com/java/technologies/javadb.html",
"extref.jdbc.base_url" -> "https://en.wikipedia.org/wiki/Java_Database_Connectivity",
"extref.jmx.base_url" -> "https://en.wikipedia.org/wiki/Java_Management_Extensions",
"extref.jpa.base_url" -> "https://en.wikipedia.org/wiki/Java_Persistence_API",
"extref.lightbend.base_url" -> "https://www.lightbend.com/",
"extref.logback.base_url" -> "https://logback.qos.ch/",
"extref.mysql.base_url" -> "https://www.mysql.com/",
"extref.oracle.base_url" -> "https://www.oracle.com/database/",
"extref.play.base_url" -> "https://www.playframework.com/",
"extref.postgresql.base_url" -> "https://www.postgresql.org/",
"extref.reactive-manifesto.base_url" -> "https://www.reactivemanifesto.org/",
"extref.reactive-streams.base_url" -> "https://www.reactive-streams.org/",
"extref.samplerepo.base_url" -> s"https://github.com/slick/slick/tree/$ref/samples/%s",
"extref.sbt.base_url" -> "https://www.scala-sbt.org/",
"extref.scala-futures.base_url" -> "https://docs.scala-lang.org/overviews/core/futures.html",
"extref.scalaquery.base_url" -> "http://scalaquery.org",
"extref.slf4j.base_url" -> "https://www.slf4j.org/",
"extref.slick-manuals.base_url" -> "https://scala-slick.org/docs/",
"extref.slick.base_url" -> s"https://github.com/slick/slick/blob/$ref/%s",
"extref.sql-server.base_url" -> "https://www.microsoft.com/en-us/sql-server",
"extref.sqlite.base_url" -> "https://www.sqlite.org/index.html",
"extref.typesafe-config.base_url" -> "https://github.com/lightbend/config",
"extref.wikipedia.base_url" -> "https://en.wikipedia.org/wiki/"
)
},
sourceDirectory := baseDirectory.value / "paradox",
Compile / paradoxTheme / sourceDirectory := baseDirectory.value / "template",
preprocessDocs / target := target.value / "preprocessed",
watchSources += sourceDirectory.value,
watchSources := watchSources.value.filterNot(_.base == (preprocessDocs / target).value),
preprocessDocs := {
val out = (preprocessDocs / target).value
val log = streams.value.log
IO.copyDirectory(sourceDirectory.value, out)
IO.copyDirectory(baseDirectory.value / "code", target.value / "code")
for ((name, dir) <- scaladocDirs.value) {
val dest = out / name
log.info(s"Copying $dir to $dest")
IO.copyDirectory(dir, dest, overwrite = true, preserveLastModified = true)
(dest ** "*.html").get().foreach { file =>
modifyFileLines(file) { line =>
line.replaceAll(
"(https://github.com/slick/slick/blob/[^\\"]*)/" +
"(Users|home)/" +
"[^\\"]*/slick/target/scala-[^\\"]*/src_managed/main/" +
"([^\\"]*)\\\\.scala",
"""$1/scala/$3.fm"""
)
}
}
}
out
},
Compile / paradox / unmanagedSourceDirectories := Seq((preprocessDocs / target).value),
Compile / paradox := (Compile / paradox).dependsOn(preprocessDocs).value,
Compile / paradox := {
val outDir = (Compile / paradox).value
val files = IO.listFiles(outDir, globFilter("*.html"))
val ref = Versioning.currentRef(baseDirectory.value)
for (f <- files)
modifyFileLines(f) { line =>
line
.replaceAllLiterally(
"https://github.com/slick/slick/tree/master/doc/target/preprocessed/",
s"https://github.com/slick/slick/tree/$ref/doc/paradox/"
)
.replaceAllLiterally(
"https://github.com/slick/slick/tree/master/doc/target/code/",
s"https://github.com/slick/slick/tree/$ref/doc/code/"
)
}
outDir
},
checkScaladocLinks := {
for ((name, dir) <- scaladocDirs.value)
new ReusableSbtChecker(dir.toString, (Compile / paradox).value.toString, name, streams.value.log)
.run()
},
addDocsToDocRepo := {
val dir = (Compile / paradox).value
addDocsToDocRepoImpl(dir, version.value, streams.value.log)
},
deployDocs := {
checkScaladocLinks.value
val log = streams.value.log
val dir = docRepoCheckoutDir
val existed = addDocsToDocRepo.value
log.info("Pushing changes")
val commitMessage = (if (existed) "Updated" else "Added") + " docs for version " + version.value
ConsoleGitRunner.commitAndPush(commitMessage)(dir, log)
},
showParadoxProperties := {
val props = (Compile / paradoxProperties).value
val colWidth = props.keys.map(_.length).max
for ((k, v) <- props.toSeq.sorted)
streams.value.log.info(s"%-${colWidth}s %s".format(k, v))
}
)
}
|
slick/slick
|
project/Docs.scala
|
Scala
|
bsd-2-clause
| 9,277
|
package mesosphere.marathon
package api.v2
import mesosphere.UnitTest
import mesosphere.marathon.api.TestAuthFixture
import mesosphere.marathon.core.base.{ Clock, ConstantClock }
import mesosphere.marathon.core.launcher.OfferMatchResult
import mesosphere.marathon.core.launchqueue.LaunchQueue
import mesosphere.marathon.core.launchqueue.LaunchQueue.{ QueuedInstanceInfo, QueuedInstanceInfoWithStatistics }
import mesosphere.marathon.raml.{ App, Raml }
import mesosphere.marathon.state.AppDefinition
import mesosphere.marathon.state.PathId._
import mesosphere.marathon.stream.Implicits._
import mesosphere.marathon.test.MarathonTestHelper
import mesosphere.mesos.NoOfferMatchReason
import play.api.libs.json._
import scala.collection.immutable.Seq
import scala.concurrent.duration._
class QueueResourceTest extends UnitTest {
case class Fixture(
clock: Clock = ConstantClock(),
config: MarathonConf = mock[MarathonConf],
auth: TestAuthFixture = new TestAuthFixture,
queue: LaunchQueue = mock[LaunchQueue]) {
val queueResource: QueueResource = new QueueResource(
clock,
queue,
auth.auth,
auth.auth,
config
)
}
implicit val appDefReader: Reads[AppDefinition] = Reads { js =>
val ramlApp = js.as[App]
val appDef: AppDefinition = Raml.fromRaml(ramlApp)
// assume that any json we generate is canonical and valid
JsSuccess(appDef)
}
"QueueResource" should {
"return well formatted JSON" in new Fixture {
//given
val app = AppDefinition(id = "app".toRootPath, acceptedResourceRoles = Set("*"))
val noMatch = OfferMatchResult.NoMatch(app, MarathonTestHelper.makeBasicOffer().build(), Seq(NoOfferMatchReason.InsufficientCpus), clock.now())
queue.listWithStatistics returns Seq(
QueuedInstanceInfoWithStatistics(
app, inProgress = true, instancesLeftToLaunch = 23, finalInstanceCount = 23,
backOffUntil = clock.now() + 100.seconds, startedAt = clock.now(),
rejectSummaryLastOffers = Map(NoOfferMatchReason.InsufficientCpus -> 1),
rejectSummaryLaunchAttempt = Map(NoOfferMatchReason.InsufficientCpus -> 3), processedOffersCount = 3, unusedOffersCount = 1,
lastMatch = None, lastNoMatch = None, lastNoMatches = Seq(noMatch)
)
)
//when
val response = queueResource.index(auth.request, Set("lastUnusedOffers"))
//then
response.getStatus should be(200)
val json = Json.parse(response.getEntity.asInstanceOf[String])
      val queuedApps = (json \ "queue").as[Seq[JsObject]]
      val jsonApp1 = queuedApps.find { apps => (apps \ "app" \ "id").as[String] == "/app" }.get
      (jsonApp1 \ "app").as[AppDefinition] should be(app)
      (jsonApp1 \ "count").as[Int] should be(23)
      (jsonApp1 \ "delay" \ "overdue").as[Boolean] should be(false)
      (jsonApp1 \ "delay" \ "timeLeftSeconds").as[Int] should be(100) //the deadline holds the current time...
      (jsonApp1 \ "processedOffersSummary" \ "processedOffersCount").as[Int] should be(3)
      (jsonApp1 \ "processedOffersSummary" \ "unusedOffersCount").as[Int] should be(1)
      (jsonApp1 \ "processedOffersSummary" \ "rejectSummaryLaunchAttempt" \ 3 \ "declined").as[Int] should be(3)
      val offer = (jsonApp1 \ "lastUnusedOffers").as[JsArray].value.head \ "offer"
      (offer \ "agentId").as[String] should be(noMatch.offer.getSlaveId.getValue)
      (offer \ "hostname").as[String] should be(noMatch.offer.getHostname)
      val resource = (offer \ "resources").as[JsArray].value.head
      (resource \ "name").as[String] should be("cpus")
      (resource \ "scalar").as[Int] should be(4)
      (resource \ "set") shouldBe a[JsUndefined]
      (resource \ "ranges") shouldBe a[JsUndefined]
}
"the generated info from the queue contains 0 if there is no delay" in new Fixture {
//given
val app = AppDefinition(id = "app".toRootPath)
queue.listWithStatistics returns Seq(
QueuedInstanceInfoWithStatistics(
app, inProgress = true, instancesLeftToLaunch = 23, finalInstanceCount = 23,
backOffUntil = clock.now() - 100.seconds, startedAt = clock.now(), rejectSummaryLastOffers = Map.empty,
rejectSummaryLaunchAttempt = Map.empty, processedOffersCount = 3, unusedOffersCount = 1, lastMatch = None,
lastNoMatch = None, lastNoMatches = Seq.empty
)
)
//when
val response = queueResource.index(auth.request, Set.empty[String])
//then
response.getStatus should be(200)
val json = Json.parse(response.getEntity.asInstanceOf[String])
      val queuedApps = (json \ "queue").as[Seq[JsObject]]
      val jsonApp1 = queuedApps.find { apps => (apps \ "app" \ "id").get == JsString("/app") }.get
      (jsonApp1 \ "app").as[AppDefinition] should be(app)
      (jsonApp1 \ "count").as[Int] should be(23)
      (jsonApp1 \ "delay" \ "overdue").as[Boolean] should be(true)
      (jsonApp1 \ "delay" \ "timeLeftSeconds").as[Int] should be(0)
}
"unknown application backoff can not be removed from the launch queue" in new Fixture {
//given
queue.list returns Seq.empty
//when
val response = queueResource.resetDelay("unknown", auth.request)
//then
response.getStatus should be(404)
}
"application backoff can be removed from the launch queue" in new Fixture {
//given
val app = AppDefinition(id = "app".toRootPath)
queue.list returns Seq(
QueuedInstanceInfo(
app, inProgress = true, instancesLeftToLaunch = 23, finalInstanceCount = 23,
backOffUntil = clock.now() + 100.seconds, startedAt = clock.now()
)
)
//when
val response = queueResource.resetDelay("app", auth.request)
//then
response.getStatus should be(204)
verify(queue, times(1)).resetDelay(app)
}
"access without authentication is denied" in new Fixture {
Given("An unauthenticated request")
auth.authenticated = false
val req = auth.request
When("the index is fetched")
val index = queueResource.index(req, Set.empty[String])
Then("we receive a NotAuthenticated response")
index.getStatus should be(auth.NotAuthenticatedStatus)
When("one delay is reset")
val resetDelay = queueResource.resetDelay("appId", req)
Then("we receive a NotAuthenticated response")
resetDelay.getStatus should be(auth.NotAuthenticatedStatus)
}
"access without authorization is denied if the app is in the queue" in new Fixture {
Given("An unauthorized request")
auth.authenticated = true
auth.authorized = false
val req = auth.request
When("one delay is reset")
val appId = "appId".toRootPath
val taskCount = LaunchQueue.QueuedInstanceInfo(AppDefinition(appId), inProgress = false, 0, 0,
backOffUntil = clock.now() + 100.seconds, startedAt = clock.now())
queue.list returns Seq(taskCount)
val resetDelay = queueResource.resetDelay("appId", req)
Then("we receive a not authorized response")
resetDelay.getStatus should be(auth.UnauthorizedStatus)
}
"access without authorization leads to a 404 if the app is not in the queue" in new Fixture {
Given("An unauthorized request")
auth.authenticated = true
auth.authorized = false
val req = auth.request
When("one delay is reset")
queue.list returns Seq.empty
val resetDelay = queueResource.resetDelay("appId", req)
Then("we receive a not authorized response")
resetDelay.getStatus should be(404)
}
}
}
|
natemurthy/marathon
|
src/test/scala/mesosphere/marathon/api/v2/QueueResourceTest.scala
|
Scala
|
apache-2.0
| 7,641
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.optimize
import org.apache.flink.table.api.TableConfig
import org.apache.flink.table.api.config.ExecutionConfigOptions
import org.apache.flink.table.catalog.FunctionCatalog
import org.apache.flink.table.planner.delegation.StreamPlanner
import org.apache.flink.table.planner.plan.`trait`.{AccMode, AccModeTraitDef, MiniBatchInterval, MiniBatchIntervalTrait, MiniBatchIntervalTraitDef, MiniBatchMode, UpdateAsRetractionTraitDef}
import org.apache.flink.table.planner.plan.metadata.FlinkRelMetadataQuery
import org.apache.flink.table.planner.plan.nodes.calcite.Sink
import org.apache.flink.table.planner.plan.nodes.physical.stream.{StreamExecDataStreamScan, StreamExecIntermediateTableScan, StreamPhysicalRel}
import org.apache.flink.table.planner.plan.optimize.program.{FlinkStreamProgram, StreamOptimizeContext}
import org.apache.flink.table.planner.plan.schema.IntermediateRelTable
import org.apache.flink.table.planner.plan.stats.FlinkStatistic
import org.apache.flink.table.planner.plan.utils.FlinkRelOptUtil
import org.apache.flink.table.planner.sinks.DataStreamTableSink
import org.apache.flink.table.planner.utils.TableConfigUtils
import org.apache.flink.table.planner.utils.TableConfigUtils.getMillisecondFromConfigDuration
import org.apache.flink.table.sinks.RetractStreamTableSink
import org.apache.flink.util.Preconditions
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.core.TableScan
import org.apache.calcite.rex.RexBuilder
import java.util
import scala.collection.JavaConversions._
/**
* A [[CommonSubGraphBasedOptimizer]] for Stream.
*/
class StreamCommonSubGraphBasedOptimizer(planner: StreamPlanner)
extends CommonSubGraphBasedOptimizer {
override protected def doOptimize(roots: Seq[RelNode]): Seq[RelNodeBlock] = {
val config = planner.getTableConfig
// build RelNodeBlock plan
val sinkBlocks = RelNodeBlockPlanBuilder.buildRelNodeBlockPlan(roots, config)
// infer updateAsRetraction property for sink block
sinkBlocks.foreach { sinkBlock =>
val retractionFromRoot = sinkBlock.outputNode match {
case n: Sink =>
n.sink match {
case _: RetractStreamTableSink[_] => true
case s: DataStreamTableSink[_] => s.updatesAsRetraction
case _ => false
}
case o =>
o.getTraitSet.getTrait(UpdateAsRetractionTraitDef.INSTANCE).sendsUpdatesAsRetractions
}
sinkBlock.setUpdateAsRetraction(retractionFromRoot)
val miniBatchInterval: MiniBatchInterval = if (config.getConfiguration.getBoolean(
ExecutionConfigOptions.TABLE_EXEC_MINIBATCH_ENABLED)) {
val miniBatchLatency = getMillisecondFromConfigDuration(config,
ExecutionConfigOptions.TABLE_EXEC_MINIBATCH_ALLOW_LATENCY)
Preconditions.checkArgument(miniBatchLatency > 0,
"MiniBatch Latency must be greater than 0 ms.", null)
MiniBatchInterval(miniBatchLatency, MiniBatchMode.ProcTime)
} else {
MiniBatchIntervalTrait.NONE.getMiniBatchInterval
}
sinkBlock.setMiniBatchInterval(miniBatchInterval)
}
if (sinkBlocks.size == 1) {
// If there is only one sink block, the given relational expressions are a simple tree
// (only one root), not a dag. So many operations (e.g. `infer updateAsRetraction property`,
// `propagate updateAsRetraction property`) can be omitted to save optimization time.
val block = sinkBlocks.head
val optimizedTree = optimizeTree(
block.getPlan,
block.isUpdateAsRetraction,
block.getMiniBatchInterval,
isSinkBlock = true)
block.setOptimizedPlan(optimizedTree)
return sinkBlocks
}
// infer updateAsRetraction property and miniBatchInterval property for all input blocks
sinkBlocks.foreach(b => inferTraits(
b, b.isUpdateAsRetraction, b.getMiniBatchInterval, isSinkBlock = true))
// propagate updateAsRetraction property and miniBatchInterval property to all input blocks
sinkBlocks.foreach(propagateTraits(_, isSinkBlock = true))
// clear the intermediate result
sinkBlocks.foreach(resetIntermediateResult)
// optimize recursively RelNodeBlock
sinkBlocks.foreach(b => optimizeBlock(b, isSinkBlock = true))
sinkBlocks
}
private def optimizeBlock(block: RelNodeBlock, isSinkBlock: Boolean): Unit = {
block.children.foreach {
child =>
if (child.getNewOutputNode.isEmpty) {
optimizeBlock(child, isSinkBlock = false)
}
}
val blockLogicalPlan = block.getPlan
blockLogicalPlan match {
case s: Sink =>
require(isSinkBlock)
val optimizedTree = optimizeTree(
s,
updatesAsRetraction = block.isUpdateAsRetraction,
miniBatchInterval = block.getMiniBatchInterval,
isSinkBlock = true)
block.setOptimizedPlan(optimizedTree)
case o =>
val optimizedPlan = optimizeTree(
o,
updatesAsRetraction = block.isUpdateAsRetraction,
miniBatchInterval = block.getMiniBatchInterval,
isSinkBlock = isSinkBlock)
val isAccRetract = optimizedPlan.getTraitSet
.getTrait(AccModeTraitDef.INSTANCE).getAccMode == AccMode.AccRetract
val name = createUniqueIntermediateRelTableName
val intermediateRelTable = createIntermediateRelTable(optimizedPlan, isAccRetract)
val newTableScan = wrapIntermediateRelTableToTableScan(intermediateRelTable, name)
block.setNewOutputNode(newTableScan)
block.setOutputTableName(name)
block.setOptimizedPlan(optimizedPlan)
}
}
/**
* Generates the optimized [[RelNode]] tree from the original relational node tree.
*
* @param relNode The root node of the relational expression tree.
    * @param updatesAsRetraction True if updates are requested as retraction messages.
* @param miniBatchInterval mini-batch interval of the block.
* @param isSinkBlock True if the given block is sink block.
* @return The optimized [[RelNode]] tree
*/
private def optimizeTree(
relNode: RelNode,
updatesAsRetraction: Boolean,
miniBatchInterval: MiniBatchInterval,
isSinkBlock: Boolean): RelNode = {
val config = planner.getTableConfig
val calciteConfig = TableConfigUtils.getCalciteConfig(config)
val programs = calciteConfig.getStreamProgram
.getOrElse(FlinkStreamProgram.buildProgram(config.getConfiguration))
Preconditions.checkNotNull(programs)
programs.optimize(relNode, new StreamOptimizeContext() {
override def getTableConfig: TableConfig = config
override def getFunctionCatalog: FunctionCatalog = planner.functionCatalog
override def getRexBuilder: RexBuilder = planner.getRelBuilder.getRexBuilder
override def updateAsRetraction: Boolean = updatesAsRetraction
def getMiniBatchInterval: MiniBatchInterval = miniBatchInterval
override def needFinalTimeIndicatorConversion: Boolean = true
})
}
/**
* Infer UpdateAsRetraction property and MiniBatchInterval property for each block.
* NOTES: this method should not change the original RelNode tree.
*
* @param block The [[RelNodeBlock]] instance.
    * @param retractionFromRoot Whether the sink needs updates as retraction messages.
* @param miniBatchInterval mini-batch interval of the block.
* @param isSinkBlock True if the given block is sink block.
*/
private def inferTraits(
block: RelNodeBlock,
retractionFromRoot: Boolean,
miniBatchInterval: MiniBatchInterval,
isSinkBlock: Boolean): Unit = {
block.children.foreach {
child =>
if (child.getNewOutputNode.isEmpty) {
inferTraits(
child,
retractionFromRoot = false,
miniBatchInterval = MiniBatchInterval.NONE,
isSinkBlock = false)
}
}
val blockLogicalPlan = block.getPlan
blockLogicalPlan match {
case n: Sink =>
require(isSinkBlock)
val optimizedPlan = optimizeTree(
n, retractionFromRoot, miniBatchInterval, isSinkBlock = true)
block.setOptimizedPlan(optimizedPlan)
case o =>
val optimizedPlan = optimizeTree(
o, retractionFromRoot, miniBatchInterval, isSinkBlock = isSinkBlock)
val name = createUniqueIntermediateRelTableName
val intermediateRelTable = createIntermediateRelTable(optimizedPlan, isAccRetract = false)
val newTableScan = wrapIntermediateRelTableToTableScan(intermediateRelTable, name)
block.setNewOutputNode(newTableScan)
block.setOutputTableName(name)
block.setOptimizedPlan(optimizedPlan)
}
}
/**
* Propagate updateAsRetraction property and miniBatchInterval property to all input blocks.
*
* @param block The [[RelNodeBlock]] instance.
* @param isSinkBlock True if the given block is sink block.
*/
private def propagateTraits(block: RelNodeBlock, isSinkBlock: Boolean): Unit = {
// process current block
def shipTraits(
rel: RelNode,
updateAsRetraction: Boolean,
miniBatchInterval: MiniBatchInterval): Unit = {
rel match {
case _: StreamExecDataStreamScan | _: StreamExecIntermediateTableScan =>
val scan = rel.asInstanceOf[TableScan]
val retractionTrait = scan.getTraitSet.getTrait(UpdateAsRetractionTraitDef.INSTANCE)
val miniBatchIntervalTrait = scan.getTraitSet.getTrait(MiniBatchIntervalTraitDef.INSTANCE)
val tableName = scan.getTable.getQualifiedName.mkString(".")
val inputBlocks = block.children.filter(b => tableName.equals(b.getOutputTableName))
Preconditions.checkArgument(inputBlocks.size <= 1)
if (inputBlocks.size == 1) {
val mergedInterval = if (isSinkBlock) {
// traits of sinkBlock have already been
// initialized before first round of optimization.
miniBatchIntervalTrait.getMiniBatchInterval
} else {
FlinkRelOptUtil.mergeMiniBatchInterval(
miniBatchIntervalTrait.getMiniBatchInterval, miniBatchInterval)
}
val newInterval = FlinkRelOptUtil.mergeMiniBatchInterval(
            inputBlocks.head.getMiniBatchInterval, mergedInterval)
inputBlocks.head.setMiniBatchInterval(newInterval)
if (retractionTrait.sendsUpdatesAsRetractions || updateAsRetraction) {
inputBlocks.head.setUpdateAsRetraction(true)
}
}
case ser: StreamPhysicalRel => ser.getInputs.foreach { e =>
if (ser.needsUpdatesAsRetraction(e) || (updateAsRetraction && !ser.consumesRetractions)) {
shipTraits(e, updateAsRetraction = true, miniBatchInterval)
} else {
shipTraits(e, updateAsRetraction = false, miniBatchInterval)
}
}
}
}
shipTraits(block.getOptimizedPlan, block.isUpdateAsRetraction, block.getMiniBatchInterval)
block.children.foreach(propagateTraits(_, isSinkBlock = false))
}
/**
* Reset the intermediate result including newOutputNode and outputTableName
*
* @param block the [[RelNodeBlock]] instance.
*/
private def resetIntermediateResult(block: RelNodeBlock): Unit = {
block.setNewOutputNode(null)
block.setOutputTableName(null)
block.children.foreach {
child =>
if (child.getNewOutputNode.nonEmpty) {
resetIntermediateResult(child)
}
}
}
private def createIntermediateRelTable(
relNode: RelNode,
isAccRetract: Boolean): IntermediateRelTable = {
val uniqueKeys = getUniqueKeys(relNode)
val monotonicity = FlinkRelMetadataQuery
.reuseOrCreate(planner.getRelBuilder.getCluster.getMetadataQuery)
.getRelModifiedMonotonicity(relNode)
val statistic = FlinkStatistic.builder()
.uniqueKeys(uniqueKeys)
.relModifiedMonotonicity(monotonicity)
.build()
new IntermediateRelTable(relNode, isAccRetract, statistic)
}
private def getUniqueKeys(relNode: RelNode): util.Set[_ <: util.Set[String]] = {
val rowType = relNode.getRowType
val fmq = FlinkRelMetadataQuery.reuseOrCreate(planner.getRelBuilder.getCluster.getMetadataQuery)
val uniqueKeys = fmq.getUniqueKeys(relNode)
if (uniqueKeys != null) {
uniqueKeys.filter(_.nonEmpty).map { uniqueKey =>
val keys = new util.HashSet[String]()
uniqueKey.asList().foreach { idx =>
keys.add(rowType.getFieldNames.get(idx))
}
keys
}
} else {
null
}
}
}
|
fhueske/flink
|
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/optimize/StreamCommonSubGraphBasedOptimizer.scala
|
Scala
|
apache-2.0
| 13,490
|
/**
* Swaggy Jenkins
* Jenkins API clients generated from Swagger / Open API specification
*
* The version of the OpenAPI document: 1.1.2-pre.0
* Contact: blah@cliffano.com
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
package org.openapitools.client.model
import org.openapitools.client.core.ApiModel
case class ComputerSet (
`class`: Option[String] = None,
busyExecutors: Option[Int] = None,
computer: Option[Seq[HudsonMasterComputer]] = None,
displayName: Option[String] = None,
totalExecutors: Option[Int] = None
) extends ApiModel
|
cliffano/swaggy-jenkins
|
clients/scala-akka/generated/src/main/scala/org/openapitools/client/model/ComputerSet.scala
|
Scala
|
mit
| 677
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.errors.attachTree
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.execution.{SparkPlan, UnaryNode}
import org.apache.spark.sql.hive.{CarbonMetastoreCatalog, CarbonMetastoreTypes}
import org.apache.spark.sql.optimizer.{CarbonAliasDecoderRelation, CarbonDecoderRelation}
import org.apache.spark.sql.types._
import org.apache.carbondata.core.cache.{Cache, CacheProvider, CacheType}
import org.apache.carbondata.core.cache.dictionary.{Dictionary, DictionaryColumnUniqueIdentifier}
import org.apache.carbondata.core.carbon.{AbsoluteTableIdentifier, ColumnIdentifier}
import org.apache.carbondata.core.carbon.metadata.datatype.DataType
import org.apache.carbondata.core.carbon.metadata.encoder.Encoding
import org.apache.carbondata.core.carbon.metadata.schema.table.column.CarbonDimension
import org.apache.carbondata.core.carbon.querystatistics._
import org.apache.carbondata.core.util.{CarbonTimeStatisticsFactory, DataTypeUtil}
/**
 * Decodes dictionary-encoded dimension values in each row back into their original data types.
*
*/
case class CarbonDictionaryDecoder(
relations: Seq[CarbonDecoderRelation],
profile: CarbonProfile,
aliasMap: CarbonAliasDecoderRelation,
child: SparkPlan)
(@transient sqlContext: SQLContext)
extends UnaryNode {
override def otherCopyArgs: Seq[AnyRef] = sqlContext :: Nil
override val output: Seq[Attribute] = {
child.output.map { a =>
val attr = aliasMap.getOrElse(a, a)
val relation = relations.find(p => p.contains(attr))
if(relation.isDefined && canBeDecoded(attr)) {
val carbonTable = relation.get.carbonRelation.carbonRelation.metaData.carbonTable
val carbonDimension = carbonTable
.getDimensionByName(carbonTable.getFactTableName, attr.name)
if (carbonDimension != null &&
carbonDimension.hasEncoding(Encoding.DICTIONARY) &&
!carbonDimension.hasEncoding(Encoding.DIRECT_DICTIONARY)) {
val newAttr = AttributeReference(a.name,
convertCarbonToSparkDataType(carbonDimension,
relation.get.carbonRelation.carbonRelation),
a.nullable,
a.metadata)(a.exprId,
a.qualifiers).asInstanceOf[Attribute]
newAttr
} else {
a
}
} else {
a
}
}
}
def canBeDecoded(attr: Attribute): Boolean = {
profile match {
case ip: IncludeProfile if ip.attributes.nonEmpty =>
ip.attributes
.exists(a => a.name.equalsIgnoreCase(attr.name))
case ep: ExcludeProfile =>
!ep.attributes
.exists(a => a.name.equalsIgnoreCase(attr.name))
case _ => true
}
}
def convertCarbonToSparkDataType(carbonDimension: CarbonDimension,
relation: CarbonRelation): types.DataType = {
carbonDimension.getDataType match {
case DataType.STRING => StringType
case DataType.SHORT => ShortType
case DataType.INT => IntegerType
case DataType.LONG => LongType
case DataType.DOUBLE => DoubleType
case DataType.BOOLEAN => BooleanType
case DataType.DECIMAL =>
val scale: Int = carbonDimension.getColumnSchema.getScale
val precision: Int = carbonDimension.getColumnSchema.getPrecision
if (scale == 0 && precision == 0) {
DecimalType(18, 2)
} else {
DecimalType(precision, scale)
}
case DataType.TIMESTAMP => TimestampType
case DataType.STRUCT =>
CarbonMetastoreTypes
.toDataType(s"struct<${ relation.getStructChildren(carbonDimension.getColName) }>")
case DataType.ARRAY =>
CarbonMetastoreTypes
.toDataType(s"array<${ relation.getArrayChildren(carbonDimension.getColName) }>")
}
}
val getDictionaryColumnIds = {
val attributes = child.output
val dictIds: Array[(String, ColumnIdentifier, DataType)] = attributes.map { a =>
val attr = aliasMap.getOrElse(a, a)
val relation = relations.find(p => p.contains(attr))
if(relation.isDefined && canBeDecoded(attr)) {
val carbonTable = relation.get.carbonRelation.carbonRelation.metaData.carbonTable
val carbonDimension =
carbonTable.getDimensionByName(carbonTable.getFactTableName, attr.name)
if (carbonDimension != null &&
carbonDimension.hasEncoding(Encoding.DICTIONARY) &&
!carbonDimension.hasEncoding(Encoding.DIRECT_DICTIONARY)) {
(carbonTable.getFactTableName, carbonDimension.getColumnIdentifier,
carbonDimension.getDataType)
} else {
(null, null, null)
}
} else {
(null, null, null)
}
}.toArray
dictIds
}
override def outputsUnsafeRows: Boolean = true
override def canProcessUnsafeRows: Boolean = true
override def canProcessSafeRows: Boolean = true
override def doExecute(): RDD[InternalRow] = {
attachTree(this, "execute") {
val storePath = sqlContext.catalog.asInstanceOf[CarbonMetastoreCatalog].storePath
val queryId = sqlContext.getConf("queryId", System.nanoTime() + "")
val absoluteTableIdentifiers = relations.map { relation =>
val carbonTable = relation.carbonRelation.carbonRelation.metaData.carbonTable
(carbonTable.getFactTableName, carbonTable.getAbsoluteTableIdentifier)
}.toMap
      val recorder = CarbonTimeStatisticsFactory.createExecutorRecorder(queryId)
if (isRequiredToDecode) {
val dataTypes = child.output.map { attr => attr.dataType }
child.execute().mapPartitions { iter =>
val cacheProvider: CacheProvider = CacheProvider.getInstance
val forwardDictionaryCache: Cache[DictionaryColumnUniqueIdentifier, Dictionary] =
cacheProvider.createCache(CacheType.FORWARD_DICTIONARY, storePath)
val dicts: Seq[Dictionary] = getDictionary(absoluteTableIdentifiers,
forwardDictionaryCache)
val dictIndex = dicts.zipWithIndex.filter(x => x._1 != null).map(x => x._2)
new Iterator[InternalRow] {
val unsafeProjection = UnsafeProjection.create(output.map(_.dataType).toArray)
var flag = true
var total = 0L
override final def hasNext: Boolean = {
flag = iter.hasNext
            if (!flag && total > 0) {
val queryStatistic = new QueryStatistic()
queryStatistic
.addFixedTimeStatistic(QueryStatisticsConstants.PREPARE_RESULT, total)
recorder.recordStatistics(queryStatistic)
recorder.logStatistics()
}
flag
}
override final def next(): InternalRow = {
val startTime = System.currentTimeMillis()
val row: InternalRow = iter.next()
val data = row.toSeq(dataTypes).toArray
dictIndex.foreach { index =>
if (data(index) != null) {
data(index) = DataTypeUtil.getDataBasedOnDataType(dicts(index)
.getDictionaryValueForKey(data(index).asInstanceOf[Int]),
getDictionaryColumnIds(index)._3)
}
}
val result = unsafeProjection(new GenericMutableRow(data))
total += System.currentTimeMillis() - startTime
result
}
}
}
} else {
child.execute()
}
}
}
  private def isRequiredToDecode =
    getDictionaryColumnIds.exists(p => p._1 != null)
private def getDictionary(atiMap: Map[String, AbsoluteTableIdentifier],
cache: Cache[DictionaryColumnUniqueIdentifier, Dictionary]) = {
val dicts: Seq[Dictionary] = getDictionaryColumnIds.map { f =>
if (f._2 != null) {
try {
cache.get(new DictionaryColumnUniqueIdentifier(
atiMap.get(f._1).get.getCarbonTableIdentifier,
f._2, f._3))
} catch {
case _: Throwable => null
}
} else {
null
}
}
dicts
}
}
|
foryou2030/incubator-carbondata
|
integration/spark/src/main/scala/org/apache/spark/sql/CarbonDictionaryDecoder.scala
|
Scala
|
apache-2.0
| 9,025
|
package com.github.havarunner
/**
* Marks a test class as a suite.
*
 * A suite comprises suite members that are annotated with [[com.github.havarunner.annotation.PartOf]].
*
* A suite is also a test, which means that it can contain tests.
*
* Suite members must be within the same package as the suite.
*
* @author Lauri Lehmijoki
*/
trait HavaRunnerSuite[T] {
/**
* With this method, implementations may provide a heavy-weight object to suite members.
*
* An example of a heavy-weight object is an HTTP server, which takes several seconds to start.
*
* @return the object that can be shared by all the suite members
*/
def suiteObject: T
/**
* JVM will call this method in the shutdown hook phase (http://docs.oracle.com/javase/7/docs/api/java/lang/Runtime.html#addShutdownHook(java.lang.Thread)).
*/
def afterSuite(): Unit
}
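A small illustrative implementation (not from the library); HttpServer below is a stand-in for whatever heavy-weight resource the suite members share.

// Stand-in for an expensive resource such as an embedded HTTP server.
final class HttpServer {
  def start(): Unit = println("server started")
  def stop(): Unit = println("server stopped")
}

// Suite members annotated with @PartOf(classOf[ApiSuite]) all receive the same server instance.
class ApiSuite extends HavaRunnerSuite[HttpServer] {
  private val server: HttpServer = { val s = new HttpServer; s.start(); s }

  override def suiteObject: HttpServer = server

  // Called from the JVM shutdown hook once the suite has finished.
  override def afterSuite(): Unit = server.stop()
}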
|
havarunner/havarunner
|
src/main/scala/com/github/havarunner/HavaRunnerSuite.scala
|
Scala
|
mit
| 877
|
package com.arcusys.valamis.util.mustache
case class PartialToken(key: String, otag: String, ctag: String) extends Token {
def render(context: Any, partials: Map[String, Mustache], callstack: List[Any]): TokenProduct =
partials.get(key) match {
case Some(template) => template.product(context, partials, template :: callstack)
case _ => throw new IllegalArgumentException("Partial \"" + key + "\" is not defined.")
}
def templateSource: String = otag + ">" + key + ctag
}
|
ViLPy/Valamis
|
valamis-util/src/main/scala/com/arcusys/valamis/util/mustache/PartialToken.scala
|
Scala
|
lgpl-3.0
| 517
|
package ch13
object ex08 {
def to2d(a: Array[Int], c: Int) = {
a.grouped(c).toArray
}
def main(args: Array[String]) {
val arr = Array(1, 2, 3, 4, 5, 6)
to2d(arr,4) map (_.toList) foreach println
}
}
|
tuxdna/scala-for-the-impatient-exercises
|
src/main/scala/ch13/ex08.scala
|
Scala
|
apache-2.0
| 221
|
package com.mehmetakiftutuncu.muezzinapi.shovel
import java.time.LocalDateTime
import java.util.concurrent.TimeUnit
import javax.inject.{Inject, Singleton}
import akka.actor.{ActorRef, ActorSystem, Cancellable, PoisonPill, Props}
import com.google.inject.ImplementedBy
import com.mehmetakiftutuncu.muezzinapi.data.{AbstractCache, AbstractFirebaseRealtimeDatabase}
import com.mehmetakiftutuncu.muezzinapi.services.AbstractPrayerTimesService
import com.mehmetakiftutuncu.muezzinapi.services.fetchers.AbstractPrayerTimesFetcherService
import com.mehmetakiftutuncu.muezzinapi.shovel.ShovelActor.Dig
import com.mehmetakiftutuncu.muezzinapi.utilities.{AbstractConf, Log, Logging}
import play.api.inject.ApplicationLifecycle
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.FiniteDuration
@ImplementedBy(classOf[Shovel])
trait AbstractShovel
@Singleton
class Shovel @Inject()(ActorSystem: ActorSystem,
ApplicationLifecycle: ApplicationLifecycle,
Cache: AbstractCache,
Conf: AbstractConf,
FirebaseRealtimeDatabase: AbstractFirebaseRealtimeDatabase,
PrayerTimesFetcherService: AbstractPrayerTimesFetcherService,
PrayerTimesService: AbstractPrayerTimesService) extends AbstractShovel with Logging {
private val enabled: Boolean = Conf.getBoolean("muezzinApi.shovel.enabled", defaultValue = true)
private val initialDelay: FiniteDuration = Conf.getFiniteDuration("muezzinApi.shovel.initialDelay", FiniteDuration(1, TimeUnit.MINUTES))
private val interval: FiniteDuration = Conf.getFiniteDuration("muezzinApi.shovel.interval", FiniteDuration(1, TimeUnit.DAYS))
private val actor: ActorRef = ActorSystem.actorOf(
Props(new ShovelActor(Cache, Conf, FirebaseRealtimeDatabase, PrayerTimesFetcherService, PrayerTimesService)),
ShovelActor.actorName
)
private val cancellable: Option[Cancellable] = {
if (enabled) {
val firstRun: LocalDateTime = LocalDateTime.now.plusSeconds(initialDelay.toSeconds).withNano(0)
Log.warn(s"Starting Shovel scheduled to $firstRun...")
val c: Cancellable = ActorSystem.scheduler.schedule(
initialDelay,
interval,
actor,
Dig
)
Option(c)
} else {
None
}
}
ApplicationLifecycle.addStopHook {
() =>
actor ! PoisonPill
cancellable.foreach {
c: Cancellable =>
Log.warn("Shutting down Shovel...")
c.cancel()
}
ActorSystem.terminate()
}
}
|
mehmetakiftutuncu/MuezzinAPI
|
app/com/mehmetakiftutuncu/muezzinapi/shovel/Shovel.scala
|
Scala
|
mit
| 2,613
|
package es.uvigo.ei.sing.sds
package annotator
import scala.concurrent.Future
import entity._
import service.{ DiseaseEntity, DiseasesService }
final class DiseasesAnnotator extends AnnotatorAdapter {
import context._
lazy val diseases = new DiseasesService
override def annotate(article: Article): Future[Unit] =
diseases.getEntities(article.content).flatMap(es => saveEntities(es, article.id.get)).map(_ => ())
private def saveEntities(entities: Set[DiseaseEntity], articleId: Article.ID): Future[Set[(Keyword, Annotation)]] =
Future.sequence { entities.map(e => saveEntity(e, articleId)) }
private def saveEntity(entity: DiseaseEntity, articleId: Article.ID): Future[(Keyword, Annotation)] =
for {
normalized <- diseases.normalize(entity)
keyword <- getOrStoreKeyword(normalized, Disease)
annotation <- annotationsDAO.insert(entity.toAnnotation(articleId, keyword.id.get))
} yield (keyword, annotation)
}
|
agjacome/smart-drug-search
|
src/main/scala/annotator/DiseasesAnnotator.scala
|
Scala
|
mit
| 964
|
package gg.uhc.hosts.endpoints.docs
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
class DocsRoute {
def apply(): Route =
concat(
pathEndOrSingleSlash {
redirectToTrailingSlashIfMissing(StatusCodes.Found) {
getFromFile("apidocs/index.html")
}
},
getFromDirectory("apidocs")
)
}
|
Eluinhost/hosts.uhc.gg
|
src/main/scala/gg/uhc/hosts/endpoints/docs/DocsRoute.scala
|
Scala
|
mit
| 432
|
package pl.touk.nussknacker.engine.requestresponse.management
import org.scalatest.{FunSuite, Matchers}
import pl.touk.nussknacker.engine.api.deployment._
import pl.touk.nussknacker.engine.api.deployment.simple.{SimpleProcessStateDefinitionManager, SimpleStateStatus}
import pl.touk.nussknacker.engine.api.process.{ProcessId, ProcessName, VersionId}
import pl.touk.nussknacker.engine.api.{MetaData, ProcessVersion, StreamMetaData}
import pl.touk.nussknacker.engine.canonicalgraph.CanonicalProcess
import pl.touk.nussknacker.engine.deployment.{DeploymentData, ExternalDeploymentId}
import pl.touk.nussknacker.engine.requestresponse.deployment.RequestResponseDeploymentData
import pl.touk.nussknacker.test.PatientScalaFutures
import scala.concurrent.Future
class MultiInstanceRequestResponseClientSpec extends FunSuite with Matchers with PatientScalaFutures {
import scala.concurrent.ExecutionContext.Implicits._
val failClient: RequestResponseClient = new RequestResponseClient {
override def cancel(name: ProcessName): Future[Unit] = {
name shouldBe id
Future.failed(failure)
}
override def deploy(deploymentData: RequestResponseDeploymentData): Future[Unit] = {
deploymentData.processVersion.processName shouldBe id
Future.failed(failure)
}
override def findStatus(name: ProcessName): Future[Option[ProcessState]] = {
name shouldBe id
Future.failed(failure)
}
def close(): Unit = {}
}
private val failure = new Exception("Fail")
def processVersion(versionId: Option[Long]): Option[ProcessVersion] = versionId.map(id => ProcessVersion(VersionId(id), ProcessName(""), ProcessId(1), "", None))
def processState(deploymentId: ExternalDeploymentId, status: StateStatus, client: RequestResponseClient, versionId: Option[Long] = Option.empty, startTime: Option[Long] = Option.empty, errors: List[String] = List.empty): ProcessState =
SimpleProcessStateDefinitionManager.processState(status, Some(deploymentId), processVersion(versionId), startTime = startTime, errors = errors)
test("Deployment should complete when all parts are successful") {
val multiClient = new MultiInstanceRequestResponseClient(List(okClient(), okClient()))
multiClient.deploy(RequestResponseDeploymentData(CanonicalProcess(MetaData("fooId", StreamMetaData()), List.empty), 1000, ProcessVersion.empty.copy(processName=id), DeploymentData.empty)).futureValue shouldBe (())
}
test("Deployment should fail when one part fails") {
val multiClient = new MultiInstanceRequestResponseClient(List(okClient(), failClient))
multiClient.deploy(RequestResponseDeploymentData(CanonicalProcess(MetaData("fooId", StreamMetaData()), List.empty), 1000, ProcessVersion.empty.copy(processName=id), DeploymentData.empty)).failed.futureValue shouldBe failure
}
test("Status should be none if no client returns status") {
val multiClient = new MultiInstanceRequestResponseClient(List(okClient(), okClient()))
multiClient.findStatus(id).futureValue shouldBe None
}
test("Status should be RUNNING if all clients running") {
val consistentState = processState(jobId, SimpleStateStatus.Running, okClient(), Some(1), Some(10000L))
val multiClient = new MultiInstanceRequestResponseClient(List(
okClient(Some(consistentState)),
okClient(Some(consistentState))
))
multiClient.findStatus(id).futureValue shouldBe Some(consistentState)
}
test("Status should be INCONSISTENT if one status unknown") {
val multiClient = new MultiInstanceRequestResponseClient(List(
okClient(),
okClient(Some(processState(jobId, SimpleStateStatus.Running, okClient(), Some(1))))
))
val expected = processState(jobId, SimpleStateStatus.Failed, multiClient, errors = List("Inconsistent states between servers: empty; state: RUNNING, startTime: None."))
multiClient.findStatus(id).futureValue shouldBe Some(expected)
}
test("Status should be INCONSISTENT if status differ") {
val multiClient = new MultiInstanceRequestResponseClient(List(
okClient(Some(processState(jobId, SimpleStateStatus.Running, okClient(), Some(1), Some(5000L)))),
okClient(Some(processState(jobId, SimpleStateStatus.Running, okClient(), Some(1))))
))
val expected = processState(jobId, SimpleStateStatus.Failed, multiClient, errors = List("Inconsistent states between servers: state: RUNNING, startTime: 5000; state: RUNNING, startTime: None."))
multiClient.findStatus(id).futureValue shouldBe Some(expected)
}
test("Status should be FAIL if one status fails") {
val multiClient = new MultiInstanceRequestResponseClient(List(okClient(), failClient))
multiClient.findStatus(id).failed.futureValue shouldBe failure
}
private val id = ProcessName("id")
private val jobId = ExternalDeploymentId("id")
def okClient(status: Option[ProcessState] = None, expectedTime: Long = 1000): RequestResponseClient = new RequestResponseClient {
override def cancel(name: ProcessName): Future[Unit] = {
name shouldBe id
Future.successful(())
}
override def deploy(deploymentData: RequestResponseDeploymentData): Future[Unit] = {
deploymentData.processVersion.processName shouldBe id
deploymentData.deploymentTime shouldBe expectedTime
Future.successful(())
}
override def findStatus(name: ProcessName): Future[Option[ProcessState]] = {
name shouldBe id
Future.successful(status)
}
override def close(): Unit = {}
}
}
|
TouK/nussknacker
|
engine/lite/request-response/runtime/src/test/scala/pl/touk/nussknacker/engine/requestresponse/management/MultiInstanceRequestResponseClientSpec.scala
|
Scala
|
apache-2.0
| 5,519
|
package com.twitter.finagle.http.netty
import org.jboss.netty.handler.codec.http.{DefaultHttpRequest, HttpMethod, HttpVersion}
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class HttpRequestProxyTest extends FunSuite {
test("basics") {
val message = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/")
val proxy = new HttpRequestProxy {
final val httpRequest = message
}
assert(proxy.httpMessage != null)
assert(proxy.getProtocolVersion === HttpVersion.HTTP_1_1)
assert(proxy.getMethod === HttpMethod.GET)
}
}
|
travisbrown/finagle
|
finagle-http/src/test/scala/com/twitter/finagle/http/netty/HttpRequestProxyTest.scala
|
Scala
|
apache-2.0
| 655
|
package shapeless.contrib.scalaz
import shapeless.contrib.scalacheck._
import org.specs2.matcher.OptionMatchers
import org.specs2.scalaz.Spec
import org.scalacheck.Arbitrary
import org.scalacheck.Arbitrary._
import scalaz.{Equal, \/}
import scalaz.scalacheck.ScalazArbitrary._
class BinaryTest extends Spec with OptionMatchers {
import scalaz.std.AllInstances._
implicit val ByteVectorArbitrary = Arbitrary(arbitrary[List[Byte]] map { _.toVector })
def binaryLaws[A : Binary : Equal : Arbitrary](name: String) =
name ! prop { (a: A, rest: Vector[Byte]) =>
val encoded = Binary[A] encode a
val decoded = Binary[A] decode (encoded ++ rest)
Equal[Option[(A, Vector[Byte])]].equal(decoded, Some((a, rest)))
}
"simple instances" should {
binaryLaws[Int]("Int")
binaryLaws[(Int, Int)]("(Int, Int)")
binaryLaws[Int \/ Long]("Int \\/ Long")
binaryLaws[List[Int]]("List[Int]")
binaryLaws[String]("String")
}
case class OneElem(n: Int)
case class TwoElem(n: Int, x: String)
case class Complex(n: Int, x: TwoElem \/ String, z: List[OneElem])
"case class instances" should {
import Binary.auto._
binaryLaws[OneElem]("OneElem")
binaryLaws[TwoElem]("TwoElem")
binaryLaws[Complex]("Complex")
binaryLaws[Complex]("Complex + checksum")(Binary[Complex].withChecksum(new java.util.zip.CRC32), implicitly, implicitly)
{
implicit val instance = Binary.auto.derive[(Int, String)]
binaryLaws[(Int, String)]("Tuple2")
}
}
sealed trait Cases[A, B]
case class Case1[A, B](a: A) extends Cases[A, B]
case class Case2[A, B](b: B) extends Cases[A, B]
sealed trait Tree[A]
case class Node[A](left: Tree[A], right: Tree[A]) extends Tree[A]
case class Leaf[A](item: A) extends Tree[A]
"multi-case class instances" should {
import Binary.auto._
binaryLaws[Cases[OneElem, TwoElem]]("Cases[OneElem, TwoElem]")
binaryLaws[Cases[Complex, Complex]]("Cases[Complex, Complex]")
binaryLaws[Tree[Int]]("Tree[Int]")
binaryLaws[Tree[Complex]]("Tree[Complex]")
}
"checksum" should {
"complain when broken" ! prop { (n: Long) =>
val binary = Binary[Long].withChecksum(new java.util.zip.CRC32)
val encoded = binary encode n
// let's manipulate the last byte of the checksum
val manipulated = encoded.init :+ (encoded.last + 1).toByte
binary decode manipulated must beNone
}
}
}
// vim: expandtab:ts=2:sw=2
|
milessabin/shapeless-contrib
|
scalaz/src/test/scala/BinaryTest.scala
|
Scala
|
mit
| 2,463
|
package coursier.install.error
final class NoPrebuiltBinaryAvailable(val candidateUrls: Seq[String])
extends InstallDirException(
if (candidateUrls.isEmpty)
"No prebuilt binary available"
else
s"No prebuilt binary available at ${candidateUrls.mkString(", ")}"
)
|
alexarchambault/coursier
|
modules/install/src/main/scala/coursier/install/error/NoPrebuiltBinaryAvailable.scala
|
Scala
|
apache-2.0
| 299
|
/*
* Copyright (c) 2013-2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0, and
* you may not use this file except in compliance with the Apache License
* Version 2.0. You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the Apache License Version 2.0 is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow
package collectors
package scalastream
// Scala
import scala.collection.mutable.MutableList
// Akka
import akka.actor.{ActorSystem, Props}
// Specs2 and Spray testing
import org.specs2.matcher.AnyMatchers
import org.specs2.mutable.Specification
import org.specs2.specification.{Scope,Fragments}
import spray.testkit.Specs2RouteTest
// Spray
import spray.http.{DateTime,HttpHeader,HttpRequest,HttpCookie,RemoteAddress}
import spray.http.HttpHeaders.{
Cookie,
`Set-Cookie`,
`Remote-Address`,
`Raw-Request-URI`
}
// Config
import com.typesafe.config.{ConfigFactory,Config,ConfigException}
// Thrift
import org.apache.thrift.TDeserializer
// Snowplow
import sinks._
import CollectorPayload.thrift.model1.CollectorPayload
class CollectorServiceSpec extends Specification with Specs2RouteTest with
AnyMatchers {
val testConf: Config = ConfigFactory.parseString("""
collector {
interface = "0.0.0.0"
port = 8080
production = true
p3p {
policyref = "/w3c/p3p.xml"
CP = "NOI DSP COR NID PSA OUR IND COM NAV STA"
}
cookie {
expiration = 365 days
domain = "test-domain.com"
}
sink {
enabled = "test"
kinesis {
aws {
access-key: "cpf"
secret-key: "cpf"
}
stream {
region: "us-east-1"
good: "snowplow_collector_example"
bad: "snowplow_collector_example"
}
buffer {
byte-limit: 4000000 # 4MB
record-limit: 500 # 500 records
time-limit: 60000 # 1 minute
}
backoffPolicy {
minBackoff: 3000 # 3 seconds
maxBackoff: 600000 # 10 minutes
}
}
}
}
""")
val collectorConfig = new CollectorConfig(testConf)
val sink = new TestSink
val sinks = CollectorSinks(sink, sink)
val responseHandler = new ResponseHandler(collectorConfig, sinks)
val collectorService = new CollectorService(responseHandler, system)
val thriftDeserializer = new TDeserializer
// By default, spray will always add Remote-Address to every request
// when running with the `spray.can.server.remote-address-header`
// option. However, the testing does not read this option and a
// remote address always needs to be set.
def CollectorGet(uri: String, cookie: Option[`HttpCookie`] = None,
remoteAddr: String = "127.0.0.1") = {
val headers: MutableList[HttpHeader] =
MutableList(`Remote-Address`(remoteAddr),`Raw-Request-URI`(uri))
cookie.foreach(headers += `Cookie`(_))
Get(uri).withHeaders(headers.toList)
}
"Snowplow's Scala collector" should {
"return an invisible pixel" in {
CollectorGet("/i") ~> collectorService.collectorRoute ~> check {
responseAs[Array[Byte]] === ResponseHandler.pixel
}
}
"return a cookie expiring at the correct time" in {
CollectorGet("/i") ~> collectorService.collectorRoute ~> check {
headers must not be empty
val httpCookies: List[HttpCookie] = headers.collect {
case `Set-Cookie`(hc) => hc
}
httpCookies must not be empty
// Assume we only return a single cookie.
// If the collector is modified to return multiple cookies,
// this will need to be changed.
val httpCookie = httpCookies(0)
httpCookie.name must be("sp")
httpCookie.domain must beSome
httpCookie.domain.get must be(collectorConfig.cookieDomain.get)
httpCookie.expires must beSome
val expiration = httpCookie.expires.get
val offset = expiration.clicks - collectorConfig.cookieExpiration -
DateTime.now.clicks
offset.asInstanceOf[Int] must beCloseTo(0, 2000) // 2000 ms window.
}
}
"return the same cookie as passed in" in {
CollectorGet("/i", Some(HttpCookie("sp", "UUID_Test"))) ~>
collectorService.collectorRoute ~> check {
val httpCookies: List[HttpCookie] = headers.collect {
case `Set-Cookie`(hc) => hc
}
// Assume we only return a single cookie.
// If the collector is modified to return multiple cookies,
// this will need to be changed.
val httpCookie = httpCookies(0)
httpCookie.content must beEqualTo("UUID_Test")
}
}
"return a P3P header" in {
CollectorGet("/i") ~> collectorService.collectorRoute ~> check {
val p3pHeaders = headers.filter {
h => h.name.equals("P3P")
}
p3pHeaders.size must beEqualTo(1)
val p3pHeader = p3pHeaders(0)
val policyRef = collectorConfig.p3pPolicyRef
val CP = collectorConfig.p3pCP
p3pHeader.value must beEqualTo(
"policyref=\\"%s\\", CP=\\"%s\\"".format(policyRef, CP))
}
}
"store the expected event as a serialized Thrift object in the enabled sink" in {
val payloadData = "param1=val1¶m2=val2"
val storedRecordBytes = responseHandler.cookie(payloadData, null, None,
None, "localhost", RemoteAddress("127.0.0.1"), new HttpRequest(), None, "/i", true)._2
val storedEvent = new CollectorPayload
this.synchronized {
thriftDeserializer.deserialize(storedEvent, storedRecordBytes.head)
}
storedEvent.timestamp must beCloseTo(DateTime.now.clicks, 1000)
storedEvent.encoding must beEqualTo("UTF-8")
storedEvent.ipAddress must beEqualTo("127.0.0.1")
storedEvent.collector must beEqualTo("ssc-0.5.0-test")
storedEvent.path must beEqualTo("/i")
storedEvent.querystring must beEqualTo(payloadData)
}
"report itself as healthy" in {
CollectorGet("/health") ~> collectorService.collectorRoute ~> check {
response.status must beEqualTo(spray.http.StatusCodes.OK)
}
}
}
}
|
mdavid/lessig-bigdata
|
lib/snowplow/2-collectors/scala-stream-collector/src/test/scala/com.snowplowanalytics.snowplow.collectors.scalastream/CollectorServiceSpec.scala
|
Scala
|
mit
| 6,452
|
package zzb.rest
package unmarshalling
trait UnmarshallerLifting {
implicit def fromRequestUnmarshaller[T](implicit um: FromMessageUnmarshaller[T]): FromRequestUnmarshaller[T] =
new FromRequestUnmarshaller[T] {
def apply(request: RestRequest): Deserialized[T] = um(request)
}
implicit def fromResponseUnmarshaller[T](implicit um: FromMessageUnmarshaller[T]): FromResponseUnmarshaller[T] =
new FromResponseUnmarshaller[T] {
def apply(response: RestResponse): Deserialized[T] = um(response)
}
implicit def fromMessageUnmarshaller[T](implicit um: Unmarshaller[T]): FromMessageUnmarshaller[T] =
new FromMessageUnmarshaller[T] {
def apply(msg: RestMessage): Deserialized[T] = um(msg.entity)
}
}
object UnmarshallerLifting extends UnmarshallerLifting
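// --- Illustrative addition (not part of the original file) ---
// A minimal sketch of what the liftings above provide: given a plain Unmarshaller[T],
// a FromRequestUnmarshaller[T] can be derived. `T` and `um` are placeholders.
object UnmarshallerLiftingExample {
  import UnmarshallerLifting._

  def forRequests[T](implicit um: Unmarshaller[T]): FromRequestUnmarshaller[T] =
    fromRequestUnmarshaller[T](fromMessageUnmarshaller[T](um))
}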
|
stepover/zzb
|
zzb-rest/src/main/scala/zzb/rest/unmarshalling/UnmarshallerLifting.scala
|
Scala
|
mit
| 798
|
class B extends A
class C extends B
|
dotty-staging/dotty
|
sbt-test/source-dependencies/transitive-class/BC.scala
|
Scala
|
apache-2.0
| 36
|
package reactivemongo.play.json.compat
import scala.language.implicitConversions
import play.api.libs.json.{ JsNull, JsNumber, JsString, JsValue }
import reactivemongo.api.bson.{
BSONArray,
BSONBinary,
BSONBoolean,
BSONDateTime,
BSONDecimal,
BSONDocument,
BSONDouble,
BSONInteger,
BSONJavaScript,
BSONJavaScriptWS,
BSONLong,
BSONMaxKey,
BSONMinKey,
BSONNull,
BSONObjectID,
BSONRegex,
BSONString,
BSONSymbol,
BSONTimestamp,
BSONValue
}
/**
* Implicit conversions for value types between
* `play.api.libs.json` and `reactivemongo.api.bson`,
* using lax conversions.
*/
object LaxValueConverters extends LaxValueConverters
private[compat] trait LaxValueConverters
extends FromToValue with SharedValueConverters
with LaxValueConvertersLowPriority1 {
final type JsonNumber = JsNumber
final type JsonTime = JsNumber
final type JsonJavaScript = JsString
final type JsonObjectID = JsString
final type JsonSymbol = JsString
/** See `BSONDateTime.value` */
override implicit final def fromDateTime(bson: BSONDateTime): JsNumber =
JsNumber(bson.value)
implicit final def fromDouble(bson: BSONDouble): JsNumber =
JsNumber(bson.value)
implicit final def fromInteger(bson: BSONInteger): JsNumber =
JsNumber(bson.value)
/** See `BSONJavaScript.value` */
override implicit final def fromJavaScript(bson: BSONJavaScript): JsString =
JsString(bson.value)
implicit final def fromLong(bson: BSONLong): JsNumber = JsNumber(bson.value)
implicit final def fromObjectID(bson: BSONObjectID): JsString =
JsString(bson.stringify)
implicit final def fromSymbol(bson: BSONSymbol): JsString =
JsString(bson.value)
override implicit final def fromTimestamp(bson: BSONTimestamp): JsNumber =
JsNumber(bson.value)
override def toString = "LaxValueConverters"
}
private[json] sealed trait LaxValueConvertersLowPriority1 {
_: LaxValueConverters =>
implicit final def fromValue(bson: BSONValue): JsValue = bson match {
case arr: BSONArray => fromArray(arr)
case bin: BSONBinary => fromBinary(bin)
case BSONBoolean(true) => JsTrue
case BSONBoolean(_) => JsFalse
case dt: BSONDateTime => fromDateTime(dt)
case dec: BSONDecimal => fromDecimal(dec)
case doc: BSONDocument => fromDocument(doc)
case d: BSONDouble => fromDouble(d)
case i: BSONInteger => fromInteger(i)
case js: BSONJavaScript => fromJavaScript(js)
case jsw: BSONJavaScriptWS => fromJavaScriptWS(jsw)
case l: BSONLong => fromLong(l)
case BSONMaxKey => JsMaxKey
case BSONMinKey => JsMinKey
case BSONNull => JsNull
case oid: BSONObjectID => fromObjectID(oid)
case re: BSONRegex => fromRegex(re)
case str: BSONString => fromStr(str)
case sym: BSONSymbol => fromSymbol(sym)
case ts: BSONTimestamp => fromTimestamp(ts)
case _ => JsUndefined
}
}
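// --- Illustrative addition (not part of the original sources) ---
// A small sketch of the lax behaviour documented above: BSON identifiers and timestamps map
// to plain JSON strings/numbers rather than extended-JSON objects. The values are made up.
object LaxValueConvertersExample {
  import LaxValueConverters._

  val oidAsJson: JsString = fromObjectID(BSONObjectID.generate())    // e.g. "5f3c..." as a bare string
  val timeAsJson: JsNumber = fromDateTime(BSONDateTime(1234567890L)) // epoch millis as a number
  val anyAsJson: JsValue = fromValue(BSONString("hello"))            // generic conversion entry point
}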
|
ReactiveMongo/Reactivemongo-Play-Json
|
compat/src/main/scala/LaxValueConverters.scala
|
Scala
|
apache-2.0
| 2,885
|
/*
Copyright (c) 2009-2012, The Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the University of California nor the names of
its contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.cdlib.was.weari.server;
import java.util.{List => JList, Map => JMap, UUID};
import org.cdlib.was.weari._;
import scala.collection.JavaConversions.{ iterableAsScalaIterable, mapAsScalaMap };
import scala.collection.immutable;
import com.typesafe.scalalogging.slf4j.Logging
class WeariHandler(config: Config)
extends thrift.Server.Iface with Logging with ExceptionLogger {
val weari = new Weari(config);
/**
* Catch all non thrift._ exceptions and wrap them in a thrift._ Exception suitable
* for sending back to a client.
*/
def throwThriftException[T](f: => T) : T = {
try {
f;
} catch {
case ex : thrift.BadJSONException => throw ex;
case ex : thrift.UnparsedException => throw ex;
case ex : thrift.ParseException => throw ex;
case ex : thrift.IndexException => throw ex;
case ex : Exception => {
logger.error(getStackTrace(ex));
throw new thrift.IndexException(ex.toString);
}
}
}
private def convertMap (m : JMap[String, JList[String]]) : immutable.Map[String, Seq[String]] =
mapAsScalaMap(m).toMap.mapValues(iterableAsScalaIterable(_).toSeq);
/**
* Index a set of ARCs on a solr server.
*
* @param arcs A list of ARC names to index
* @param extraId String to append to solr document IDs.
* @param extraFields Map of extra fields to append to solr documents.
*/
def index(arcs : JList[String],
extraId : String,
extraFields : JMap[String, JList[String]]) {
throwThriftException {
weari.index(iterableAsScalaIterable(arcs).toSeq, extraId, convertMap(extraFields));
}
}
/**
* Set fields unconditionally on a group of documents retrieved by a query string.
*/
def setFields(queryString : String,
fields : JMap[String, JList[String]]) {
throwThriftException {
weari.setFields(queryString, convertMap(fields));
}
}
/**
* Remove index entries for these ARC files from the solr server.
*/
def remove(arcs : JList[String]) {
throwThriftException {
weari.remove(iterableAsScalaIterable(arcs).toSeq);
}
}
/**
* Check to see if a given ARC file has been parsed.
*/
def isArcParsed (arcName : String) : Boolean = {
throwThriftException {
weari.isArcParsed(arcName);
}
}
/**
* Parse ARC files.
*/
def parseArcs (arcs : JList[String]) {
throwThriftException {
weari.parseArcs(iterableAsScalaIterable(arcs).toSeq.filter(!weari.isArcParsed(_)));
}
}
def deleteParse (arc : String) {
throwThriftException {
weari.deleteParse(arc);
}
}
}
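// --- Illustrative addition (not part of the original file) ---
// A minimal sketch of driving the handler directly (outside Thrift): parse a batch of
// ARCs, then index them. `config` and `arcNames` are assumed to be supplied by the caller.
object WeariHandlerUsageSketch {
  def parseAndIndex(config: Config, arcNames: JList[String]): Unit = {
    val handler = new WeariHandler(config);
    handler.parseArcs(arcNames); // parseArcs skips ARCs that are already parsed
    handler.index(arcNames, "", new java.util.HashMap[String, JList[String]]());
  }
}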
|
cdlib/weari
|
src/main/scala/org/cdlib/was/weari/server/WeariHandler.scala
|
Scala
|
bsd-3-clause
| 4,204
|
package com.eevolution.context.dictionary.infrastructure.service.impl
import java.util.UUID
import com.eevolution.context.dictionary.infrastructure.repository.ReplicationRunRepository
import com.eevolution.context.dictionary.infrastructure.service.ReplicationRunService
import com.lightbend.lagom.scaladsl.api.ServiceCall
import com.lightbend.lagom.scaladsl.persistence.PersistentEntityRegistry
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/EmerisScala
* Created by emeris.hernandez@e-evolution.com , www.e-evolution.com on 21/11/17.
*/
/**
* Replication Run Service Implementation
* @param registry
* @param replicationRunRepository
*/
class ReplicationRunServiceImpl (registry: PersistentEntityRegistry, replicationRunRepository: ReplicationRunRepository) extends ReplicationRunService {
private val DefaultPageSize = 10
override def getAll() = ServiceCall {_ => replicationRunRepository.getAll()}
override def getAllByPage(page : Option[Int], pageSize : Option[Int]) = ServiceCall{_ => replicationRunRepository.getAllByPage(page.getOrElse(0) , pageSize.getOrElse(DefaultPageSize))}
override def getById(id: Int) = ServiceCall { _ => replicationRunRepository.getById(id)}
override def getByUUID(uuid: UUID) = ServiceCall { _ => replicationRunRepository.getByUUID(uuid)}
}
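// --- Illustrative addition (not part of the original file) ---
// A minimal sketch of calling the service from client code; `service` is assumed to be
// obtained from a Lagom service client elsewhere.
object ReplicationRunServiceUsageSketch {
  def firstPage(service: ReplicationRunService) =
    service.getAllByPage(Some(0), Some(10)).invoke()
}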
|
adempiere/ADReactiveSystem
|
dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/service/impl/ReplicationRunServiceImpl.scala
|
Scala
|
gpl-3.0
| 2,094
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.internal.io
import java.io.IOException
import java.util.{Date, UUID}
import scala.collection.mutable
import scala.util.Try
import org.apache.hadoop.conf.Configurable
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.spark.internal.Logging
import org.apache.spark.mapred.SparkHadoopMapRedUtil
/**
* An [[FileCommitProtocol]] implementation backed by an underlying Hadoop OutputCommitter
* (from the newer mapreduce API, not the old mapred API).
*
* Unlike Hadoop's OutputCommitter, this implementation is serializable.
*
* @param jobId the job's or stage's id
* @param path the job's output path, or null if committer acts as a noop
* @param dynamicPartitionOverwrite If true, Spark will overwrite partition directories at runtime
* dynamically. Suppose final path is /path/to/outputPath, output
* path of [[FileOutputCommitter]] is an intermediate path, e.g.
* /path/to/outputPath/.spark-staging-{jobId}, which is a staging
* directory. Task attempts firstly write files under the
* intermediate path, e.g.
* /path/to/outputPath/.spark-staging-{jobId}/_temporary/
* {appAttemptId}/_temporary/{taskAttemptId}/a=1/b=1/xxx.parquet.
*
* 1. When [[FileOutputCommitter]] algorithm version set to 1,
* we firstly move task attempt output files to
* /path/to/outputPath/.spark-staging-{jobId}/_temporary/
* {appAttemptId}/{taskId}/a=1/b=1,
* then move them to
* /path/to/outputPath/.spark-staging-{jobId}/a=1/b=1.
* 2. When [[FileOutputCommitter]] algorithm version set to 2,
* committing tasks directly move task attempt output files to
* /path/to/outputPath/.spark-staging-{jobId}/a=1/b=1.
*
* At the end of committing job, we move output files from
* intermediate path to final path, e.g., move files from
* /path/to/outputPath/.spark-staging-{jobId}/a=1/b=1
* to /path/to/outputPath/a=1/b=1
*/
class HadoopMapReduceCommitProtocol(
jobId: String,
path: String,
dynamicPartitionOverwrite: Boolean = false)
extends FileCommitProtocol with Serializable with Logging {
import FileCommitProtocol._
/** OutputCommitter from Hadoop is not serializable so marking it transient. */
@transient private var committer: OutputCommitter = _
/**
* Checks whether there are files to be committed to a valid output location.
*
* As committing and aborting a job occurs on driver, where `addedAbsPathFiles` is always null,
* it is necessary to check whether a valid output path is specified.
* [[HadoopMapReduceCommitProtocol#path]] need not be a valid [[org.apache.hadoop.fs.Path]] for
* committers not writing to distributed file systems.
*/
private val hasValidPath = Try { new Path(path) }.isSuccess
/**
* Tracks files staged by this task for absolute output paths. These outputs are not managed by
* the Hadoop OutputCommitter, so we must move these to their final locations on job commit.
*
* The mapping is from the temp output path to the final desired output path of the file.
*/
@transient private var addedAbsPathFiles: mutable.Map[String, String] = null
/**
* Tracks partitions with default path that have new files written into them by this task,
* e.g. a=1/b=2. Files under these partitions will be saved into staging directory and moved to
* destination directory at the end, if `dynamicPartitionOverwrite` is true.
*/
@transient private var partitionPaths: mutable.Set[String] = null
/**
* The staging directory of this write job. Spark uses it to deal with files with absolute output
* path, or writing data into partitioned directory with dynamicPartitionOverwrite=true.
*/
@transient protected lazy val stagingDir = getStagingDir(path, jobId)
protected def setupCommitter(context: TaskAttemptContext): OutputCommitter = {
val format = context.getOutputFormatClass.getConstructor().newInstance()
// If OutputFormat is Configurable, we should set conf to it.
format match {
case c: Configurable => c.setConf(context.getConfiguration)
case _ => ()
}
format.getOutputCommitter(context)
}
override def newTaskTempFile(
taskContext: TaskAttemptContext, dir: Option[String], ext: String): String = {
newTaskTempFile(taskContext, dir, FileNameSpec("", ext))
}
override def newTaskTempFile(
taskContext: TaskAttemptContext, dir: Option[String], spec: FileNameSpec): String = {
val filename = getFilename(taskContext, spec)
val stagingDir: Path = committer match {
// For FileOutputCommitter it has its own staging path called "work path".
case f: FileOutputCommitter =>
if (dynamicPartitionOverwrite) {
assert(dir.isDefined,
"The dataset to be written must be partitioned when dynamicPartitionOverwrite is true.")
partitionPaths += dir.get
}
new Path(Option(f.getWorkPath).map(_.toString).getOrElse(path))
case _ => new Path(path)
}
dir.map { d =>
new Path(new Path(stagingDir, d), filename).toString
}.getOrElse {
new Path(stagingDir, filename).toString
}
}
override def newTaskTempFileAbsPath(
taskContext: TaskAttemptContext, absoluteDir: String, ext: String): String = {
newTaskTempFileAbsPath(taskContext, absoluteDir, FileNameSpec("", ext))
}
override def newTaskTempFileAbsPath(
taskContext: TaskAttemptContext, absoluteDir: String, spec: FileNameSpec): String = {
val filename = getFilename(taskContext, spec)
val absOutputPath = new Path(absoluteDir, filename).toString
// Include a UUID here to prevent file collisions for one task writing to different dirs.
// In principle we could include hash(absoluteDir) instead but this is simpler.
val tmpOutputPath = new Path(stagingDir, UUID.randomUUID().toString() + "-" + filename).toString
addedAbsPathFiles(tmpOutputPath) = absOutputPath
tmpOutputPath
}
protected def getFilename(taskContext: TaskAttemptContext, spec: FileNameSpec): String = {
// The file name looks like part-00000-2dd664f9-d2c4-4ffe-878f-c6c70c1fb0cb_00003-c000.parquet
// Note that %05d does not truncate the split number, so if we have more than 100000 tasks,
// the file name is fine and won't overflow.
val split = taskContext.getTaskAttemptID.getTaskID.getId
f"${spec.prefix}part-$split%05d-$jobId${spec.suffix}"
}
override def setupJob(jobContext: JobContext): Unit = {
// Setup IDs
val jobId = SparkHadoopWriterUtils.createJobID(new Date, 0)
val taskId = new TaskID(jobId, TaskType.MAP, 0)
val taskAttemptId = new TaskAttemptID(taskId, 0)
// Set up the configuration object
jobContext.getConfiguration.set("mapreduce.job.id", jobId.toString)
jobContext.getConfiguration.set("mapreduce.task.id", taskAttemptId.getTaskID.toString)
jobContext.getConfiguration.set("mapreduce.task.attempt.id", taskAttemptId.toString)
jobContext.getConfiguration.setBoolean("mapreduce.task.ismap", true)
jobContext.getConfiguration.setInt("mapreduce.task.partition", 0)
val taskAttemptContext = new TaskAttemptContextImpl(jobContext.getConfiguration, taskAttemptId)
committer = setupCommitter(taskAttemptContext)
committer.setupJob(jobContext)
}
override def commitJob(jobContext: JobContext, taskCommits: Seq[TaskCommitMessage]): Unit = {
committer.commitJob(jobContext)
if (hasValidPath) {
val (allAbsPathFiles, allPartitionPaths) =
taskCommits.map(_.obj.asInstanceOf[(Map[String, String], Set[String])]).unzip
val fs = stagingDir.getFileSystem(jobContext.getConfiguration)
val filesToMove = allAbsPathFiles.foldLeft(Map[String, String]())(_ ++ _)
logDebug(s"Committing files staged for absolute locations $filesToMove")
val absParentPaths = filesToMove.values.map(new Path(_).getParent).toSet
if (dynamicPartitionOverwrite) {
logDebug(s"Clean up absolute partition directories for overwriting: $absParentPaths")
absParentPaths.foreach(fs.delete(_, true))
}
logDebug(s"Create absolute parent directories: $absParentPaths")
absParentPaths.foreach(fs.mkdirs)
for ((src, dst) <- filesToMove) {
if (!fs.rename(new Path(src), new Path(dst))) {
throw new IOException(s"Failed to rename $src to $dst when committing files staged for " +
s"absolute locations")
}
}
if (dynamicPartitionOverwrite) {
val partitionPaths = allPartitionPaths.foldLeft(Set[String]())(_ ++ _)
logDebug(s"Clean up default partition directories for overwriting: $partitionPaths")
for (part <- partitionPaths) {
val finalPartPath = new Path(path, part)
if (!fs.delete(finalPartPath, true) && !fs.exists(finalPartPath.getParent)) {
// According to the official hadoop FileSystem API spec, delete op should assume
// the destination is no longer present regardless of return value, thus we do not
// need to double check if finalPartPath exists before rename.
// Also in our case, based on the spec, delete returns false only when finalPartPath
// does not exist. When this happens, we need to take action if parent of finalPartPath
// also does not exist(e.g. the scenario described on SPARK-23815), because
// FileSystem API spec on rename op says the rename dest(finalPartPath) must have
// a parent that exists, otherwise we may get unexpected result on the rename.
fs.mkdirs(finalPartPath.getParent)
}
val stagingPartPath = new Path(stagingDir, part)
if (!fs.rename(stagingPartPath, finalPartPath)) {
throw new IOException(s"Failed to rename $stagingPartPath to $finalPartPath when " +
s"committing files staged for overwriting dynamic partitions")
}
}
}
fs.delete(stagingDir, true)
}
}
/**
* Abort the job; log and ignore any IO exception thrown.
* This is invariably invoked in an exception handler; raising
* an exception here will lose the root cause of the failure.
*
* @param jobContext job context
*/
override def abortJob(jobContext: JobContext): Unit = {
try {
committer.abortJob(jobContext, JobStatus.State.FAILED)
} catch {
case e: IOException =>
logWarning(s"Exception while aborting ${jobContext.getJobID}", e)
}
try {
if (hasValidPath) {
val fs = stagingDir.getFileSystem(jobContext.getConfiguration)
fs.delete(stagingDir, true)
}
} catch {
case e: IOException =>
logWarning(s"Exception while aborting ${jobContext.getJobID}", e)
}
}
override def setupTask(taskContext: TaskAttemptContext): Unit = {
committer = setupCommitter(taskContext)
committer.setupTask(taskContext)
addedAbsPathFiles = mutable.Map[String, String]()
partitionPaths = mutable.Set[String]()
}
override def commitTask(taskContext: TaskAttemptContext): TaskCommitMessage = {
val attemptId = taskContext.getTaskAttemptID
logTrace(s"Commit task ${attemptId}")
SparkHadoopMapRedUtil.commitTask(
committer, taskContext, attemptId.getJobID.getId, attemptId.getTaskID.getId)
new TaskCommitMessage(addedAbsPathFiles.toMap -> partitionPaths.toSet)
}
/**
* Abort the task; log and ignore any failure thrown.
* This is invariably invoked in an exception handler; raising
* an exception here will lose the root cause of the failure.
*
* @param taskContext context
*/
override def abortTask(taskContext: TaskAttemptContext): Unit = {
try {
committer.abortTask(taskContext)
} catch {
case e: IOException =>
logWarning(s"Exception while aborting ${taskContext.getTaskAttemptID}", e)
}
// best effort cleanup of other staged files
try {
for ((src, _) <- addedAbsPathFiles) {
val tmp = new Path(src)
tmp.getFileSystem(taskContext.getConfiguration).delete(tmp, false)
}
} catch {
case e: IOException =>
logWarning(s"Exception while aborting ${taskContext.getTaskAttemptID}", e)
}
}
}
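// --- Illustrative addition (not part of Spark) ---
// A schematic sketch of the commit lifecycle described in the class comment above. It strings
// the protocol's own hooks together in one JVM; in Spark proper, setupJob/commitJob run on the
// driver while setupTask/newTaskTempFile/commitTask run in tasks. The configuration, paths and
// IDs below are made up for the example, and the sketch relies on the imports at the top of
// this file.
object HadoopMapReduceCommitProtocolLifecycleSketch {
  import org.apache.hadoop.conf.Configuration
  import org.apache.hadoop.mapreduce.task.JobContextImpl

  def run(): Unit = {
    val conf = new Configuration()
    conf.set("mapreduce.output.fileoutputformat.outputdir", "/tmp/out") // assumed output dir
    val jobId = SparkHadoopWriterUtils.createJobID(new Date, 0)
    val jobContext = new JobContextImpl(conf, jobId)
    val taskAttemptId = new TaskAttemptID(new TaskID(jobId, TaskType.MAP, 0), 0)
    val taskContext = new TaskAttemptContextImpl(conf, taskAttemptId)

    val committer = new HadoopMapReduceCommitProtocol("job-0", "/tmp/out")
    committer.setupJob(jobContext)                                      // "driver" side
    committer.setupTask(taskContext)                                    // "task" side
    val tmpFile = committer.newTaskTempFile(taskContext, Some("a=1"), ".parquet")
    // ... an OutputWriter would write rows to `tmpFile` here ...
    val commitMsg = committer.commitTask(taskContext)
    committer.commitJob(jobContext, Seq(commitMsg))                     // moves staged files into place
  }
}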
|
ueshin/apache-spark
|
core/src/main/scala/org/apache/spark/internal/io/HadoopMapReduceCommitProtocol.scala
|
Scala
|
apache-2.0
| 13,791
|
package scalaprops
import scalaz._
import scalaz.std.anyVal._
import ScalapropsScalaz._
object AlterTest extends Scalaprops {
val lawsMaybe = Properties.list(
scalazlaws.equal.all[Alter[Maybe, Int]],
scalazlaws.monoid.all[Alter[Maybe, Int]]
)
val lawsIList = Properties.list(
scalazlaws.equal.all[Alter[IList, Int]],
scalazlaws.monoid.all[Alter[IList, Int]]
)
}
|
scalaprops/scalaprops
|
scalaz/src/test/scala/scalaprops/AlterTest.scala
|
Scala
|
mit
| 389
|
package cpup.lib.arguments.parsing
trait ArgData {
def prettyPrint: String
def reify(role: ArgData.Role): String
def flatten: String
}
object ArgData {
case class List(data: ArgData*) extends ArgData {
override def toString = s"List(${data.mkString(", ")})"
override def prettyPrint = s"[==[${data.map(_.prettyPrint).mkString("")}]==]"
override def reify(ctx: Role) = ctx match {
case ArgData.Role.ArgPart => s"[=[${data.map(_.reify(ArgData.Role.Arg)).mkString}]=]"
case ArgData.Role.Arg => data.map(_.reify(ArgData.Role.ArgPart)).mkString
}
override def flatten = data.map(_.flatten).mkString
}
case class Single(data: String) extends ArgData {
override def prettyPrint = data
override def reify(role: Role) = data
override def flatten = data
}
case object Space extends ArgData {
override def prettyPrint = " "
override def reify(role: Role) = " "
override def flatten = " "
}
sealed trait Role
object Role {
case object Arg extends Role
case object ArgPart extends Role
}
}
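// --- Illustrative addition (not part of the original file) ---
// A small example of the three renderings defined above, using made-up argument data.
object ArgDataExample {
  val arg = ArgData.List(ArgData.Single("say"), ArgData.Space, ArgData.List(ArgData.Single("hello world")))

  val pretty = arg.prettyPrint              // "[==[say [==[hello world]==]]==]"
  val reified = arg.reify(ArgData.Role.Arg) // "say [=[hello world]=]" - nested lists are re-quoted
  val flat = arg.flatten                    // "say hello world"
}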
|
CoderPuppy/cpup-lib
|
src/main/scala/cpup/lib/arguments/parsing/ArgData.scala
|
Scala
|
mit
| 1,027
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.coordinator
import java.util
import java.util.concurrent.atomic.AtomicReference
import org.apache.samza.config._
import org.apache.samza.config.JobConfig.Config2Job
import org.apache.samza.config.SystemConfig.Config2System
import org.apache.samza.config.TaskConfig.Config2Task
import org.apache.samza.config.Config
import org.apache.samza.container.grouper.stream.SystemStreamPartitionGrouperFactory
import org.apache.samza.container.grouper.task.BalancingTaskNameGrouper
import org.apache.samza.container.grouper.task.TaskNameGrouperFactory
import org.apache.samza.container.LocalityManager
import org.apache.samza.container.TaskName
import org.apache.samza.coordinator.server.HttpServer
import org.apache.samza.coordinator.server.JobServlet
import org.apache.samza.coordinator.stream.CoordinatorStreamManager
import org.apache.samza.job.model.JobModel
import org.apache.samza.job.model.TaskModel
import org.apache.samza.system._
import org.apache.samza.util.Logging
import org.apache.samza.util.Util
import org.apache.samza.Partition
import scala.collection.JavaConverters._
/**
* Helper companion object that is responsible for wiring up a JobModelManager
* given a Config object.
*/
object JobModelManager extends Logging {
val SOURCE = "JobModelManager"
/**
* a volatile value to store the current instantiated <code>JobModelManager</code>
*/
@volatile var currentJobModelManager: JobModelManager = null
val jobModelRef: AtomicReference[JobModel] = new AtomicReference[JobModel]()
/**
* Does the following actions for a job.
* a) Reads the jobModel from coordinator stream using the job's configuration.
* b) Recomputes changelog partition mapping based on jobModel and job's configuration.
* c) Builds JobModelManager using the jobModel read from coordinator stream.
* @param coordinatorStreamManager Coordinator stream manager.
* @param changelogPartitionMapping The changelog partition-to-task mapping.
* @return JobModelManager
*/
def apply(coordinatorStreamManager: CoordinatorStreamManager, changelogPartitionMapping: util.Map[TaskName, Integer]) = {
val localityManager = new LocalityManager(coordinatorStreamManager)
val config = coordinatorStreamManager.getConfig
// Map the name of each system to the corresponding SystemAdmin
val systemAdmins = new SystemAdmins(config)
val streamMetadataCache = new StreamMetadataCache(systemAdmins, 0)
val containerCount = new JobConfig(config).getContainerCount
val processorList = List.range(0, containerCount).map(c => c.toString)
systemAdmins.start()
val jobModelManager = getJobModelManager(config, changelogPartitionMapping, localityManager, streamMetadataCache, processorList.asJava)
systemAdmins.stop()
jobModelManager
}
/**
* Build a JobModelManager using a Samza job's configuration.
*/
private def getJobModelManager(config: Config,
changeLogMapping: util.Map[TaskName, Integer],
localityManager: LocalityManager,
streamMetadataCache: StreamMetadataCache,
containerIds: java.util.List[String]) = {
val jobModel: JobModel = readJobModel(config, changeLogMapping, localityManager, streamMetadataCache, containerIds)
jobModelRef.set(jobModel)
val server = new HttpServer
server.addServlet("/", new JobServlet(jobModelRef))
currentJobModelManager = new JobModelManager(jobModel, server)
currentJobModelManager
}
/**
* For each input stream specified in config, exactly determine its
* partitions, returning a set of SystemStreamPartitions containing them all.
*/
private def getInputStreamPartitions(config: Config, streamMetadataCache: StreamMetadataCache) = {
val inputSystemStreams = config.getInputStreams
// Get the set of partitions for each SystemStream from the stream metadata
streamMetadataCache
.getStreamMetadata(inputSystemStreams, true)
.flatMap {
case (systemStream, metadata) =>
metadata
.getSystemStreamPartitionMetadata
.asScala
.keys
.map(new SystemStreamPartition(systemStream, _))
}.toSet
}
private def getMatchedInputStreamPartitions(config: Config, streamMetadataCache: StreamMetadataCache): Set[SystemStreamPartition] = {
val allSystemStreamPartitions = getInputStreamPartitions(config, streamMetadataCache)
config.getSSPMatcherClass match {
case Some(s) => {
val jfr = config.getSSPMatcherConfigJobFactoryRegex.r
config.getStreamJobFactoryClass match {
case Some(jfr(_*)) => {
info("before match: allSystemStreamPartitions.size = %s" format (allSystemStreamPartitions.size))
val sspMatcher = Util.getObj(s, classOf[SystemStreamPartitionMatcher])
val matchedPartitions = sspMatcher.filter(allSystemStreamPartitions.asJava, config).asScala.toSet
// Usually a small set hence ok to log at info level
info("after match: matchedPartitions = %s" format (matchedPartitions))
matchedPartitions
}
case _ => allSystemStreamPartitions
}
}
case _ => allSystemStreamPartitions
}
}
/**
* Gets a SystemStreamPartitionGrouper object from the configuration.
*/
private def getSystemStreamPartitionGrouper(config: Config) = {
val factoryString = config.getSystemStreamPartitionGrouperFactory
val factory = Util.getObj(factoryString, classOf[SystemStreamPartitionGrouperFactory])
factory.getSystemStreamPartitionGrouper(config)
}
/**
* The function reads the latest checkpoint from the underlying coordinator stream and
* builds a new JobModel.
*/
def readJobModel(config: Config,
changeLogPartitionMapping: util.Map[TaskName, Integer],
localityManager: LocalityManager,
streamMetadataCache: StreamMetadataCache,
containerIds: java.util.List[String]): JobModel = {
// Do grouping to fetch TaskName to SSP mapping
val allSystemStreamPartitions = getMatchedInputStreamPartitions(config, streamMetadataCache)
// processor list is required by some of the groupers. So, let's pass them as part of the config.
// Copy the config and add the processor list to the config copy.
val configMap = new util.HashMap[String, String](config)
configMap.put(JobConfig.PROCESSOR_LIST, String.join(",", containerIds))
val grouper = getSystemStreamPartitionGrouper(new MapConfig(configMap))
val groups = grouper.group(allSystemStreamPartitions.asJava)
info("SystemStreamPartitionGrouper %s has grouped the SystemStreamPartitions into %d tasks with the following taskNames: %s" format(grouper, groups.size(), groups.keySet()))
val isHostAffinityEnabled = new ClusterManagerConfig(config).getHostAffinityEnabled
// If no mappings are present (first time the job is running) we return -1; this allows 0 to be the first changelog
// partition mapping.
var maxChangelogPartitionId = changeLogPartitionMapping.asScala.values.map(_.toInt).toList.sorted.lastOption.getOrElse(-1)
// Sort the groups prior to assigning the changelog mapping so that the mapping is reproducible and intuitive
val sortedGroups = new util.TreeMap[TaskName, util.Set[SystemStreamPartition]](groups)
// Assign all SystemStreamPartitions to TaskNames.
val taskModels = {
sortedGroups.asScala.map { case (taskName, systemStreamPartitions) =>
val changelogPartition = Option(changeLogPartitionMapping.get(taskName)) match {
case Some(changelogPartitionId) => new Partition(changelogPartitionId)
case _ =>
// If we've never seen this TaskName before, then assign it a
// new changelog.
maxChangelogPartitionId += 1
info("New task %s is being assigned changelog partition %s." format(taskName, maxChangelogPartitionId))
new Partition(maxChangelogPartitionId)
}
new TaskModel(taskName, systemStreamPartitions, changelogPartition)
}.toSet
}
// Here is where we should put in a pluggable option for the
// SSPTaskNameGrouper for locality, load-balancing, etc.
val containerGrouperFactory = Util.getObj(config.getTaskNameGrouperFactory, classOf[TaskNameGrouperFactory])
val containerGrouper = containerGrouperFactory.build(config)
val containerModels = {
containerGrouper match {
case grouper: BalancingTaskNameGrouper if isHostAffinityEnabled => grouper.balance(taskModels.asJava, localityManager)
case _ => containerGrouper.group(taskModels.asJava, containerIds)
}
}
val containerMap = containerModels.asScala.map { case (containerModel) => containerModel.getProcessorId -> containerModel }.toMap
if (isHostAffinityEnabled) {
new JobModel(config, containerMap.asJava, localityManager)
} else {
new JobModel(config, containerMap.asJava)
}
}
private def getSystemNames(config: Config) = config.getSystemNames.toSet
}
/**
* <p>JobModelManager is responsible for managing the lifecycle of a Samza job
* once it's been started. This includes starting and stopping containers,
* managing configuration, etc.</p>
*
* <p>Any new cluster manager that's integrated with Samza (YARN, Mesos, etc)
* must integrate with the job coordinator.</p>
*
* <p>This class' API is currently unstable, and likely to change. Its
* responsibility right now is simply to propagate the job model via the HTTP
* server.</p>
*/
class JobModelManager(
/**
* The data model that describes the Samza job's containers and tasks.
*/
val jobModel: JobModel,
/**
* HTTP server used to serve a Samza job's container model to SamzaContainers when they start up.
*/
val server: HttpServer = null) extends Logging {
debug("Got job model: %s." format jobModel)
def start {
if (server != null) {
debug("Starting HTTP server.")
server.start
info("Started HTTP server: %s" format server.getUrl)
}
}
def stop {
if (server != null) {
debug("Stopping HTTP server.")
server.stop
info("Stopped HTTP server.")
}
}
}
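// --- Illustrative addition (not part of Samza) ---
// A minimal sketch of the lifecycle described above: build the JobModelManager from the
// coordinator stream, serve the job model over HTTP while the job runs, then stop.
// `coordinatorStreamManager` and `changelogPartitionMapping` are assumed to come from the caller.
object JobModelManagerUsageSketch {
  def serve(coordinatorStreamManager: CoordinatorStreamManager,
            changelogPartitionMapping: util.Map[TaskName, Integer]): Unit = {
    val jobModelManager = JobModelManager(coordinatorStreamManager, changelogPartitionMapping)
    jobModelManager.start
    try {
      // ... containers fetch the JobModel from jobModelManager.server while the job runs ...
    } finally {
      jobModelManager.stop
    }
  }
}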
|
fredji97/samza
|
samza-core/src/main/scala/org/apache/samza/coordinator/JobModelManager.scala
|
Scala
|
apache-2.0
| 11,129
|
package net.mtgto.garoon.schedule
import com.github.nscala_time.time.Imports._
import net.mtgto.garoon.{Authentication, Id, GaroonClient}
import org.sisioh.dddbase.core.lifecycle.{EntityNotFoundException, EntityIOContext}
import org.sisioh.dddbase.core.lifecycle.sync.SyncEntityReader
import scala.util.Try
import scala.xml.XML
class EventRepository(client: GaroonClient, val auth: Authentication) extends SyncEntityReader[EventId, Event] {
def resolve(identity: EventId)(implicit ctx: EntityIOContext[Try]): Try[Event] = {
val actionName = "ScheduleGetEventsById"
val parameters = client.factory.createOMElement("parameters", null)
val eventNode = client.factory.createOMElement("event_id", null)
eventNode.setText(identity.value)
parameters.addChild(eventNode)
val result = client.sendReceive(actionName, "/cbpapi/schedule/api", parameters)(auth, None)
result.map { element =>
val node = XML.loadString(element.toString)
(node \\ "returns" \\ "schedule_event").map(Event(_)).headOption.getOrElse(throw new EntityNotFoundException)
}
}
def containsByIdentity(identity: EventId)(implicit ctx: EntityIOContext[Try]): Try[Boolean] =
resolve(identity).map(_ => true)
def findByUserId(userId: Id, interval: Interval): Try[Seq[Event]] = {
val actionName = "ScheduleGetEventsByTarget"
val parameters = client.factory.createOMElement("parameters", null)
parameters.addAttribute("start", interval.getStart.toString(), null)
parameters.addAttribute("end", interval.getEnd.toString(), null)
val memberNode = client.factory.createOMElement("user", null)
memberNode.addAttribute("id", userId.value, null)
parameters.addChild(memberNode)
val result = client.sendReceive(actionName, "/cbpapi/schedule/api", parameters)(auth, None)
result.map { element =>
val node = XML.loadString(element.toString)
node \\ "returns" \\ "schedule_event" map (Event(_))
}
}
}
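// --- Illustrative addition (not part of the original file) ---
// A small sketch of the lookups above; `client`, `auth` and `userId` are assumed to be
// created elsewhere, and the interval simply covers the next 24 hours.
object EventRepositoryUsageSketch {
  def upcomingEvents(client: GaroonClient, auth: Authentication, userId: Id): Try[Seq[Event]] = {
    val repository = new EventRepository(client, auth)
    repository.findByUserId(userId, DateTime.now to DateTime.now.plusDays(1))
  }
}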
|
mtgto/garoon
|
src/main/scala/net/mtgto/garoon/schedule/EventRepository.scala
|
Scala
|
gpl-3.0
| 1,957
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import scala.collection.mutable
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce.TaskAttemptContext
import org.apache.spark.internal.io.FileCommitProtocol
import org.apache.spark.internal.io.FileCommitProtocol.TaskCommitMessage
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.types.StringType
import org.apache.spark.util.SerializableConfiguration
/**
* Abstract class for writing out data in a single Spark task.
* Exceptions thrown by the implementation of this trait will automatically trigger task aborts.
*/
abstract class FileFormatDataWriter(
description: WriteJobDescription,
taskAttemptContext: TaskAttemptContext,
committer: FileCommitProtocol) {
/**
* Max number of files a single task writes out due to file size. In most cases the number of
* files written should be very small. This is just a safe guard against some really bad
* settings, e.g. maxRecordsPerFile = 1.
*/
protected val MAX_FILE_COUNTER: Int = 1000 * 1000
protected val updatedPartitions: mutable.Set[String] = mutable.Set[String]()
protected var currentWriter: OutputWriter = _
/** Trackers for computing various statistics on the data as it's being written out. */
protected val statsTrackers: Seq[WriteTaskStatsTracker] =
description.statsTrackers.map(_.newTaskInstance())
protected def releaseResources(): Unit = {
if (currentWriter != null) {
try {
currentWriter.close()
} finally {
currentWriter = null
}
}
}
/** Writes a record */
def write(record: InternalRow): Unit
/**
* Returns the summary of relevant information which
* includes the list of partition strings written out. The list of partitions is sent back
* to the driver and used to update the catalog. Other information will be sent back to the
* driver too and used to e.g. update the metrics in UI.
*/
def commit(): WriteTaskResult = {
releaseResources()
val summary = ExecutedWriteSummary(
updatedPartitions = updatedPartitions.toSet,
stats = statsTrackers.map(_.getFinalStats()))
WriteTaskResult(committer.commitTask(taskAttemptContext), summary)
}
def abort(): Unit = {
try {
releaseResources()
} finally {
committer.abortTask(taskAttemptContext)
}
}
}
/** FileFormatWriteTask for empty partitions */
class EmptyDirectoryDataWriter(
description: WriteJobDescription,
taskAttemptContext: TaskAttemptContext,
committer: FileCommitProtocol
) extends FileFormatDataWriter(description, taskAttemptContext, committer) {
override def write(record: InternalRow): Unit = {}
}
/** Writes data to a single directory (used for non-dynamic-partition writes). */
class SingleDirectoryDataWriter(
description: WriteJobDescription,
taskAttemptContext: TaskAttemptContext,
committer: FileCommitProtocol)
extends FileFormatDataWriter(description, taskAttemptContext, committer) {
private var fileCounter: Int = _
private var recordsInFile: Long = _
// Initialize currentWriter and statsTrackers
newOutputWriter()
private def newOutputWriter(): Unit = {
recordsInFile = 0
releaseResources()
val ext = description.outputWriterFactory.getFileExtension(taskAttemptContext)
val currentPath = committer.newTaskTempFile(
taskAttemptContext,
None,
f"-c$fileCounter%03d" + ext)
currentWriter = description.outputWriterFactory.newInstance(
path = currentPath,
dataSchema = description.dataColumns.toStructType,
context = taskAttemptContext)
statsTrackers.foreach(_.newFile(currentPath))
}
override def write(record: InternalRow): Unit = {
if (description.maxRecordsPerFile > 0 && recordsInFile >= description.maxRecordsPerFile) {
fileCounter += 1
assert(fileCounter < MAX_FILE_COUNTER,
s"File counter $fileCounter is beyond max value $MAX_FILE_COUNTER")
newOutputWriter()
}
currentWriter.write(record)
statsTrackers.foreach(_.newRow(record))
recordsInFile += 1
}
}
/**
* Writes data using dynamic partition writes, meaning this single writer can write to
* multiple directories (partitions) or files (bucketing).
*/
class DynamicPartitionDataWriter(
description: WriteJobDescription,
taskAttemptContext: TaskAttemptContext,
committer: FileCommitProtocol)
extends FileFormatDataWriter(description, taskAttemptContext, committer) {
/** Flag saying whether or not the data to be written out is partitioned. */
private val isPartitioned = description.partitionColumns.nonEmpty
/** Flag saying whether or not the data to be written out is bucketed. */
private val isBucketed = description.bucketIdExpression.isDefined
assert(isPartitioned || isBucketed,
s"""DynamicPartitionWriteTask should be used for writing out data that's either
|partitioned or bucketed. In this case neither is true.
|WriteJobDescription: $description
""".stripMargin)
private var fileCounter: Int = _
private var recordsInFile: Long = _
private var currentPartionValues: Option[UnsafeRow] = None
private var currentBucketId: Option[Int] = None
/** Extracts the partition values out of an input row. */
private lazy val getPartitionValues: InternalRow => UnsafeRow = {
val proj = UnsafeProjection.create(description.partitionColumns, description.allColumns)
row => proj(row)
}
/** Expression that given partition columns builds a path string like: col1=val/col2=val/... */
private lazy val partitionPathExpression: Expression = Concat(
description.partitionColumns.zipWithIndex.flatMap { case (c, i) =>
val partitionName = ScalaUDF(
ExternalCatalogUtils.getPartitionPathString _,
StringType,
Seq(Literal(c.name), Cast(c, StringType, Option(description.timeZoneId))),
Seq(true, true))
if (i == 0) Seq(partitionName) else Seq(Literal(Path.SEPARATOR), partitionName)
})
/** Evaluates the `partitionPathExpression` above on a row of `partitionValues` and returns
* the partition string. */
private lazy val getPartitionPath: InternalRow => String = {
val proj = UnsafeProjection.create(Seq(partitionPathExpression), description.partitionColumns)
row => proj(row).getString(0)
}
/** Given an input row, returns the corresponding `bucketId` */
private lazy val getBucketId: InternalRow => Int = {
val proj =
UnsafeProjection.create(description.bucketIdExpression.toSeq, description.allColumns)
row => proj(row).getInt(0)
}
/** Returns the data columns to be written given an input row */
private val getOutputRow =
UnsafeProjection.create(description.dataColumns, description.allColumns)
/**
* Opens a new OutputWriter given a partition key and/or a bucket id.
* If bucket id is specified, we will append it to the end of the file name, but before the
* file extension, e.g. part-r-00009-ea518ad4-455a-4431-b471-d24e03814677-00002.gz.parquet
*
* @param partitionValues the partition which all tuples being written by this `OutputWriter`
* belong to
* @param bucketId the bucket which all tuples being written by this `OutputWriter` belong to
*/
private def newOutputWriter(partitionValues: Option[InternalRow], bucketId: Option[Int]): Unit = {
recordsInFile = 0
releaseResources()
val partDir = partitionValues.map(getPartitionPath(_))
partDir.foreach(updatedPartitions.add)
val bucketIdStr = bucketId.map(BucketingUtils.bucketIdToString).getOrElse("")
// This must be in a form that matches our bucketing format. See BucketingUtils.
val ext = f"$bucketIdStr.c$fileCounter%03d" +
description.outputWriterFactory.getFileExtension(taskAttemptContext)
val customPath = partDir.flatMap { dir =>
description.customPartitionLocations.get(PartitioningUtils.parsePathFragment(dir))
}
val currentPath = if (customPath.isDefined) {
committer.newTaskTempFileAbsPath(taskAttemptContext, customPath.get, ext)
} else {
committer.newTaskTempFile(taskAttemptContext, partDir, ext)
}
currentWriter = description.outputWriterFactory.newInstance(
path = currentPath,
dataSchema = description.dataColumns.toStructType,
context = taskAttemptContext)
statsTrackers.foreach(_.newFile(currentPath))
}
override def write(record: InternalRow): Unit = {
val nextPartitionValues = if (isPartitioned) Some(getPartitionValues(record)) else None
val nextBucketId = if (isBucketed) Some(getBucketId(record)) else None
if (currentPartionValues != nextPartitionValues || currentBucketId != nextBucketId) {
// See a new partition or bucket - write to a new partition dir (or a new bucket file).
if (isPartitioned && currentPartionValues != nextPartitionValues) {
currentPartionValues = Some(nextPartitionValues.get.copy())
statsTrackers.foreach(_.newPartition(currentPartionValues.get))
}
if (isBucketed) {
currentBucketId = nextBucketId
statsTrackers.foreach(_.newBucket(currentBucketId.get))
}
fileCounter = 0
newOutputWriter(currentPartionValues, currentBucketId)
} else if (description.maxRecordsPerFile > 0 &&
recordsInFile >= description.maxRecordsPerFile) {
// Exceeded the threshold in terms of the number of records per file.
// Create a new file by increasing the file counter.
fileCounter += 1
assert(fileCounter < MAX_FILE_COUNTER,
s"File counter $fileCounter is beyond max value $MAX_FILE_COUNTER")
newOutputWriter(currentPartionValues, currentBucketId)
}
val outputRow = getOutputRow(record)
currentWriter.write(outputRow)
statsTrackers.foreach(_.newRow(outputRow))
recordsInFile += 1
}
}
/** A shared job description for all the write tasks. */
class WriteJobDescription(
val uuid: String, // prevent collision between different (appending) write jobs
val serializableHadoopConf: SerializableConfiguration,
val outputWriterFactory: OutputWriterFactory,
val allColumns: Seq[Attribute],
val dataColumns: Seq[Attribute],
val partitionColumns: Seq[Attribute],
val bucketIdExpression: Option[Expression],
val path: String,
val customPartitionLocations: Map[TablePartitionSpec, String],
val maxRecordsPerFile: Long,
val timeZoneId: String,
val statsTrackers: Seq[WriteJobStatsTracker])
extends Serializable {
assert(AttributeSet(allColumns) == AttributeSet(partitionColumns ++ dataColumns),
s"""
|All columns: ${allColumns.mkString(", ")}
|Partition columns: ${partitionColumns.mkString(", ")}
|Data columns: ${dataColumns.mkString(", ")}
""".stripMargin)
}
/** The result of a successful write task. */
case class WriteTaskResult(commitMsg: TaskCommitMessage, summary: ExecutedWriteSummary)
/**
* Wrapper class for the metrics of writing data out.
*
* @param updatedPartitions the partitions updated during writing data out. Only valid
* for dynamic partitions.
* @param stats one `WriteTaskStats` object for every `WriteJobStatsTracker` that the job had.
*/
case class ExecutedWriteSummary(
updatedPartitions: Set[String],
stats: Seq[WriteTaskStats])
|
hhbyyh/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormatDataWriter.scala
|
Scala
|
apache-2.0
| 12,389
|
/*
* This file is part of AckCord, licensed under the MIT License (MIT).
*
* Copyright (c) 2019 Katrix
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package ackcord
import scala.collection.mutable
import ackcord.cachehandlers.CacheSnapshotBuilder
import ackcord.gateway.GatewayEvent.ReadyData
import ackcord.gateway.GatewayMessage
import ackcord.requests.SupervisionStreams
import akka.NotUsed
import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{BroadcastHub, Flow, Keep, MergeHub, Sink, Source}
import org.slf4j.Logger
object CacheStreams {
/**
* Creates a set of publish subscribe streams that go through the cache updater.
*/
def cacheStreams(
implicit system: ActorSystem[Nothing]
): (Sink[CacheEvent, NotUsed], Source[(CacheEvent, CacheState), NotUsed]) = {
SupervisionStreams
.addLogAndContinueFunction(
MergeHub
.source[CacheEvent](perProducerBufferSize = 16)
.via(cacheUpdater)
.toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both)
.addAttributes
)
.run()
}
/**
* Creates a set of publish subscribe streams for gateway events.
*/
def gatewayEvents[D](
implicit system: ActorSystem[Nothing]
): (Sink[GatewayMessage[D], NotUsed], Source[GatewayMessage[D], NotUsed]) =
SupervisionStreams
.addLogAndContinueFunction(
MergeHub
.source[GatewayMessage[D]](perProducerBufferSize = 16)
.toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both)
.addAttributes
)
.run()
/**
* A flow that creates [[APIMessage]]s from update events.
*/
def createApiMessages: Flow[(CacheEvent, CacheState), APIMessage, NotUsed] = {
Flow[(CacheEvent, CacheState)]
.collect {
case (APIMessageCacheUpdate(_, sendEvent, _, _), state) => sendEvent(state)
}
.mapConcat(_.toList)
}
/**
* A flow that keeps track of the current cache state, and updates it
* from cache update events.
*/
def cacheUpdater(implicit system: ActorSystem[Nothing]): Flow[CacheEvent, (CacheEvent, CacheState), NotUsed] =
Flow[CacheEvent].statefulMapConcat { () =>
var state: CacheState = null
implicit val log: Logger = system.log
//We only handle events when we are ready to, and we have received the ready event.
def isReady: Boolean = state != null
{
case readyEvent @ APIMessageCacheUpdate(_: ReadyData, _, _, _) =>
val builder = new CacheSnapshotBuilder(
null, // The event will populate this
mutable.Map.empty,
mutable.Map.empty,
mutable.Map.empty,
mutable.Map.empty,
mutable.Map.empty,
mutable.Map.empty,
mutable.Map.empty,
mutable.Map.empty
)
readyEvent.process(builder)
val snapshot = builder.toImmutable
state = CacheState(snapshot, snapshot)
List(readyEvent -> state)
case handlerEvent: CacheEvent if isReady =>
val builder = CacheSnapshotBuilder(state.current)
handlerEvent.process(builder)
state = state.update(builder.toImmutable)
List(handlerEvent -> state)
case _ if !isReady =>
log.error("Received event before ready")
Nil
}
}
}
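// A minimal wiring sketch, assuming an implicit ActorSystem[Nothing] in scope and a
// hypothetical handler `handleApiMessage: APIMessage => Unit`:
// {{{
//   val (cachePublish, cacheSubscribe) = CacheStreams.cacheStreams
//   cacheSubscribe.via(CacheStreams.createApiMessages).runForeach(handleApiMessage)
// }}}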
|
Katrix-/AckCord
|
core/src/main/scala/ackcord/CacheStreams.scala
|
Scala
|
mit
| 4,384
|
/*
* Copyright 2020 Lenses.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.lenses.streamreactor.connect.aws.s3.formats.parquet
import java.io.ByteArrayInputStream
import org.apache.parquet.io.{DelegatingSeekableInputStream, InputFile, SeekableInputStream}
class SeekableByteArrayInputStream(val bArr: Array[Byte]) extends ByteArrayInputStream(bArr) {
def setPos(pos: Int): Unit = this.pos = pos
def getPos: Int = this.pos
}
class ParquetInputFile(inputStream: SeekableByteArrayInputStream) extends InputFile {
override def getLength: Long = inputStream.available()
override def newStream(): SeekableInputStream = new DelegatingSeekableInputStream(inputStream) {
override def getPos: Long = getStream.asInstanceOf[SeekableByteArrayInputStream].getPos.longValue
override def seek(newPos: Long): Unit = getStream.asInstanceOf[SeekableByteArrayInputStream].setPos(newPos.intValue)
}
}
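// A usage sketch, assuming `bytes` holds a complete parquet file; the reader used to
// consume the InputFile (e.g. parquet-avro's AvroParquetReader) is an assumption here:
// {{{
//   val inputFile = new ParquetInputFile(new SeekableByteArrayInputStream(bytes))
//   val reader    = AvroParquetReader.builder[GenericRecord](inputFile).build()
// }}}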
|
datamountaineer/stream-reactor
|
kafka-connect-aws-s3/src/main/scala/io/lenses/streamreactor/connect/aws/s3/formats/parquet/ParquetInputFile.scala
|
Scala
|
apache-2.0
| 1,441
|
import com.github.tminglei.slickpg
import com.vividsolutions.jts.geom._
import play.api.data._
import play.api.data.format._
import java.util.UUID
import org.joda.time.{LocalDateTime, LocalDate}
import play.api.libs.json.JsValue
package object myUtils {
type Range[T] = slickpg.Range[T]
val Range = slickpg.Range
/// play form formatter aliases
implicit val stringFormat = Formats.stringFormat
implicit val longFormat = Formats.longFormat
implicit val intFormat = Formats.intFormat
implicit val booleanFormat = Formats.booleanFormat
implicit val jodaDateFormat = Formats.jodaLocalDateFormat
implicit val jodaDateTimeFormat = MyFormats.jodaDateTimeFormat
implicit val uuidFormat = MyFormats.uuidFormat
implicit val intRangeFormat = MyFormats.rangeFormat[Int](_.toInt)
implicit val longRangeFormat = MyFormats.rangeFormat[Long](_.toLong)
implicit val floatRangeFormat = MyFormats.rangeFormat[Float](_.toFloat)
implicit val dateRangeFormat = MyFormats.rangeFormat[LocalDate](LocalDate.parse)
implicit val dateTimeRangeFormat = MyFormats.rangeFormat[LocalDateTime](LocalDateTime.parse)
implicit val geometryFormat = MyFormats.geometryFormat[Geometry]
implicit val pointFormat = MyFormats.geometryFormat[Point]
implicit val polygonFormat = MyFormats.geometryFormat[Polygon]
implicit val lineStringFormat = MyFormats.geometryFormat[LineString]
implicit val linearRingFormat = MyFormats.geometryFormat[LinearRing]
implicit val geometryCollectionFormat = MyFormats.geometryFormat[GeometryCollection]
implicit val multiPointFormat = MyFormats.geometryFormat[MultiPoint]
implicit val multiPolygonFormat = MyFormats.geometryFormat[MultiPolygon]
implicit val multiLineStringFormat = MyFormats.geometryFormat[MultiLineString]
implicit val strMapFormat = MyFormats.strMapFormat
implicit val jsonFormat = MyFormats.jsonFormat
// play form mappings
val uuid: Mapping[UUID] = Forms.of[UUID]
val point: Mapping[Point] = Forms.of[Point]
val polygon: Mapping[Polygon] = Forms.of[Polygon]
val date: Mapping[LocalDate] = Forms.of[LocalDate]
val datetime: Mapping[LocalDateTime] = Forms.of[LocalDateTime]
val strMap: Mapping[Map[String, String]] = Forms.of[Map[String, String]]
val intRange: Mapping[Range[Int]] = Forms.of[Range[Int]]
val longRange: Mapping[Range[Long]] = Forms.of[Range[Long]]
val floatRange: Mapping[Range[Float]] = Forms.of[Range[Float]]
val dateRange: Mapping[Range[LocalDate]] = Forms.of[Range[LocalDate]]
val dateTimeRange: Mapping[Range[LocalDateTime]] = Forms.of[Range[LocalDateTime]]
val json: Mapping[JsValue] = Forms.of[JsValue]
}
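// A small sketch of combining these mappings in a Play form; the field names are
// illustrative only:
// {{{
//   import play.api.data.Forms._
//   val eventForm = Form(tuple("id" -> uuid, "where" -> point, "when" -> datetime))
// }}}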
|
hardmettle/slick-postgress-samples
|
app/myUtils/package.scala
|
Scala
|
apache-2.0
| 2,617
|
package com.github.al.roulette.load.impl
case class LoadTestEvent(msg: String)
|
andrei-l/reactive-roulette
|
load-test-impl/src/main/scala/com/github/al/roulette/load/impl/LoadTestEvent.scala
|
Scala
|
mit
| 80
|
package org.scalajs.core.tools.linker.backend.closure
import org.scalajs.core.ir
import ir.Position.NoPosition
import org.scalajs.core.tools.javascript.Trees.Tree
import org.scalajs.core.tools.javascript.JSTreeBuilder
import com.google.javascript.rhino._
import com.google.javascript.jscomp._
import scala.collection.mutable
import java.net.URI
private[closure] class ClosureAstBuilder(
relativizeBaseURI: Option[URI] = None) extends JSTreeBuilder {
private val transformer = new ClosureAstTransformer(relativizeBaseURI)
private val treeBuf = mutable.ListBuffer.empty[Node]
def addJSTree(tree: Tree): Unit =
treeBuf += transformer.transformStat(tree)(NoPosition)
lazy val closureAST: SourceAst = {
val root = transformer.setNodePosition(IR.script(treeBuf: _*), NoPosition)
treeBuf.clear()
new ClosureAstBuilder.ScalaJSSourceAst(root)
}
}
private object ClosureAstBuilder {
// Dummy Source AST class
private class ScalaJSSourceAst(root: Node) extends SourceAst {
def getAstRoot(compiler: AbstractCompiler): Node = root
def clearAst(): Unit = () // Just for GC. Nonsensical here.
def getInputId(): InputId = root.getInputId()
def getSourceFile(): SourceFile =
root.getStaticSourceFile().asInstanceOf[SourceFile]
def setSourceFile(file: SourceFile): Unit =
if (getSourceFile() ne file) throw new IllegalStateException
}
}
|
lrytz/scala-js
|
tools/jvm/src/main/scala/org/scalajs/core/tools/linker/backend/closure/ClosureAstBuilder.scala
|
Scala
|
bsd-3-clause
| 1,401
|
package com.github.jpmossin.charjump
import java.awt.event._
import java.awt.{Dimension, Graphics}
import javax.swing.JTextField
import com.intellij.openapi.editor.colors.EditorFontType
import com.intellij.openapi.editor.{Editor, VisualPosition}
import com.intellij.openapi.ui.popup.JBPopupFactory
import com.intellij.ui.awt.RelativePoint
/**
* Displays a small popup box for capturing the search input.
* Once an input char is captured, the search box will close itself
* and call the registered listener.
*/
class SearchBox(editor: Editor, keyPressedHandler: Char => Unit) extends JTextField {
private val popup = createPopup()
def setupAndShow(): Unit = {
setupKeyListener()
popup.show(guessBestLocation())
popup.setRequestFocus(true)
requestFocus()
}
private def createPopup() = {
val popupBuilder = JBPopupFactory.getInstance().createComponentPopupBuilder(this, this)
val popup = popupBuilder.createPopup()
popup.setSize(computeDimensions())
popup
}
private def guessBestLocation(): RelativePoint = {
val logicalPosition = editor.getCaretModel.getVisualPosition
getPointFromVisualPosition(logicalPosition)
}
private def getPointFromVisualPosition(logicalPosition: VisualPosition): RelativePoint = {
val p = editor.visualPositionToXY(new VisualPosition(logicalPosition.line, logicalPosition.column))
new RelativePoint(editor.getContentComponent, p)
}
private def computeDimensions() = {
val editorFont = editor.getColorsScheme.getFont(EditorFontType.PLAIN)
val width = getFontMetrics(editorFont).stringWidth("W")
new Dimension(width + 1, editorFont.getSize + 1)
}
private def setupKeyListener(): Unit = {
addKeyListener(new KeyAdapter {
override def keyTyped(e: KeyEvent): Unit = {
closePopup()
keyPressedHandler(e.getKeyChar)
}
})
}
private def closePopup(): Unit = {
popup.cancel()
popup.dispose()
}
override def paintBorder(g: Graphics): Unit = {}
}
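// A minimal usage sketch, assuming `editor` is the active Editor and `jumpToChar` is a
// hypothetical handler for the captured character:
// {{{
//   new SearchBox(editor, jumpToChar).setupAndShow()
// }}}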
|
jpmossin/CharJump
|
src/main/scala/com/github/jpmossin/charjump/SearchBox.scala
|
Scala
|
mit
| 2,015
|
package cats.scalatest
import org.scalatest.matchers.{ MatchResult, BeMatcher, Matcher }
import scala.util.Either
import cats.syntax.either._
trait EitherMatchers {
/**
* Checks to see if `scala.util.Either` is a specific Left element.
*/
def beLeft[E](element: E): Matcher[E Either _] = new BeCatsLeftEither[E](element)
/**
* Checks to see if `scala.util.Either` is a `Left`.
*/
def left[E]: BeMatcher[E Either _] = new IsCatsLeftEitherMatcher[E]
/**
* Checks to see if `scala.util.Either` is a specific Right element.
*/
def beRight[T](element: T): Matcher[_ Either T] = new BeCatsRightEitherMatcher[T](element)
/**
* Checks to see if `scala.util.Either` is a `Right`.
*/
def right[T]: BeMatcher[_ Either T] = new IsCatsRightEitherMatcher[T]
}
/**
* Import singleton in case you prefer to import rather than mix in.
* {{{
* import EitherMatchers._
* result should beRight(100)
* }}}
*/
final object EitherMatchers extends EitherMatchers
final private[scalatest] class BeCatsRightEitherMatcher[T](element: T) extends Matcher[_ Either T] {
def apply(either: _ Either T): MatchResult = {
MatchResult(
either.fold(_ => false, _ == element),
s"'$either' did not contain an Right element matching '$element'.",
s"'$either' contained an Right element matching '$element', but should not have."
)
}
}
final private[scalatest] class BeCatsLeftEither[E](element: E) extends Matcher[E Either _] {
def apply(either: E Either _): MatchResult = {
MatchResult(
either.fold(_ == element, _ => false),
s"'$either' did not contain an Left element matching '$element'.",
s"'$either' contained an Left element matching '$element', but should not have."
)
}
}
final private[scalatest] class IsCatsLeftEitherMatcher[E] extends BeMatcher[E Either _] {
def apply(either: E Either _): MatchResult = MatchResult(
either.isLeft,
s"'$either' was not an Left, but should have been.",
s"'$either' was an Left, but should *NOT* have been."
)
}
final private[scalatest] class IsCatsRightEitherMatcher[T] extends BeMatcher[_ Either T] {
def apply(either: _ Either T): MatchResult = MatchResult(
either.isRight,
s"'$either' was not an Right, but should have been.",
s"'$either' was an Right, but should *NOT* have been."
)
}
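// A short usage sketch in a spec mixing in EitherMatchers; `parse` is a hypothetical
// function returning an Either[String, Int]:
// {{{
//   parse("42") should beRight(42)
//   parse("x") should be(left)
//   parse("x") should beLeft("not a number")
// }}}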
|
coltfred/cats-scalatest
|
src/main/scala/cats/scalatest/EitherMatchers.scala
|
Scala
|
apache-2.0
| 2,343
|
package com.arcusys.valamis.web.servlet.grade
import javax.servlet.http.HttpServletResponse
import com.arcusys.learn.liferay.util.PortletName
import com.arcusys.valamis.web.portlet.base.ViewAllPermission
import com.arcusys.valamis.web.servlet.base.{BaseJsonApiController, PermissionUtil}
import com.arcusys.valamis.web.servlet.grade.notification.GradebookNotificationHelper
import com.arcusys.valamis.web.servlet.grade.request.NotificationRequest
class NotificationServlet extends BaseJsonApiController {
post("/notifications/gradebook(/)") {
val notificationRequest = NotificationRequest(this)
PermissionUtil.requirePermissionApi(ViewAllPermission, PortletName.Gradebook)
GradebookNotificationHelper.sendStatementCommentNotification(
notificationRequest.courseId,
PermissionUtil.getUserId,
notificationRequest.targetId,
notificationRequest.packageTitle,
request
)
response.reset()
response.setStatus(HttpServletResponse.SC_NO_CONTENT)
}
}
|
igor-borisov/valamis
|
valamis-portlets/src/main/scala/com/arcusys/valamis/web/servlet/grade/NotificationServlet.scala
|
Scala
|
gpl-3.0
| 1,006
|
/**
* © 2019 Refinitiv. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.ctrl.tasks
import akka.actor.ActorSelection
import akka.actor.FSM.Failure
import akka.util.Timeout
import cmwell.ctrl.checkers.{ComponentState, GreenStatus, YellowStatus}
import cmwell.ctrl.commands._
import cmwell.ctrl.hc._
import cmwell.ctrl.server.CommandActor
import com.typesafe.scalalogging.LazyLogging
import k.grid.Grid
import scala.concurrent.{Future, Promise}
import scala.concurrent.duration._
import akka.pattern.ask
import scala.util.Success
import scala.concurrent.ExecutionContext.Implicits.global
/**
* Created by michael on 3/9/16.
*/
case class AddNode(node: String) extends Task with LazyLogging {
implicit val timeout = Timeout(15.seconds)
private def startElasticsearch(cmd: ActorSelection, prom: Promise[Unit]): Unit = {
logger.info(s"Starting Elasticsearch on node $node")
cmd ! StartElasticsearch
Grid.system.scheduler.scheduleOnce(60.seconds) {
(HealthActor.ref ? GetElasticsearchDetailedStatus).mapTo[ElasticsearchGridStatus].map { f =>
f.getStatesMap.get(node) match {
case Some(s) =>
if (s.getColor == GreenStatus || s.getColor == YellowStatus) prom.success(())
else startElasticsearch(cmd, prom)
case None => startElasticsearch(cmd, prom)
}
}
}
}
private def startCassandra(cmd: ActorSelection, prom: Promise[Unit]): Unit = {
logger.info(s"Starting Cassandra on node $node")
cmd ! StartCassandra
Grid.system.scheduler.scheduleOnce(60.seconds) {
(HealthActor.ref ? GetCassandraDetailedStatus).mapTo[CassandraGridStatus].map { f =>
f.getStatesMap.get(node) match {
case Some(s) =>
if (s.getColor == GreenStatus) prom.success(())
else startCassandra(cmd, prom)
case None => startCassandra(cmd, prom)
}
}
}
}
override def exec: Future[TaskResult] = {
val cmd = CommandActor.select(node)
val esPromise = Promise[Unit]
val casPromise = Promise[Unit]
startElasticsearch(cmd, esPromise)
startCassandra(cmd, casPromise)
val esCancelable = cancel(esPromise, 24.hours)
val casCancelable = cancel(casPromise, 24.hours)
val esFuture = esPromise.future
val casFuture = casPromise.future
// cancel the cancelables when the future succeeds
esFuture.foreach(x => esCancelable.cancel())
casFuture.foreach(x => casCancelable.cancel())
val fut = for {
esStarted <- esFuture
casStarted <- casFuture
} yield {
logger.info("Starting CM-WELL components")
cmd ! StartKafka
cmd ! StartBg
cmd ! StartWebserver
cmd ! StartCw
cmd ! StartDc
}
fut.map(r => TaskSuccessful).recover { case err: Throwable => TaskFailed }
}
}
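// A rough invocation sketch ("node1" is an illustrative host name); the surrounding
// task framework is expected to drive the returned future:
// {{{
//   val result: Future[TaskResult] = AddNode("node1").exec
// }}}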
|
dudi3001/CM-Well
|
server/cmwell-controller/src/main/scala/cmwell/ctrl/tasks/AddNode.scala
|
Scala
|
apache-2.0
| 3,380
|
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.process.transform
import java.util.Date
import org.locationtech.jts.geom.Point
import org.geotools.data.collection.ListFeatureCollection
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder.EncodedValues
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class BinConversionProcessTest extends Specification {
import scala.collection.JavaConversions._
val sft = SimpleFeatureTypes.createType("bin",
"name:String,track:String,dtg:Date,dtg2:Date,*geom:Point:srid=4326,geom2:Point:srid=4326")
val process = new BinConversionProcess
val features = (0 until 10).map { i =>
val sf = new ScalaSimpleFeature(sft, s"0$i")
sf.setAttribute("name", s"name$i")
sf.setAttribute("track", s"$i")
sf.setAttribute("dtg", s"2017-02-20T00:00:0$i.000Z")
sf.setAttribute("dtg2", s"2017-02-21T00:00:0$i.000Z")
sf.setAttribute("geom", s"POINT(40 ${50 + i})")
sf.setAttribute("geom2", s"POINT(20 ${30 + i})")
sf
}
val ids = features.map(_.getID.hashCode)
val names = features.map(_.getAttribute("name").hashCode)
val tracks = features.map(_.getAttribute("track").hashCode)
val dates = features.map(_.getAttribute("dtg").asInstanceOf[Date].getTime)
val dates2 = features.map(_.getAttribute("dtg2").asInstanceOf[Date].getTime)
val lonlat = features.map(_.getAttribute("geom").asInstanceOf[Point]).map(p => (p.getY.toFloat, p.getX.toFloat))
val latlon = lonlat.map(_.swap)
val lonlat2 = features.map(_.getAttribute("geom2").asInstanceOf[Point]).map(p => (p.getY.toFloat, p.getX.toFloat))
val latlon2 = lonlat2.map(_.swap)
val listCollection = new ListFeatureCollection(sft, features)
// converts to tuples that we can compare to zipped values
def toTuples(value: EncodedValues): Any = value match {
case EncodedValues(trackId, lat, lon, dtg, label) if label == -1L => ((trackId, dtg), (lat, lon))
case EncodedValues(trackId, lat, lon, dtg, label) => (((trackId, dtg), (lat, lon)), label)
}
"BinConversionProcess" should {
"encode an empty feature collection" in {
val bytes = process.execute(new ListFeatureCollection(sft), null, null, null, null, "lonlat")
bytes must beEmpty
}
"encode a generic feature collection" in {
val bytes = process.execute(listCollection, null, null, null, null, "lonlat").toList
bytes must haveLength(10)
val decoded = bytes.map(BinaryOutputEncoder.decode).map(toTuples)
decoded must containTheSameElementsAs(ids.zip(dates).zip(lonlat))
}
"encode a generic feature collection with alternate values" in {
val bytes = process.execute(listCollection, "name", "geom2", "dtg2", null, "lonlat").toList
bytes must haveLength(10)
val decoded = bytes.map(BinaryOutputEncoder.decode).map(toTuples)
decoded must containTheSameElementsAs(names.zip(dates2).zip(lonlat2))
}
"encode a generic feature collection with labels" in {
val bytes = process.execute(listCollection, null, null, null, "track", "lonlat").toList
bytes must haveLength(10)
val decoded = bytes.map(BinaryOutputEncoder.decode).map(toTuples)
decoded must containTheSameElementsAs(ids.zip(dates).zip(lonlat).zip(tracks))
}
}
}
|
locationtech/geomesa
|
geomesa-process/geomesa-process-vector/src/test/scala/org/locationtech/geomesa/process/transform/BinConversionProcessTest.scala
|
Scala
|
apache-2.0
| 3,989
|
/*
* Copyright 2016 Nikolay Tatarinov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.rockjam.iqnotes
import akka.http.scaladsl.testkit.ScalatestRouteTest
import com.github.rockjam.iqnotes.db.MongoExtension
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{ Seconds, Span }
import org.scalatest.{ BeforeAndAfterEach, FlatSpecLike, Matchers }
trait SpecBase
extends FlatSpecLike
with Matchers
with ScalaFutures
with BeforeAndAfterEach
with ScalatestRouteTest {
override implicit def patienceConfig: PatienceConfig =
PatienceConfig(timeout = Span(15, Seconds))
override def beforeEach: Unit =
whenReady(MongoExtension(system).db.flatMap { db ⇒
db.drop()
})(_ ⇒ ())
}
|
rockjam/iq-notes
|
src/test/scala/com/github/rockjam/iqnotes/SpecBase.scala
|
Scala
|
apache-2.0
| 1,273
|
import sbt._
import Keys._
object BuildSettings {
lazy val repos = Seq(
"Sonatype OSS Repo" at "https://oss.sonatype.org/content/repositories/releases",
"Concurrent Maven Repo" at "http://conjars.org/repo"
/*,
"Apache HBase" at "https://repository.apache.org/content/repositories/releases",
"Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/",
"Twitter Maven Repo" at "http://maven.twttr.com",
"Maven Repository" at "http://mvnrepository.com/artifact/",
"releases" at "http://oss.sonatype.org/content/repositories/releases",
"snapshots" at "http://oss.sonatype.org/content/repositories/snapshots",
"Sonatype OSS Repo 2" at "https://oss.sonatype.org/content/groups/scala-tools"*/)
lazy val basicSettings = Seq[Setting[_]](
organization := "com.ivan",
version := "0.0.1",
description := "Sample data processing pipeline using cascading / scalding / hbase. Stream double -> (min, max, avg)",
scalaVersion := "2.10.4",
scalacOptions := Seq("-deprecation", "-encoding", "utf8"),
resolvers ++= repos
)
import sbtassembly.Plugin._
import AssemblyKeys._
lazy val sbtAssemblySettings = assemblySettings ++ Seq(
// Slightly cleaner jar name
jarName in assembly := { name.value + "-" + version.value + ".jar" },
// Drop these jars
excludedJars in assembly <<= (fullClasspath in assembly) map { cp =>
val excludes = Set(
"ant-1.6.5.jar",
"asm-3.1.jar",
"objenesis-1.2.jar",
"mockito-all-1.8.5.jar",
"jsp-api-2.0.jar",
"jsp-api-2.1-6.1.14.jar",
"jsp-2.1-6.1.14.jar",
"stax-api-1.0.1.jar",
"jasper-compiler-5.5.12.jar",
"minlog-1.2.jar", // Otherwise causes conflicts with Kyro (which bundles it)
"janino-2.5.16.jar", // Janino includes a broken signature, and is not needed anyway
"commons-beanutils-core-1.8.0.jar", // Clash with each other and with commons-collections
"commons-beanutils-1.7.0.jar", // "
"servlet-api-2.4.jar",
"servlet-api-2.5-20081211.jar"
)
cp filter { jar => excludes(jar.data.getName) }
},
mergeStrategy in assembly <<= (mergeStrategy in assembly) {
(old) => {
case "project.clj" => MergeStrategy.discard // Leiningen build files
case x => old(x)
}
}
)
lazy val buildSettings = basicSettings ++ sbtAssemblySettings
}
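// A sketch of wiring these settings into an sbt 0.13-style build definition; the
// project name and base directory are illustrative:
// {{{
//   lazy val root = Project("pipeline", file(".")).settings(BuildSettings.buildSettings: _*)
// }}}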
|
sitano/hbase-cascading-test1
|
project/BuildSettings.scala
|
Scala
|
mit
| 2,449
|
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.arrow.io
import java.io.ByteArrayOutputStream
import org.apache.arrow.vector.ipc.message.IpcOption
import org.locationtech.geomesa.arrow.vector.ArrowDictionary
import org.locationtech.geomesa.arrow.vector.SimpleFeatureVector.SimpleFeatureEncoding
import org.locationtech.geomesa.utils.collection.CloseableIterator
import org.locationtech.geomesa.utils.io.{CloseQuietly, WithClose}
import org.opengis.feature.simple.SimpleFeatureType
import scala.util.control.NonFatal
object ConcatenatedFileWriter {
import org.locationtech.geomesa.utils.conversions.ScalaImplicits.RichTraversableLike
/**
* Reduce function for concatenating separate arrow files
*
* @param sft simple feature type
* @param dictionaryFields dictionary fields
* @param encoding simple feature encoding
* @param sort sort
* @param files full logical arrow files encoded in arrow streaming format
* @return
*/
def reduce(
sft: SimpleFeatureType,
dictionaryFields: Seq[String],
encoding: SimpleFeatureEncoding,
ipcOpts: IpcOption,
sort: Option[(String, Boolean)],
files: CloseableIterator[Array[Byte]]): CloseableIterator[Array[Byte]] = {
// NB: This is not a WithClose situation.
// If there is an empty/non-exceptional iterator, we wish to use it.
// If there are any issues, we wish to close the iterator to free memory.
try {
if (files.hasNext) {
files
} else {
files.close() // Closing the input just to be sure.
generateEmptyResponse(sft, dictionaryFields, encoding, ipcOpts, sort)
}
} catch {
case NonFatal(e) =>
CloseQuietly(files).foreach(e.addSuppressed)
throw e
}
}
private def generateEmptyResponse(
    sft: SimpleFeatureType,
    dictionaryFields: Seq[String],
    encoding: SimpleFeatureEncoding,
    ipcOpts: IpcOption,
    sort: Option[(String, Boolean)]) = {
val dictionaries = dictionaryFields.mapWithIndex { case (name, i) =>
name -> ArrowDictionary.create(sft.getTypeName, i, Array.empty[AnyRef])
}
val os = new ByteArrayOutputStream()
WithClose(SimpleFeatureArrowFileWriter(os, sft, dictionaries.toMap, encoding, ipcOpts, sort)) { writer =>
writer.flush() // ensure header and dictionaries are written, and write an empty batch
}
dictionaries.foreach { case (_, dictionary) => dictionary.close() }
CloseableIterator.single(os.toByteArray)
}
}
|
locationtech/geomesa
|
geomesa-arrow/geomesa-arrow-gt/src/main/scala/org/locationtech/geomesa/arrow/io/ConcatenatedFileWriter.scala
|
Scala
|
apache-2.0
| 2,916
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.nio.ByteBuffer
import java.util.Properties
import org.apache.spark._
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.internal.config.APP_CALLER_CONTEXT
import org.apache.spark.memory.{MemoryMode, TaskMemoryManager}
import org.apache.spark.metrics.MetricsSystem
import org.apache.spark.util._
/**
* A unit of execution. We have two kinds of Task's in Spark:
*
* - [[org.apache.spark.scheduler.ShuffleMapTask]]
* - [[org.apache.spark.scheduler.ResultTask]]
*
* A Spark job consists of one or more stages. The very last stage in a job consists of multiple
* ResultTasks, while earlier stages consist of ShuffleMapTasks. A ResultTask executes the task
* and sends the task output back to the driver application. A ShuffleMapTask executes the task
* and divides the task output to multiple buckets (based on the task's partitioner).
*
* @param stageId id of the stage this task belongs to
* @param stageAttemptId attempt id of the stage this task belongs to
* @param partitionId index of the partition in the RDD
* @param localProperties copy of thread-local properties set by the user on the driver side.
* @param serializedTaskMetrics a `TaskMetrics` that is created and serialized on the driver side
* and sent to executor side.
*
* The parameters below are optional:
* @param jobId id of the job this task belongs to
* @param appId id of the app this task belongs to
* @param appAttemptId attempt id of the app this task belongs to
* @param isBarrier whether this task belongs to a barrier stage. Spark must launch all the tasks
* at the same time for a barrier stage.
*/
private[spark] abstract class Task[T](
val stageId: Int,
val stageAttemptId: Int,
val partitionId: Int,
@transient var localProperties: Properties = new Properties,
// The default value is only used in tests.
serializedTaskMetrics: Array[Byte] =
SparkEnv.get.closureSerializer.newInstance().serialize(TaskMetrics.registered).array(),
val jobId: Option[Int] = None,
val appId: Option[String] = None,
val appAttemptId: Option[String] = None,
val isBarrier: Boolean = false) extends Serializable {
@transient lazy val metrics: TaskMetrics =
SparkEnv.get.closureSerializer.newInstance().deserialize(ByteBuffer.wrap(serializedTaskMetrics))
/**
* Called by [[org.apache.spark.executor.Executor]] to run this task.
*
* @param taskAttemptId an identifier for this task attempt that is unique within a SparkContext.
* @param attemptNumber how many times this task has been attempted (0 for the first attempt)
* @return the result of the task along with updates of Accumulators.
*/
final def run(
taskAttemptId: Long,
attemptNumber: Int,
metricsSystem: MetricsSystem): T = {
SparkEnv.get.blockManager.registerTask(taskAttemptId)
// TODO SPARK-24874 Allow create BarrierTaskContext based on partitions, instead of whether
// the stage is barrier.
val taskContext = new TaskContextImpl(
stageId,
stageAttemptId, // stageAttemptId and stageAttemptNumber are semantically equal
partitionId,
taskAttemptId,
attemptNumber,
taskMemoryManager,
localProperties,
metricsSystem,
metrics)
context = if (isBarrier) {
new BarrierTaskContext(taskContext)
} else {
taskContext
}
TaskContext.setTaskContext(context)
taskThread = Thread.currentThread()
if (_reasonIfKilled != null) {
kill(interruptThread = false, _reasonIfKilled)
}
new CallerContext(
"TASK",
SparkEnv.get.conf.get(APP_CALLER_CONTEXT),
appId,
appAttemptId,
jobId,
Option(stageId),
Option(stageAttemptId),
Option(taskAttemptId),
Option(attemptNumber)).setCurrentContext()
try {
runTask(context)
} catch {
case e: Throwable =>
// Catch all errors; run task failure callbacks, and rethrow the exception.
try {
context.markTaskFailed(e)
} catch {
case t: Throwable =>
e.addSuppressed(t)
}
context.markTaskCompleted(Some(e))
throw e
} finally {
try {
// Call the task completion callbacks. If "markTaskCompleted" is called twice, the second
// one is no-op.
context.markTaskCompleted(None)
} finally {
try {
Utils.tryLogNonFatalError {
// Release memory used by this thread for unrolling blocks
SparkEnv.get.blockManager.memoryStore.releaseUnrollMemoryForThisTask(MemoryMode.ON_HEAP)
SparkEnv.get.blockManager.memoryStore.releaseUnrollMemoryForThisTask(
MemoryMode.OFF_HEAP)
// Notify any tasks waiting for execution memory to be freed to wake up and try to
// acquire memory again. This prevents the scenario where a task sleeps forever
// because there are no other tasks left to notify it. Since this is safe to do but may
// not be strictly necessary, we should revisit whether we can remove this in the
// future.
val memoryManager = SparkEnv.get.memoryManager
memoryManager.synchronized { memoryManager.notifyAll() }
}
} finally {
// Though we unset the ThreadLocal here, the context member variable itself is still
// queried directly in the TaskRunner to check for FetchFailedExceptions.
TaskContext.unset()
}
}
}
}
private var taskMemoryManager: TaskMemoryManager = _
def setTaskMemoryManager(taskMemoryManager: TaskMemoryManager): Unit = {
this.taskMemoryManager = taskMemoryManager
}
def runTask(context: TaskContext): T
def preferredLocations: Seq[TaskLocation] = Nil
// Map output tracker epoch. Will be set by TaskSetManager.
var epoch: Long = -1
// Task context, to be initialized in run().
@transient var context: TaskContext = _
// The actual Thread on which the task is running, if any. Initialized in run().
@volatile @transient private var taskThread: Thread = _
// If non-null, this task has been killed and the reason is as specified. This is used in case
// context is not yet initialized when kill() is invoked.
@volatile @transient private var _reasonIfKilled: String = null
protected var _executorDeserializeTimeNs: Long = 0
protected var _executorDeserializeCpuTime: Long = 0
/**
* If defined, this task has been killed and this option contains the reason.
*/
def reasonIfKilled: Option[String] = Option(_reasonIfKilled)
/**
* Returns the amount of time spent deserializing the RDD and function to be run.
*/
def executorDeserializeTimeNs: Long = _executorDeserializeTimeNs
def executorDeserializeCpuTime: Long = _executorDeserializeCpuTime
/**
* Collect the latest values of accumulators used in this task. If the task failed,
* filter out the accumulators whose values should not be included on failures.
*/
def collectAccumulatorUpdates(taskFailed: Boolean = false): Seq[AccumulatorV2[_, _]] = {
if (context != null) {
// Note: internal accumulators representing task metrics always count failed values
context.taskMetrics.nonZeroInternalAccums() ++
// zero value external accumulators may still be useful, e.g. SQLMetrics, we should not
// filter them out.
context.taskMetrics.externalAccums.filter(a => !taskFailed || a.countFailedValues)
} else {
Seq.empty
}
}
/**
* Kills a task by setting the interrupted flag to true. This relies on the upper level Spark
* code and user code to properly handle the flag. This function should be idempotent so it can
* be called multiple times.
* If interruptThread is true, we will also call Thread.interrupt() on the Task's executor thread.
*/
def kill(interruptThread: Boolean, reason: String) {
require(reason != null)
_reasonIfKilled = reason
if (context != null) {
context.markInterrupted(reason)
}
if (interruptThread && taskThread != null) {
taskThread.interrupt()
}
}
}
|
yanboliang/spark
|
core/src/main/scala/org/apache/spark/scheduler/Task.scala
|
Scala
|
apache-2.0
| 9,033
|
package mypipe.producer
import com.typesafe.config.Config
import mypipe.api.event.{ AlterEvent, UpdateMutation, SingleValuedMutation, Mutation }
import mypipe.avro.schema.{ AvroSchemaUtils, GenericSchemaRepository }
import mypipe.avro.AvroVersionedRecordSerializer
import mypipe.kafka.KafkaUtil
import org.apache.avro.Schema
import org.apache.avro.generic.GenericData
class KafkaMutationSpecificAvroProducer(config: Config)
extends KafkaMutationAvroProducer[Short](config) {
private val schemaRepoClientClassName = config.getString("schema-repo-client")
override protected val schemaRepoClient = Class.forName(schemaRepoClientClassName + "$")
.getField("MODULE$").get(null)
.asInstanceOf[GenericSchemaRepository[Short, Schema]]
override protected val serializer = new AvroVersionedRecordSerializer[InputRecord](schemaRepoClient)
override def handleAlter(event: AlterEvent): Boolean = {
// FIXME: if the table is not already in the cache by its ID, this will fail
// FIXME: this sucks and needs to be parsed properly
val tableName = {
val t = event.sql.split(" ")(2)
// account for db.table
if (t.contains(".")) t.split("""\\.""")(1)
else t
}
// refresh insert, update, and delete schemas
(for (
i ← schemaRepoClient.getLatestSchema(AvroSchemaUtils.specificSubject(event.database, tableName, Mutation.InsertString), flushCache = true);
u ← schemaRepoClient.getLatestSchema(AvroSchemaUtils.specificSubject(event.database, tableName, Mutation.UpdateString), flushCache = true);
d ← schemaRepoClient.getLatestSchema(AvroSchemaUtils.specificSubject(event.database, tableName, Mutation.DeleteString), flushCache = true)
) yield {
true
}).getOrElse(false)
}
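  // Encodes the schema id as a 2-byte big-endian array, e.g.
  // schemaIdToByteArray(0x1234.toShort) == Array(0x12.toByte, 0x34.toByte)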
override protected def schemaIdToByteArray(s: Short) = Array[Byte](((s & 0xFF00) >> 8).toByte, (s & 0x00FF).toByte)
override protected def getKafkaTopic(mutation: Mutation): String = KafkaUtil.specificTopic(mutation)
override protected def avroRecord(mutation: Mutation, schema: Schema): List[GenericData.Record] = {
Mutation.getMagicByte(mutation) match {
case Mutation.InsertByte ⇒ insertOrDeleteMutationToAvro(mutation.asInstanceOf[SingleValuedMutation], schema)
case Mutation.UpdateByte ⇒ updateMutationToAvro(mutation.asInstanceOf[UpdateMutation], schema)
case Mutation.DeleteByte ⇒ insertOrDeleteMutationToAvro(mutation.asInstanceOf[SingleValuedMutation], schema)
case _ ⇒
logger.error(s"Unexpected mutation type ${mutation.getClass} encountered; retuning empty Avro GenericData.Record(schema=$schema")
List(new GenericData.Record(schema))
}
}
/** Given a mutation, returns the Avro subject that this mutation's
* Schema is registered under in the Avro schema repository.
*
* @param mutation mutation to get subject for
* @return "mutationDbName_mutationTableName_mutationType" where mutationType is "insert", "update", or "delete"
*/
override protected def avroSchemaSubject(mutation: Mutation): String = AvroSchemaUtils.specificSubject(mutation)
protected def insertOrDeleteMutationToAvro(mutation: SingleValuedMutation, schema: Schema): List[GenericData.Record] = {
mutation.rows.map(row ⇒ {
val record = new GenericData.Record(schema)
row.columns.foreach(col ⇒ Option(schema.getField(col._1)).foreach(f ⇒ record.put(f.name(), col._2.value)))
header(record, mutation)
record
})
}
protected def updateMutationToAvro(mutation: UpdateMutation, schema: Schema): List[GenericData.Record] = {
mutation.rows.map(row ⇒ {
val record = new GenericData.Record(schema)
row._1.columns.foreach(col ⇒ Option(schema.getField("old_" + col._1)).foreach(f ⇒ record.put(f.name(), col._2.value)))
row._2.columns.foreach(col ⇒ Option(schema.getField("new_" + col._1)).foreach(f ⇒ record.put(f.name(), col._2.value)))
header(record, mutation)
record
})
}
}
|
Asana/mypipe
|
mypipe-kafka/src/main/scala/mypipe/producer/KafkaMutationSpecificAvroProducer.scala
|
Scala
|
apache-2.0
| 4,007
|
/*
* Copyright (C) 2016 Christopher Batey and Dogan Narinc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright (C) 2014 Christopher Batey and Dogan Narinc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scassandra.server.priming.prepared
import org.scalatest.{FunSuite, Matchers}
import org.scassandra.codec.Consistency._
import org.scassandra.codec.datatype.DataType
import org.scassandra.codec.messages.ColumnSpec.column
import org.scassandra.codec.messages._
import org.scassandra.codec.{Execute, Prepare, Prepared, ProtocolVersion}
import org.scassandra.server.priming.ConflictingPrimes
import org.scassandra.server.priming.query.Reply
import scodec.bits.ByteVector
class PrimePreparedStoreTest extends FunSuite with Matchers {
implicit val protocolVersion = ProtocolVersion.latest
val id = ByteVector(1)
test("Finds prime - no variables") {
//given
val underTest = new PrimePreparedStore
val query: String = "select * from people where name = ?"
val when = WhenPrepared(Some(query))
val thenDo = ThenPreparedSingle(Some(List()))
val prime = PrimePreparedSingle(when, thenDo)
//when
underTest.record(prime)
val actualPrime = underTest(query, Execute(id))
//then
actualPrime.get should equal(thenDo.prime)
}
test("Clearing all the primes") {
//given
val underTest = new PrimePreparedStore
val when = WhenPrepared(Some(""))
val thenDo = ThenPreparedSingle(None)
val prime = PrimePreparedSingle(when, thenDo)
underTest.record(prime)
//when
underTest.clear()
//then
underTest.retrievePrimes().size should equal(0)
}
test("Priming consistency. Should match on consistency") {
val underTest = new PrimePreparedStore
val query: String = "select * from people where name = ?"
val consistencies = List(ONE, TWO)
val when = WhenPrepared(Some(query), None, Some(consistencies))
val thenDo = ThenPreparedSingle(Some(List()))
val prime = PrimePreparedSingle(when, thenDo)
underTest.record(prime)
//when
val primeForOne = underTest(query, Execute(id, QueryParameters(consistency=ONE)))
val primeForTwo = underTest(query, Execute(id, QueryParameters(consistency=TWO)))
val primeForAll = underTest(query, Execute(id, QueryParameters(consistency=ALL)))
//then
primeForOne.isDefined should equal(true)
primeForTwo.isDefined should equal(true)
primeForAll.isDefined should equal(false)
}
test("Priming consistency. Should default to all consistencies") {
val underTest = new PrimePreparedStore
val query: String = "select * from people where name = ?"
val when = WhenPrepared(Some(query), None)
val thenDo = ThenPreparedSingle(Some(List()))
val prime = PrimePreparedSingle(when, thenDo)
underTest.record(prime)
//when
val primeForOne = underTest(query, Execute(id, QueryParameters(consistency=ONE)))
val primeForTwo = underTest(query, Execute(id, QueryParameters(consistency=TWO)))
val primeForAll = underTest(query, Execute(id, QueryParameters(consistency=ALL)))
val primeForLocalOne = underTest(query, Execute(id, QueryParameters(consistency=LOCAL_ONE)))
//then
primeForOne.isDefined should equal(true)
primeForTwo.isDefined should equal(true)
primeForAll.isDefined should equal(true)
primeForLocalOne.isDefined should equal(true)
}
test("Conflicting primes") {
val underTest = new PrimePreparedStore
val query: String = "select * from people where name = ?"
val thenDo = ThenPreparedSingle(Some(List()))
val primeForOneAndTwo = PrimePreparedSingle(WhenPrepared(Some(query), None, Some(List(ONE, TWO))), thenDo)
val primeForTwoAndThree = PrimePreparedSingle(WhenPrepared(Some(query), None, Some(List(TWO, THREE))), thenDo)
//when
underTest.record(primeForOneAndTwo)
val result = underTest.record(primeForTwoAndThree)
//then
result.isInstanceOf[ConflictingPrimes] should equal(true)
}
val factory = (p: PreparedMetadata, r: RowMetadata) => Prepared(id, p, r)
test("Prepared prime - None when no match") {
val underTest = new PrimePreparedStore
// when
val prepared = underTest(Prepare("select * from people where a = ? and b = ? and c = ?"), factory)
// then
prepared.isDefined should equal(false)
}
test("Prepared prime - no parameters") {
val underTest = new PrimePreparedStore
val query: String = "select * from people"
val when = WhenPrepared(Some(query), None)
val thenDo = ThenPreparedSingle(Some(List()))
val prime = PrimePreparedSingle(when, thenDo)
underTest.record(prime)
// when
val prepared = underTest(Prepare(query), factory)
// then - should be a prepared with no column spec
prepared should matchPattern { case Some(Reply(Prepared(`id`, PreparedMetadata(_, _, _, `Nil`), _), _, _)) => }
}
test("Prepared prime - with parameters") {
val underTest = new PrimePreparedStore
val query: String = "select * from people where first=? and last=?"
val columnSpec = List(column("0", DataType.Varchar), column("1", DataType.Bigint))
val when = WhenPrepared(Some(query), None)
val thenDo = ThenPreparedSingle(Some(List()), Some(columnSpec.map(_.dataType)))
val prime = PrimePreparedSingle(when, thenDo)
underTest.record(prime)
// when
val prepared = underTest(Prepare(query), factory)
// then - should be a prepared with a column spec containing parameters.
prepared should matchPattern { case Some(Reply(Prepared(`id`, PreparedMetadata(_, _, _, `columnSpec`), _), _, _)) => }
}
}
|
mikefero/cpp-driver
|
gtests/src/integration/scassandra/server/server/src/test/scala/org/scassandra/server/priming/prepared/PrimePreparedStoreTest.scala
|
Scala
|
apache-2.0
| 6,610
|
import stainless.lang._
import stainless.collection._
import stainless.lang.Option._
import stainless.annotation._
import stainless.proof.check
import stainless.lang.StaticChecks._
object StackExample {
final case class Node(val value: BigInt, var nextOpt: Option[Node]) extends AnyHeapRef {}
final case class Q(var first: Option[Node],
@ghost var nodes: List[AnyHeapRef])
extends AnyHeapRef
{
@ghost
def valid: Boolean = {
reads(nodes.content ++ Set(this))
!nodes.contains(this) &&
inv(nodes, first)
}
@ghost
def inv(nodesLeft: List[AnyHeapRef], current: Option[Node]): Boolean = {
reads(nodesLeft.content)
decreases(nodesLeft.size)
nodesLeft match {
case Cons(hh, tail) => {
hh.isInstanceOf[Node] &&
{
val h = hh.asInstanceOf[Node]
current == Some(h) && inv(tail, h.nextOpt)
}
}
case Nil() => current == None[Node]()
}
}
def push(n: Node): Unit = {
reads(nodes.content ++ Set(this, n))
modifies(Set(this, n))
require(valid && !nodes.contains(n))
n.nextOpt = first
first = Some(n)
nodes = n :: nodes
} ensuring { _ => valid && nodes == n :: old(nodes) }
def pop: BigInt = {
reads(nodes.content ++ Set(this))
require(valid && !nodes.isEmpty)
modifies(Set(this))
val n = first.get
first = n.nextOpt
nodes = nodes.tail
n.value
} ensuring { _ => valid && nodes == old(nodes.tail) }
}
@extern
def main(args: Array[String]): Unit = {
val n = Node(-1, None[Node]())
val s = Q(None[Node](), List[AnyHeapRef]())
println("Stack with nodes")
s.push(Node(5, None[Node]()))
s.push(Node(10, None[Node]()))
s.push(Node(14, None[Node]()))
println("Stack is: " + s)
println(s.pop)
println(s.pop)
println(s.pop)
println("Stack is: " + s)
}
}
|
epfl-lara/stainless
|
frontends/benchmarks/full-imperative/valid/Stack.scala
|
Scala
|
apache-2.0
| 1,964
|
package com.sksamuel.elastic4s.searches.aggs
import com.sksamuel.elastic4s.searches.aggs.pipeline.PipelineAggregationBuilderFn
import org.elasticsearch.search.aggregations.AggregationBuilders
import org.elasticsearch.search.aggregations.bucket.missing.MissingAggregationBuilder
import scala.collection.JavaConverters._
object MissingAggregationBuilder {
def apply(agg: MissingAggregationDefinition): MissingAggregationBuilder = {
val builder = AggregationBuilders.missing(agg.name)
agg.field.foreach(builder.field)
agg.subaggs.map(AggregationBuilder.apply).foreach(builder.subAggregation)
agg.pipelines.map(PipelineAggregationBuilderFn.apply).foreach(builder.subAggregation)
if (agg.metadata.nonEmpty) builder.setMetaData(agg.metadata.asJava)
builder
}
}
|
tyth/elastic4s
|
elastic4s-tcp/src/main/scala/com/sksamuel/elastic4s/searches/aggs/MissingAggregationBuilder.scala
|
Scala
|
apache-2.0
| 787
|
package com.jeff.chaser.models.systems.common
import com.badlogic.ashley.core.{ComponentMapper, Entity, Family}
import com.badlogic.ashley.systems.IteratingSystem
import com.badlogic.gdx.math.{MathUtils, Vector2}
import com.jeff.chaser.models.components.motion.{TransformComponent, VelocityComponent}
import com.jeff.chaser.models.components.util.ControlledComponent
class ControlSystem extends IteratingSystem(Family.all(classOf[ControlledComponent]).get()) {
private val tm = ComponentMapper.getFor(classOf[TransformComponent])
private val vm = ComponentMapper.getFor(classOf[VelocityComponent])
private var turnDir = 0
private var throttle = 0
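  // Tri-state inputs driven by updateKeyStates below: turnDir is -1 (A), 1 (D) or 0
  // (no turn); throttle is 1 (W), -1 (S) or 0 (idle).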
override def processEntity(entity: Entity, deltaTime: Float): Unit = {
val t = tm.get(entity)
val v = vm.get(entity)
var end = t.rotation
end += (turnDir match {
case 0 => 0
case 1 => -45.0f * deltaTime
case _ => 45.0f * deltaTime
})
if (end < 0) {
end = 360 - (-end)
}
t.rotation = end
val x = MathUtils.cosDeg(end)
val y = MathUtils.sinDeg(end)
val vec = new Vector2(x, y)
vec.nor()
vec.scl(v.maxX)
v.targetX = vec.x * throttle
v.targetY = vec.y * throttle
}
def updateKeyStates(a: Boolean, s: Boolean, d: Boolean, w: Boolean): Unit = {
if (w) {
throttle = 1
} else if (s) {
throttle = -1
} else {
throttle = 0
}
if (a) {
turnDir = -1
} else if (d) {
turnDir = 1
} else {
turnDir = 0
}
}
}
|
jregistr/Academia
|
CSC455-Game-Programming/Chaser/core/src/com/jeff/chaser/models/systems/common/ControlSystem.scala
|
Scala
|
mit
| 1,519
|
package nak.data
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
* Represents a single unlabeled example from a collection of data. Intentionally overly general.
*
* @author dlwh
*/
trait Observation[+T] extends Serializable { outer=>
def id : String
def features: T
/**
* strict, but cached, transformation of features
*/
def map[U](f: T=>U):Observation[U] = new Observation[U] {
val id = outer.id
val features = f(outer.features)
}
/**
* non-strict, but cached, transformation of features
*/
def flatMap[U](f: T=>U) = map(f)
override def toString = {
"Observation { ids =" + id + ", features = " + features + "}";
}
}
object Observation {
/**
* Create an observation.
*/
def apply[T](_features: T, _id: String = "") = new Observation[T] {
val id = _id
val features = _features
}
/**
* Lifts a function to operate over Observations,
* rather than the contained object.
*/
def lift[T,U](f: T=>U) = (o : Observation[T]) => o.map(f)
}
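// A small usage sketch; the id and feature values are illustrative:
// {{{
//   val doc = Observation("some text", "doc1")
//   doc.map(_.length).features  // 9
// }}}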
|
scalanlp/nak
|
src/main/scala/nak/data/Observation.scala
|
Scala
|
apache-2.0
| 1,559
|
/*
* ____ ____ _____ ____ ___ ____
* | _ \ | _ \ | ____| / ___| / _/ / ___| Precog (R)
* | |_) | | |_) | | _| | | | | /| | | _ Advanced Analytics Engine for NoSQL Data
* | __/ | _ < | |___ | |___ |/ _| | | |_| | Copyright (C) 2010 - 2013 SlamData, Inc.
* |_| |_| \_\ |_____| \____| /__/ \____| All Rights Reserved.
*
* This program is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either version
* 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License along with this
* program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.precog.ingest
package service
import akka.dispatch.{ExecutionContext, Future}
import blueeyes.core.data.ByteChunk
import blueeyes.core.http.MimeType
import blueeyes.core.http.HttpHeaders._
import blueeyes.core.http.HttpRequest
import blueeyes.core.http.MimeTypes._
import blueeyes.json.{AsyncParser, AsyncParse }
import AsyncParser.{More, Done}
import com.precog.common.Path
import com.precog.common.ingest._
import com.precog.common.jobs.JobId
import com.precog.common.security.{APIKey, Authorities, WriteMode}
import java.io.File
import java.nio.ByteBuffer
import scala.annotation.tailrec
import scalaz._
import scalaz.syntax.std.boolean._
object IngestProcessing {
sealed trait ErrorHandling
case object StopOnFirstError extends ErrorHandling
case object AllOrNothing extends ErrorHandling
case object IngestAllPossible extends ErrorHandling
sealed trait Durability { def jobId: Option[JobId] }
case object LocalDurability extends Durability { val jobId = None }
case class GlobalDurability(jid: JobId) extends Durability { val jobId = Some(jid) }
sealed trait IngestResult
case class BatchResult(total: Int, ingested: Int, errors: Vector[(Int, String)]) extends IngestResult
case class StreamingResult(ingested: Int, error: Option[String]) extends IngestResult
case class NotIngested(reason: String) extends IngestResult
val JSON = application/json
val JSON_STREAM = MimeType("application", "x-json-stream")
val CSV = text/csv
  /** Chain of responsibility element used to determine an IngestProcessing strategy */
@tailrec final def select(from: List[IngestProcessingSelector], partialData: Array[Byte], request: HttpRequest[_]): Option[IngestProcessing] = {
from match {
case hd :: tl =>
hd.select(partialData, request) match { // not using map so as to get tailrec
case None => select(tl, partialData, request)
case some => some
}
case Nil => None
}
}
}
trait IngestProcessing {
import IngestProcessing._
type IngestProcessor <: IngestProcessorLike
/**
* Build an ingest processor based only upon the request metadata. The type of HttpRequest is existential here
* specifically to prohibit implementations from peeking at the data.
*/
def forRequest(request: HttpRequest[_]): ValidationNel[String, IngestProcessor]
trait IngestProcessorLike {
def ingest(durability: Durability, errorHandling: ErrorHandling, storeMode: WriteMode, data: ByteChunk): Future[IngestResult]
}
}
trait IngestProcessingSelector {
def select(partialData: Array[Byte], request: HttpRequest[_]): Option[IngestProcessing]
}
class DefaultIngestProcessingSelectors(maxFields: Int, batchSize: Int, tmpdir: File, ingestStore: IngestStore)(implicit M: Monad[Future], executor: ExecutionContext) {
import IngestProcessing._
class MimeIngestProcessingSelector(apiKey: APIKey, path: Path, authorities: Authorities) extends IngestProcessingSelector {
def select(partialData: Array[Byte], request: HttpRequest[_]): Option[IngestProcessing] = {
request.headers.header[`Content-Type`].toSeq.flatMap(_.mimeTypes) collectFirst {
case JSON => new JSONIngestProcessing(apiKey, path, authorities, JSONValueStyle, maxFields, ingestStore)
case JSON_STREAM => new JSONIngestProcessing(apiKey, path, authorities, JSONStreamStyle, maxFields, ingestStore)
case CSV => new CSVIngestProcessing(apiKey, path, authorities, batchSize, tmpdir, ingestStore)
}
}
}
class JSONIngestProcessingSelector(apiKey: APIKey, path: Path, authorities: Authorities) extends IngestProcessingSelector {
def select(partialData: Array[Byte], request: HttpRequest[_]): Option[IngestProcessing] = {
val (AsyncParse(errors, values), parser) = AsyncParser.stream().apply(More(ByteBuffer.wrap(partialData)))
if (errors.isEmpty && !values.isEmpty) {
request.headers.header[`Content-Type`].toSeq.flatMap(_.mimeTypes) collectFirst {
case JSON_STREAM => new JSONIngestProcessing(apiKey, path, authorities, JSONStreamStyle, maxFields, ingestStore)
} orElse {
Some(new JSONIngestProcessing(apiKey, path, authorities, JSONValueStyle, maxFields, ingestStore))
}
} else {
None
}
}
}
def selectors(apiKey: APIKey, path: Path, authorities: Authorities): List[IngestProcessingSelector] = List(
new MimeIngestProcessingSelector(apiKey, path, authorities),
new JSONIngestProcessingSelector(apiKey, path, authorities)
)
}
|
precog/platform
|
ingest/src/main/scala/com/precog/ingest/service/IngestProcessor.scala
|
Scala
|
agpl-3.0
| 5,606
|
package com.arcusys.learn.models.response.certificates
import com.arcusys.valamis.user.model.User
case class CertificateSuccessUsersResponse(
id: Int,
title: String,
shortDescription: String,
description: String,
logo: String,
succeedUsers: Seq[User])
|
ViLPy/Valamis
|
learn-portlet/src/main/scala/com/arcusys/learn/models/response/certificates/CertificateSuccessUsersResponse.scala
|
Scala
|
lgpl-3.0
| 267
|
/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package swave.docs
object MonteCarloPi extends App {
//#example
import swave.core.util.XorShiftRandom
import swave.core._
implicit val env = StreamEnv()
val random = XorShiftRandom()
Spout.continually(random.nextDouble())
.grouped(2)
.map { case x +: y +: Nil => Point(x, y) }
.fanOutBroadcast()
.sub.filter(_.isInner).map(_ => InnerSample).end
.sub.filterNot(_.isInner).map(_ => OuterSample).end
.fanInMerge()
.scan(State(0, 0)) { _ withNextSample _ }
.drop(1)
.injectSequential() // these three transformations together have
.map(_ drop 999999 take 1) // the same effect as `.takeEveryNth(1000000)`
.flattenConcat() // (in fact, this is how `takeEveryNth` is implemented)
.map(state ⇒ f"After ${state.totalSamples}%,10d samples π is approximated as ${state.π}%.6f")
.take(50)
.foreach(println)
val time = System.currentTimeMillis() - env.startTime
println(f"Done. Total time: $time%,6d ms, Throughput: ${50000.0 / time}%.3fM samples/sec\\n")
//////////////// MODEL ///////////////
case class Point(x: Double, y: Double) {
def isInner: Boolean = x * x + y * y < 1.0
}
sealed trait Sample
case object InnerSample extends Sample
case object OuterSample extends Sample
case class State(totalSamples: Long, inCircle: Long) {
def π: Double = (inCircle.toDouble / totalSamples) * 4.0
def withNextSample(sample: Sample) =
State(totalSamples + 1, if (sample == InnerSample) inCircle + 1 else inCircle)
}
//#example
}
|
sirthias/swave
|
docs/src/test/scala/swave/docs/MonteCarloPi.scala
|
Scala
|
mpl-2.0
| 1,766
|
package elcurto99.scalaquestrade
import java.time.{LocalDateTime, ZonedDateTime}
import elcurto99.scalaquestrade.models.OrderStateFilterType.OrderStateFilterType
import elcurto99.scalaquestrade.models._
/**
* Definition of the Questrade API interface
*/
trait QuestradeAPI {
/**
* Perform a login request to get an access token for an API server
* @see http://www.questrade.com/api/documentation/security
*/
def login(loginDomain: String, refreshToken: String): Login
/**
* Get the current server time in ISO format and Eastern time zone
* @see http://www.questrade.com/api/documentation/rest-operations/account-calls/time
*/
def getTime(accessToken: String, apiServer: String): ZonedDateTime
/**
* Retrieves the accounts associated with the user on behalf of which the API client is authorized
* @see http://www.questrade.com/api/documentation/rest-operations/account-calls/accounts
*/
def getAccounts(accessToken: String, apiServer: String): AccountsResponse
/**
* Retrieves positions in a specified account
* @see http://www.questrade.com/api/documentation/rest-operations/account-calls/accounts-id-positions
*/
def getAccountPositions(accessToken: String, apiServer: String, accountNumber: String): List[Position]
/**
   * Retrieves balances for a specified account
* @see http://www.questrade.com/api/documentation/rest-operations/account-calls/accounts-id-balances
*/
def getAccountBalances(accessToken: String, apiServer: String, accountNumber: String): BalancesResponse
/**
* Retrieves executions in a specified account in the defined time range
* @see http://www.questrade.com/api/documentation/rest-operations/account-calls/accounts-id-executions
*/
def getAccountExecutions(accessToken: String, apiServer: String, accountNumber: String, startDateTimeOption: Option[LocalDateTime], endTimeOption: Option[LocalDateTime]): List[Execution]
/**
* Retrieves orders for a specified account
* @see http://www.questrade.com/api/documentation/rest-operations/account-calls/accounts-id-orders
*/
def getAccountOrders(accessToken: String, apiServer: String, accountNumber: String, startDateTimeOption: Option[LocalDateTime], endTimeOption: Option[LocalDateTime], stateFilterOption: Option[OrderStateFilterType], orderIdsList: List[Int]): List[Order]
/**
* Retrieves an order for a specified account
* @see http://www.questrade.com/api/documentation/rest-operations/account-calls/accounts-id-orders
*/
def getAccountOrder(accessToken: String, apiServer: String, accountNumber: String, orderId: Int): Order
/**
   * Retrieves account activities, including cash transactions, dividends, trades, etc.
   * Because this endpoint limits each request to 31 days of data, requests for larger ranges of data will be broken up and consume more API calls
* @see http://www.questrade.com/api/documentation/rest-operations/account-calls/accounts-id-activities
*/
def getAccountActivities(accessToken: String, apiServer: String, accountNumber: String, startDateTime: LocalDateTime, endDateTimeOption: Option[LocalDateTime]): List[Activity]
}
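// Hedged usage sketch, not part of the original source: the call order a client
// of this trait would typically follow. `api` stands for any concrete
// QuestradeAPI implementation; the token and server values are placeholders
// obtained from a prior login call.
object QuestradeApiUsageSketch {
  def printAccountOverview(api: QuestradeAPI, accessToken: String, apiServer: String): Unit = {
    println(api.getTime(accessToken, apiServer))        // server clock, Eastern time zone
    val accounts = api.getAccounts(accessToken, apiServer)
    println(accounts)
  }
}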
|
elcurto99/scala-questrade
|
src/main/scala/elcurto99/scalaquestrade/QuestradeAPI.scala
|
Scala
|
mit
| 3,193
|
import org.specs2.mutable._
import org.specs2.runner._
import org.junit.runner._
import org.specs2.matcher.JsonMatchers
import play.api.libs.json.{JsString, Json}
import play.api.libs.ws.WS
import play.api.{Play, Logger}
import play.api.test._
/**
* add your integration spec here.
* An integration test will fire up a whole play application in a real (or headless) browser
*/
@RunWith(classOf[JUnitRunner])
class HMSAPISpec extends Specification {
/*"HMS API" should {
"deliver an API token" in new WithBrowser {
var username = Play.configuration.getString("hms.username").get
val password = Play.configuration.getString("hms.password").get
val apiUrl = Play.configuration.getString("hms.apiURL").get + ("/login/")
val authData = Json.obj(
"UserName" -> JsString(username),
"Password" -> JsString(password)
)
Logger.info(authData.toString())
val respString = WS.url(apiUrl)
.withHeaders("x-api-version" -> "1.0")
.withRequestTimeout(2000)
.post(authData).map { response =>
response.status match {
case s if s < 300 =>
response.json.toString()
case _ =>
""
}
}
Logger.info(respString.map {wups => wups} toString)
failure("Puups")
}
}*/
}
|
indarium/hbbTVPlugin
|
test/HMSAPISpec.scala
|
Scala
|
agpl-3.0
| 1,320
|
package me.yingrui.segment.filter
import org.junit.Assert
import org.junit.Test
import me.yingrui.segment.core.SegmentResult
import me.yingrui.segment.dict.POSUtil
class ReduplicatingFilterTest {
var filter = new ReduplicatingFilter()
@Test
def should_merge_two_reduplicating_words() {
val segmentResult = new SegmentResult(4)
segmentResult.setWords(List[String]("一", "件", "件", "物品").toArray)
segmentResult.setPOSArray(List[Int](POSUtil.POS_M, POSUtil.POS_A, POSUtil.POS_A, POSUtil.POS_N).toArray)
segmentResult.setDomainTypes(List[Int](0, 0, 0, 0).toArray)
segmentResult.setConcepts(List[String]("N/A", "N/A", "N/A", "N/A").toArray)
filter.setSegmentResult(segmentResult)
filter.filtering()
Assert.assertEquals("一件件", segmentResult.getWord(0))
Assert.assertEquals(POSUtil.POS_A, segmentResult.getPOS(0))
}
@Test
def should_merge_three_reduplicating_words() {
    val segmentResult = new SegmentResult(3)
segmentResult.setWords(List[String]("件", "件", "物品").toArray)
segmentResult.setPOSArray(List[Int](POSUtil.POS_M, POSUtil.POS_M, POSUtil.POS_N).toArray)
segmentResult.setDomainTypes(List[Int](0, 0, 0).toArray)
segmentResult.setConcepts(List[String]("N/A", "N/A", "N/A").toArray)
filter.setSegmentResult(segmentResult)
filter.filtering()
Assert.assertEquals("件件", segmentResult.getWord(0))
Assert.assertEquals(POSUtil.POS_M, segmentResult.getPOS(0))
}
}
|
yingrui/mahjong
|
lib-segment/src/test/scala/me/yingrui/segment/filter/ReduplicatingFilterTest.scala
|
Scala
|
gpl-3.0
| 1,560
|
/**
* Copyright (C) 2012 Inria, University Lille 1.
*
* This file is part of PowerAPI.
*
* PowerAPI is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* PowerAPI is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with PowerAPI. If not, see <http://www.gnu.org/licenses/>.
*
* Contact: powerapi-user-list@googlegroups.com.
*/
package fr.inria.powerapi.listener.cpu.jfreechart
import fr.inria.powerapi.core.Listener
import fr.inria.powerapi.formula.cpu.api.CpuFormulaMessage
import javax.swing.SwingUtilities
/**
* CPU listener which display received CpuFormulaMessage into a JFreeChart chart.
*
* @see http://www.jfree.org/jfreechart
*
* @author abourdon
*/
class CpuListener extends Listener {
override def preStart() {
SwingUtilities.invokeLater(new Runnable {
def run() {
Chart.run()
}
})
}
def messagesToListen = Array(classOf[CpuFormulaMessage])
def process(cpuFormulaMessage: CpuFormulaMessage) {
Chart.process(cpuFormulaMessage)
}
def acquire = {
case cpuFormulaMessage: CpuFormulaMessage => process(cpuFormulaMessage)
}
}
|
abourdon/powerapi-akka
|
listeners/listener-cpu-jfreechart/src/main/scala/fr/inria/powerapi/listener/cpu/jfreechart/CpuListener.scala
|
Scala
|
agpl-3.0
| 1,568
|
package exercises.ch10
trait Monoid[A] {
def op(a1: A, a2: A): A
def zero: A
}
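// Hedged illustration, not from the original exercises repo: two concrete
// Monoid instances for the trait above and a fold that uses them.
object MonoidSketch {
  val intAddition: Monoid[Int] = new Monoid[Int] {
    def op(a1: Int, a2: Int): Int = a1 + a2
    def zero: Int = 0
  }
  val stringConcat: Monoid[String] = new Monoid[String] {
    def op(a1: String, a2: String): String = a1 + a2
    def zero: String = ""
  }
  // Fold any list with a monoid, starting from its identity element.
  def concatenate[A](as: List[A], m: Monoid[A]): A =
    as.foldLeft(m.zero)(m.op)
  def main(args: Array[String]): Unit = {
    println(concatenate(List(1, 2, 3), intAddition))         // 6
    println(concatenate(List("a", "b", "c"), stringConcat))  // abc
  }
}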
|
VladMinzatu/fpinscala-exercises
|
src/main/scala/exercises/ch10/Monoid.scala
|
Scala
|
mit
| 84
|
/*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iep.clienttest
import com.google.inject.AbstractModule
import com.google.inject.Module
import com.google.inject.multibindings.Multibinder
import com.netflix.iep.guice.GuiceHelper
import com.netflix.iep.service.Service
import com.netflix.iep.service.ServiceManager
import com.netflix.spectator.api.NoopRegistry
import com.netflix.spectator.api.Registry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.slf4j.LoggerFactory
object Main {
private val logger = LoggerFactory.getLogger(getClass)
private val config = ConfigFactory.load()
private def getBaseModules: java.util.List[Module] = {
val modules = GuiceHelper.getModulesUsingServiceLoader
if (!sys.env.contains("NETFLIX_ENVIRONMENT")) {
// If we are running in a local environment provide simple versions of registry
// and config bindings. These bindings are normally provided by the final package
// config for the app in the production setup.
modules.add(new AbstractModule {
override def configure(): Unit = {
bind(classOf[Registry]).toInstance(new NoopRegistry)
bind(classOf[Config]).toInstance(config)
}
})
}
modules
}
def main(args: Array[String]): Unit = {
try {
val modules = getBaseModules
modules.add(new InstrumentationModule)
val guice = new GuiceHelper
guice.start(modules)
guice.getInjector.getInstance(classOf[ServiceManager])
guice.addShutdownHook()
} catch {
// Send exceptions to main log file instead of wherever STDERR is sent for the process
case t: Throwable => logger.error("fatal error on startup", t)
}
}
class InstrumentationModule extends AbstractModule {
override def configure(): Unit = {
val cls = Class.forName(config.getString("netflix.iep.clienttest.class"))
bind(classOf[MetricLibrary]).to(cls.asInstanceOf[Class[MetricLibrary]])
val serviceBinder = Multibinder.newSetBinder(binder(), classOf[Service])
serviceBinder.addBinding().to(classOf[InstrumentationService])
bind(classOf[InstrumentationService])
}
}
}
|
Netflix-Skunkworks/iep-apps
|
iep-clienttest/src/main/scala/com/netflix/iep/clienttest/Main.scala
|
Scala
|
apache-2.0
| 2,761
|
/*
* Copyright (c) 2015, Nightfall Group
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package moe.nightfall.instrumentality.editor.control
import moe.nightfall.instrumentality.animations.NewPCAAnimation
import moe.nightfall.instrumentality.editor.UIUtils
import moe.nightfall.instrumentality.{Loader, ModelCache, PMXInstance, PMXModel}
import org.lwjgl.opengl.GL11
class ModelElement(ib: Boolean) extends View3DElement {
var workModel: PMXInstance = null
var workModelName: String = null
var isButton: Boolean = ib
var isHover: Boolean = false
if (!isButton) {
setModel(Loader.currentFile)
Loader.currentFileListeners += (() => {
setModel(Loader.currentFile)
})
}
def setModel(modelName: String) {
if (workModel != null) {
workModel.cleanupGL()
workModel = null
}
workModelName = modelName
if (modelName == null) return
val b: PMXModel = ModelCache.getLocal(modelName)
if (b == null) return
workModel = ModelElement.makeTestInstance(b)
}
override def draw() {
if (isButton) {
if (workModelName == null) {
colourStrength = 0.6f
colourStrength *= (if (isHover) 1.4f else 1f)
}
else {
colourStrength = 0.9f
if (workModelName.equalsIgnoreCase(Loader.currentFile)) colourStrength = 0.8f
colourStrength *= (if (isHover) 1.1f else 1f)
}
}
super.draw()
if (isButton) {
var text: String = "<null>"
if (workModelName != null) text = workModelName
UIUtils.drawBoundedText(text, width, height, borderWidth * 2, true)
}
}
protected def draw3D() {
if (workModel != null) {
val sFactor: Float = 1.0f / workModel.theModel.height
GL11.glTranslated(0, -0.5f, 0)
GL11.glScaled(sFactor, sFactor, sFactor)
workModel.render(1, 1, 1, 1.0f, 1.0f)
} else {
GL11.glTranslated(-0.25, 0, 0)
GL11.glScaled(0.01, -0.01, 0.01)
var text = "Load Fail"
if (workModelName == null)
text = "Use Steve"
UIUtils.drawText(text)
}
}
override def mouseStateChange(x: Int, y: Int, isDown: Boolean, button: Int) {
super.mouseStateChange(x, y, isDown, button)
if (isButton)
if (isDown)
if (button == 0)
Loader.setCurrentFile(workModelName)
}
override def update(dTime: Double) {
super.update(dTime)
if (workModel != null) workModel.update(dTime)
}
override def mouseEnterLeave(isInside: Boolean) {
super.mouseEnterLeave(isInside)
isHover = isInside
}
override def cleanup() {
super.cleanup()
if (workModel != null) {
workModel.cleanupGL()
workModel = null
}
}
}
object ModelElement {
def makeTestInstance(pm: PMXModel): PMXInstance = {
val pi: PMXInstance = new PMXInstance(pm)
val npca = new NewPCAAnimation(pm.defaultAnims)
npca.walkStrengthTarget = 1
npca.walkAnimation.speed = 1
pi.anim = npca
    pi
}
}
|
Nightfall/Instrumentality
|
core/src/main/scala/moe/nightfall/instrumentality/editor/control/ModelElement.scala
|
Scala
|
bsd-2-clause
| 4,572
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.storage
import scala.collection.mutable
import org.apache.spark.SparkConf
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.scheduler._
/**
* :: DeveloperApi ::
* A SparkListener that maintains executor storage status.
*
* This class is thread-safe (unlike JobProgressListener)
*/
@DeveloperApi
@deprecated("This class will be removed in a future release.", "2.2.0")
class StorageStatusListener(conf: SparkConf) extends SparkListener {
// This maintains only blocks that are cached (i.e. storage level is not StorageLevel.NONE)
private[storage] val executorIdToStorageStatus = mutable.Map[String, StorageStatus]()
private[storage] val deadExecutorStorageStatus = new mutable.ListBuffer[StorageStatus]()
private[this] val retainedDeadExecutors = conf.getInt("spark.ui.retainedDeadExecutors", 100)
def storageStatusList: Seq[StorageStatus] = synchronized {
executorIdToStorageStatus.values.toSeq
}
def deadStorageStatusList: Seq[StorageStatus] = synchronized {
deadExecutorStorageStatus
}
/** Update storage status list to reflect updated block statuses */
private def updateStorageStatus(execId: String, updatedBlocks: Seq[(BlockId, BlockStatus)]) {
executorIdToStorageStatus.get(execId).foreach { storageStatus =>
updatedBlocks.foreach { case (blockId, updatedStatus) =>
if (updatedStatus.storageLevel == StorageLevel.NONE) {
storageStatus.removeBlock(blockId)
} else {
storageStatus.updateBlock(blockId, updatedStatus)
}
}
}
}
/** Update storage status list to reflect the removal of an RDD from the cache */
private def updateStorageStatus(unpersistedRDDId: Int) {
storageStatusList.foreach { storageStatus =>
storageStatus.rddBlocksById(unpersistedRDDId).foreach { case (blockId, _) =>
storageStatus.removeBlock(blockId)
}
}
}
override def onUnpersistRDD(unpersistRDD: SparkListenerUnpersistRDD): Unit = synchronized {
updateStorageStatus(unpersistRDD.rddId)
}
override def onBlockManagerAdded(blockManagerAdded: SparkListenerBlockManagerAdded) {
synchronized {
val blockManagerId = blockManagerAdded.blockManagerId
val executorId = blockManagerId.executorId
// The onHeap and offHeap memory are always defined for new applications,
// but they can be missing if we are replaying old event logs.
val storageStatus = new StorageStatus(blockManagerId, blockManagerAdded.maxMem,
blockManagerAdded.maxOnHeapMem, blockManagerAdded.maxOffHeapMem)
executorIdToStorageStatus(executorId) = storageStatus
      // Try to remove the dead storage status if the same executor registers the block manager twice.
deadExecutorStorageStatus.zipWithIndex.find(_._1.blockManagerId.executorId == executorId)
.foreach(toRemoveExecutor => deadExecutorStorageStatus.remove(toRemoveExecutor._2))
}
}
override def onBlockManagerRemoved(blockManagerRemoved: SparkListenerBlockManagerRemoved) {
synchronized {
val executorId = blockManagerRemoved.blockManagerId.executorId
executorIdToStorageStatus.remove(executorId).foreach { status =>
deadExecutorStorageStatus += status
}
if (deadExecutorStorageStatus.size > retainedDeadExecutors) {
deadExecutorStorageStatus.trimStart(1)
}
}
}
override def onBlockUpdated(blockUpdated: SparkListenerBlockUpdated): Unit = {
val executorId = blockUpdated.blockUpdatedInfo.blockManagerId.executorId
val blockId = blockUpdated.blockUpdatedInfo.blockId
val storageLevel = blockUpdated.blockUpdatedInfo.storageLevel
val memSize = blockUpdated.blockUpdatedInfo.memSize
val diskSize = blockUpdated.blockUpdatedInfo.diskSize
val blockStatus = BlockStatus(storageLevel, memSize, diskSize)
updateStorageStatus(executorId, Seq((blockId, blockStatus)))
}
}
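// Hedged usage sketch, not part of the Spark source above: registering the
// listener on a SparkContext so it receives block manager and block update
// events. The app name and master are placeholders; listener bus delivery is
// asynchronous, so a real test would wait for events to drain before asserting.
object StorageStatusListenerSketch {
  import org.apache.spark.{SparkConf, SparkContext}

  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("storage-status-demo").setMaster("local[*]"))
    val listener = new StorageStatusListener(sc.getConf)
    sc.addSparkListener(listener)

    sc.parallelize(1 to 1000).cache().count()
    listener.storageStatusList.foreach { status =>
      println(s"${status.blockManagerId.executorId}: ${status.numBlocks} cached blocks")
    }
    sc.stop()
  }
}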
|
bOOm-X/spark
|
core/src/main/scala/org/apache/spark/storage/StorageStatusListener.scala
|
Scala
|
apache-2.0
| 4,716
|
/**
* COPYRIGHT (C) 2015 Alpine Data Labs Inc. All Rights Reserved.
*/
package com.alpine.plugin.core
import com.alpine.plugin.core.annotation.AlpineSdkApi
import com.alpine.plugin.core.datasource.OperatorDataSourceManager
import com.alpine.plugin.core.dialog.OperatorDialog
import com.alpine.plugin.core.io._
import com.alpine.plugin.core.visualization.{VisualModel, VisualModelFactory}
/**
* This is the required return type for the onInputOrParameterChange function
* in OperatorGUINode class. If the schemas of connected inputs or selected
* parameters are invalid, this should be false, along with an optional message
* about why this is false.
* @param isValid true if the operator is valid. false otherwise.
* @param msg An optional message that will show up in the UI. You can return a
* message even if the operator is valid.
*/
case class OperatorStatus(
isValid: Boolean,
msg: Option[String]
)
object OperatorStatus {
def apply(isValid: Boolean): OperatorStatus = {
OperatorStatus(isValid = isValid, msg = None)
}
def apply(isValid: Boolean, msg: String): OperatorStatus = {
OperatorStatus(isValid = isValid, msg = Some(msg))
}
}
/**
* :: AlpineSdkApi ::
* Control the behavior of the operator GUI node within the editor.
*/
@AlpineSdkApi
abstract class OperatorGUINode[I <: IOBase, O <: IOBase] {
/**
* Define actions to be performed when the operator GUI node is placed in
* the workflow. This involves defining the property dialog content and/or
* refining what the output schema is supposed to be like. E.g., if the output
* is a tabular dataset, provide some outline about the output schema (partial
* or complete).
* @param operatorDialog The operator dialog where the operator could add
* input text boxes, etc. to define UI for parameter
* inputs.
* @param operatorDataSourceManager Before executing the runtime of the operator
* the developer should determine the underlying
* platform that the runtime will execute against.
* E.g., it is possible for an operator to have
* accesses to two different Hadoop clusters
   *                                  access to two different Hadoop clusters
* on only one platform. A default platform
* will be used if nothing is done.
* @param operatorSchemaManager This can be used to provide information about
* the nature of the output/input schemas.
* E.g., provide the output schema.
*/
def onPlacement(
operatorDialog: OperatorDialog,
operatorDataSourceManager: OperatorDataSourceManager,
operatorSchemaManager: OperatorSchemaManager): Unit
/**
* If there's a change in the inputs/connections or parameters then this
* function will get called so that the operator can redefine the input/output
* schema.
* @param inputSchemas If the connected inputs contain tabular schemas, this is
* where they can be accessed, each with unique Ids.
* @param params The current parameter values to the operator.
* @param operatorSchemaManager This should be used to change the input/output
* schema, etc.
* @return A status object about whether the inputs and/or parameters are valid.
* The default implementation assumes that the connected inputs and/or
* parameters are valid.
*/
def onInputOrParameterChange(
inputSchemas: Map[String, TabularSchema],
params: OperatorParameters,
operatorSchemaManager: OperatorSchemaManager): OperatorStatus = {
OperatorStatus(isValid = true, msg = None)
}
/**
* This is invoked for GUI to customize the operator output visualization after
* the operator finishes running. Each output should have associated default
* visualization, but the developer can customize it here.
* @param params The parameter values to the operator.
* @param output This is the output from running the operator.
* @param visualFactory For creating visual models.
* @return The visual model to be sent to the GUI for visualization.
*/
def onOutputVisualization(
params: OperatorParameters,
output: O,
visualFactory: VisualModelFactory): VisualModel = {
visualFactory.createDefaultVisualModel(output)
}
}
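// Hedged illustration, not part of the SDK source: a validation helper of the
// kind a concrete GUI node's onInputOrParameterChange could delegate to, using
// the two OperatorStatus factory overloads defined above. The error message is
// made up.
object OperatorStatusSketch {
  def validateInputs(inputSchemas: Map[String, TabularSchema]): OperatorStatus =
    if (inputSchemas.isEmpty)
      OperatorStatus(false, "Connect at least one input before running this operator.")
    else
      OperatorStatus(isValid = true)
}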
|
holdenk/PluginSDK
|
plugin-core/src/main/scala/com/alpine/plugin/core/OperatorGUINode.scala
|
Scala
|
apache-2.0
| 4,561
|
package com.github.diegopacheco.sandbox.sacala.twoten.dynamic
import scala.language.dynamics
class Ruby extends Dynamic {
def applyDynamic(methodName:String)(args:Any*): Any = {
println("Calling method: " + methodName + ", with args: " + args)
}
}
object BackToRubyworld extends App {
val r = new Ruby
r.foo(23)
r.bar()
}
|
diegopacheco/scala-playground
|
scala-2.10-playground/src/com/github/diegopacheco/sandbox/sacala/twoten/dynamic/BackToRubyworld.scala
|
Scala
|
unlicense
| 352
|