code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package scala.meta
package internal
package prettyprinters
import org.scalameta.adt._
import org.scalameta.invariants._
import org.scalameta.unreachable
import scala.{Seq => _}
import scala.collection.immutable.Seq
import scala.meta.tokens._
import scala.annotation.implicitNotFound
import scala.collection.mutable
import scala.meta.internal.semantic._
import scala.meta.internal.{equality => e}
import scala.compat.Platform.EOL
import scala.language.implicitConversions
import scala.meta.prettyprinters._
import Show.{ sequence => s, repeat => r, indent => i, newline => n }
// Show-style typeclass that renders a value together with its semantic attributes
// (denotations, typings, environments); instances are provided in the companion below.
@implicitNotFound(msg = "don't know how to show[Attributes] for ${T}")
trait Attributes[T] extends Show[T]
object Attributes {
// Builds an Attributes instance from a plain rendering function.
def apply[T](f: T => Show.Result): Attributes[T] = new Attributes[T] { def apply(input: T) = f(input) }
// Recursion flag: Deep prints footnoted types recursively; Shallow stops at one level.
// Deep wins implicit resolution because it lives in the companion (higher priority than
// the LowPriority* parent trait) — the standard low-priority-implicits pattern.
@root trait Recursion
trait LowPriorityRecursion {
@leaf implicit object Shallow extends Recursion
}
object Recursion extends LowPriorityRecursion {
@leaf implicit object Deep extends Recursion
}
// Force flag: Always eagerly prints lazily-loaded types; Never shows "{...}" placeholders.
// Never is the default via the same low-priority trick (Always sits in the companion).
@root trait Force
trait LowPriorityForce {
@leaf implicit object Never extends Force
}
object Force extends LowPriorityForce {
@leaf implicit object Always extends Force
}
// TODO: would be nice to generate this with a macro for all tree nodes that we have
// Renders a tree's structure plus its semantic attributes, appending a footnote section
// that is built up as a side effect of rendering the body.
implicit def attributesTree[T <: Tree](implicit recursion: Recursion, force: Force): Attributes[T] = new Attributes[T] {
private def deep = recursion == Recursion.Deep
private def forceTypes = force == Force.Always
def apply(x: T): Show.Result = {
val bodyPart = body(x) // NOTE: body may side-effect on footnotes
val footnotePart = footnotes.toString
s(bodyPart, if (footnotePart.nonEmpty) EOL + footnotePart else footnotePart)
}
// Mutable registry of numbered footnotes, keyed per footnote kind (tag class) and
// deduplicated by semantic equality (see CustomWrapper below).
object footnotes {
trait Footnote {
def entity: Any
def tag: Class[_]
def prettyprint(): String
final override def toString: String = s"Footnote($entity)"
// NOTE(review): asymmetric equals — compares the wrapped entity against `that`
// directly (not against `that.entity`), so Footnote == Footnote holds only if
// entity.equals accepts a Footnote. Confirm this is intentional.
final override def equals(that: Any): Boolean = entity.equals(that)
final override def hashCode: Int = entity.hashCode()
}
object Footnote {
implicit def envFootnote(env: Environment): Footnote = new Footnote {
def entity = env
def tag = classOf[Environment]
// Presumably never called: body() emits "" for Environment.None and never
// inserts an environment footnote, hence `unreachable`.
def prettyprint() = env match {
case Environment.None => unreachable
}
}
implicit def denotFootnote(denot: Denotation): Footnote = new Footnote {
def entity = denot
def tag = classOf[Denotation]
def prettyprint() = {
// Renders the denotation's prefix; a Type prefix registers a typing footnote
// (side effect) and prints its id.
def prettyprintPrefix(pre: Prefix): String = {
pre match {
case Prefix.None => "{0}"
case Prefix.Type(tpe) => s"{${footnotes.insert(Typing.Nonrecursive(tpe))}}"
}
}
// Renders a fully-qualified symbol path, then strips the leading "_root_." for brevity.
def prettyprintSymbol(sym: Symbol): String = {
def loop(sym: Symbol): String = sym match {
case Symbol.None => "0"
case Symbol.RootPackage => "_root_"
case Symbol.EmptyPackage => "_empty_"
case Symbol.Global(owner, ScalaSig.Type(name), _) => loop(owner) + "#" + name
case Symbol.Global(owner, ScalaSig.Term(name), _) => loop(owner) + "." + name
case Symbol.Global(owner, ScalaSig.Method(name, jvmSignature), _) => loop(owner) + "." + name + jvmSignature
case Symbol.Global(owner, ScalaSig.TypeParameter(name), _) => loop(owner) + "[" + name + "]"
case Symbol.Global(owner, ScalaSig.TermParameter(name), _) => loop(owner) + "(" + name + ")"
case Symbol.Global(owner, ScalaSig.Self(_), _) => loop(owner) + ".this"
case Symbol.Local(id) => "local#" + id
}
var result = loop(sym)
if (result != "_root_") result = result.stripPrefix("_root_.")
result
}
val symbol = denot.require[Denotation.Single].symbol
prettyprintPrefix(denot.prefix) + "::" + prettyprintSymbol(symbol)
}
}
implicit def typingFootnote(typing: Typing): Footnote = new Footnote {
def entity = typing
def tag = classOf[Typing]
// Only Nonrecursive typings are ever inserted (see body()), so the other cases
// are marked unreachable. Deep mode recurses into the type's own body.
def prettyprint() = typing match {
case Typing.None => unreachable
case Typing.Recursive => unreachable
case Typing.Nonrecursive(tpe) => if (deep) body(tpe) else tpe.show[Structure]
}
}
}
// Total number of distinct footnotes across all tags; drives the fixed-point loop in toString.
private var size = 0
private val repr = mutable.Map[Class[_], CustomMap[Any, (Int, Footnote)]]()
// Computes the id that insert(x) would assign, without mutating the registry
// (except for lazily creating the per-tag map). NOTE(review): `T <% Footnote`
// view bounds are deprecated; an implicit T => Footnote parameter is the modern spelling.
def previewInsert[T <% Footnote](x: T): Int = {
val footnote = implicitly[T => Footnote].apply(x)
val miniRepr = repr.getOrElseUpdate(footnote.tag, CustomMap[Any, (Int, Footnote)]())
val existingId = miniRepr.get(new CustomWrapper(x)).map(_._1)
existingId.getOrElse((miniRepr.values.map(_._1) ++ List(0)).max + 1)
}
// Registers x as a footnote (idempotently) and returns its id within its tag group.
def insert[T <% Footnote](x: T): Int = {
val id = previewInsert(x)
val footnote = implicitly[T => Footnote].apply(x)
val miniRepr = repr.getOrElseUpdate(footnote.tag, CustomMap[Any, (Int, Footnote)]())
if (!miniRepr.contains(new CustomWrapper(x))) size += 1
miniRepr.getOrElseUpdate(new CustomWrapper(x), (id, footnote))._1
}
override def toString: String = {
// In deep mode, prettyprinting a footnote may register new footnotes, so iterate
// until the registry size reaches a fixed point before rendering.
if (deep) {
var prevSize = 0 // NOTE: prettyprint may side-effect on footnotes
do {
prevSize = size
val stableMinis = repr.toList.sortBy(_._1.getName).map(_._2)
val stableFootnotes = stableMinis.flatMap(_.toList.sortBy(_._2._1).map(_._2._2))
stableFootnotes.foreach(_.prettyprint())
} while (size != prevSize)
}
// Renders one tag group, ordered by footnote id, each line as "<bracket>id<bracket> text".
def byType(tag: Class[_], bracket1: String, bracket2: String): List[String] = {
val miniRepr = repr.getOrElseUpdate(tag, CustomMap[Any, (Int, Footnote)]())
val sortedMiniCache = miniRepr.toList.sortBy{ case (_, (id, footnote)) => id }
sortedMiniCache.map{ case (_, (id, footnote)) => s"$bracket1$id$bracket2 ${footnote.prettyprint()}" }
}
(
byType(classOf[Environment], "[", "]") ++
byType(classOf[Denotation], "[", "]") ++
byType(classOf[Typing], "{", "}")
).mkString(EOL)
}
}
// Maps terms with Typing.Recursive to the footnote id reserved for their singleton type,
// so self-referential typings print "{n}" instead of looping forever.
val recursions = CustomMap[Term, Int]()
// Renders a single tree node as "Prefix(children...)" plus attribute suffixes, registering
// footnotes as a side effect.
def body(x: Tree): String = {
def whole(x: Any): String = x match {
case x: String => enquote(x, DoubleQuotes)
case x: Tree => body(x)
case x: Nil.type => "Nil"
// NOTE(review): binder `el` is unused; special-cases a Seq containing one empty Seq.
case el @ Seq(Seq()) => "Seq(Seq())"
case x: Seq[_] => "Seq(" + x.map(whole).mkString(", ") + ")"
case x: None.type => "None"
case x: Some[_] => "Some(" + whole(x.get) + ")"
case x => x.toString
}
def contents(x: Tree): String = x match {
case x @ Lit(s: String) => enquote(s, DoubleQuotes)
case x @ Lit(_) => import scala.meta.dialects.Scala211; x.show[Syntax]
case x => x.productIterator.map(whole).mkString(", ")
}
val syntax = x.productPrefix + "(" + contents(x) + ")"
val attributes = {
// NOTE(review): this match only covers Environment.None and null; any other
// Environment value would throw a MatchError — confirm no other variants reach here.
val envPart = x.privateEnv match {
case env @ Environment.None =>
""
case null =>
""
}
val denotPart = x.privateDenot match {
case Denotation.None =>
""
case denot @ Denotation.Single(prefix, symbol) =>
s"[${footnotes.insert(denot)}]"
case denot @ Denotation.Multi(prefix, symbols) =>
val symbolFootnotes = symbols.map(symbol => footnotes.insert(Denotation.Single(prefix, symbol)))
s"[${symbolFootnotes.mkString(", ")}]"
case null =>
""
}
val typingPart = x.privateTyping match {
case Typing.None =>
""
case Typing.Recursive =>
// Recursive typing: print the reserved footnote id if we've seen this term before;
// otherwise reserve an id via previewInsert before insert so nested rendering of the
// singleton type can refer back to it.
val xkey = new CustomWrapper(x.require[Term])
if (recursions.contains(xkey)) {
s"{${recursions(xkey)}}"
} else {
if (x.isTypechecked) {
val xsub = Type.Singleton(x.require[Term.Ref]).setTypechecked
val typing = Typing.Nonrecursive(xsub)
recursions(xkey) = footnotes.previewInsert(typing)
s"{${footnotes.insert(typing)}}"
} else {
// NOTE: if x is not TYPECHECKED, then trying to insert it into a typing will crash.
// It would be ideal if we could just print the type as is in typing footnotes,
// but there's no easy way of doing that, so I'm going for something real simple.
s"{}"
}
}
case typing: Typing.Nonrecursive =>
if (forceTypes || typing.isTpeLoaded) s"{${footnotes.insert(typing)}}"
else s"{...}"
case null =>
""
}
// Trees that are not fully typechecked are flagged with a trailing "*".
val typecheckedPart = {
if (!x.isTypechecked) "*"
else ""
}
envPart + denotPart + typingPart + typecheckedPart
}
syntax + attributes
}
// NOTE: This is a map that does semantic comparisons of its keys.
// Since we can't plug custom equality and hashcode implementations into the standard map,
// we have to use custom keys instead.
private type CustomMap[T, U] = mutable.Map[CustomWrapper[T], U]
private def CustomMap[T, U]() = mutable.Map[CustomWrapper[T], U]()
private class CustomWrapper[+T](val x: T) {
override def equals(that: Any): Boolean = that match {
case that: CustomWrapper[_] => e.Semantic.equals(x, that.x)
case _ => false
}
override def hashCode: Int = e.Semantic.hashCode(x)
override def toString: String = s"CustomWrapper($x)"
}
}
}
| Dveim/scalameta | scalameta/trees/src/main/scala/scala/meta/internal/prettyprinters/TreeAttributes.scala | Scala | bsd-3-clause | 9,856 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.check.regex
import java.util.regex.Pattern
import scala.annotation.tailrec
import io.gatling.core.util.cache.Cache
import com.github.benmanes.caffeine.cache.LoadingCache
final class Patterns(cacheMaxCapacity: Long) {

  // Memoizes compiled regexes so repeated checks don't pay the compilation cost again.
  private val patternCache: LoadingCache[String, Pattern] =
    Cache.newConcurrentLoadingCache(cacheMaxCapacity, Pattern.compile)

  private def compilePattern(pattern: String): Pattern = patternCache.get(pattern)

  /**
   * Extracts the (n+1)-th occurrence of `pattern` in `string`, if present.
   *
   * @param string the text to scan
   * @param pattern the regex to apply
   * @param n zero-based rank of the occurrence to extract
   * @return the extracted value, or None when fewer than n+1 occurrences exist
   */
  def find[X: GroupExtractor](string: String, pattern: String, n: Int): Option[X] = {
    val matcher = compilePattern(pattern).matcher(string)
    // Advance the matcher n+1 times; stop early as soon as a find() fails.
    var remaining = n
    var matched = matcher.find()
    while (matched && remaining > 0) {
      matched = matcher.find()
      remaining -= 1
    }
    if (matched) Some(GroupExtractor[X].extract(matcher)) else None
  }

  /** Collects every occurrence of `pattern` in `string`, in order of appearance. */
  def findAll[X: GroupExtractor](string: String, pattern: String): Seq[X] = {
    val matcher = compilePattern(pattern).matcher(string)
    val results = Seq.newBuilder[X]
    while (matcher.find()) {
      results += GroupExtractor[X].extract(matcher)
    }
    results.result()
  }

  /** Counts how many times `pattern` occurs in `string`. */
  def count(string: String, pattern: String): Int = {
    val matcher = compilePattern(pattern).matcher(string)
    var total = 0
    while (matcher.find()) {
      total += 1
    }
    total
  }
}
| gatling/gatling | gatling-core/src/main/scala/io/gatling/core/check/regex/Patterns.scala | Scala | apache-2.0 | 1,900 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.io._
import java.util.concurrent.{ConcurrentHashMap, LinkedBlockingQueue, ThreadPoolExecutor}
import java.util.zip.{GZIPInputStream, GZIPOutputStream}
import scala.collection.JavaConverters._
import scala.collection.mutable.{HashMap, HashSet, ListBuffer, Map}
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag
import scala.util.control.NonFatal
import org.apache.spark.broadcast.{Broadcast, BroadcastManager}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.rpc.{RpcCallContext, RpcEndpoint, RpcEndpointRef, RpcEnv}
import org.apache.spark.scheduler.MapStatus
import org.apache.spark.shuffle.MetadataFetchFailedException
import org.apache.spark.storage.{BlockId, BlockManagerId, ShuffleBlockId}
import org.apache.spark.util._
/**
* Helper class used by the [[MapOutputTrackerMaster]] to perform bookkeeping for a single
* ShuffleMapStage.
*
* This class maintains a mapping from mapIds to `MapStatus`. It also maintains a cache of
* serialized map statuses in order to speed up tasks' requests for map output statuses.
*
* All public methods of this class are thread-safe.
*/
private class ShuffleStatus(numPartitions: Int) {
// All accesses to the following state must be guarded with `this.synchronized`.
/**
* MapStatus for each partition. The index of the array is the map partition id.
* Each value in the array is the MapStatus for a partition, or null if the partition
* is not available. Even though in theory a task may run multiple times (due to speculation,
* stage retries, etc.), in practice the likelihood of a map output being available at multiple
* locations is so small that we choose to ignore that case and store only a single location
* for each output.
*/
// Exposed for testing
val mapStatuses = new Array[MapStatus](numPartitions)
/**
* The cached result of serializing the map statuses array. This cache is lazily populated when
* [[serializedMapStatus]] is called. The cache is invalidated when map outputs are removed.
*/
private[this] var cachedSerializedMapStatus: Array[Byte] = _
/**
* Broadcast variable holding serialized map output statuses array. When [[serializedMapStatus]]
* serializes the map statuses array it may detect that the result is too large to send in a
* single RPC, in which case it places the serialized array into a broadcast variable and then
* sends a serialized broadcast variable instead. This variable holds a reference to that
* broadcast variable in order to keep it from being garbage collected and to allow for it to be
* explicitly destroyed later on when the ShuffleMapStage is garbage-collected.
*/
private[this] var cachedSerializedBroadcast: Broadcast[Array[Byte]] = _
/**
* Counter tracking the number of partitions that have output. This is a performance optimization
* to avoid having to count the number of non-null entries in the `mapStatuses` array and should
* be equivalent to`mapStatuses.count(_ ne null)`.
*/
private[this] var _numAvailableOutputs: Int = 0
/**
* Register a map output. If there is already a registered location for the map output then it
* will be replaced by the new location.
*/
def addMapOutput(mapId: Int, status: MapStatus): Unit = synchronized {
// Only the first registration for this mapId bumps the counter and invalidates the
// serialized cache; re-registration simply overwrites the slot below.
if (mapStatuses(mapId) == null) {
_numAvailableOutputs += 1
invalidateSerializedMapOutputStatusCache()
}
mapStatuses(mapId) = status
}
/**
* Remove the map output which was served by the specified block manager.
* This is a no-op if there is no registered map output or if the registered output is from a
* different block manager.
*/
def removeMapOutput(mapId: Int, bmAddress: BlockManagerId): Unit = synchronized {
if (mapStatuses(mapId) != null && mapStatuses(mapId).location == bmAddress) {
_numAvailableOutputs -= 1
mapStatuses(mapId) = null
invalidateSerializedMapOutputStatusCache()
}
}
/**
* Removes all shuffle outputs associated with this host. Note that this will also remove
* outputs which are served by an external shuffle server (if one exists).
*/
// Not synchronized itself; thread safety comes from the synchronized removeOutputsByFilter.
def removeOutputsOnHost(host: String): Unit = {
removeOutputsByFilter(x => x.host == host)
}
/**
* Removes all map outputs associated with the specified executor. Note that this will also
* remove outputs which are served by an external shuffle server (if one exists), as they are
* still registered with that execId.
*/
// NOTE(review): marked synchronized here while removeOutputsOnHost is not — harmless
// (the lock is reentrant) but inconsistent; confirm which form is intended.
def removeOutputsOnExecutor(execId: String): Unit = synchronized {
removeOutputsByFilter(x => x.executorId == execId)
}
/**
* Removes all shuffle outputs which satisfies the filter. Note that this will also
* remove outputs which are served by an external shuffle server (if one exists).
*/
def removeOutputsByFilter(f: (BlockManagerId) => Boolean): Unit = synchronized {
for (mapId <- 0 until mapStatuses.length) {
if (mapStatuses(mapId) != null && f(mapStatuses(mapId).location)) {
_numAvailableOutputs -= 1
mapStatuses(mapId) = null
invalidateSerializedMapOutputStatusCache()
}
}
}
/**
* Number of partitions that have shuffle outputs.
*/
def numAvailableOutputs: Int = synchronized {
_numAvailableOutputs
}
/**
* Returns the sequence of partition ids that are missing (i.e. needs to be computed).
*/
def findMissingPartitions(): Seq[Int] = synchronized {
val missing = (0 until numPartitions).filter(id => mapStatuses(id) == null)
// Sanity check that the counter really tracks the null entries in mapStatuses.
assert(missing.size == numPartitions - _numAvailableOutputs,
s"${missing.size} missing, expected ${numPartitions - _numAvailableOutputs}")
missing
}
/**
* Serializes the mapStatuses array into an efficient compressed format. See the comments on
* `MapOutputTracker.serializeMapStatuses()` for more details on the serialization format.
*
* This method is designed to be called multiple times and implements caching in order to speed
* up subsequent requests. If the cache is empty and multiple threads concurrently attempt to
* serialize the map statuses then serialization will only be performed in a single thread and all
* other threads will block until the cache is populated.
*/
def serializedMapStatus(
broadcastManager: BroadcastManager,
isLocal: Boolean,
minBroadcastSize: Int): Array[Byte] = synchronized {
if (cachedSerializedMapStatus eq null) {
// Both cache fields are populated together: the bytes to send and (possibly null)
// the broadcast that backs them when the payload exceeded minBroadcastSize.
val serResult = MapOutputTracker.serializeMapStatuses(
mapStatuses, broadcastManager, isLocal, minBroadcastSize)
cachedSerializedMapStatus = serResult._1
cachedSerializedBroadcast = serResult._2
}
cachedSerializedMapStatus
}
// Used in testing.
def hasCachedSerializedBroadcast: Boolean = synchronized {
cachedSerializedBroadcast != null
}
/**
* Helper function which provides thread-safe access to the mapStatuses array.
* The function should NOT mutate the array.
*/
def withMapStatuses[T](f: Array[MapStatus] => T): T = synchronized {
f(mapStatuses)
}
/**
* Clears the cached serialized map output statuses.
*/
def invalidateSerializedMapOutputStatusCache(): Unit = synchronized {
if (cachedSerializedBroadcast != null) {
// Prevent errors during broadcast cleanup from crashing the DAGScheduler (see SPARK-21444)
Utils.tryLogNonFatalError {
// Use `blocking = false` so that this operation doesn't hang while trying to send cleanup
// RPCs to dead executors.
cachedSerializedBroadcast.destroy(blocking = false)
}
cachedSerializedBroadcast = null
}
cachedSerializedMapStatus = null
}
}
// Messages understood by the MapOutputTrackerMasterEndpoint (see receiveAndReply below).
private[spark] sealed trait MapOutputTrackerMessage
// Requests the serialized map output statuses for the given shuffle.
private[spark] case class GetMapOutputStatuses(shuffleId: Int)
extends MapOutputTrackerMessage
// Asks the tracker endpoint to stop; the endpoint replies `true` before stopping.
private[spark] case object StopMapOutputTracker extends MapOutputTrackerMessage
// Work item queued to the tracker's dispatcher threads: the shuffle id plus the RPC
// context used to reply asynchronously. Not part of the sealed message hierarchy.
private[spark] case class GetMapOutputMessage(shuffleId: Int, context: RpcCallContext)
/** RpcEndpoint class for MapOutputTrackerMaster */
private[spark] class MapOutputTrackerMasterEndpoint(
override val rpcEnv: RpcEnv, tracker: MapOutputTrackerMaster, conf: SparkConf)
extends RpcEndpoint with Logging {

logDebug("init") // force eager creation of logger

/**
* Handles tracker RPCs. GetMapOutputStatuses is not answered inline: the request is
* queued to the tracker's dedicated dispatcher pool and the reply is sent from there,
* keeping the RPC dispatcher threads free.
*/
override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
case GetMapOutputStatuses(shuffleId: Int) =>
val hostPort = context.senderAddress.hostPort
logInfo("Asked to send map output locations for shuffle " + shuffleId + " to " + hostPort)
// FIX: `tracker.post` returns Unit; the previous `val mapOutputStatuses = ...`
// bound that Unit to an unused local. Just enqueue the message.
tracker.post(new GetMapOutputMessage(shuffleId, context))

case StopMapOutputTracker =>
logInfo("MapOutputTrackerMasterEndpoint stopped!")
context.reply(true)
stop()
}
}
/**
* Class that keeps track of the location of the map output of a stage. This is abstract because the
* driver and executor have different versions of the MapOutputTracker. In principle the driver-
* and executor-side classes don't need to share a common base class; the current shared base class
* is maintained primarily for backwards-compatibility in order to avoid having to update existing
* test code.
*/
private[spark] abstract class MapOutputTracker(conf: SparkConf) extends Logging {

/** Set to the MapOutputTrackerMasterEndpoint living on the driver. */
var trackerEndpoint: RpcEndpointRef = _

/**
* The driver-side counter is incremented every time that a map output is lost. This value is
* sent to executors as part of tasks, where executors compare the new epoch number to the
* highest epoch number that they received in the past. If the new epoch number is higher then
* executors will clear their local caches of map output statuses and will re-fetch (possibly
* updated) statuses from the driver.
*/
protected var epoch: Long = 0
protected val epochLock = new AnyRef

/**
* Asks the trackerEndpoint for an answer within the default timeout, wrapping any failure
* in a SparkException after logging it.
*/
protected def askTracker[T: ClassTag](message: Any): T =
try trackerEndpoint.askSync[T](message)
catch {
case e: Exception =>
logError("Error communicating with MapOutputTracker", e)
throw new SparkException("Error communicating with MapOutputTracker", e)
}

/** Send a one-way message to the trackerEndpoint, to which we expect it to reply with true. */
protected def sendTracker(message: Any): Unit = {
val acknowledged = askTracker[Boolean](message)
if (!acknowledged) {
throw new SparkException(
"Error reply received from MapOutputTracker. Expecting true, got " + acknowledged.toString)
}
}

// For testing: single-partition convenience overload delegating to the range variant.
def getMapSizesByExecutorId(shuffleId: Int, reduceId: Int)
: Iterator[(BlockManagerId, Seq[(BlockId, Long)])] =
getMapSizesByExecutorId(shuffleId, reduceId, reduceId + 1)

/**
* Called from executors to get the server URIs and output sizes for each shuffle block that
* needs to be read from a given range of map output partitions (startPartition is included but
* endPartition is excluded from the range).
*
* @return A sequence of 2-item tuples, where the first item in the tuple is a BlockManagerId,
* and the second item is a sequence of (shuffle block id, shuffle block size) tuples
* describing the shuffle blocks that are stored at that block manager.
*/
def getMapSizesByExecutorId(shuffleId: Int, startPartition: Int, endPartition: Int)
: Iterator[(BlockManagerId, Seq[(BlockId, Long)])]

/**
* Deletes map output status information for the specified shuffle stage.
*/
def unregisterShuffle(shuffleId: Int): Unit

def stop(): Unit = {}
}
/**
* Driver-side class that keeps track of the location of the map output of a stage.
*
* The DAGScheduler uses this class to (de)register map output statuses and to look up statistics
* for performing locality-aware reduce task scheduling.
*
* ShuffleMapStage uses this class for tracking available / missing outputs in order to determine
* which tasks need to be run.
*/
private[spark] class MapOutputTrackerMaster(
conf: SparkConf,
broadcastManager: BroadcastManager,
isLocal: Boolean)
extends MapOutputTracker(conf) {
// The size at which we use Broadcast to send the map output statuses to the executors
private val minSizeForBroadcast =
conf.getSizeAsBytes("spark.shuffle.mapOutput.minSizeForBroadcast", "512k").toInt
/** Whether to compute locality preferences for reduce tasks */
private val shuffleLocalityEnabled = conf.getBoolean("spark.shuffle.reduceLocality.enabled", true)
// Number of map and reduce tasks above which we do not assign preferred locations based on map
// output sizes. We limit the size of jobs for which assign preferred locations as computing the
// top locations by size becomes expensive.
private val SHUFFLE_PREF_MAP_THRESHOLD = 1000
// NOTE: This should be less than 2000 as we use HighlyCompressedMapStatus beyond that
private val SHUFFLE_PREF_REDUCE_THRESHOLD = 1000
// Fraction of total map output that must be at a location for it to considered as a preferred
// location for a reduce task. Making this larger will focus on fewer locations where most data
// can be read locally, but may lead to more delay in scheduling if those locations are busy.
private val REDUCER_PREF_LOCS_FRACTION = 0.2
// HashMap for storing shuffleStatuses in the driver.
// Statuses are dropped only by explicit de-registering.
// Exposed for testing
val shuffleStatuses = new ConcurrentHashMap[Int, ShuffleStatus]().asScala
private val maxRpcMessageSize = RpcUtils.maxMessageSizeBytes(conf)
// requests for map output statuses
private val mapOutputRequests = new LinkedBlockingQueue[GetMapOutputMessage]
// Thread pool used for handling map output status requests. This is a separate thread pool
// to ensure we don't block the normal dispatcher threads.
private val threadpool: ThreadPoolExecutor = {
val numThreads = conf.getInt("spark.shuffle.mapOutput.dispatcher.numThreads", 8)
val pool = ThreadUtils.newDaemonFixedThreadPool(numThreads, "map-output-dispatcher")
for (i <- 0 until numThreads) {
pool.execute(new MessageLoop)
}
pool
}
// Make sure that we aren't going to exceed the max RPC message size by making sure
// we use broadcast to send large map output statuses.
if (minSizeForBroadcast > maxRpcMessageSize) {
val msg = s"spark.shuffle.mapOutput.minSizeForBroadcast ($minSizeForBroadcast bytes) must " +
s"be <= spark.rpc.message.maxSize ($maxRpcMessageSize bytes) to prevent sending an rpc " +
"message that is too large."
logError(msg)
throw new IllegalArgumentException(msg)
}
def post(message: GetMapOutputMessage): Unit = {
mapOutputRequests.offer(message)
}
/** Message loop used for dispatching messages. */
private class MessageLoop extends Runnable {
override def run(): Unit = {
try {
while (true) {
try {
val data = mapOutputRequests.take()
if (data == PoisonPill) {
// Put PoisonPill back so that other MessageLoops can see it.
mapOutputRequests.offer(PoisonPill)
return
}
val context = data.context
val shuffleId = data.shuffleId
val hostPort = context.senderAddress.hostPort
logDebug("Handling request to send map output locations for shuffle " + shuffleId +
" to " + hostPort)
val shuffleStatus = shuffleStatuses.get(shuffleId).head
context.reply(
shuffleStatus.serializedMapStatus(broadcastManager, isLocal, minSizeForBroadcast))
} catch {
case NonFatal(e) => logError(e.getMessage, e)
}
}
} catch {
case ie: InterruptedException => // exit
}
}
}
/** A poison endpoint that indicates MessageLoop should exit its message loop. */
private val PoisonPill = new GetMapOutputMessage(-99, null)
// Used only in unit tests.
private[spark] def getNumCachedSerializedBroadcast: Int = {
shuffleStatuses.valuesIterator.count(_.hasCachedSerializedBroadcast)
}
def registerShuffle(shuffleId: Int, numMaps: Int) {
if (shuffleStatuses.put(shuffleId, new ShuffleStatus(numMaps)).isDefined) {
throw new IllegalArgumentException("Shuffle ID " + shuffleId + " registered twice")
}
}
def registerMapOutput(shuffleId: Int, mapId: Int, status: MapStatus) {
shuffleStatuses(shuffleId).addMapOutput(mapId, status)
}
/** Unregister map output information of the given shuffle, mapper and block manager */
def unregisterMapOutput(shuffleId: Int, mapId: Int, bmAddress: BlockManagerId) {
shuffleStatuses.get(shuffleId) match {
case Some(shuffleStatus) =>
shuffleStatus.removeMapOutput(mapId, bmAddress)
incrementEpoch()
case None =>
throw new SparkException("unregisterMapOutput called for nonexistent shuffle ID")
}
}
/** Unregister all map output information of the given shuffle. */
def unregisterAllMapOutput(shuffleId: Int) {
shuffleStatuses.get(shuffleId) match {
case Some(shuffleStatus) =>
shuffleStatus.removeOutputsByFilter(x => true)
incrementEpoch()
case None =>
throw new SparkException(
s"unregisterAllMapOutput called for nonexistent shuffle ID $shuffleId.")
}
}
/** Unregister shuffle data */
def unregisterShuffle(shuffleId: Int) {
shuffleStatuses.remove(shuffleId).foreach { shuffleStatus =>
shuffleStatus.invalidateSerializedMapOutputStatusCache()
}
}
/**
* Removes all shuffle outputs associated with this host. Note that this will also remove
* outputs which are served by an external shuffle server (if one exists).
*/
def removeOutputsOnHost(host: String): Unit = {
shuffleStatuses.valuesIterator.foreach { _.removeOutputsOnHost(host) }
incrementEpoch()
}
/**
* Removes all shuffle outputs associated with this executor. Note that this will also remove
* outputs which are served by an external shuffle server (if one exists), as they are still
* registered with this execId.
*/
def removeOutputsOnExecutor(execId: String): Unit = {
shuffleStatuses.valuesIterator.foreach { _.removeOutputsOnExecutor(execId) }
incrementEpoch()
}
/** Check if the given shuffle is being tracked */
def containsShuffle(shuffleId: Int): Boolean = shuffleStatuses.contains(shuffleId)
def getNumAvailableOutputs(shuffleId: Int): Int = {
shuffleStatuses.get(shuffleId).map(_.numAvailableOutputs).getOrElse(0)
}
/**
* Returns the sequence of partition ids that are missing (i.e. needs to be computed), or None
* if the MapOutputTrackerMaster doesn't know about this shuffle.
*/
def findMissingPartitions(shuffleId: Int): Option[Seq[Int]] = {
shuffleStatuses.get(shuffleId).map(_.findMissingPartitions())
}
/**
* Grouped function of Range, this is to avoid traverse of all elements of Range using
* IterableLike's grouped function.
*/
def rangeGrouped(range: Range, size: Int): Seq[Range] = {
val start = range.start
val step = range.step
val end = range.end
for (i <- start.until(end, size * step)) yield {
i.until(i + size * step, step)
}
}
/**
* To equally divide n elements into m buckets, basically each bucket should have n/m elements,
* for the remaining n%m elements, add one more element to the first n%m buckets each.
*/
def equallyDivide(numElements: Int, numBuckets: Int): Seq[Seq[Int]] = {
val elementsPerBucket = numElements / numBuckets
val remaining = numElements % numBuckets
val splitPoint = (elementsPerBucket + 1) * remaining
if (elementsPerBucket == 0) {
rangeGrouped(0.until(splitPoint), elementsPerBucket + 1)
} else {
rangeGrouped(0.until(splitPoint), elementsPerBucket + 1) ++
rangeGrouped(splitPoint.until(numElements), elementsPerBucket)
}
}
/**
* Return statistics about all of the outputs for a given shuffle.
*/
def getStatistics(dep: ShuffleDependency[_, _, _]): MapOutputStatistics = {
shuffleStatuses(dep.shuffleId).withMapStatuses { statuses =>
val totalSizes = new Array[Long](dep.partitioner.numPartitions)
val parallelAggThreshold = conf.get(
SHUFFLE_MAP_OUTPUT_PARALLEL_AGGREGATION_THRESHOLD)
val parallelism = math.min(
Runtime.getRuntime.availableProcessors(),
statuses.length.toLong * totalSizes.length / parallelAggThreshold + 1).toInt
if (parallelism <= 1) {
for (s <- statuses) {
for (i <- 0 until totalSizes.length) {
totalSizes(i) += s.getSizeForBlock(i)
}
}
} else {
val threadPool = ThreadUtils.newDaemonFixedThreadPool(parallelism, "map-output-aggregate")
try {
implicit val executionContext = ExecutionContext.fromExecutor(threadPool)
val mapStatusSubmitTasks = equallyDivide(totalSizes.length, parallelism).map {
reduceIds => Future {
for (s <- statuses; i <- reduceIds) {
totalSizes(i) += s.getSizeForBlock(i)
}
}
}
ThreadUtils.awaitResult(Future.sequence(mapStatusSubmitTasks), Duration.Inf)
} finally {
threadPool.shutdown()
}
}
new MapOutputStatistics(dep.shuffleId, totalSizes)
}
}
/**
* Return the preferred hosts on which to run the given map output partition in a given shuffle,
* i.e. the nodes that the most outputs for that partition are on.
*
* @param dep shuffle dependency object
* @param partitionId map output partition that we want to read
* @return a sequence of host names
*/
def getPreferredLocationsForShuffle(dep: ShuffleDependency[_, _, _], partitionId: Int)
: Seq[String] = {
if (shuffleLocalityEnabled && dep.rdd.partitions.length < SHUFFLE_PREF_MAP_THRESHOLD &&
dep.partitioner.numPartitions < SHUFFLE_PREF_REDUCE_THRESHOLD) {
val blockManagerIds = getLocationsWithLargestOutputs(dep.shuffleId, partitionId,
dep.partitioner.numPartitions, REDUCER_PREF_LOCS_FRACTION)
if (blockManagerIds.nonEmpty) {
blockManagerIds.get.map(_.host)
} else {
Nil
}
} else {
Nil
}
}
/**
 * Return a list of locations that each have fraction of map output greater than the specified
 * threshold.
 *
 * @param shuffleId id of the shuffle
 * @param reducerId id of the reduce task
 * @param numReducers total number of reducers in the shuffle
 * @param fractionThreshold fraction of total map output size that a location must have
 *                          for it to be considered large.
 * @return Some(locations) when at least one location crosses the threshold, None otherwise
 */
def getLocationsWithLargestOutputs(
    shuffleId: Int,
    reducerId: Int,
    numReducers: Int,
    fractionThreshold: Double)
  : Option[Array[BlockManagerId]] = {
  // NOTE: the previous implementation used `return Some(...)` from inside the
  // withMapStatuses closure, which is a non-local return (implemented by throwing
  // NonLocalReturnControl). withMapStatuses returns its closure's result, so we can
  // simply produce the Option as the closure's value instead.
  shuffleStatuses.get(shuffleId).flatMap { shuffleStatus =>
    shuffleStatus.withMapStatuses { statuses =>
      if (statuses.nonEmpty) {
        // HashMap to add up sizes of all blocks at the same location
        val locs = new HashMap[BlockManagerId, Long]
        var totalOutputSize = 0L
        var mapIdx = 0
        while (mapIdx < statuses.length) {
          val status = statuses(mapIdx)
          // status may be null here if we are called between registerShuffle, which creates an
          // array with null entries for each output, and registerMapOutputs, which populates it
          // with valid status entries. This is possible if one thread schedules a job which
          // depends on an RDD which is currently being computed by another thread.
          if (status != null) {
            val blockSize = status.getSizeForBlock(reducerId)
            if (blockSize > 0) {
              locs(status.location) = locs.getOrElse(status.location, 0L) + blockSize
              totalOutputSize += blockSize
            }
          }
          mapIdx += 1
        }
        // Keep only locations which hold at least fractionThreshold of the total output.
        val topLocs = locs.filter { case (_, size) =>
          size.toDouble / totalOutputSize >= fractionThreshold
        }
        if (topLocs.nonEmpty) Some(topLocs.keys.toArray) else None
      } else {
        None
      }
    }
  }
}
/** Bump the epoch under the epoch lock; executors observing a newer epoch clear their caches. */
def incrementEpoch(): Unit = {
  // NOTE: `def f() { ... }` procedure syntax is deprecated; use an explicit `: Unit =`.
  epochLock.synchronized {
    epoch += 1
    logDebug("Increasing epoch to " + epoch)
  }
}
/** Called to get current epoch number. */
def getEpoch: Long = {
  // `synchronized` takes a by-name argument, so a `return` inside it is a non-local
  // return (throws NonLocalReturnControl). The block's last expression is the value.
  epochLock.synchronized {
    epoch
  }
}
// Get blocks sizes by executor Id. Note that zero-sized blocks are excluded in the result.
// This method is only called in local-mode.
def getMapSizesByExecutorId(shuffleId: Int, startPartition: Int, endPartition: Int)
    : Iterator[(BlockManagerId, Seq[(BlockId, Long)])] = {
  logDebug(s"Fetching outputs for shuffle $shuffleId, partitions $startPartition-$endPartition")
  // An unknown shuffle id simply yields an empty iterator.
  shuffleStatuses.get(shuffleId)
    .map(_.withMapStatuses { statuses =>
      MapOutputTracker.convertMapStatuses(shuffleId, startPartition, endPartition, statuses)
    })
    .getOrElse(Iterator.empty)
}
/** Stop the tracker: poison the request queue, shut down the pool, and clear all state. */
override def stop(): Unit = {
  // NOTE: procedure syntax (`def stop() { ... }`) is deprecated; declare `: Unit =` explicitly.
  mapOutputRequests.offer(PoisonPill)
  threadpool.shutdown()
  sendTracker(StopMapOutputTracker)
  trackerEndpoint = null
  shuffleStatuses.clear()
}
}
/**
* Executor-side client for fetching map output info from the driver's MapOutputTrackerMaster.
* Note that this is not used in local-mode; instead, local-mode Executors access the
* MapOutputTrackerMaster directly (which is possible because the master and worker share a common
* superclass).
*/
private[spark] class MapOutputTrackerWorker(conf: SparkConf) extends MapOutputTracker(conf) {

  // Executor-local cache of map output statuses, keyed by shuffle id. Populated lazily by
  // getStatuses() and cleared on fetch failure (below) or when updateEpoch sees a newer epoch.
  val mapStatuses: Map[Int, Array[MapStatus]] =
    new ConcurrentHashMap[Int, Array[MapStatus]]().asScala

  /** Remembers which map output locations are currently being fetched on an executor. */
  private val fetching = new HashSet[Int]

  // Get blocks sizes by executor Id. Note that zero-sized blocks are excluded in the result.
  override def getMapSizesByExecutorId(shuffleId: Int, startPartition: Int, endPartition: Int)
      : Iterator[(BlockManagerId, Seq[(BlockId, Long)])] = {
    logDebug(s"Fetching outputs for shuffle $shuffleId, partitions $startPartition-$endPartition")
    val statuses = getStatuses(shuffleId)
    try {
      MapOutputTracker.convertMapStatuses(shuffleId, startPartition, endPartition, statuses)
    } catch {
      case e: MetadataFetchFailedException =>
        // We experienced a fetch failure so our mapStatuses cache is outdated; clear it:
        mapStatuses.clear()
        throw e
    }
  }

  /**
   * Get or fetch the array of MapStatuses for a given shuffle ID. NOTE: clients MUST synchronize
   * on this array when reading it, because on the driver, we may be changing it in place.
   *
   * (It would be nice to remove this restriction in the future.)
   *
   * At most one thread fetches per shuffle id: the `fetching` set is the coordination point,
   * guarded by its own monitor, with wait()/notifyAll() used to park/wake other requesters.
   *
   * @throws MetadataFetchFailedException if the tracker returns no statuses for the shuffle
   */
  private def getStatuses(shuffleId: Int): Array[MapStatus] = {
    val statuses = mapStatuses.get(shuffleId).orNull
    if (statuses == null) {
      logInfo("Don't have map outputs for shuffle " + shuffleId + ", fetching them")
      val startTime = System.currentTimeMillis
      var fetchedStatuses: Array[MapStatus] = null
      fetching.synchronized {
        // Someone else is fetching it; wait for them to be done
        while (fetching.contains(shuffleId)) {
          try {
            fetching.wait()
          } catch {
            case e: InterruptedException =>
            // NOTE(review): the interrupt is swallowed and the wait loop re-entered —
            // presumably deliberate so a stray interrupt cannot abort the fetch; confirm.
          }
        }
        // Either while we waited the fetch happened successfully, or
        // someone fetched it in between the get and the fetching.synchronized.
        fetchedStatuses = mapStatuses.get(shuffleId).orNull
        if (fetchedStatuses == null) {
          // We have to do the fetch, get others to wait for us.
          fetching += shuffleId
        }
      }
      if (fetchedStatuses == null) {
        // We won the race to fetch the statuses; do so
        logInfo("Doing the fetch; tracker endpoint = " + trackerEndpoint)
        // This try-finally prevents hangs due to timeouts:
        try {
          val fetchedBytes = askTracker[Array[Byte]](GetMapOutputStatuses(shuffleId))
          fetchedStatuses = MapOutputTracker.deserializeMapStatuses(fetchedBytes)
          logInfo("Got the output locations")
          mapStatuses.put(shuffleId, fetchedStatuses)
        } finally {
          // Always release the "fetching" claim and wake waiters, even if the ask failed,
          // so blocked threads do not hang forever.
          fetching.synchronized {
            fetching -= shuffleId
            fetching.notifyAll()
          }
        }
      }
      logDebug(s"Fetching map output statuses for shuffle $shuffleId took " +
        s"${System.currentTimeMillis - startTime} ms")
      if (fetchedStatuses != null) {
        fetchedStatuses
      } else {
        logError("Missing all output locations for shuffle " + shuffleId)
        throw new MetadataFetchFailedException(
          shuffleId, -1, "Missing all output locations for shuffle " + shuffleId)
      }
    } else {
      statuses
    }
  }

  /** Unregister shuffle data. */
  def unregisterShuffle(shuffleId: Int): Unit = {
    mapStatuses.remove(shuffleId)
  }

  /**
   * Called from executors to update the epoch number, potentially clearing old outputs
   * because of a fetch failure. Each executor task calls this with the latest epoch
   * number on the driver at the time it was created.
   */
  def updateEpoch(newEpoch: Long): Unit = {
    epochLock.synchronized {
      if (newEpoch > epoch) {
        logInfo("Updating epoch to " + newEpoch + " and clearing cache")
        epoch = newEpoch
        mapStatuses.clear()
      }
    }
  }
}
private[spark] object MapOutputTracker extends Logging {

  val ENDPOINT_NAME = "MapOutputTracker"

  // Wire-format tag written as the first byte of a serialized payload: the payload either
  // contains the statuses inline (DIRECT) or a serialized Broadcast handle (BROADCAST).
  private val DIRECT = 0
  private val BROADCAST = 1

  // Serialize an array of map output locations into an efficient byte format so that we can send
  // it to reduce tasks. We do this by compressing the serialized bytes using GZIP. They will
  // generally be pretty compressible because many map outputs will be on the same hostname.
  // Returns the bytes to send plus the Broadcast that was created (null when none was).
  def serializeMapStatuses(statuses: Array[MapStatus], broadcastManager: BroadcastManager,
      isLocal: Boolean, minBroadcastSize: Int): (Array[Byte], Broadcast[Array[Byte]]) = {
    val out = new ByteArrayOutputStream
    out.write(DIRECT)
    val objOut = new ObjectOutputStream(new GZIPOutputStream(out))
    Utils.tryWithSafeFinally {
      // Since statuses can be modified in parallel, sync on it
      statuses.synchronized {
        objOut.writeObject(statuses)
      }
    } {
      objOut.close()
    }
    val arr = out.toByteArray
    if (arr.length >= minBroadcastSize) {
      // Use broadcast instead.
      // Important arr(0) is the tag == DIRECT, ignore that while deserializing !
      val bcast = broadcastManager.newBroadcast(arr, isLocal)
      // toByteArray creates copy, so we can reuse out
      out.reset()
      out.write(BROADCAST)
      val oos = new ObjectOutputStream(new GZIPOutputStream(out))
      oos.writeObject(bcast)
      oos.close()
      val outArr = out.toByteArray
      logInfo("Broadcast mapstatuses size = " + outArr.length + ", actual size = " + arr.length)
      (outArr, bcast)
    } else {
      // Small enough to ship inline; no broadcast created.
      (arr, null)
    }
  }

  // Opposite of serializeMapStatuses.
  def deserializeMapStatuses(bytes: Array[Byte]): Array[MapStatus] = {
    assert (bytes.length > 0)

    // Deserialize one GZIP-compressed Java-serialized object from a slice of `arr`.
    def deserializeObject(arr: Array[Byte], off: Int, len: Int): AnyRef = {
      val objIn = new ObjectInputStream(new GZIPInputStream(
        new ByteArrayInputStream(arr, off, len)))
      Utils.tryWithSafeFinally {
        objIn.readObject()
      } {
        objIn.close()
      }
    }

    // bytes(0) is the format tag written by serializeMapStatuses.
    bytes(0) match {
      case DIRECT =>
        deserializeObject(bytes, 1, bytes.length - 1).asInstanceOf[Array[MapStatus]]
      case BROADCAST =>
        // deserialize the Broadcast, pull .value array out of it, and then deserialize that
        val bcast = deserializeObject(bytes, 1, bytes.length - 1).
          asInstanceOf[Broadcast[Array[Byte]]]
        logInfo("Broadcast mapstatuses size = " + bytes.length +
          ", actual size = " + bcast.value.length)
        // Important - ignore the DIRECT tag ! Start from offset 1
        deserializeObject(bcast.value, 1, bcast.value.length - 1).asInstanceOf[Array[MapStatus]]
      case _ => throw new IllegalArgumentException("Unexpected byte tag = " + bytes(0))
    }
  }

  /**
   * Given an array of map statuses and a range of map output partitions, returns a sequence that,
   * for each block manager ID, lists the shuffle block IDs and corresponding shuffle block sizes
   * stored at that block manager.
   * Note that empty blocks are filtered in the result.
   *
   * If any of the statuses is null (indicating a missing location due to a failed mapper),
   * throws a FetchFailedException.
   *
   * @param shuffleId Identifier for the shuffle
   * @param startPartition Start of map output partition ID range (included in range)
   * @param endPartition End of map output partition ID range (excluded from range)
   * @param statuses List of map statuses, indexed by map ID.
   * @return A sequence of 2-item tuples, where the first item in the tuple is a BlockManagerId,
   *         and the second item is a sequence of (shuffle block ID, shuffle block size) tuples
   *         describing the shuffle blocks that are stored at that block manager.
   */
  def convertMapStatuses(
      shuffleId: Int,
      startPartition: Int,
      endPartition: Int,
      statuses: Array[MapStatus]): Iterator[(BlockManagerId, Seq[(BlockId, Long)])] = {
    assert (statuses != null)
    val splitsByAddress = new HashMap[BlockManagerId, ListBuffer[(BlockId, Long)]]
    for ((status, mapId) <- statuses.iterator.zipWithIndex) {
      if (status == null) {
        val errorMessage = s"Missing an output location for shuffle $shuffleId"
        logError(errorMessage)
        throw new MetadataFetchFailedException(shuffleId, startPartition, errorMessage)
      } else {
        for (part <- startPartition until endPartition) {
          val size = status.getSizeForBlock(part)
          // Skip zero-sized blocks, per the contract documented above.
          if (size != 0) {
            splitsByAddress.getOrElseUpdate(status.location, ListBuffer()) +=
              ((ShuffleBlockId(shuffleId, mapId, part), size))
          }
        }
      }
    }
    splitsByAddress.iterator
  }
}
| michalsenkyr/spark | core/src/main/scala/org/apache/spark/MapOutputTracker.scala | Scala | apache-2.0 | 35,734 |
package kata.scala
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}
class DepthFirstSearchTest extends FlatSpec with Matchers with BeforeAndAfter {

  // Fresh graph before every test so edges do not leak between cases.
  var graph = new Graph

  before {
    graph = new Graph
  }

  // Small helper so each test reads as a list of edges.
  private def connect(edges: (Int, Int)*): Unit =
    edges.foreach { case (from, to) => graph.addEdge(from, to) }

  it should "create a depth first search" in {
    connect(1 -> 2)
    DepthFirstSearch.create(graph, 1) should not be None
  }

  it should "not create a depth first search when graph does not contain the starting vertex" in {
    DepthFirstSearch.create(graph, 1) shouldBe None
  }

  it should "have a path to a transient vertex" in {
    connect(1 -> 2, 2 -> 3, 3 -> 4)
    val dfs = DepthFirstSearch.create(graph, 1).get
    dfs.hasPathTo(4) shouldBe true
  }

  it should "not have a path to a not connected vertex" in {
    connect(1 -> 2, 3 -> 4)
    val dfs = DepthFirstSearch.create(graph, 1).get
    dfs.hasPathTo(4) shouldBe false
  }
}
| Alex-Diez/Scala-TDD-Katas | old-katas/graph-search-kata/day-6/src/test/scala/kata/scala/DepthFirstSearchTest.scala | Scala | mit | 1,024 |
package chapter.seventeen
object ExerciseSix {

  /**
   * Returns the middle element of a collection with an odd size of at least three.
   *
   * The collection is materialized once into an IndexedSeq, so the input is traversed a
   * single time (the original called `iter.size` and then `toIndexedSeq`, traversing twice).
   *
   * @param iter the collection to inspect
   * @throws IllegalArgumentException if the collection has fewer than 3 elements or an even size
   */
  def middle[T](iter: Iterable[T]): T = {
    val elems = iter.toIndexedSeq
    val size = elems.size
    require(size > 2 && size % 2 == 1,
      s"collection must have an odd size of at least 3, but had size $size")
    elems(size / 2)
  }
}
| deekim/impatient-scala | src/main/scala/chapter/seventeen/ExerciseSix.scala | Scala | apache-2.0 | 195 |
/**
* Copyright (C) 2012 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.portlet
import java.{util ⇒ ju}
import javax.portlet._
import org.orbeon.oxf.externalcontext.WSRPURLRewriter.PathParameterName
import org.orbeon.oxf.fr.embedding._
import org.orbeon.oxf.http._
import org.orbeon.oxf.portlet.BufferedPortlet._
import org.orbeon.oxf.portlet.liferay.LiferayURL
import org.orbeon.oxf.util.NetUtils
import org.orbeon.oxf.util.ScalaUtils._
import scala.collection.JavaConverters._
// Embedding context backed by the portlet API: attribute storage is delegated to the
// portlet session, and the markup id namespace is the short one computed per portal namespace.
class PortletEmbeddingContext(
  context        : PortletContext,
  request        : PortletRequest,
  response       : PortletResponse,
  val httpClient : HttpClient
) extends EmbeddingContext {

  // Portlet session (created on demand) backing the session-attribute API below.
  private val session = request.getPortletSession(true) ensuring (_ ne null)

  // Short id namespace for this portal namespace (see BufferedPortlet.shortIdNamespace).
  val namespace = BufferedPortlet.shortIdNamespace(response.getNamespace, context) ensuring (_ ne null)

  // Session-scoped attribute storage, delegated directly to the portlet session.
  def getSessionAttribute(name: String)                = session.getAttribute(name)
  def setSessionAttribute(name: String, value: AnyRef) = session.setAttribute(name, value)
  def removeSessionAttribute(name: String)             = session.removeAttribute(name)
}
// Embedding context that can also write a response body, backed by a portlet MimeResponse.
class PortletEmbeddingContextWithResponse(
  context    : PortletContext,
  request    : PortletRequest,
  response   : MimeResponse,
  httpClient : HttpClient
) extends PortletEmbeddingContext(
  context,
  request,
  response,
  httpClient
) with EmbeddingContextWithResponse {

  def writer                       = response.getWriter
  def outputStream                 = response.getPortletOutputStream
  def decodeURL(encoded: String)   = LiferayURL.wsrpToPortletURL(encoded, response)
  def setStatusCode(code: Int)     = () // Q: Can we do anything meaningful for resource caching?

  // The content type must go through setContentType; every other header is a portlet property.
  def setHeader(name: String, value: String): Unit =
    if (name equalsIgnoreCase Headers.ContentType)
      response.setContentType(value)
    else
      response.setProperty(name, value)
}
// Abstract portlet logic including buffering of portlet actions
// This doesn't deal directly with ProcessorService or HTTP proxying
trait BufferedPortlet {

  def title(request: RenderRequest): String
  def portletContext: PortletContext

  // Immutable response with parameters
  case class ResponseWithParameters(response: BufferedContentOrRedirect, parameters: Map[String, List[String]])

  /**
   * Render the portlet. If a buffered action response matching the current render parameters is
   * stored in the session, replay it; otherwise evaluate `render` and stream its content out.
   */
  def bufferedRender(
    request  : RenderRequest,
    response : RenderResponse,
    render   : ⇒ StreamedContentOrRedirect)(implicit
    ctx      : EmbeddingContextWithResponse
  ): Unit =
    getStoredResponseWithParameters match {
      case Some(ResponseWithParameters(content: BufferedContent, parameters)) if toScalaMap(request.getParameterMap) == parameters ⇒
        // The result of an action with the current parameters is available
        // NOTE: Until we can correctly handle multiple render requests for an XForms page, we should detect the
        // situation where a second render request tries to load a deferred action response, and display an
        // error message.
        writeResponseWithParameters(request, response, content)
      case _ ⇒
        // No matching action result, call the render function
        // NOTE: The Portlet API does not support sendRedirect() and setRenderParameters() upon render(). This
        // means we cannot easily simulate redirects upon render. For internal redirects, we could maybe
        // implement the redirect loop here. The issue would be what happens upon subsequent renders, as they
        // would again request the first path, not the redirected path. For now we throw.
        render match {
          case content: StreamedContent ⇒ useAndClose(content)(writeResponseWithParameters(request, response, _))
          case redirect: Redirect       ⇒ throw new IllegalStateException("Processor execution did not return content.")
        }
    }

  /**
   * Process a portlet action. A full redirect is sent as-is; an internal redirect becomes new
   * render parameters; streamed content is buffered into the session for the following render.
   */
  def bufferedProcessAction(
    request  : ActionRequest,
    response : ActionResponse,
    action   : ⇒ StreamedContentOrRedirect)(implicit
    ctx      : EmbeddingContext
  ): Unit = {
    // Make sure the previously cached output is cleared, if there is any. We keep the result of only one action.
    clearResponseWithParameters()

    action match {
      case Redirect(location, true) ⇒
        response.sendRedirect(location)
      case Redirect(location, false) ⇒
        // Just update the render parameters to simulate a redirect within the portlet
        val (path, queryOpt) = splitQuery(location)
        val parameters = queryOpt match {
          case (Some(query)) ⇒
            // Preserve the redirect's query string and add/overwrite the path parameter.
            val m = NetUtils.decodeQueryString(query)
            m.put(PathParameter, Array(path))
            ju.Collections.unmodifiableMap[String, Array[String]](m)
          case None ⇒
            ju.Collections.singletonMap(PathParameter, Array(path))
        }
        // Set the new parameters for the subsequent render requests
        response.setRenderParameters(parameters)
      case content: StreamedContent ⇒
        // Content was written, keep it in the session for subsequent render requests with the current action parameters
        useAndClose(content) { _ ⇒
          // NOTE: Don't use the action parameters, as in the case of a form POST there can be dozens of those
          // or more, and anyway those don't make sense as subsequent render parameters. Instead, we just use
          // the path and a method indicator. Later we should either indicate an error, or handle XForms Ajax
          // updates properly.
          val newRenderParameters = Map(
            PathParameter   → Array(request.getParameter(PathParameter)),
            MethodParameter → Array("post")
          ).asJava
          response.setRenderParameters(newRenderParameters)
          // Store response
          storeResponseWithParameters(ResponseWithParameters(BufferedContent(content), toScalaMap(newRenderParameters)))
        }
    }
  }

  // Set the title/content type on the portlet response and stream the body out.
  private def writeResponseWithParameters(
    request         : RenderRequest,
    response        : RenderResponse,
    responseContent : Content)(implicit
    ctx             : EmbeddingContextWithResponse
  ): Unit = {
    // Set title and content type
    responseContent.title orElse Option(title(request)) foreach response.setTitle
    responseContent.contentType foreach response.setContentType
    // Write response out directly
    APISupport.writeResponseBody(responseContent)
  }

  // Session storage for the single buffered action response (one per session at most).
  protected def getStoredResponseWithParameters(implicit ctx: EmbeddingContext) =
    Option(ctx.getSessionAttribute(ResponseSessionKey).asInstanceOf[ResponseWithParameters])

  private def storeResponseWithParameters(responseWithParameters: ResponseWithParameters)(implicit ctx: EmbeddingContext) =
    ctx.setSessionAttribute(ResponseSessionKey, responseWithParameters)

  private def clearResponseWithParameters()(implicit ctx: EmbeddingContext) =
    ctx.removeSessionAttribute(ResponseSessionKey)
}
object BufferedPortlet {
// URL parameter carrying the path to process inside the portlet.
val PathParameter = PathParameterName
// URL parameter carrying the simulated HTTP method (set to "post" after a buffered action).
val MethodParameter = "orbeon.method"
// Session key under which the buffered action response is stored between action and render.
val ResponseSessionKey = "org.orbeon.oxf.response"
// Convert to immutable String → List[String] so that map equality works as expected.
// NOTE: use dotted calls; the original's trailing postfix `toMap` relies on the
// discouraged postfix-operator syntax.
def toScalaMap(m: ju.Map[String, Array[String]]): Map[String, List[String]] =
  m.asScala.map { case (k, v) ⇒ k → v.toList }.toMap
// Convert back an immutable String → List[String] to a Java String → Array[String] map.
// NOTE: dotted `.asJava` instead of the original's postfix-operator call.
def toJavaMap(m: Map[String, List[String]]): ju.Map[String, Array[String]] =
  m.map { case (k, v) ⇒ k → v.toArray }.asJava
// Immutable portletNamespace → idNamespace information stored in the portlet context
private object NamespaceMappings {
  // Short ids are "o0", "o1", … — one per distinct portal namespace seen.
  private def newId(seq: Int) = "o" + seq
  // Initial mappings: the given portal namespace maps to the first short id ("o0").
  def apply(portletNamespace: String): NamespaceMappings = NamespaceMappings(0, Map(portletNamespace → newId(0)))
}
// `last` is the last short-id sequence number handed out; `map` holds portalNamespace → shortId.
private case class NamespaceMappings(private val last: Int, map: Map[String, String]) {
  // Return a new mappings value with `key` bound to the next short id (immutable update).
  def next(key: String) = NamespaceMappings(last + 1, map + (key → NamespaceMappings.newId(last + 1)))
}
// Return the short id namespace for this portlet. The idea of this is that portal-provided namespaces are large,
// and since the XForms engine produces lots of ids, the DOM size increases a lot. All we want really are unique ids
// in the DOM, so we make up our own short prefixes, hope they don't conflict within anything, and we map the portal
// namespaces to our short ids.
def shortIdNamespace(portletNamespace: String, portletContext: PortletContext) =
  // PLT.10.1: "There is one instance of the PortletContext interface associated with each portlet application
  // deployed into a portlet container." In order for multiple Orbeon portlets to not walk on each other, we
  // synchronize.
  portletContext.synchronized {

    val IdNamespacesSessionKey = "org.orbeon.oxf.id-namespaces"

    // Get or create NamespaceMappings
    // (The mappings live as a context attribute; absent on first call.)
    val mappings = Option(portletContext.getAttribute(IdNamespacesSessionKey).asInstanceOf[NamespaceMappings]) getOrElse {
      val newMappings = NamespaceMappings(portletNamespace)
      portletContext.setAttribute(IdNamespacesSessionKey, newMappings)
      newMappings
    }

    // Get or create specific mapping portletNamespace → idNamespace
    // (Second argument to getOrElse is by-name: the new mapping is created and stored only on a miss.)
    mappings.map.getOrElse(portletNamespace, {
      val newMappings = mappings.next(portletNamespace)
      portletContext.setAttribute(IdNamespacesSessionKey, newMappings)
      newMappings.map(portletNamespace)
    })
  }
} | joansmith/orbeon-forms | src/main/scala/org/orbeon/oxf/portlet/BufferedPortlet.scala | Scala | lgpl-2.1 | 10,009 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.yarn
import java.util.Collections
import java.util.concurrent._
import java.util.concurrent.atomic.AtomicInteger
import java.util.regex.Pattern
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, Queue}
import scala.util.control.NonFatal
import org.apache.hadoop.yarn.api.records._
import org.apache.hadoop.yarn.client.api.AMRMClient
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.spark.{SecurityManager, SparkConf, SparkException}
import org.apache.spark.deploy.yarn.YarnSparkHadoopUtil._
import org.apache.spark.deploy.yarn.config._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.rpc.{RpcCallContext, RpcEndpointRef}
import org.apache.spark.scheduler.{ExecutorExited, ExecutorLossReason}
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages.RemoveExecutor
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages.RetrieveLastAllocatedExecutorId
import org.apache.spark.scheduler.cluster.SchedulerBackendUtils
import org.apache.spark.util.{Clock, SystemClock, ThreadUtils}
/**
* YarnAllocator is charged with requesting containers from the YARN ResourceManager and deciding
* what to do with containers when YARN fulfills these requests.
*
* This class makes use of YARN's AMRMClient APIs. We interact with the AMRMClient in three ways:
* * Making our resource needs known, which updates local bookkeeping about containers requested.
* * Calling "allocate", which syncs our local container requests with the RM, and returns any
* containers that YARN has granted to us. This also functions as a heartbeat.
* * Processing the containers granted to us to possibly launch executors inside of them.
*
* The public methods of this class are thread-safe. All methods that mutate state are
* synchronized.
*/
private[yarn] class YarnAllocator(
driverUrl: String,
driverRef: RpcEndpointRef,
conf: YarnConfiguration,
sparkConf: SparkConf,
amClient: AMRMClient[ContainerRequest],
appAttemptId: ApplicationAttemptId,
securityMgr: SecurityManager,
localResources: Map[String, LocalResource],
resolver: SparkRackResolver)
extends Logging {
import YarnAllocator._
// Visible for testing.
// Bookkeeping of granted containers: host → containers and the reverse mapping.
val allocatedHostToContainersMap = new HashMap[String, collection.mutable.Set[ContainerId]]
val allocatedContainerToHostMap = new HashMap[ContainerId, String]

// Containers that we no longer care about. We've either already told the RM to release them or
// will on the next heartbeat. Containers get removed from this map after the RM tells us they've
// completed.
private val releasedContainers = Collections.newSetFromMap[ContainerId](
  new ConcurrentHashMap[ContainerId, java.lang.Boolean])

// Counters for running and in-flight (launching) executors; atomics since they are read
// outside the allocator lock (e.g. getNumExecutorsRunning).
private val numExecutorsRunning = new AtomicInteger(0)

private val numExecutorsStarting = new AtomicInteger(0)

/**
 * Used to generate a unique ID per executor
 *
 * Init `executorIdCounter`. when AM restart, `executorIdCounter` will reset to 0. Then
 * the id of new executor will start from 1, this will conflict with the executor has
 * already created before. So, we should initialize the `executorIdCounter` by getting
 * the max executorId from driver.
 *
 * This executor-id conflict can only happen in yarn client mode. For more details,
 * see the jira below.
 *
 * @see SPARK-12864
 */
private var executorIdCounter: Int =
  driverRef.askSync[Int](RetrieveLastAllocatedExecutorId)

// Queue to store the timestamp of failed executors
private val failedExecutorsTimeStamps = new Queue[Long]()

// Clock used to timestamp executor failures; replaceable via setClock() for tests.
private var clock: Clock = new SystemClock

// Validity window (ms) for counting executor failures; non-positive disables expiry
// (see getNumExecutorsFailed's eviction condition).
private val executorFailuresValidityInterval =
  sparkConf.get(EXECUTOR_ATTEMPT_FAILURE_VALIDITY_INTERVAL_MS).getOrElse(-1L)

// Desired total number of executors; updated by requestTotalExecutorsWithPreferredLocalities.
@volatile private var targetNumExecutors =
  SchedulerBackendUtils.getInitialTargetExecutorNumber(sparkConf)

// Blacklist last synced to the ResourceManager; used to compute additions/removals on update.
private var currentNodeBlacklist = Set.empty[String]

// Executor loss reason requests that are pending - maps from executor ID for inquiry to a
// list of requesters that should be responded to once we find out why the given executor
// was lost.
private val pendingLossReasonRequests = new HashMap[String, mutable.Buffer[RpcCallContext]]

// Maintain loss reasons for already released executors, it will be added when executor loss
// reason is got from AM-RM call, and be removed after querying this loss reason.
private val releasedExecutorLossReasons = new HashMap[String, ExecutorLossReason]

// Keep track of which container is running which executor to remove the executors later
// Visible for testing.
private[yarn] val executorIdToContainer = new HashMap[String, Container]

// Count of container releases we did not initiate ourselves.
private var numUnexpectedContainerRelease = 0L
private val containerIdToExecutorId = new HashMap[ContainerId, String]

// Executor memory in MB.
protected val executorMemory = sparkConf.get(EXECUTOR_MEMORY).toInt
// Additional memory overhead.
protected val memoryOverhead: Int = sparkConf.get(EXECUTOR_MEMORY_OVERHEAD).getOrElse(
  math.max((MEMORY_OVERHEAD_FACTOR * executorMemory).toInt, MEMORY_OVERHEAD_MIN)).toInt
// Number of cores per executor.
protected val executorCores = sparkConf.get(EXECUTOR_CORES)
// Resource capability requested for each executors
private[yarn] val resource = Resource.newInstance(executorMemory + memoryOverhead, executorCores)

private val launcherPool = ThreadUtils.newDaemonCachedThreadPool(
  "ContainerLauncher", sparkConf.get(CONTAINER_LAUNCH_MAX_THREADS))

// For testing
private val launchContainers = sparkConf.getBoolean("spark.yarn.launchContainers", true)

// Optional YARN node-label expression applied to executor container requests.
private val labelExpression = sparkConf.get(EXECUTOR_NODE_LABEL_EXPRESSION)

// A map to store preferred hostname and possible task numbers running on it.
private var hostToLocalTaskCounts: Map[String, Int] = Map.empty

// Number of tasks that have locality preferences in active stages
private var numLocalityAwareTasks: Int = 0

// A container placement strategy based on pending tasks' locality preference
private[yarn] val containerPlacementStrategy =
  new LocalityPreferredContainerPlacementStrategy(sparkConf, conf, resource, resolver)
/**
 * Use a different clock for YarnAllocator. This is mainly used for testing.
 * NOTE(review): not synchronized — presumably called before the allocator is shared
 * across threads; confirm with callers.
 */
def setClock(newClock: Clock): Unit = {
  clock = newClock
}
/** Number of executors currently running (atomic read; safe without the allocator lock). */
def getNumExecutorsRunning: Int = numExecutorsRunning.get()
/**
 * Number of executor failures within the configured validity window.
 * Entries older than `executorFailuresValidityInterval` are evicted from the queue head
 * (presumably timestamps are enqueued in order, so head-only eviction suffices — confirm
 * at the enqueue site). A non-positive interval means failures never expire.
 */
def getNumExecutorsFailed: Int = synchronized {
  val endTime = clock.getTimeMillis()
  while (executorFailuresValidityInterval > 0
    && failedExecutorsTimeStamps.nonEmpty
    && failedExecutorsTimeStamps.head < endTime - executorFailuresValidityInterval) {
    failedExecutorsTimeStamps.dequeue()
  }
  failedExecutorsTimeStamps.size
}
/**
 * A sequence of pending container requests that have not yet been fulfilled.
 * Uses ANY_HOST so requests at every locality level are included.
 */
def getPendingAllocate: Seq[ContainerRequest] = getPendingAtLocation(ANY_HOST)
/**
 * A sequence of pending container requests at the given location that have not yet been
 * fulfilled.
 */
private def getPendingAtLocation(location: String): Seq[ContainerRequest] = {
  // AMRMClient returns a collection of request collections; flatten it into a single Seq.
  amClient.getMatchingRequests(RM_REQUEST_PRIORITY, location, resource).asScala
    .flatMap(_.asScala)
    .toSeq
}
/**
 * Request as many executors from the ResourceManager as needed to reach the desired total. If
 * the requested total is smaller than the current number of running executors, no executors will
 * be killed.
 * @param requestedTotal total number of containers requested
 * @param localityAwareTasks number of locality aware tasks to be used as container placement hint
 * @param hostToLocalTaskCount a map of preferred hostname to possible task counts to be used as
 *                             container placement hint.
 * @param nodeBlacklist a set of blacklisted nodes, which is passed in to avoid allocating new
 *                      containers on them. It will be used to update the application master's
 *                      blacklist.
 * @return Whether the new requested total is different than the old value.
 */
def requestTotalExecutorsWithPreferredLocalities(
    requestedTotal: Int,
    localityAwareTasks: Int,
    hostToLocalTaskCount: Map[String, Int],
    nodeBlacklist: Set[String]): Boolean = synchronized {
  this.numLocalityAwareTasks = localityAwareTasks
  this.hostToLocalTaskCounts = hostToLocalTaskCount

  if (requestedTotal != targetNumExecutors) {
    logInfo(s"Driver requested a total number of $requestedTotal executor(s).")
    targetNumExecutors = requestedTotal

    // Update blacklist information for this application on the YARN ResourceManager,
    // in order to avoid allocating new Containers on the problematic nodes.
    // Only the delta against the previously-synced blacklist is sent.
    val blacklistAdditions = nodeBlacklist -- currentNodeBlacklist
    val blacklistRemovals = currentNodeBlacklist -- nodeBlacklist
    if (blacklistAdditions.nonEmpty) {
      logInfo(s"adding nodes to YARN application master's blacklist: $blacklistAdditions")
    }
    if (blacklistRemovals.nonEmpty) {
      logInfo(s"removing nodes from YARN application master's blacklist: $blacklistRemovals")
    }
    amClient.updateBlacklist(blacklistAdditions.toList.asJava, blacklistRemovals.toList.asJava)
    currentNodeBlacklist = nodeBlacklist
    true
  } else {
    false
  }
}
/**
 * Request that the ResourceManager release the container running the specified executor.
 * Unknown executor ids are logged and ignored.
 */
def killExecutor(executorId: String): Unit = synchronized {
  // Single map lookup with a pattern match instead of contains + Option.get.
  executorIdToContainer.get(executorId) match {
    case Some(container) =>
      internalReleaseContainer(container)
      numExecutorsRunning.decrementAndGet()
    case None =>
      logWarning(s"Attempted to kill unknown executor $executorId!")
  }
}
/**
 * Request resources such that, if YARN gives us all we ask for, we'll have a number of containers
 * equal to maxExecutors.
 *
 * Deal with any containers YARN has granted to us by possibly launching executors in them.
 *
 * This must be synchronized because variables read in this method are mutated by other methods.
 */
def allocateResources(): Unit = synchronized {
  updateResourceRequests()

  // Arbitrary progress value reported to YARN; the allocate call doubles as the AM heartbeat.
  val progressIndicator = 0.1f
  // Poll the ResourceManager. This doubles as a heartbeat if there are no pending container
  // requests.
  val allocateResponse = amClient.allocate(progressIndicator)

  val allocatedContainers = allocateResponse.getAllocatedContainers()

  if (allocatedContainers.size > 0) {
    logDebug(("Allocated containers: %d. Current executor count: %d. " +
      "Launching executor count: %d. Cluster resources: %s.")
      .format(
        allocatedContainers.size,
        numExecutorsRunning.get,
        numExecutorsStarting.get,
        allocateResponse.getAvailableResources))

    handleAllocatedContainers(allocatedContainers.asScala)
  }

  val completedContainers = allocateResponse.getCompletedContainersStatuses()
  if (completedContainers.size > 0) {
    logDebug("Completed %d containers".format(completedContainers.size))
    processCompletedContainers(completedContainers.asScala)
    logDebug("Finished processing %d completed containers. Current running executor count: %d."
      .format(completedContainers.size, numExecutorsRunning.get))
  }
}
/**
 * Update the set of container requests that we will sync with the RM based on the number of
 * executors we have currently running and our target number of executors.
 *
 * Visible for testing.
 */
def updateResourceRequests(): Unit = {
  val pendingAllocate = getPendingAllocate
  val numPendingAllocate = pendingAllocate.size
  // How many more containers we need beyond what is pending, starting, or running.
  // Negative means we have over-requested and should cancel some pending requests.
  val missing = targetNumExecutors - numPendingAllocate -
    numExecutorsStarting.get - numExecutorsRunning.get

  logDebug(s"Updating resource requests, target: $targetNumExecutors, " +
    s"pending: $numPendingAllocate, running: ${numExecutorsRunning.get}, " +
    s"executorsStarting: ${numExecutorsStarting.get}")

  if (missing > 0) {
    logInfo(s"Will request $missing executor container(s), each with " +
      s"${resource.getVirtualCores} core(s) and " +
      s"${resource.getMemory} MB memory (including $memoryOverhead MB of overhead)")

    // Split the pending container request into three groups: locality matched list, locality
    // unmatched list and non-locality list. Take the locality matched container request into
    // consideration of container placement, treat as allocated containers.
    // For locality unmatched and locality free container requests, cancel these container
    // requests, since required locality preference has been changed, recalculating using
    // container placement strategy.
    val (localRequests, staleRequests, anyHostRequests) = splitPendingAllocationsByLocality(
      hostToLocalTaskCounts, pendingAllocate)

    // cancel "stale" requests for locations that are no longer needed
    staleRequests.foreach { stale =>
      amClient.removeContainerRequest(stale)
    }
    val cancelledContainers = staleRequests.size
    if (cancelledContainers > 0) {
      logInfo(s"Canceled $cancelledContainers container request(s) (locality no longer needed)")
    }

    // consider the number of new containers and cancelled stale containers available
    val availableContainers = missing + cancelledContainers

    // to maximize locality, include requests with no locality preference that can be cancelled
    val potentialContainers = availableContainers + anyHostRequests.size

    // Ask the placement strategy where (nodes/racks) the new containers should go.
    val containerLocalityPreferences = containerPlacementStrategy.localityOfRequestedContainers(
      potentialContainers, numLocalityAwareTasks, hostToLocalTaskCounts,
      allocatedHostToContainersMap, localRequests)

    val newLocalityRequests = new mutable.ArrayBuffer[ContainerRequest]
    containerLocalityPreferences.foreach {
      case ContainerLocalityPreferences(nodes, racks) if nodes != null =>
        newLocalityRequests += createContainerRequest(resource, nodes, racks)
      case _ =>
    }

    if (availableContainers >= newLocalityRequests.size) {
      // more containers are available than needed for locality, fill in requests for any host
      for (i <- 0 until (availableContainers - newLocalityRequests.size)) {
        newLocalityRequests += createContainerRequest(resource, null, null)
      }
    } else {
      val numToCancel = newLocalityRequests.size - availableContainers
      // cancel some requests without locality preferences to schedule more local containers
      anyHostRequests.slice(0, numToCancel).foreach { nonLocal =>
        amClient.removeContainerRequest(nonLocal)
      }
      if (numToCancel > 0) {
        logInfo(s"Canceled $numToCancel unlocalized container requests to resubmit with locality")
      }
    }

    // Submit the recomputed set of requests to the RM.
    newLocalityRequests.foreach { request =>
      amClient.addContainerRequest(request)
    }

    if (log.isInfoEnabled()) {
      val (localized, anyHost) = newLocalityRequests.partition(_.getNodes() != null)
      if (anyHost.nonEmpty) {
        logInfo(s"Submitted ${anyHost.size} unlocalized container requests.")
      }
      localized.foreach { request =>
        logInfo(s"Submitted container request for host ${hostStr(request)}.")
      }
    }
  } else if (numPendingAllocate > 0 && missing < 0) {
    // Over-requested: cancel as many pending ANY_HOST requests as needed.
    val numToCancel = math.min(numPendingAllocate, -missing)
    logInfo(s"Canceling requests for $numToCancel executor container(s) to have a new desired " +
      s"total $targetNumExecutors executors.")

    val matchingRequests = amClient.getMatchingRequests(RM_REQUEST_PRIORITY, ANY_HOST, resource)
    if (!matchingRequests.isEmpty) {
      matchingRequests.iterator().next().asScala
        .take(numToCancel).foreach(amClient.removeContainerRequest)
    } else {
      logWarning("Expected to find pending requests, but found none.")
    }
  }
}
/** Human-readable node list of a container request, or "Any" when no nodes were requested. */
private def hostStr(request: ContainerRequest): String =
  Option(request.getNodes)
    .map(_.asScala.mkString(","))
    .getOrElse("Any")
/**
 * Creates a container request, handling the reflection required to use YARN features that were
 * added in recent versions.
 *
 * @param resource resources (memory/cores) to ask for in the container.
 * @param nodes preferred nodes, or null for no node preference.
 * @param racks preferred racks, or null for no rack preference.
 */
private def createContainerRequest(
    resource: Resource,
    nodes: Array[String],
    racks: Array[String]): ContainerRequest = {
  // relaxLocality = true so YARN may fall back to other nodes/racks if needed;
  // labelExpression restricts placement to labeled nodes when configured.
  new ContainerRequest(resource, nodes, racks, RM_REQUEST_PRIORITY, true, labelExpression.orNull)
}
/**
 * Handle containers granted by the RM by launching executors on them.
 *
 * Due to the way the YARN allocation protocol works, certain healthy race conditions can result
 * in YARN granting containers that we no longer need. In this case, we release them.
 *
 * Visible for testing.
 */
def handleAllocatedContainers(allocatedContainers: Seq[Container]): Unit = {
  val containersToUse = new ArrayBuffer[Container](allocatedContainers.size)

  // Match incoming requests by host
  val remainingAfterHostMatches = new ArrayBuffer[Container]
  for (allocatedContainer <- allocatedContainers) {
    matchContainerToRequest(allocatedContainer, allocatedContainer.getNodeId.getHost,
      containersToUse, remainingAfterHostMatches)
  }

  // Match remaining by rack
  val remainingAfterRackMatches = new ArrayBuffer[Container]
  for (allocatedContainer <- remainingAfterHostMatches) {
    val rack = resolver.resolve(conf, allocatedContainer.getNodeId.getHost)
    matchContainerToRequest(allocatedContainer, rack, containersToUse,
      remainingAfterRackMatches)
  }

  // Assign remaining that are neither node-local nor rack-local
  val remainingAfterOffRackMatches = new ArrayBuffer[Container]
  for (allocatedContainer <- remainingAfterRackMatches) {
    matchContainerToRequest(allocatedContainer, ANY_HOST, containersToUse,
      remainingAfterOffRackMatches)
  }

  // Anything still unmatched corresponds to a request we no longer have; give it back.
  if (!remainingAfterOffRackMatches.isEmpty) {
    logDebug(s"Releasing ${remainingAfterOffRackMatches.size} unneeded containers that were " +
      s"allocated to us")
    for (container <- remainingAfterOffRackMatches) {
      internalReleaseContainer(container)
    }
  }

  runAllocatedContainers(containersToUse)

  logInfo("Received %d containers from YARN, launching executors on %d of them."
    .format(allocatedContainers.size, containersToUse.size))
}
/**
 * Looks for requests for the given location that match the given container allocation. If it
 * finds one, removes the request so that it won't be submitted again. Places the container into
 * containersToUse or remaining.
 *
 * @param allocatedContainer container that was given to us by YARN
 * @param location resource name, either a node, rack, or *
 * @param containersToUse list of containers that will be used
 * @param remaining list of containers that will not be used
 */
private def matchContainerToRequest(
    allocatedContainer: Container,
    location: String,
    containersToUse: ArrayBuffer[Container],
    remaining: ArrayBuffer[Container]): Unit = {
  // SPARK-6050: certain Yarn configurations return a virtual core count that doesn't match the
  // request; for example, capacity scheduler + DefaultResourceCalculator. So match on requested
  // memory, but use the asked vcore count for matching, effectively disabling matching on vcore
  // count.
  val matchingResource = Resource.newInstance(allocatedContainer.getResource.getMemory,
    resource.getVirtualCores)
  val matchingRequests = amClient.getMatchingRequests(allocatedContainer.getPriority, location,
    matchingResource)

  // Match the allocation to a request
  if (!matchingRequests.isEmpty) {
    // Consume the first matching request so it is not re-submitted on the next heartbeat.
    val containerRequest = matchingRequests.get(0).iterator.next
    amClient.removeContainerRequest(containerRequest)
    containersToUse += allocatedContainer
  } else {
    remaining += allocatedContainer
  }
}
/**
 * Launches executors in the allocated containers.
 *
 * For each container this assigns a fresh executor ID and, unless running in test mode,
 * hands the actual launch to `launcherPool` so container setup does not block the
 * allocator thread.
 */
private def runAllocatedContainers(containersToUse: ArrayBuffer[Container]): Unit = {
  for (container <- containersToUse) {
    executorIdCounter += 1
    val executorHostname = container.getNodeId.getHost
    val containerId = container.getId
    val executorId = executorIdCounter.toString
    // YARN should never grant less memory than we asked for.
    assert(container.getResource.getMemory >= resource.getMemory)
    logInfo(s"Launching container $containerId on host $executorHostname " +
      s"for executor with ID $executorId")

    // Records the executor/container bookkeeping once the executor has actually
    // started. Synchronized because these maps are read by other allocator methods.
    def updateInternalState(): Unit = synchronized {
      numExecutorsRunning.incrementAndGet()
      numExecutorsStarting.decrementAndGet()
      executorIdToContainer(executorId) = container
      containerIdToExecutorId(container.getId) = executorId

      val containerSet = allocatedHostToContainersMap.getOrElseUpdate(executorHostname,
        new HashSet[ContainerId])
      containerSet += containerId
      allocatedContainerToHostMap.put(containerId, executorHostname)
    }

    if (numExecutorsRunning.get < targetNumExecutors) {
      numExecutorsStarting.incrementAndGet()
      if (launchContainers) {
        launcherPool.execute(new Runnable {
          override def run(): Unit = {
            try {
              new ExecutorRunnable(
                Some(container),
                conf,
                sparkConf,
                driverUrl,
                executorId,
                executorHostname,
                executorMemory,
                executorCores,
                appAttemptId.getApplicationId.toString,
                securityMgr,
                localResources
              ).run()
              updateInternalState()
            } catch {
              case e: Throwable =>
                // Launch failed: roll back the "starting" count before deciding
                // whether the error is recoverable.
                numExecutorsStarting.decrementAndGet()
                if (NonFatal(e)) {
                  logError(s"Failed to launch executor $executorId on container $containerId", e)
                  // Assigned container should be released immediately
                  // to avoid unnecessary resource occupation.
                  amClient.releaseAssignedContainer(containerId)
                } else {
                  throw e
                }
            }
          }
        })
      } else {
        // For test only
        updateInternalState()
      }
    } else {
      logInfo(("Skip launching executorRunnable as running executors count: %d " +
        "reached target executors count: %d.").format(
        numExecutorsRunning.get, targetNumExecutors))
    }
  }
}
// Processes container-completion statuses reported by the RM: classifies each exit,
// updates the allocator's bookkeeping maps, and notifies the driver of unexpected
// executor loss. Visible for testing.
private[yarn] def processCompletedContainers(completedContainers: Seq[ContainerStatus]): Unit = {
  for (completedContainer <- completedContainers) {
    val containerId = completedContainer.getContainerId
    // True when we asked YARN to release this container ourselves (see
    // internalReleaseContainer); removal also stops tracking it as released.
    val alreadyReleased = releasedContainers.remove(containerId)
    val hostOpt = allocatedContainerToHostMap.get(containerId)
    val onHostStr = hostOpt.map(host => s" on host: $host").getOrElse("")
    val exitReason = if (!alreadyReleased) {
      // Decrement the number of executors running. The next iteration of
      // the ApplicationMaster's reporting thread will take care of allocating.
      numExecutorsRunning.decrementAndGet()
      logInfo("Completed container %s%s (state: %s, exit status: %s)".format(
        containerId,
        onHostStr,
        completedContainer.getState,
        completedContainer.getExitStatus))
      // Hadoop 2.2.X added a ContainerExitStatus we should switch to use
      // there are some exit status' we shouldn't necessarily count against us, but for
      // now I think its ok as none of the containers are expected to exit.
      val exitStatus = completedContainer.getExitStatus
      val (exitCausedByApp, containerExitReason) = exitStatus match {
        case ContainerExitStatus.SUCCESS =>
          (false, s"Executor for container $containerId exited because of a YARN event (e.g., " +
            "pre-emption) and not because of an error in the running job.")
        case ContainerExitStatus.PREEMPTED =>
          // Preemption is not the fault of the running tasks, since YARN preempts containers
          // merely to do resource sharing, and tasks that fail due to preempted executors could
          // just as easily finish on any other executor. See SPARK-8167.
          (false, s"Container ${containerId}${onHostStr} was preempted.")
        // Should probably still count memory exceeded exit codes towards task failures
        case VMEM_EXCEEDED_EXIT_CODE =>
          (true, memLimitExceededLogMessage(
            completedContainer.getDiagnostics,
            VMEM_EXCEEDED_PATTERN))
        case PMEM_EXCEEDED_EXIT_CODE =>
          (true, memLimitExceededLogMessage(
            completedContainer.getDiagnostics,
            PMEM_EXCEEDED_PATTERN))
        case _ =>
          // Enqueue the timestamp of failed executor
          failedExecutorsTimeStamps.enqueue(clock.getTimeMillis())
          (true, "Container marked as failed: " + containerId + onHostStr +
            ". Exit status: " + completedContainer.getExitStatus +
            ". Diagnostics: " + completedContainer.getDiagnostics)
      }
      if (exitCausedByApp) {
        logWarning(containerExitReason)
      } else {
        logInfo(containerExitReason)
      }
      ExecutorExited(exitStatus, exitCausedByApp, containerExitReason)
    } else {
      // If we have already released this container, then it must mean
      // that the driver has explicitly requested it to be killed
      ExecutorExited(completedContainer.getExitStatus, exitCausedByApp = false,
        s"Container $containerId exited from explicit termination request.")
    }

    // Drop the container from the per-host tracking structures.
    for {
      host <- hostOpt
      containerSet <- allocatedHostToContainersMap.get(host)
    } {
      containerSet.remove(containerId)
      if (containerSet.isEmpty) {
        allocatedHostToContainersMap.remove(host)
      } else {
        allocatedHostToContainersMap.update(host, containerSet)
      }

      allocatedContainerToHostMap.remove(containerId)
    }

    containerIdToExecutorId.remove(containerId).foreach { eid =>
      executorIdToContainer.remove(eid)
      pendingLossReasonRequests.remove(eid) match {
        case Some(pendingRequests) =>
          // Notify application of executor loss reasons so it can decide whether it should abort
          pendingRequests.foreach(_.reply(exitReason))

        case None =>
          // We cannot find executor for pending reasons. This is because completed container
          // is processed before querying pending result. We should store it for later query.
          // This is usually happened when explicitly killing a container, the result will be
          // returned in one AM-RM communication. So query RPC will be later than this completed
          // container process.
          releasedExecutorLossReasons.put(eid, exitReason)
      }
      if (!alreadyReleased) {
        // The executor could have gone away (like no route to host, node failure, etc)
        // Notify backend about the failure of the executor
        numUnexpectedContainerRelease += 1
        driverRef.send(RemoveExecutor(eid, exitReason))
      }
    }
  }
}
/**
 * Register that some RpcCallContext has asked the AM why the executor was lost. Note that
 * we can only find the loss reason to send back in the next call to allocateResources().
 *
 * @param eid executor ID whose loss reason is being queried.
 * @param context RPC context to reply to once the reason is known.
 */
private[yarn] def enqueueGetLossReasonRequest(
    eid: String,
    context: RpcCallContext): Unit = synchronized {
  if (executorIdToContainer.contains(eid)) {
    // Executor still tracked: park the request until its container completes.
    pendingLossReasonRequests
      .getOrElseUpdate(eid, new ArrayBuffer[RpcCallContext]) += context
  } else if (releasedExecutorLossReasons.contains(eid)) {
    // Executor is already released explicitly before getting the loss reason, so directly send
    // the pre-stored lost reason
    context.reply(releasedExecutorLossReasons.remove(eid).get)
  } else {
    logWarning(s"Tried to get the loss reason for non-existent executor $eid")
    context.sendFailure(
      new SparkException(s"Fail to find loss reason for non-existent executor $eid"))
  }
}
/** Record that we are releasing this container ourselves, then ask the RM to release it. */
private def internalReleaseContainer(container: Container): Unit = {
  val containerId = container.getId()
  releasedContainers.add(containerId)
  amClient.releaseAssignedContainer(containerId)
}
// Number of containers YARN reported as completed that we had not released
// ourselves (i.e. unexpected executor loss). Visible for testing.
private[yarn] def getNumUnexpectedContainerRelease = numUnexpectedContainerRelease

// Number of executors whose loss reason is still awaited by the driver. Visible for testing.
private[yarn] def getNumPendingLossReasonRequests: Int = synchronized {
  pendingLossReasonRequests.size
}
/**
 * Split the pending container requests into 3 groups based on current localities of pending
 * tasks.
 * @param hostToLocalTaskCount a map of preferred hostname to possible task counts to be used as
 *                             container placement hint.
 * @param pendingAllocations A sequence of pending allocation container request.
 * @return A tuple of 3 sequences, first is a sequence of locality matched container
 *         requests, second is a sequence of locality unmatched container requests, and third is a
 *         sequence of locality free container requests.
 */
private def splitPendingAllocationsByLocality(
    hostToLocalTaskCount: Map[String, Int],
    pendingAllocations: Seq[ContainerRequest]
  ): (Seq[ContainerRequest], Seq[ContainerRequest], Seq[ContainerRequest]) = {
  val preferredHosts = hostToLocalTaskCount.keySet
  // Requests carrying no node list have no locality preference at all ("locality free").
  val (withNodes, localityFree) = pendingAllocations.partition(_.getNodes != null)
  // A request is "matched" when at least one of its preferred nodes is still preferred.
  val (localityMatched, localityUnMatched) =
    withNodes.partition(_.getNodes.asScala.exists(preferredHosts.contains))
  (localityMatched, localityUnMatched, localityFree)
}
}
private object YarnAllocator {
  // Textual form of a memory size in YARN diagnostics, e.g. "2.1 GB".
  val MEM_REGEX = "[0-9.]+ [KMG]B"
  // Diagnostic text YARN emits when a container exceeds its physical memory limit.
  val PMEM_EXCEEDED_PATTERN =
    Pattern.compile(s"$MEM_REGEX of $MEM_REGEX physical memory used")
  // Diagnostic text YARN emits when a container exceeds its virtual memory limit.
  val VMEM_EXCEEDED_PATTERN =
    Pattern.compile(s"$MEM_REGEX of $MEM_REGEX virtual memory used")
  val VMEM_EXCEEDED_EXIT_CODE = -103
  val PMEM_EXCEEDED_EXIT_CODE = -104

  /**
   * Builds the log message for a container killed for exceeding memory limits,
   * embedding the matched usage figures from the diagnostics when present.
   */
  def memLimitExceededLogMessage(diagnostics: String, pattern: Pattern): String = {
    val matcher = pattern.matcher(diagnostics)
    val usageDetail = if (matcher.find()) s" ${matcher.group()}." else ""
    "Container killed by YARN for exceeding memory limits." + usageDetail +
      " Consider boosting spark.yarn.executor.memoryOverhead."
  }
}
| ron8hu/spark | resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala | Scala | apache-2.0 | 32,184 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.tools
import joptsimple.OptionParser
import kafka.utils.{Utils, CommandLineUtils, Logging}
import kafka.producer.{KeyedMessage, ProducerConfig, Producer}
import scala.collection.JavaConversions._
import java.util.concurrent.CountDownLatch
import java.nio.ByteBuffer
import kafka.consumer._
import kafka.serializer._
import collection.mutable.ListBuffer
import kafka.tools.KafkaMigrationTool.{ProducerThread, ProducerDataChannel}
import kafka.javaapi
/**
 * Command-line tool that mirrors topics from one or more source Kafka clusters into a
 * target cluster: consumer threads pull messages via topic whitelist/blacklist filters
 * and forward them to embedded producers (keyed messages go straight to a hash-picked
 * producer; unkeyed messages go through a shared data channel).
 */
object MirrorMaker extends Logging {

  // Mutable because they are created in main() but also read by the JVM shutdown hook
  // via cleanShutdown(); null until main() has initialized them.
  private var connectors: Seq[ZookeeperConsumerConnector] = null
  private var consumerThreads: Seq[MirrorMakerThread] = null
  private var producerThreads: ListBuffer[ProducerThread] = null

  def main(args: Array[String]) {

    info ("Starting mirror maker")
    val parser = new OptionParser

    val consumerConfigOpt = parser.accepts("consumer.config",
      "Consumer config to consume from a source cluster. " +
      "You may specify multiple of these.")
      .withRequiredArg()
      .describedAs("config file")
      .ofType(classOf[String])

    val producerConfigOpt = parser.accepts("producer.config",
      "Embedded producer config.")
      .withRequiredArg()
      .describedAs("config file")
      .ofType(classOf[String])

    val numProducersOpt = parser.accepts("num.producers",
      "Number of producer instances")
      .withRequiredArg()
      .describedAs("Number of producers")
      .ofType(classOf[java.lang.Integer])
      .defaultsTo(1)

    val numStreamsOpt = parser.accepts("num.streams",
      "Number of consumption streams.")
      .withRequiredArg()
      .describedAs("Number of threads")
      .ofType(classOf[java.lang.Integer])
      .defaultsTo(1)

    val bufferSizeOpt = parser.accepts("queue.size", "Number of messages that are buffered between the consumer and producer")
      .withRequiredArg()
      .describedAs("Queue size in terms of number of messages")
      .ofType(classOf[java.lang.Integer])
      .defaultsTo(10000);

    val whitelistOpt = parser.accepts("whitelist",
      "Whitelist of topics to mirror.")
      .withRequiredArg()
      .describedAs("Java regex (String)")
      .ofType(classOf[String])

    val blacklistOpt = parser.accepts("blacklist",
      "Blacklist of topics to mirror.")
      .withRequiredArg()
      .describedAs("Java regex (String)")
      .ofType(classOf[String])

    val helpOpt = parser.accepts("help", "Print this message.")

    val options = parser.parse(args : _*)

    if (options.has(helpOpt)) {
      parser.printHelpOn(System.out)
      System.exit(0)
    }

    CommandLineUtils.checkRequiredArgs(parser, options, consumerConfigOpt, producerConfigOpt)
    // Whitelist and blacklist are mutually exclusive; exactly one must be given.
    if (List(whitelistOpt, blacklistOpt).count(options.has) != 1) {
      println("Exactly one of whitelist or blacklist is required.")
      System.exit(1)
    }

    val numStreams = options.valueOf(numStreamsOpt)
    val bufferSize = options.valueOf(bufferSizeOpt).intValue()

    // One embedded producer per --num.producers. If the user did not configure a
    // partitioner, default to ByteArrayPartitioner so keyed messages hash consistently.
    val producers = (1 to options.valueOf(numProducersOpt).intValue()).map(_ => {
      val props = Utils.loadProps(options.valueOf(producerConfigOpt))
      val config = props.getProperty("partitioner.class") match {
        case null =>
          new ProducerConfig(props) {
            override val partitionerClass = "kafka.producer.ByteArrayPartitioner"
          }
        case pClass : String =>
          new ProducerConfig(props)
      }
      new Producer[Array[Byte], Array[Byte]](config)
    })

    // One consumer connector per --consumer.config file (i.e. per source cluster).
    connectors = options.valuesOf(consumerConfigOpt).toList
      .map(cfg => new ConsumerConfig(Utils.loadProps(cfg.toString)))
      .map(new ZookeeperConsumerConnector(_))

    val filterSpec = if (options.has(whitelistOpt))
      new Whitelist(options.valueOf(whitelistOpt))
    else
      new Blacklist(options.valueOf(blacklistOpt))

    var streams: Seq[KafkaStream[Array[Byte], Array[Byte]]] = Nil
    try {
      streams = connectors.map(_.createMessageStreamsByFilter(filterSpec, numStreams.intValue(), new DefaultDecoder(), new DefaultDecoder())).flatten
    } catch {
      case t: Throwable =>
        fatal("Unable to create stream - shutting down mirror maker.")
        connectors.foreach(_.shutdown)
    }

    val producerDataChannel = new ProducerDataChannel[KeyedMessage[Array[Byte], Array[Byte]]](bufferSize);
    consumerThreads = streams.zipWithIndex.map(streamAndIndex => new MirrorMakerThread(streamAndIndex._1, producerDataChannel, producers, streamAndIndex._2))
    producerThreads = new ListBuffer[ProducerThread]()

    // Ensure threads and connectors are stopped cleanly on JVM exit (e.g. SIGTERM).
    Runtime.getRuntime.addShutdownHook(new Thread() {
      override def run() {
        cleanShutdown()
      }
    })

    // create producer threads
    var i: Int = 1
    for(producer <- producers) {
      val producerThread: KafkaMigrationTool.ProducerThread = new KafkaMigrationTool.ProducerThread(producerDataChannel,
        new javaapi.producer.Producer[Array[Byte], Array[Byte]](producer), i)
      producerThreads += producerThread
      i += 1
    }

    consumerThreads.foreach(_.start)
    producerThreads.foreach(_.start)

    // in case the consumer threads hit a timeout/other exception
    consumerThreads.foreach(_.awaitShutdown)
    cleanShutdown()
  }

  // Shuts down connectors first (stops message intake), then waits for consumer and
  // producer threads to drain. Null checks guard against a shutdown hook firing
  // before main() finished initialization.
  def cleanShutdown() {
    if (connectors != null) connectors.foreach(_.shutdown)
    if (consumerThreads != null) consumerThreads.foreach(_.awaitShutdown)
    if (producerThreads != null) {
      producerThreads.foreach(_.shutdown)
      producerThreads.foreach(_.awaitShutdown)
    }
    info("Kafka mirror maker shutdown successfully")
  }

  /**
   * Consumer thread: iterates one Kafka stream and routes each message either to the
   * shared producer channel (unkeyed) or to a producer picked by key hash (keyed).
   */
  class MirrorMakerThread(stream: KafkaStream[Array[Byte], Array[Byte]],
                          producerDataChannel: ProducerDataChannel[KeyedMessage[Array[Byte], Array[Byte]]],
                          producers: Seq[Producer[Array[Byte], Array[Byte]]],
                          threadId: Int)
    extends Thread with Logging {

    private val shutdownLatch = new CountDownLatch(1)
    private val threadName = "mirrormaker-" + threadId
    this.logIdent = "[%s] ".format(threadName)

    this.setName(threadName)

    override def run() {
      info("Starting mirror maker thread " + threadName)
      try {
        // Blocks on the stream iterator until the connector is shut down.
        for (msgAndMetadata <- stream) {
          // If the key of the message is empty, put it into the universal channel
          // Otherwise use a pre-assigned producer to send the message
          if (msgAndMetadata.key == null) {
            trace("Send the non-keyed message the producer channel.")
            val pd = new KeyedMessage[Array[Byte], Array[Byte]](msgAndMetadata.topic, msgAndMetadata.message)
            producerDataChannel.sendRequest(pd)
          } else {
            // Same key always hashes to the same producer, preserving per-key ordering.
            val producerId = Utils.abs(java.util.Arrays.hashCode(msgAndMetadata.key)) % producers.size()
            trace("Send message with key %s to producer %d.".format(java.util.Arrays.toString(msgAndMetadata.key), producerId))
            val producer = producers(producerId)
            val pd = new KeyedMessage[Array[Byte], Array[Byte]](msgAndMetadata.topic, msgAndMetadata.key, msgAndMetadata.message)
            producer.send(pd)
          }
        }
      } catch {
        case e: Throwable =>
          fatal("Stream unexpectedly exited.", e)
      } finally {
        // Always release waiters in awaitShutdown(), even on abnormal exit.
        shutdownLatch.countDown()
        info("Stopped thread.")
      }
    }

    def awaitShutdown() {
      try {
        shutdownLatch.await()
      } catch {
        case e: InterruptedException => fatal("Shutdown of thread %s interrupted. This might leak data!".format(threadName))
      }
    }
  }
}
| unix1986/universe | tool/kafka-0.8.1.1-src/core/src/main/scala/kafka/tools/MirrorMaker.scala | Scala | bsd-2-clause | 8,359 |
package com.twitter.finagle.exp.mysql
import com.twitter.finagle.dispatch.GenSerialClientDispatcher
import com.twitter.finagle.exp.mysql.transport.{Buffer, BufferReader, Packet}
import com.twitter.finagle.transport.Transport
import com.twitter.finagle.{CancelledRequestException, Service, SimpleFilter, WriteException}
import com.twitter.util.{Closable, Future, Promise, Return, Time, Try, Throw}
import java.util.ArrayDeque
import scala.collection.JavaConverters._
/** An error reported by the MySQL server, carrying its error code and SQL state. */
case class ServerError(code: Short, sqlState: String, message: String)
  extends Exception(message)
/**
 * Signals that the client failed to decode the server's byte stream, i.e. client and
 * server are out of sync. This is unrecoverable and the connection must be closed.
 */
case class LostSyncException(underlying: Throwable)
  extends RuntimeException(underlying) {
  // Delegate to the wrapped cause so logs show the original decode failure.
  override def getMessage = underlying.getMessage
  override def getStackTrace = underlying.getStackTrace
}
/**
 * Caches statements that have been successfully prepared over the connection
 * managed by the underlying service (a ClientDispatcher). This decreases
 * the chances of leaking prepared statements and can simplify the
 * implementation of prepared statements in the presence of a connection pool.
 * The cache is capped at `max` and eldest elements are evicted.
 */
private[mysql] class PrepareCache(max: Int = 20) extends SimpleFilter[Request, Result] {
  // TODO: consider using a more suitable cache as an lru.
  // Head of the deque = most recently used; entries are (sql, pending/completed result).
  private[this] val lru = new ArrayDeque[(String, Future[Result])]()
  private[this] val iter = lru.asScala

  /**
   * Populate cache with unique prepare requests identified by their
   * sql queries. Note, access to `lru` is synchronized, however, because
   * the finagle default pool guarantees us exlusive access to a service
   * per dispatch, there should not be contention here.
   */
  def apply(req: Request, svc: Service[Request, Result]) = {
    // remove the eldest entry in the lru and
    // dispatch a CloseRequest for the corresponding
    // entry over `svc`.
    def removeEldest() = synchronized {
      if (lru.size > max) lru.peekLast match {
        case null => // ignore
        case eldest =>
          lru.remove(eldest)
          val (_, ok) = eldest
          // Close the evicted statement server-side so it doesn't leak.
          ok onSuccess {
            case r: PrepareOK => svc(CloseRequest(r.id))
            case _ => // ignore
          }
      }
    }

    req match {
      case PrepareRequest(sql) => synchronized {
        iter.find(_._1 == sql) match {
          // maintain access order.
          case Some(entry) =>
            lru.remove(entry)
            lru.push(entry)
            entry._2

          // dispatch prepare request and
          // populate cache preemptively.
          case None =>
            val ok = svc(req)
            val entry = (sql, ok)
            lru.push(entry)
            // On failure, drop the bad entry; on success, enforce the size cap.
            ok respond {
              case Throw(_) => synchronized { lru.remove(entry) }
              case Return(_) => removeEldest()
            }
        }
      }

      // Non-prepare requests pass straight through.
      case _ => svc(req)
    }
  }
}
object ClientDispatcher {
  private val cancelledRequestExc = new CancelledRequestException
  private val lostSyncExc = new LostSyncException(new Throwable)
  private val wrapWriteException: PartialFunction[Throwable, Future[Nothing]] = {
    case exc: Throwable => Future.exception(WriteException(exc))
  }
  // Result of reading a zero-length transmission: no packets and a synthetic EOF.
  private val emptyTx = (Nil, EOF(0: Short, 0: Short))

  /**
   * Creates a mysql client dispatcher with write-through caches for optimization.
   * @param trans A transport that reads a writes logical mysql packets.
   * @param handshake A function that is responsible for facilitating
   * the connection phase given a HandshakeInit.
   */
  def apply(
    trans: Transport[Packet, Packet],
    handshake: HandshakeInit => Try[HandshakeResponse]
  ): Service[Request, Result] = {
    new PrepareCache andThen new ClientDispatcher(trans, handshake)
  }

  /**
   * Wrap a Try[T] into a Future[T]. This is useful for
   * transforming decoded results into futures. Any Throw
   * is assumed to be a failure to decode and thus a synchronization
   * error (or corrupt data) between the client and server.
   */
  private def const[T](result: Try[T]): Future[T] =
    Future.const(result rescue { case exc => Throw(LostSyncException(exc)) })
}
/**
* A ClientDispatcher that implements the mysql client/server protocol.
* For a detailed exposition of the client/server protocol refer to:
* [[http://dev.mysql.com/doc/internals/en/client-server-protocol.html]]
*
* Note, the mysql protocol does not support any form of multiplexing so
* requests are dispatched serially and concurrent requests are queued.
*/
class ClientDispatcher(
trans: Transport[Packet, Packet],
handshake: HandshakeInit => Try[HandshakeResponse]
) extends GenSerialClientDispatcher[Request, Result, Packet, Packet](trans) {
import ClientDispatcher._
// Dispatch a request only after the connection phase has completed successfully.
override def apply(req: Request): Future[Result] =
  connPhase flatMap { _ =>
    super.apply(req)
  } onFailure {
    // a LostSyncException represents a fatal state between
    // the client / server. The error is unrecoverable
    // so we close the service.
    case e@LostSyncException(_) => close()
    case _ =>
  }
/**
 * Performs the connection phase. The phase should only be performed
 * once before any other exchange between the client/server. A failure
 * to handshake renders this service unusable.
 * [[http://dev.mysql.com/doc/internals/en/connection-phase.html]]
 */
private[this] val connPhase: Future[Result] =
  // Read the server's HandshakeInit, build our response via `handshake`,
  // and dispatch it; any failure closes the transport.
  trans.read() flatMap { packet =>
    const(HandshakeInit(packet)) flatMap { init =>
      const(handshake(init)) flatMap { req =>
        val rep = new Promise[Result]
        dispatch(req, rep)
        rep
      }
    }
  } onFailure { _ => close() }
/**
 * Returns a Future that represents the result of an exchange
 * between the client and server. An exchange does not necessarily entail
 * a single write and read operation. Thus, the result promise
 * is decoupled from the promise that signals a complete exchange.
 * This leaves room for implementing streaming results.
 *
 * @param req the request to write to the transport.
 * @param rep promise satisfied with the decoded server response.
 */
protected def dispatch(req: Request, rep: Promise[Result]): Future[Unit] =
  trans.write(req.toPacket) rescue {
    wrapWriteException
  } before {
    // `signal` completes when the full exchange is done, allowing the next
    // request to be dispatched; `rep` may complete independently of it.
    val signal = new Promise[Unit]
    if (req.cmd == Command.COM_STMT_CLOSE) {
      // synthesize COM_STMT_CLOSE response
      // (the server sends no reply for this command)
      signal.setDone()
      rep.updateIfEmpty(Return(CloseStatementOK))
      signal
    } else trans.read() flatMap { packet =>
      rep.become(decodePacket(packet, req.cmd, signal))
      signal
    }
  }
/**
 * Returns a Future[Result] representing the decoded
 * packet. Some packets represent the start of a longer
 * transmission. These packets are distinguished by
 * the command used to generate the transmission.
 *
 * @param packet The first packet in the result.
 * @param cmd The command byte used to generate the packet.
 * @param signal A future used to signal completion. When this
 * future is satisfied, subsequent requests can be dispatched.
 */
private[this] def decodePacket(
  packet: Packet,
  cmd: Byte,
  signal: Promise[Unit]
): Future[Result] = packet.body.headOption match {
  case Some(Packet.OkByte) if cmd == Command.COM_STMT_PREPARE =>
    // decode PrepareOk Result: A header packet potentially followed
    // by two transmissions that contain parameter and column
    // information, respectively.
    val result = for {
      ok <- const(PrepareOK(packet))
      (seq1, _) <- readTx(ok.numOfParams)
      (seq2, _) <- readTx(ok.numOfCols)
      ps <- Future.collect(seq1 map { p => const(Field(p)) })
      cs <- Future.collect(seq2 map { p => const(Field(p)) })
    } yield ok.copy(params = ps, columns = cs)

    result ensure signal.setDone()

  // decode OK Result
  case Some(Packet.OkByte) =>
    signal.setDone()
    const(OK(packet))

  // decode Error result
  case Some(Packet.ErrorByte) =>
    signal.setDone()
    const(Error(packet)) flatMap { err =>
      val Error(code, state, msg) = err
      Future.exception(ServerError(code, state, msg))
    }

  // decode ResultSet
  case Some(byte) =>
    // Query results sent in response to anything other than COM_QUERY use the
    // binary row protocol (prepared statements).
    val isBinaryEncoded = cmd != Command.COM_QUERY
    val numCols = Try {
      val br = BufferReader(packet.body)
      br.readLengthCodedBinary().toInt
    }

    // A result set is: column-count header, then fields (EOF-terminated),
    // then rows (EOF-terminated).
    val result = for {
      cnt <- const(numCols)
      (fields, _) <- readTx(cnt)
      (rows, _) <- readTx()
      res <- const(ResultSet(isBinaryEncoded)(packet, fields, rows))
    } yield res

    // TODO: When streaming is implemented the
    // done signal should dependent on the
    // completion of the stream.
    result ensure signal.setDone()

  case _ =>
    // Empty packet body: we can no longer interpret the stream.
    signal.setDone()
    Future.exception(lostSyncExc)
}
/**
 * Reads a transmission from the transport that is terminated by
 * an EOF packet.
 *
 * TODO: This result should be streaming via some construct
 * that allows the consumer to exert backpressure.
 *
 * @param limit An upper bound on the number of reads. If the
 * number of reads exceeds the limit before an EOF packet is reached
 * a Future encoded LostSyncException is returned.
 * @return the packets read (in order) paired with the terminating EOF.
 */
private[this] def readTx(limit: Int = Int.MaxValue): Future[(Seq[Packet], EOF)] = {
  // Accumulates packets in reverse; reversed once the terminating EOF arrives.
  def aux(numRead: Int, xs: List[Packet]): Future[(List[Packet], EOF)] = {
    if (numRead > limit) Future.exception(lostSyncExc)
    else trans.read() flatMap { packet =>
      packet.body.headOption match {
        case Some(Packet.EofByte) =>
          const(EOF(packet)) map { eof =>
            (xs.reverse, eof)
          }
        case Some(Packet.ErrorByte) =>
          const(Error(packet)) flatMap { err =>
            val Error(code, state, msg) = err
            Future.exception(ServerError(code, state, msg))
          }
        case Some(_) => aux(numRead + 1, packet :: xs)
        // Empty packet body means we've lost protocol sync.
        case None => Future.exception(lostSyncExc)
      }
    }
  }

  if (limit <= 0) Future.value(emptyTx)
  else aux(0, Nil)
}
} | JustinTulloss/finagle | finagle-mysql/src/main/scala/com/twitter/finagle/mysql/ClientDispatcher.scala | Scala | apache-2.0 | 10,019 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.bobby.output
import uk.gov.hmrc.bobby.domain.{Message, MessageLevels}
trait TextWriter extends BobbyWriter {

  /** Builds the rows to render for the given messages.
   *
   *  For the [[Nested]] view, each local (direct) dependency's message is
   *  emitted immediately followed by the transitive messages whose dependency
   *  chain is rooted at that dependency. For every other view, messages are
   *  sorted by module name and then by severity level.
   */
  def buildModel(messages: List[Message], viewType: ViewType): List[Seq[fansi.Str]] =
    viewType match {
      case Nested =>
        // Index transitive (non-local) messages by the root of their
        // dependency chain; messages with an empty chain are dropped.
        val transitiveByRoot =
          messages
            .filterNot(_.isLocal)
            .groupBy(_.dependencyChain.lastOption)
            .collect { case (Some(root), ms) => root -> ms }
        // Interleave: each local message, then the transitive ones it pulled in.
        val ordered = for {
          local <- messages.filter(_.isLocal)
          m     <- local :: transitiveByRoot.getOrElse(local.checked.moduleID, Nil)
        } yield m
        ordered.map(viewType.renderMessage)
      case _ =>
        messages
          .sortBy(_.moduleName)
          .sortWith((a, b) => MessageLevels.compare(a.level, b.level))
          .map(viewType.renderMessage)
    }
}
| hmrc/sbt-bobby | src/main/scala/uk/gov/hmrc/bobby/output/TextWriter.scala | Scala | apache-2.0 | 1,437 |
package a64.加法
import a64.基础._
// Type-level description of the first stage of the addition interpreter:
// the data is a Number[T], no parameter is threaded, and the final result
// element type is T.
// NOTE(review): semantics inferred from usage in PlusContext1 — confirm
// against the definitions in a64.基础.
class PlusTypeContext1[T] extends TypeContext {
  override type toDataType = Number[T]
  override type Parameter = Unit
  override type Result = T
}
// First-stage interpreter: traverses one Number while carrying a second one
// to append once the first is exhausted (i.e. structural addition).
class PlusContext1[T] extends Context[PlusTypeContext1[T], T] {
  // Pair of (other addend, number currently being traversed).
  override type DataCtx = (Number[T], Number[T])
  override def convert(t: Number[T], current: Number[T]): (Number[T], Number[T]) = (t, current)
  // "Successor" step: recurse on the pair and keep this element as the head.
  override def bindS(number: (Number[T], Number[T]), parameter: Unit, head: T): Collect[T] =
    CollectS(number._2.execute(this)((), number._1), head)
  // "Zero" step: the first number is exhausted; continue by traversing the
  // other addend with the second-stage interpreter.
  override def bindT(number: (Number[T], Number[T]), parameter: Unit): Collect[T] = number._1.execute(new PlusContext2)((), ())
}
// Type-level description of the second stage: no extra data is converted in
// (toDataType = Unit); the current Number itself is the traversal state.
class PlusTypeContext2[T] extends TypeContext {
  override type toDataType = Unit
  override type Parameter = Unit
  override type Result = T
}
// Second-stage interpreter: simply copies the remaining number's elements,
// terminating with an empty collector.
class PlusContext2[T] extends Context[PlusTypeContext2[T], T] {
  override type DataCtx = Number[T]
  override def convert(t: Unit, current: Number[T]): Number[T] = current
  // Keep the head element and recurse on the tail.
  override def bindS(number: Number[T], parameter: Unit, head: T): Collect[T] =
    CollectS(number.execute(this)((), ()), head)
  // End of the number: terminate the collected sequence.
  override def bindT(number: Number[T], parameter: Unit): Collect[T] = CollectT()
}
| djx314/ubw | a64-模仿四法/src/main/scala/a64/加法/Counter.scala | Scala | bsd-3-clause | 1,224 |
/** Improving: An unconstrained collection of scala code.
* Copyright 2005-2011 Paul Phillips
*
* Distributed under the "Simplified BSD License" in LICENSE.txt.
*/
package improving
import scala.collection.{ mutable, immutable }
import scala.tools.nsc.io.{ Directory, File, Path }
import java.io.{ ObjectInputStream, ObjectOutputStream }
import java.util.concurrent.ConcurrentHashMap
import scala.collection.JavaConversions._
/** A concurrent map whose contents can be loaded from and persisted to a
 *  file via Java serialization. Mutations are in-memory only; callers must
 *  invoke [[store]] explicitly to persist the current state.
 */
class DiskBackedConcurrentMap[A, B](val mapFile: File) extends mutable.ConcurrentMap[A, B] {
  /** Deserializes the backing map from `mapFile`, or starts empty when the
   *  file does not exist. The stream is closed even if deserialization
   *  throws, so a corrupt or truncated file cannot leak a file handle.
   */
  private def load(): ConcurrentHashMap[A, B] = {
    if (!mapFile.exists) new ConcurrentHashMap[A, B]
    else {
      val in = new ObjectInputStream(mapFile.inputStream())
      try in.readObject().asInstanceOf[ConcurrentHashMap[A, B]]
      finally in.close()
    }
  }
  private val _map: ConcurrentHashMap[A, B] = load()

  // Map
  def get(key: A): Option[B] = Option(_map.get(key))
  def iterator: Iterator[(A, B)] = _map.entrySet().iterator map (x => (x.getKey, x.getValue))
  def +=(kv: (A, B)): this.type = { _map.put(kv._1, kv._2); this }
  def -=(key: A): this.type = { _map.remove(key); this }

  // mutable.ConcurrentMap
  def putIfAbsent(k: A, v: B): Option[B] = Option(_map.putIfAbsent(k, v))
  def remove(k: A, v: B): Boolean = _map.remove(k, v)
  def replace(k: A, oldvalue: B, newvalue: B): Boolean = _map.replace(k, oldvalue, newvalue)
  def replace(k: A, v: B): Option[B] = Option(_map.replace(k, v))

  /** Serializes the current map to `mapFile`. Closing the
   *  ObjectOutputStream flushes and closes the underlying buffered file
   *  stream, so no separate close is required; the try/finally guarantees
   *  the handle is released even if serialization fails.
   */
  def store() = {
    val out = new ObjectOutputStream(mapFile.bufferedOutput())
    try out.writeObject(_map)
    finally out.close()
  }
}
/***********************************************************************
* Copyright (c) 2017-2018 IBM
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.cassandra.data
import com.datastax.driver.core.{Row, Statement}
import org.locationtech.geomesa.cassandra.utils.CassandraBatchScan
import org.locationtech.geomesa.cassandra.{CassandraFilterStrategyType, CassandraQueryPlanType}
import org.locationtech.geomesa.index.utils.Explainer
import org.locationtech.geomesa.utils.collection.{CloseableIterator, SelfClosingIterator}
import org.opengis.feature.simple.SimpleFeature
import org.opengis.filter.Filter
/** Base type for Cassandra query plans: the CQL statements to execute, the
 *  table they target, the client-side parallelism, and any filter that must
 *  still be evaluated on the client after rows are returned.
 */
sealed trait CassandraQueryPlan extends CassandraQueryPlanType {
  def filter: CassandraFilterStrategyType
  def table: String
  // One Statement per range to scan; executed in parallel up to numThreads.
  def ranges: Seq[Statement]
  def numThreads: Int
  // Filter evaluated client-side; in this trait it is only surfaced for
  // explain logging.
  def clientSideFilter: Option[Filter]
  override def explain(explainer: Explainer, prefix: String): Unit =
    CassandraQueryPlan.explain(this, explainer, prefix)
}
object CassandraQueryPlan {
  /** Writes a human-readable description of the plan (class, table, a sample
   *  of the ranges, and the client-side filter) to the explainer, indented
   *  one level under `prefix`.
   */
  def explain(plan: CassandraQueryPlan, explainer: Explainer, prefix: String): Unit = {
    import org.locationtech.geomesa.filter.filterToString
    explainer.pushLevel(s"${prefix}Plan: ${plan.getClass.getName}")
    explainer(s"Table: ${Option(plan.table).orNull}")
    // Show at most five ranges to keep the explain output readable.
    val sampledRanges = plan.ranges.take(5).map(_.toString).mkString(", ")
    explainer(s"Ranges (${plan.ranges.size}): $sampledRanges")
    val filterText = plan.clientSideFilter.fold("None")(filterToString)
    explainer(s"Client-side filter: $filterText")
    explainer.popLevel()
  }
}
// Degenerate plan used when the query can be answered without scanning:
// it targets no table, has no ranges, and scan() yields no features.
case class EmptyPlan(filter: CassandraFilterStrategyType) extends CassandraQueryPlan {
  override val table: String = ""
  override val ranges: Seq[Statement] = Seq.empty
  override val numThreads: Int = 0
  override val clientSideFilter: Option[Filter] = None
  override def scan(ds: CassandraDataStore): CloseableIterator[SimpleFeature] = CloseableIterator.empty
}
/** Concrete scan plan: executes `ranges` against `table` with up to
 *  `numThreads` parallel requests, converting raw Cassandra rows into
 *  SimpleFeatures via `entriesToFeatures`.
 */
case class QueryPlan(filter: CassandraFilterStrategyType,
                     table: String,
                     ranges: Seq[Statement],
                     numThreads: Int,
                     // note: filter is applied in entriesToFeatures, this is just for explain logging
                     clientSideFilter: Option[Filter],
                     entriesToFeatures: Iterator[Row] => Iterator[SimpleFeature]) extends CassandraQueryPlan {
  override val hasDuplicates: Boolean = false
  override def scan(ds: CassandraDataStore): CloseableIterator[SimpleFeature] = {
    // 100000: buffer size passed to CassandraBatchScan.
    // NOTE(review): confirm the exact semantics of this argument (row buffer
    // vs fetch size) against CassandraBatchScan.
    val results = new CassandraBatchScan(ds.session, ranges, numThreads, 100000)
    // Tie the lifetime of the returned iterator to the underlying scan so
    // closing the iterator releases the Cassandra resources.
    SelfClosingIterator(entriesToFeatures(results), results.close)
  }
}
| boundlessgeo/geomesa | geomesa-cassandra/geomesa-cassandra-datastore/src/main/scala/org/locationtech/geomesa/cassandra/data/CassandraQueryPlan.scala | Scala | apache-2.0 | 2,998 |
package org.sireum.pilarform.lexer
import org.sireum.pilarform.lexer.Tokens._
import scala.collection.mutable.ListBuffer
/** Iterator adapter over a PilarLexer that groups runs of whitespace and
 *  comment tokens ("hidden" tokens) and attaches them to the next
 *  significant token, so downstream consumers see only meaningful tokens.
 */
class WhitespaceAndCommentsGrouper(lexer: PilarLexer) extends Iterator[Token] {
  // One-token lookahead into the underlying lexer.
  private var nextToken = lexer.next()
  private var ended = false
  // Hidden tokens consumed immediately before the token last returned by next().
  private var hiddenTokens: HiddenTokens = _
  def getHiddenTokens = hiddenTokens
  def hasNext = !ended
  private[lexer] def text = lexer.text
  def next() = {
    require(hasNext)
    // Consume any leading whitespace/comments, then return the significant
    // token with those hidden tokens attached to it.
    hiddenTokens = readHiddenTokens()
    val resultToken = nextToken
    resultToken.associatedWhitespaceAndComments_ = hiddenTokens
    // EOF is returned once; the iterator is exhausted afterwards.
    if (nextToken.tokenType == EOF)
      ended = true
    nextToken = lexer.next()
    resultToken
  }
  // Collects consecutive whitespace/comment tokens into HiddenTokens.
  private def readHiddenTokens(): HiddenTokens = {
    val hiddenTokens = new ListBuffer[HiddenToken]
    while (isCommentOrWhitespace(nextToken)) {
      hiddenTokens += makeHiddenToken(nextToken)
      nextToken = lexer.next()
    }
    new HiddenTokens(hiddenTokens.toList)
  }
  private def isCommentOrWhitespace(token: Token) = token.tokenType match {
    case WS | LINE_COMMENT | MULTILINE_COMMENT ⇒ true
    case _ ⇒ false
  }
  // Only ever called on tokens accepted by isCommentOrWhitespace, so the
  // match below is effectively exhaustive.
  private def makeHiddenToken(token: Token) = token.tokenType match {
    case LINE_COMMENT ⇒ SingleLineComment(token)
    case MULTILINE_COMMENT ⇒ MultiLineComment(token)
    case WS ⇒ Whitespace(token)
  }
}
| fgwei/pilarform | pilarform/src/main/scala/org/sireum/pilarform/lexer/WhitespaceAndCommentsGrouper.scala | Scala | epl-1.0 | 1,391 |
package scala.meta.internal.semanticdb
import scala.{meta => m}
import java.io.PrintWriter
import java.io.StringWriter
import java.io.Writer
import scala.collection.mutable
import scala.reflect.internal.ModifierFlags._
trait PrinterOps { self: DatabaseOps =>
import g._
/** Renders a compiler type as synthetic code with positioned symbol
 *  occurrences; some shapes are deliberately rendered as empty.
 */
def showSynthetic(tpe: g.Type): AttributedSynthetic = tpe match {
  case g.TypeBounds(_, _) =>
    // Skip signature for abstract type members, e.g. type T <: Int
    AttributedSynthetic.empty
  case PolyType(tparams, tb: TypeBounds) =>
    // Type lambda with no body
    AttributedSynthetic.empty
  case _ => showSynthetic(g.TypeTree(tpe))
}
/** Prints the tree with a SyntheticCodePrinter, then pairs the rendered
 *  syntax with the (start, end) -> symbol offsets the printer recorded as a
 *  side effect of printing.
 */
def showSynthetic(what: g.Tree): AttributedSynthetic = {
  val out = new StringWriter()
  val printer = SyntheticCodePrinter(out)
  printer.print(what)
  val names = printer.names.map {
    case ((start, end), symbol) => SyntheticRange(start, end, symbol)
  }.toList
  // Clear recorded offsets so stale entries cannot leak into a later print.
  printer.names.clear()
  val syntax = out.toString
  AttributedSynthetic(syntax, names)
}
private object SyntheticCodePrinter {
  // Wrap the writer so the printer can track how many characters have been
  // written, which is what makes name offset tracking possible.
  def apply(writer: Writer) = new SyntheticCodePrinter(new LengthWriter(writer, 0))
}
// An adaptation of g.CodePrinter that emits positioned symbols for names inside synthetics.
// The modifications have been wrapped in "+- scalac deviation" comments.
// In addition, the original source has been reformatted for better readability.
private class SyntheticCodePrinter(out: LengthWriter) extends TreePrinter(new PrintWriter(out)) {
// + scalac deviation
case class ResolvedName(syntax: String, symbol: m.Symbol)
object ResolvedName {
def apply(sym: g.Symbol): ResolvedName =
ResolvedName(printedName(sym.name), sym.toSemantic)
}
val names = mutable.HashMap.empty[(Int, Int), m.Symbol]
def printWithTrailingSpace(string: String): Unit =
if (string.isEmpty) ()
else {
this.print(string)
this.print(" ")
}
override def print(args: Any*): Unit = args.foreach {
case ResolvedName(syntax, symbol) =>
val start = out.length
super.print(syntax)
val end = out.length
if (symbol != m.Symbol.None) {
names(start -> end) = symbol
}
case els => super.print(els)
}
// - scalac deviation
// The original code printer was copied from the 2.11 sources and used mutable.Stack
// which was deprecated in 2.12. I refactored the code to use a var + List instead.
// + scalac deviation
protected var parentsStack: List[g.Tree] = Nil
// - scalac deviation
private val printRootPkg = false
protected def currentTree: Option[g.Tree] =
parentsStack.headOption
protected def currentParent: Option[g.Tree] =
if (parentsStack.length > 1) Some(parentsStack(1)) else None
protected def printedName(name: Name, decoded: Boolean = true): String = {
import scala.reflect.internal.Chars._
val decName = name.decoded
val bslash = '\\\\'
val isDot = (x: Char) => x == '.'
val brackets = List('[', ']', '(', ')', '{', '}')
def addBackquotes(s: String) =
if (decoded &&
(decName.exists(ch =>
brackets.contains(ch) ||
isWhitespace(ch) ||
isDot(ch)) ||
(name.isOperatorName &&
decName.exists(isOperatorPart) &&
decName.exists(isScalaLetter) &&
!decName.contains(bslash))))
s"`$s`"
else s
if (name == nme.CONSTRUCTOR) "this"
else addBackquotes(quotedName(name, decoded))
}
protected def isIntLitWithDecodedOp(qual: Tree, name: Name): Boolean = {
val qualIsIntLit = qual match {
case Literal(Constant(x: Int)) => true
case _ => false
}
qualIsIntLit && name.isOperatorName
}
override protected val commentsRequired = true
protected def needsParentheses(parent: Tree)(
insideIf: Boolean = true,
insideMatch: Boolean = true,
insideTry: Boolean = true,
insideAnnotated: Boolean = true,
insideBlock: Boolean = true,
insideLabelDef: Boolean = true,
insideAssign: Boolean = true): Boolean = {
parent match {
case _: If => insideIf
case _: Match => insideMatch
case _: Try => insideTry
case _: Annotated => insideAnnotated
case _: Block => insideBlock
case _: LabelDef => insideLabelDef
case _: Assign => insideAssign
case _ => false
}
}
protected def checkForBlank(cond: Boolean): String = if (cond) " " else ""
protected def blankForOperatorName(name: Name): String = checkForBlank(name.isOperatorName)
protected def blankForName(name: Name): String =
checkForBlank(name.isOperatorName || name.endsWith("_"))
def render(
what: Any,
printTypes: BooleanFlag = None,
printIds: BooleanFlag = None,
printOwners: BooleanFlag = None,
printKinds: BooleanFlag = None,
printMirrors: BooleanFlag = None,
printPositions: BooleanFlag = None): String = {
val buffer = new StringWriter()
val writer = new LengthWriter(buffer, out.length)
val printer = new SyntheticCodePrinter(writer)
printTypes.value.map(printTypes =>
if (printTypes) printer.withTypes else printer.withoutTypes)
printIds.value.map(printIds => if (printIds) printer.withIds else printer.withoutIds)
printOwners.value.map(printOwners =>
if (printOwners) printer.withOwners else printer.withoutOwners)
printKinds.value.map(printKinds =>
if (printKinds) printer.withKinds else printer.withoutKinds)
printMirrors.value.map(printMirrors =>
if (printMirrors) printer.withMirrors else printer.withoutMirrors)
printPositions.value.map(printPositions =>
if (printPositions) printer.withPositions else printer.withoutPositions)
printer.print(what)
writer.flush()
buffer.toString
}
protected def resolveSelect(t: Tree): String = {
t match {
// case for: 1) (if (a) b else c).meth1.meth2 or 2) 1 + 5 should be represented as (1).+(5)
case Select(qual, name)
if (name.isTermName &&
needsParentheses(qual)(insideLabelDef = false)) ||
isIntLitWithDecodedOp(qual, name) =>
s"(${resolveSelect(qual)}).${printedName(name)}"
case Select(qual, name) if name.isTermName =>
s"${resolveSelect(qual)}.${printedName(name)}"
case Select(qual, name) if name.isTypeName =>
s"${resolveSelect(qual)}#${blankForOperatorName(name)}%${printedName(name)}"
case Ident(name) => printedName(name)
case _ => render(t)
}
}
object EmptyTypeTree {
def unapply(tt: TypeTree): Boolean = tt match {
case build.SyntacticEmptyTypeTree() if tt.wasEmpty || tt.isEmpty => true
case _ => false
}
}
protected def isEmptyTree(tree: Tree) =
tree match {
case EmptyTree | EmptyTypeTree() => true
case _ => false
}
protected def originalTypeTrees(trees: List[Tree]) =
trees.filter(!isEmptyTree(_)).map {
case tt: TypeTree if tt.original != null => tt.original
case tree => tree
}
val defaultClasses = List(tpnme.AnyRef, tpnme.Object)
val defaultTraitsForCase = List(tpnme.Product, tpnme.Serializable)
protected def removeDefaultTypesFromList(trees: List[Tree])(
classesToRemove: List[Name] = defaultClasses)(traitsToRemove: List[Name]) = {
def removeDefaultTraitsFromList(trees: List[Tree], traitsToRemove: List[Name]): List[Tree] =
trees match {
case Nil => trees
case init :+ last =>
last match {
case Select(Ident(sc), name) if traitsToRemove.contains(name) && sc == nme.scala_ =>
removeDefaultTraitsFromList(init, traitsToRemove)
case _ => trees
}
}
removeDefaultTraitsFromList(
removeDefaultClassesFromList(trees, classesToRemove),
traitsToRemove)
}
protected def removeDefaultClassesFromList(
trees: List[Tree],
classesToRemove: List[Name] = defaultClasses) =
originalTypeTrees(trees).filter {
case Select(Ident(sc), name) => !(classesToRemove.contains(name) && sc == nme.scala_)
case tt: TypeTree if tt.tpe != null =>
!(classesToRemove contains (newTypeName(tt.tpe.toString())))
case _ => true
}
protected def syntheticToRemove(tree: Tree) =
tree match {
case _: ValDef | _: TypeDef => false // don't remove ValDef and TypeDef
case md: MemberDef if md.mods.isSynthetic => true
case _ => false
}
override def printOpt(prefix: String, tree: Tree) =
if (!isEmptyTree(tree)) super.printOpt(prefix, tree)
override def printColumn(ts: List[Tree], start: String, sep: String, end: String) = {
super.printColumn(ts.filter(!syntheticToRemove(_)), start, sep, end)
}
def printFlags(mods: Modifiers, primaryCtorParam: Boolean = false): Unit = {
val base = AccessFlags | OVERRIDE | ABSTRACT | FINAL | SEALED | LAZY
val mask = if (primaryCtorParam) base else base | IMPLICIT
val s = mods.flagString(mask)
if (s != "") print(s"$s ")
// case flag should be the last
if (mods.isCase) print(mods.flagBitsToString(CASE) + " ")
if (mods.isAbstractOverride) print("abstract override ")
}
override def printModifiers(tree: Tree, mods: Modifiers): Unit =
printModifiers(mods, primaryCtorParam = false)
def printModifiers(mods: Modifiers, primaryCtorParam: Boolean): Unit = {
def modsAccepted =
List(currentTree, currentParent).exists(_.exists({
case _: ClassDef | _: ModuleDef | _: Template | _: PackageDef => true
case _ => false
}))
if (currentParent.isEmpty || modsAccepted)
printFlags(mods, primaryCtorParam)
else
List(IMPLICIT, CASE, LAZY, SEALED).foreach { flag =>
if (mods.hasFlag(flag)) print(s"${mods.flagBitsToString(flag)} ")
}
}
def printParam(tree: Tree, primaryCtorParam: Boolean): Unit =
tree match {
case vd @ ValDef(mods, name, tp, rhs) =>
printPosition(tree)
printAnnotations(vd)
val mutableOrOverride = mods.isOverride || mods.isMutable
val hideCtorMods = mods.isParamAccessor && mods.isPrivateLocal && !mutableOrOverride
val hideCaseCtorMods = mods.isCaseAccessor && mods.isPublic && !mutableOrOverride
if (primaryCtorParam && !(hideCtorMods || hideCaseCtorMods)) {
printModifiers(mods, primaryCtorParam)
print(if (mods.isMutable) "var " else "val ")
}
print(printedName(name), blankForName(name))
printOpt(": ", tp)
printOpt(" = ", rhs)
case TypeDef(_, name, tparams, rhs) =>
printPosition(tree)
print(printedName(name))
printTypeParams(tparams)
print(rhs)
case _ =>
super.printParam(tree)
}
override def printParam(tree: Tree): Unit = {
printParam(tree, primaryCtorParam = false)
}
protected def printArgss(argss: List[List[Tree]]): Unit =
argss.foreach({ x: List[Tree] =>
if (!(x.isEmpty && argss.size == 1)) printRow(x, "(", ", ", ")")
})
override def printAnnotations(tree: MemberDef) = {
val annots = tree.mods.annotations
annots.foreach { annot =>
printAnnot(annot); print(" ")
}
}
protected def printAnnot(tree: Tree) = {
tree match {
case treeInfo.Applied(core, _, argss) =>
print("@")
core match {
case Select(New(tree), _) => print(tree)
case _ =>
}
printArgss(argss)
case _ => super.printTree(tree)
}
}
// + scalac deviation
object PathDependentPrefix {
def unapply(arg: g.Type): Option[g.Symbol] = arg match {
case SingleType(_, sym) if sym.isTerm && !sym.isModule =>
Some(sym)
case _ => None
}
}
object ByNameType {
def unapply(arg: g.Type): Option[Type] =
if (definitions.isByNameParamType(arg)) {
arg match { case TypeRef(_, _, arg :: Nil) => Some(arg) }
} else None
}
object RepeatedType {
def unapply(arg: g.Type): Option[Type] =
if (definitions.isRepeatedParamType(arg)) {
arg match { case TypeRef(_, _, arg :: Nil) => Some(arg) }
} else None
}
def printType(tpe: Type): Unit = {
def wrapped[T](
ts: List[T],
open: String,
close: String,
forceOpen: Boolean = false,
sep: String = ", ")(f: T => Unit): Unit = {
if (ts.nonEmpty || forceOpen) {
this.print(open)
var first = true
ts.foreach { t =>
if (!first) {
this.print(sep)
}
f(t)
first = false
}
this.print(close)
}
}
def prefix(pre: Type): Unit = {
if (!pre.typeSymbol.isInstanceOf[NoSymbol] &&
!pre.typeSymbol.isEmptyPrefix &&
!pre.typeSymbol.isEmptyPackage) {
this.printType(pre)
this.print(".")
}
}
def printSymbol(s: g.Symbol): Unit = {
this.print(s.varianceString)
val nameString = s.nameString
this.print(nameString)
if (nameString.lastOption.exists(!_.isLetterOrDigit)) {
this.print(" ")
}
this.print(": ")
this.printType(s.typeSignature)
this.print(s.flagsExplanationString)
}
tpe match {
case MethodType(params, result) =>
var isImplicit = false
wrapped(params, "(", ")", forceOpen = true) { s =>
if (!isImplicit && s.isImplicit) {
this.print("implicit ")
isImplicit = true
}
printSymbol(s)
}
if (!result.isInstanceOf[MethodType]) {
// Only print `: ` before last curried parameter list.
this.print(": ")
}
this.printType(result)
case NullaryMethodType(result) =>
printType(result)
case PolyType(tparams, result) =>
wrapped(tparams, "[", "] => ")(s => this.print(s.decodedName))
this.printType(result)
case SingleType(pre, sym) =>
if (!sym.isStable) this.printType(pre)
this.print(ResolvedName(sym))
this.print(".type")
case ThisType(sym) =>
this.print(ResolvedName(sym))
this.print(".this.type")
case ByNameType(arg) =>
this.print("=>")
this.printType(arg)
case RepeatedType(arg) =>
this.printType(arg)
this.print("*")
case TypeRef(pre, sym, args) =>
pre match {
case PathDependentPrefix(sym) =>
this.print(ResolvedName(sym))
this.print(".")
case _ =>
}
this.print(ResolvedName(sym))
wrapped(args, "[", "]")(printType)
case AnnotatedType(annotations, tpe) =>
this.printType(tpe)
case OverloadedType(pre, alternatives) =>
this.printType(pre)
case NoType =>
case ConstantType(_) =>
this.printType(tpe.widen)
case RefinedType(parents, decls) =>
wrapped(parents, "", "", sep = " with ") { s =>
this.printType(s)
}
this.print("{")
wrapped(decls.toList, "", "", sep = "; ") { s =>
import s._
this.printWithTrailingSpace(s.flagString)
this.printWithTrailingSpace(s.keyString)
this.print(varianceString)
this.print(ResolvedName(s))
this.print(signatureString)
this.print(flagsExplanationString)
}
this.print("}")
case _ =>
this.print(tpe.toString())
}
}
// - scalac deviation
override def printTree(tree: Tree): Unit = {
parentsStack = tree :: parentsStack
try {
processTreePrinting(tree)
printTypesInfo(tree)
} finally {
parentsStack = parentsStack.tail
}
}
def processTreePrinting(tree: Tree): Unit = {
tree match {
// don't remove synthetic ValDef/TypeDef
case _ if syntheticToRemove(tree) =>
case cl @ ClassDef(mods, name, tparams, impl) =>
if (mods.isJavaDefined) super.printTree(cl)
printAnnotations(cl)
// traits
val clParents: List[Tree] = if (mods.isTrait) {
// avoid abstract modifier for traits
printModifiers(tree, mods &~ ABSTRACT)
print("trait ", printedName(name))
printTypeParams(tparams)
val build.SyntacticTraitDef(_, _, _, _, parents, _, _) = tree
parents
// classes
} else {
printModifiers(tree, mods)
print("class ", printedName(name))
printTypeParams(tparams)
val build.SyntacticClassDef(
_,
_,
_,
ctorMods,
vparamss,
earlyDefs,
parents,
selfType,
body) = cl
// constructor's modifier
if (ctorMods.hasFlag(AccessFlags) || ctorMods.hasAccessBoundary) {
print(" ")
printModifiers(ctorMods, primaryCtorParam = false)
}
def printConstrParams(ts: List[ValDef]): Unit = {
parenthesize() {
printImplicitInParamsList(ts)
printSeq(ts)(printParam(_, primaryCtorParam = true))(print(", "))
}
}
// constructor's params processing (don't print single empty constructor param list)
vparamss match {
case Nil | List(Nil) if !mods.isCase && !ctorMods.hasFlag(AccessFlags) =>
case _ => vparamss.foreach(printConstrParams)
}
parents
}
// get trees without default classes and traits (when they are last)
val printedParents = removeDefaultTypesFromList(clParents)()(
if (mods.hasFlag(CASE)) defaultTraitsForCase else Nil)
print(
if (mods.isDeferred) "<: " else if (printedParents.nonEmpty) " extends " else "",
impl)
case pd @ PackageDef(packaged, stats) =>
packaged match {
case Ident(name) if name == nme.EMPTY_PACKAGE_NAME =>
printSeq(stats)(print(_)) {
println()
println()
}
case _ =>
printPackageDef(pd, scala.util.Properties.lineSeparator)
}
case md @ ModuleDef(mods, name, impl) =>
printAnnotations(md)
printModifiers(tree, mods)
val Template(parents, self, methods) = impl
val parWithoutAnyRef = removeDefaultClassesFromList(parents)
print(
"object " + printedName(name),
if (parWithoutAnyRef.nonEmpty) " extends " else "",
impl)
case vd @ ValDef(mods, name, tp, rhs) =>
printValDef(vd, printedName(name)) {
// place space after symbolic def name (val *: Unit does not compile)
printOpt(s"${blankForName(name)}: ", tp)
} {
if (!mods.isDeferred) print(" = ", if (rhs.isEmpty) "_" else rhs)
}
case dd @ DefDef(mods, name, tparams, vparamss, tp, rhs) =>
printDefDef(dd, printedName(name)) {
if (tparams.isEmpty && (vparamss.isEmpty || vparamss(0).isEmpty))
print(blankForName(name))
printOpt(": ", tp)
} {
printOpt(" = " + (if (mods.isMacro) "macro " else ""), rhs)
}
case td @ TypeDef(mods, name, tparams, rhs) =>
printTypeDef(td, printedName(name))
case LabelDef(name, params, rhs) =>
if (name.startsWith(nme.WHILE_PREFIX)) {
val If(cond, thenp, elsep) = rhs
print("while (", cond, ") ")
val Block(list, wh) = thenp
printColumn(list, "", ";", "")
} else if (name.startsWith(nme.DO_WHILE_PREFIX)) {
val Block(bodyList, ifCond @ If(cond, thenp, elsep)) = rhs
print("do ")
printColumn(bodyList, "", ";", "")
print(" while (", cond, ") ")
} else {
print(printedName(name)); printLabelParams(params)
printBlock(rhs)
}
case imp @ Import(expr, _) =>
printImport(imp, resolveSelect(expr))
case t @ Template(parents, self, tbody) =>
val body = t.body // treeInfo.untypecheckedTemplBody(t)
val printedParents =
currentParent
.map {
case _: CompoundTypeTree => parents
case ClassDef(mods, name, _, _) if mods.isCase =>
removeDefaultTypesFromList(parents)()(List(tpnme.Product, tpnme.Serializable))
case _ => removeDefaultClassesFromList(parents)
}
.getOrElse(parents)
val primaryCtr = treeInfo.firstConstructor(body)
val ap: Option[Apply] = primaryCtr match {
case DefDef(_, _, _, _, _, Block(ctBody, _)) =>
val earlyDefs = treeInfo.preSuperFields(ctBody) ::: body.filter {
case td: TypeDef => treeInfo.isEarlyDef(td)
case _ => false
}
if (earlyDefs.nonEmpty) {
print("{")
printColumn(earlyDefs, "", ";", "")
print("} " + (if (printedParents.nonEmpty) "with " else ""))
}
ctBody.collectFirst {
case apply: Apply => apply
}
case _ => None
}
if (printedParents.nonEmpty) {
val (clParent :: traits) = printedParents
print(clParent)
val constrArgss = ap match {
case Some(treeInfo.Applied(_, _, argss)) => argss
case _ => Nil
}
printArgss(constrArgss)
if (traits.nonEmpty) {
printRow(traits, " with ", " with ", "")
}
}
/* Remove primary constr def and constr val and var defs
* right contains all constructors
*/
val (left, right) = body
.filter {
// remove valdefs defined in constructor and presuper vals
case vd: ValDef => !vd.mods.isParamAccessor && !treeInfo.isEarlyValDef(vd)
// remove $this$ from traits
case dd: DefDef => dd.name != nme.MIXIN_CONSTRUCTOR
case td: TypeDef => !treeInfo.isEarlyDef(td)
case EmptyTree => false
case _ => true
}
.span {
case dd: DefDef => dd.name != nme.CONSTRUCTOR
case _ => true
}
val modBody = left ::: right.drop(1)
val showBody = !(modBody.isEmpty && (self == noSelfType || self.isEmpty))
if (showBody) {
if (self.name != nme.WILDCARD) {
print(" { ", self.name)
printOpt(": ", self.tpt)
print(" =>")
} else if (self.tpt.nonEmpty) {
print(" { _ : ", self.tpt, " =>")
} else {
print(" {")
}
printColumn(modBody, "", ";", "}")
}
case bl @ Block(stats, expr) =>
printBlock(bl.stats, expr)
case Match(selector, cases) =>
/* Insert braces if match is inner
* make this function available for other cases
* passing required type for checking
*/
def insertBraces(body: => Unit): Unit =
if (parentsStack.nonEmpty && parentsStack.tail.exists(_.isInstanceOf[Match])) {
print("(")
body
print(")")
} else body
val printParentheses = needsParentheses(selector)(insideLabelDef = false)
tree match {
case Match(EmptyTree, cs) =>
printColumn(cases, "{", "", "}")
case _ =>
insertBraces {
parenthesize(printParentheses)(print(selector))
printColumn(cases, " match {", "", "}")
}
}
case cd @ CaseDef(pat, guard, body) =>
printCaseDef(cd)
case Star(elem) =>
print(elem, "*")
case Bind(name, t) =>
if (t == EmptyTree) print("(", printedName(name), ")")
else if (t.exists(_.isInstanceOf[Star])) print(printedName(name), " @ ", t)
else print("(", printedName(name), " @ ", t, ")")
case f @ Function(vparams, body) =>
// parentheses are not allowed for val a: Int => Int = implicit x => x
val printParentheses = vparams match {
case head :: _ => !head.mods.isImplicit
case _ => true
}
printFunction(f)(printValueParams(vparams, inParentheses = printParentheses))
case Typed(expr, tp) =>
def printTp = print("(", tp, ")")
tp match {
case EmptyTree | EmptyTypeTree() => printTp
// case for untypechecked trees
case Annotated(annot, arg)
if (expr ne null) && (arg ne null) && expr.equalsStructure(arg) =>
printTp // remove double arg - 5: 5: @unchecked
case tt: TypeTree if tt.original.isInstanceOf[Annotated] => printTp
case Function(List(), EmptyTree) => print("(", expr, " _)") //func _
// parentheses required when (a match {}) : Type
case _ => print("((", expr, "): ", tp, ")")
}
// print only fun when targs are TypeTrees with empty original
case TypeApply(fun, targs) =>
if (targs.exists(isEmptyTree)) {
print(fun)
} else super.printTree(tree)
case Apply(fun, vargs) =>
tree match {
// processing methods ending on colons (x \\: list)
case Apply(
Block(
l1 @ List(sVD: ValDef),
a1 @ Apply(Select(_, methodName), l2 @ List(Ident(iVDName)))),
l3)
if sVD.mods.isSynthetic && treeInfo.isLeftAssoc(methodName) && sVD.name == iVDName =>
val printBlock = Block(l1, Apply(a1, l3))
print(printBlock)
case Apply(tree1, _) if needsParentheses(tree1)(insideAnnotated = false) =>
parenthesize()(print(fun)); printRow(vargs, "(", ", ", ")")
case _ => super.printTree(tree)
}
case UnApply(fun, args) =>
fun match {
case treeInfo.Unapplied(body) =>
body match {
case Select(qual, name) if name == nme.unapply => print(qual)
case TypeApply(Select(qual, name), _)
if name == nme.unapply || name == nme.unapplySeq =>
print(qual)
case _ => print(body)
}
case _ => print(fun)
}
printRow(args, "(", ", ", ")")
case st @ Super(This(qual), mix) =>
printSuper(st, printedName(qual), checkSymbol = false)
case th @ This(qual) =>
if (tree.hasExistingSymbol && tree.symbol.hasPackageFlag) print(tree.symbol.fullName)
else printThis(th, printedName(qual))
// remove this prefix from constructor invocation in typechecked trees: this.this -> this
case Select(This(_), name @ nme.CONSTRUCTOR) => print(printedName(name))
case Select(qual: New, name) =>
print(qual)
case sel @ Select(qual, name) =>
def checkRootPackage(tr: Tree): Boolean =
(currentParent match { //check that Select is not for package def name
case Some(_: PackageDef) => false
case _ => true
}) && (tr match { // check that Select contains package
case Select(q, _) => checkRootPackage(q)
case _: Ident | _: This =>
val sym = tr.symbol
tr.hasExistingSymbol && sym.hasPackageFlag && sym.name != nme.ROOTPKG
case _ => false
})
if (printRootPkg && checkRootPackage(tree)) print(s"${printedName(nme.ROOTPKG)}.")
val printParentheses =
needsParentheses(qual)(insideAnnotated = false) ||
isIntLitWithDecodedOp(qual, name)
// + scalac deviation
val resolved = ResolvedName(printedName(name), sel.symbol.toSemantic)
if (printParentheses) print("(", resolveSelect(qual), ").", resolved)
else print(resolveSelect(qual), ".", resolved)
// - scalac deviation
case id @ Ident(name) =>
if (name.nonEmpty) {
if (name == nme.dollarScope) {
print(s"scala.xml.${nme.TopScope}")
} else {
val str = printedName(name)
val strIsBackquoted = str.startsWith("`") && str.endsWith("`")
print(if (id.isBackquoted && !strIsBackquoted) "`" + str + "`" else str)
}
} else {
print("")
}
case l @ Literal(x) =>
import scala.reflect.internal.Chars.LF
x match {
case Constant(v: String) if {
val strValue = x.stringValue
strValue.contains(LF) &&
!strValue.contains("\\"\\"\\"") &&
strValue.length > 1
} =>
val splitValue = x.stringValue.split(s"$LF").toList
val multilineStringValue =
if (x.stringValue.endsWith(s"$LF")) splitValue :+ "" else splitValue
val trQuotes = "\\"\\"\\""
print(trQuotes)
printSeq(multilineStringValue) { print(_) } { print(LF) }
print(trQuotes)
case _ =>
// processing Float constants
val printValue = x.escapedStringValue + (if (x.value.isInstanceOf[Float]) "F"
else "")
print(printValue)
}
case an @ Annotated(ap, tree) =>
val printParentheses = needsParentheses(tree)()
parenthesize(printParentheses) { print(tree) }
print(if (tree.isType) " " else ": ")
printAnnot(ap)
case SelectFromTypeTree(qualifier, selector) =>
print("(", qualifier, ")#", blankForOperatorName(selector), printedName(selector))
case tt: TypeTree =>
if (!isEmptyTree(tt)) {
val original = tt.original
if (original != null) print(original)
else {
// + scalac deviation
this.printType(tree.tpe)
// - scalac deviation
}
}
case AppliedTypeTree(tp, args) =>
// it's possible to have (=> String) => String type but Function1[=> String, String] is not correct
val containsByNameTypeParam = args.exists(treeInfo.isByNameParamType)
if (containsByNameTypeParam) {
print("(")
printRow(args.init, "(", ", ", ")")
print(" => ", args.last, ")")
} else {
if (treeInfo.isRepeatedParamType(tree) && args.nonEmpty) {
print(args.head, "*")
} else if (treeInfo.isByNameParamType(tree)) {
print("=> ", if (args.isEmpty) "()" else args.head)
} else
super.printTree(tree)
}
case ExistentialTypeTree(tpt, whereClauses) =>
print("(", tpt)
printColumn(whereClauses, " forSome { ", ";", "})")
case EmptyTree =>
case tree => super.printTree(tree)
}
}
}
}
| DavidDudson/scalameta | scalameta/semanticdb-scalac-core/src/main/scala/scala/meta/internal/semanticdb/PrinterOps.scala | Scala | bsd-3-clause | 32,239 |
package com.blackboxsociety.http.parsers
import scala.util.parsing.combinator._
import scalaz.Validation
/**
 * Immutable representation of a parsed URL.
 *
 * @param protocol scheme without the "://" separator, e.g. "http"
 * @param host     host name, without port
 * @param port     optional port number
 * @param path     absolute path, beginning with "/"
 * @param query    optional query string, stored without the leading "?"
 *                 (see UrlParser, which strips it while parsing)
 */
case class Url(protocol: String, host: String, port: Option[Int], path: String, query: Option[String]) {

  /** Full URL string, e.g. "http://example.com:8080/a?b=1". */
  override def toString: String = {
    // Fold the Option instead of interpolating it directly; the original
    // interpolated the Option itself, rendering "Some(:8080)" / "None".
    val portPart = port.fold("")(p => ":" + p)
    s"$protocol://$host$portPart$toRequestPath"
  }

  /** Path plus query string as used on an HTTP request line, e.g. "/a?b=1". */
  def toRequestPath: String = {
    // Re-add the "?" separator that the parser strips; the original
    // concatenated path and query with no separator at all.
    s"$path${query.fold("")(q => "?" + q)}"
  }
}
object Url {
  // Implicit widening to String so a Url can be passed wherever a raw URL
  // string is expected; delegates to Url#toString.
  implicit def implicitToString(url: Url): String = url.toString
}
/**
 * Combinator-based URL parser producing a [[Url]] or a parse-error message.
 * Protocol defaults to "http" when the "scheme://" prefix is absent and the
 * path defaults to "/"; the "?" is stripped from the captured query string.
 */
object UrlParser extends RegexParsers {
  val protocol: Parser[String] = opt("[^:]+".r <~ "://") ^^ { _.getOrElse("http").toLowerCase }
  val host: Parser[String] = "[^:\\\\/]+".r
  val port: Parser[Option[Int]] = opt(":" ~> "[0-9]+".r) ^^ { _.map(_.toInt) }
  val path: Parser[String] = opt("/[^?]+".r) ^^ { _.getOrElse("/") }
  val query: Parser[Option[String]] = opt("?" ~> ".+".r)

  val urlParser: Parser[Url] = protocol ~ host ~ port ~ path ~ query ^^ {
    case pr ~ ho ~ po ~ pa ~ qu => Url(pr, ho, po, pa, qu)
  }

  /** Parses `url` fully, returning the structured [[Url]] or the error message. */
  def apply(url: String): Validation[String, Url] = {
    parseAll(urlParser, url) match {
      // The ParseResult extractors carry two components, (message, nextInput)
      // and (result, nextInput); binding both keeps the pattern arity correct
      // and ensures we wrap only the message / result, not the whole tuple.
      case NoSuccess(msg, _) => scalaz.Failure(msg)
      case Success(u, _)     => scalaz.Success(u)
    }
  }
}
| blackboxsociety/blackbox-http | src/main/scala/com/blackboxsociety/http/parsers/UrlParser.scala | Scala | mit | 1,176 |
/*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.core.data.tables
import org.apache.accumulo.core.client.BatchWriter
import org.apache.accumulo.core.data.{Mutation, Value}
import org.apache.accumulo.core.security.ColumnVisibility
import org.locationtech.geomesa.core.data._
import org.locationtech.geomesa.feature.SimpleFeatureEncoder
import org.opengis.feature.simple.SimpleFeature
// TODO: Implement as traits and cache results to gain flexibility and speed-up.
// https://geomesa.atlassian.net/browse/GEOMESA-344
object RecordTable extends GeoMesaTable {

  type Visibility = String
  type Feature2Mutation = (SimpleFeature, Visibility) => Mutation

  /**
   * Builds a function that turns a feature into the put Mutation storing its
   * encoded form under the record row key, guarded by the given visibility.
   * (Return type now uses the Feature2Mutation alias, consistent with
   * buildDelete.)
   */
  def buildWrite(encoder: SimpleFeatureEncoder, rowIdPrefix: String): Feature2Mutation =
    (feature: SimpleFeature, visibility: Visibility) => {
      val m = new Mutation(getRowKey(rowIdPrefix, feature.getID))
      m.put(SFT_CF, EMPTY_COLQ, new ColumnVisibility(visibility), new Value(encoder.encode(feature)))
      m
    }

  /**
   * Builds the delete Mutation that reverses [[buildWrite]] for a feature.
   * NOTE: `encoder` is not used for deletes but is kept for signature symmetry
   * with buildWrite (existing callers pass it).
   */
  def buildDelete(encoder: SimpleFeatureEncoder, rowIdPrefix: String): Feature2Mutation =
    (feature: SimpleFeature, visibility: Visibility) => {
      val m = new Mutation(getRowKey(rowIdPrefix, feature.getID))
      m.putDelete(SFT_CF, EMPTY_COLQ, new ColumnVisibility(visibility))
      m
    }

  /** Creates a function to write a feature to the Record Table **/
  def recordWriter(bw: BatchWriter, encoder: SimpleFeatureEncoder, rowIdPrefix: String) = {
    val builder = buildWrite(encoder, rowIdPrefix)
    (feature: SimpleFeature, visibility: Visibility) => bw.addMutation(builder(feature, visibility))
  }

  /** Creates a function to delete a feature's entry from the Record Table **/
  def recordDeleter(bw: BatchWriter, encoder: SimpleFeatureEncoder, rowIdPrefix: String) = {
    val builder = buildDelete(encoder, rowIdPrefix)
    (feature: SimpleFeature, visibility: Visibility) => bw.addMutation(builder(feature, visibility))
  }

  /** The record row key is simply the configured prefix followed by the feature id. */
  def getRowKey(rowIdPrefix: String, id: String): String = rowIdPrefix + id
}
| kevinwheeler/geomesa | geomesa-core/src/main/scala/org/locationtech/geomesa/core/data/tables/RecordTable.scala | Scala | apache-2.0 | 2,556 |
package katas.scala.sort.quicksort
import org.scalatest.Matchers
import katas.scala.sort.SeqSortTest
import scala.reflect.ClassTag
class QuickSort3 extends SeqSortTest with Matchers {
override def sort[T](seq: Seq[T])(implicit ordered: (T) => Ordered[T], tag: ClassTag[T]): Seq[T] = {
if (seq.size <= 1) return seq
val pivot = seq(seq.size / 2)
sort(seq.filter(_ < pivot)) ++ seq.filter(_ == pivot) ++ sort(seq.filter(_ > pivot))
}
} | dkandalov/katas | scala/src/katas/scala/sort/quicksort/QuickSort3.scala | Scala | unlicense | 445 |
package mesosphere.marathon.core.leadership.impl
import akka.actor.{ Status, PoisonPill, ActorSystem, Actor, Props }
import akka.testkit.{ TestActorRef, TestProbe }
import mesosphere.marathon.MarathonSpec
import mesosphere.marathon.core.leadership.PreparationMessages
import scala.concurrent.duration._
/**
 * Exercises the LeadershipCoordinatorActor state machine (suspended ->
 * preparingForStart -> active) against two probe-backed "whenLeader" actors:
 * Stop / PrepareForStart handling, removal of terminated children, and fan-out
 * of Prepared acknowledgements to every requester.
 */
class LeadershipCoordinatorActorTest extends MarathonSpec {
  test("in suspended, Stop doesn't do anything") {
    val probe = TestProbe()
    probe.send(coordinatorRef, WhenLeaderActor.Stop)
    whenLeader1Probe.expectNoMsg(0.seconds)
    whenLeader2Probe.expectNoMsg(0.seconds)
  }
  test("in preparingForStart, Stop is send to all whenLeaderActors and preparation is aborted") {
    val probe = TestProbe()
    // force the actor into the preparingForStart state with one pending requester
    coordinatorRef.underlying.become(
      coordinatorRef.underlyingActor.preparingForStart(Set(probe.ref), Set(whenLeader1Probe.ref)))
    probe.send(coordinatorRef, WhenLeaderActor.Stop)
    whenLeader1Probe.expectMsg(WhenLeaderActor.Stop)
    whenLeader2Probe.expectMsg(WhenLeaderActor.Stop)
    // the pending requester is told that preparation failed
    probe.expectMsgAnyClassOf(classOf[Status.Failure])
  }
  test("in active, Stop is send to all whenLeaderActors and preparation is aborted") {
    val probe = TestProbe()
    coordinatorRef.underlying.become(coordinatorRef.underlyingActor.active)
    probe.send(coordinatorRef, WhenLeaderActor.Stop)
    whenLeader1Probe.expectMsg(WhenLeaderActor.Stop)
    whenLeader2Probe.expectMsg(WhenLeaderActor.Stop)
    probe.expectNoMsg(0.seconds)
    // check we are in suspend
    probe.send(coordinatorRef, WhenLeaderActor.Stop)
    probe.expectNoMsg(0.seconds)
  }
  test("in suspended, remove terminated whenLeaderActors") {
    val probe = TestProbe()
    probe.send(whenLeader1Probe.ref, PoisonPill)
    assert(coordinatorRef.underlyingActor.whenLeaderActors == Set(whenLeader2Probe.ref))
  }
  test("in prepareToStart, remove terminated whenLeaderActors") {
    val probe = TestProbe()
    coordinatorRef.underlying.become(
      coordinatorRef.underlyingActor.preparingForStart(Set(probe.ref), Set(whenLeader1Probe.ref)))
    probe.send(whenLeader1Probe.ref, PoisonPill)
    assert(coordinatorRef.underlyingActor.whenLeaderActors == Set(whenLeader2Probe.ref))
    // the surviving child's ack is now sufficient to finish preparation
    whenLeader2Probe.send(coordinatorRef, PreparationMessages.Prepared(whenLeader2Probe.ref))
    probe.expectMsg(PreparationMessages.Prepared(coordinatorRef))
  }
  test("in active, remove terminated whenLeaderActors") {
    val probe = TestProbe()
    coordinatorRef.underlying.become(coordinatorRef.underlyingActor.active)
    probe.send(whenLeader1Probe.ref, PoisonPill)
    assert(coordinatorRef.underlyingActor.whenLeaderActors == Set(whenLeader2Probe.ref))
  }
  test("switch to prepareForStart and wait for all actors to prepare until started") {
    val probe = TestProbe()
    probe.send(coordinatorRef, PreparationMessages.PrepareForStart)
    whenLeader1Probe.expectMsg(PreparationMessages.PrepareForStart)
    whenLeader2Probe.expectMsg(PreparationMessages.PrepareForStart)
    probe.expectNoMsg(0.seconds)
    whenLeader1Probe.reply(PreparationMessages.Prepared(whenLeader1Probe.ref))
    // not done until BOTH children have acknowledged
    probe.expectNoMsg(0.seconds)
    whenLeader2Probe.reply(PreparationMessages.Prepared(whenLeader2Probe.ref))
    probe.expectMsg(PreparationMessages.Prepared(coordinatorRef))
  }
  test("when preparingForStart with one requester, add another interested actorRef if necessary") {
    val requester1 = TestProbe()
    val requester2 = TestProbe()
    coordinatorRef.underlying.become(
      coordinatorRef.underlyingActor.preparingForStart(
        Set(requester1.ref), Set(whenLeader1Probe.ref)))
    requester2.send(coordinatorRef, PreparationMessages.PrepareForStart)
    whenLeader1Probe.send(coordinatorRef, PreparationMessages.Prepared(whenLeader1Probe.ref))
    // both the original and the late requester get the acknowledgement
    requester1.expectMsg(PreparationMessages.Prepared(coordinatorRef))
    requester2.expectMsg(PreparationMessages.Prepared(coordinatorRef))
  }
  test("when active, immediately answer PrepareToStart") {
    val probe = TestProbe()
    coordinatorRef.underlying.become(coordinatorRef.underlyingActor.active)
    probe.send(coordinatorRef, PreparationMessages.PrepareForStart)
    probe.expectMsg(PreparationMessages.Prepared(coordinatorRef))
  }
  // Fixture: a coordinator supervising two probe-backed children, recreated
  // fresh for every test case by the before/after hooks below.
  private[this] implicit var actorSystem: ActorSystem = _
  private[this] var whenLeader1Probe: TestProbe = _
  private[this] var whenLeader2Probe: TestProbe = _
  private[this] var coordinatorRef: TestActorRef[LeadershipCoordinatorActor] = _
  before {
    actorSystem = ActorSystem()
    whenLeader1Probe = TestProbe()
    whenLeader2Probe = TestProbe()
    coordinatorRef = TestActorRef(LeadershipCoordinatorActor.props(Set(whenLeader1Probe.ref, whenLeader2Probe.ref)))
    coordinatorRef.start()
  }
  after {
    coordinatorRef.stop()
    actorSystem.shutdown()
    actorSystem.awaitTermination()
  }
}
| sepiroth887/marathon | src/test/scala/mesosphere/marathon/core/leadership/impl/LeadershipCoordinatorActorTest.scala | Scala | apache-2.0 | 4,836 |
// These are meant to be typed into the REPL. You can also run
// scala -Xnojline < repl-session.scala to run them all at once.
// <:< allows to instantiate a class even though some methods
// may not be applicable
class Pair[T](val first: T, val second: T) {
  // The implicit evidence is only demanded when `smaller` is called, so a
  // Pair over a non-Ordered T can still be constructed.
  def smaller(implicit ev: T <:< Ordered[T]) =
    if (first < second) first else second
}
import java.io.File
val p = new Pair(new File("."), new File(".."))
// Ok as long as we don't call smaller
p.smaller // Error
// <:< is used in the definition of the Option.orNull method
val friends = Map("Fred" -> "Barney")
val friendOpt = friends.get("Wilma") // An Option[String]
val friendOrNull = friendOpt.orNull // A String or null
val scores = Map("Fred" -> 42)
val scoreOpt = scores.get("Fred") // An Option[Int]
// orNull requires evidence Null <:< A1; Int is a value type, not nullable
val scoreOrNull = scoreOpt.orNull // Error
// <:< can improve type inference
// Here A is not constrained by the argument, so it is inferred as Nothing
def firstLast[A, C <: Iterable[A]](it: C) = (it.head, it.last)
firstLast(List(1, 2, 3)) // Error
def firstLast[A, C](it: C)(implicit ev: C <:< Iterable[A]) =
  (it.head, it.last)
| P7h/ScalaPlayground | Scala for the Impatient/examples/ch17/sec08/repl-session.scala | Scala | apache-2.0 | 1,077 |
/**
* This file is part of mycollab-scheduler.
*
* mycollab-scheduler is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* mycollab-scheduler is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with mycollab-scheduler. If not, see <http://www.gnu.org/licenses/>.
*/
package com.esofthead.mycollab.schedule.email.format
import scala.beans.BeanProperty
/**
* @author MyCollab Ltd.
* @version 4.6.0
*/
/**
 * Simple immutable pair of a display name and its hyperlink target, exposing
 * JavaBean-style getters (via @BeanProperty) for use from templates.
 */
class WebItem(@BeanProperty val displayName: String, @BeanProperty val webLink: String)
| uniteddiversity/mycollab | mycollab-scheduler/src/main/scala/com/esofthead/mycollab/schedule/email/format/WebItem.scala | Scala | agpl-3.0 | 950 |
package com.getjenny.starchat.entities.io
/**
 * Aggregate token statistics: the per-token frequency entries plus the total
 * term counts of the prior and observed collections.
 */
case class TokenFrequency(tokensFreq: List[TokenFrequencyItem], priorTotalTerms: Long, observedTotalTerms: Long)

/** Frequency of a single token in the prior and in the observed collection. */
case class TokenFrequencyItem(token: String, priorFrequency: Double, observedFrequency: Double)
| GetJenny/starchat | src/main/scala/com/getjenny/starchat/entities/io/TokenFrequency.scala | Scala | gpl-2.0 | 485 |
package moulder.values
import moulder.Value
/**
 * A [[Value]] that yields the elements of `values` one at a time, then `None`
 * once exhausted.
 *
 * NOTE: instances are stateful (they wrap an iterator), so every call to
 * `apply()` consumes one element.
 */
case class SeqValue[A](private val values: List[A]) extends Value[A] {

  private val it = values.iterator

  /** Next element, or None once the sequence is exhausted. */
  def apply() = if (it.hasNext) Some(it.next()) else None

  /**
   * A view over the same elements that restarts from the beginning whenever
   * the end is reached; yields None only when `values` is empty.
   */
  def cycle(): Value[A] = new Value[A] {
    private var s = values.iterator

    def apply() =
      if (s.hasNext) Some(s.next())
      else if (values.nonEmpty) {
        // restart from the beginning (original re-checked a throwaway
        // `values.iterator.hasNext`, which is just an emptiness test)
        s = values.iterator
        Some(s.next())
      } else None
  }
}
| jawher/moulder-s | src/main/scala/moulder/values/SeqValue.scala | Scala | mit | 488 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import org.apache.spark.annotation.Since
import org.apache.spark.ml.UnaryTransformer
import org.apache.spark.ml.param._
import org.apache.spark.ml.util._
import org.apache.spark.sql.types.{ArrayType, DataType, StringType}
/**
* A tokenizer that converts the input string to lowercase and then splits it by white spaces.
*
* @see [[RegexTokenizer]]
*/
@Since("1.2.0")
class Tokenizer @Since("1.4.0") (@Since("1.4.0") override val uid: String)
  extends UnaryTransformer[String, Seq[String], Tokenizer] with DefaultParamsWritable {

  @Since("1.2.0")
  def this() = this(Identifiable.randomUID("tok"))

  // Lowercases the whole input, then splits it on the whitespace regex.
  override protected def createTransformFunc: String => Seq[String] = {
    // scalastyle:off caselocale
    _.toLowerCase.split("\\\\s")
    // scalastyle:on caselocale
  }

  // Only string-typed input columns are accepted.
  override protected def validateInputType(inputType: DataType): Unit = {
    require(inputType == StringType,
      s"Input type must be ${StringType.catalogString} type but got ${inputType.catalogString}.")
  }

  // Output column is an array of (nullable) strings.
  override protected def outputDataType: DataType = new ArrayType(StringType, true)

  @Since("1.4.1")
  override def copy(extra: ParamMap): Tokenizer = defaultCopy(extra)
}
@Since("1.6.0")
object Tokenizer extends DefaultParamsReadable[Tokenizer] {

  // Companion enabling Tokenizer.load(...) for ML pipeline persistence.
  @Since("1.6.0")
  override def load(path: String): Tokenizer = super.load(path)
}
/**
* A regex based tokenizer that extracts tokens either by using the provided regex pattern to split
* the text (default) or repeatedly matching the regex (if `gaps` is false).
* Optional parameters also allow filtering tokens using a minimal length.
* It returns an array of strings that can be empty.
*/
@Since("1.4.0")
class RegexTokenizer @Since("1.4.0") (@Since("1.4.0") override val uid: String)
  extends UnaryTransformer[String, Seq[String], RegexTokenizer] with DefaultParamsWritable {

  @Since("1.4.0")
  def this() = this(Identifiable.randomUID("regexTok"))

  /**
   * Minimum token length, greater than or equal to 0.
   * Default: 1, to avoid returning empty strings
   * @group param
   */
  @Since("1.4.0")
  val minTokenLength: IntParam = new IntParam(this, "minTokenLength", "minimum token length (>= 0)",
    ParamValidators.gtEq(0))

  /** @group setParam */
  @Since("1.4.0")
  def setMinTokenLength(value: Int): this.type = set(minTokenLength, value)

  /** @group getParam */
  @Since("1.4.0")
  def getMinTokenLength: Int = $(minTokenLength)

  /**
   * Indicates whether regex splits on gaps (true) or matches tokens (false).
   * Default: true
   * @group param
   */
  @Since("1.4.0")
  val gaps: BooleanParam = new BooleanParam(this, "gaps", "Set regex to match gaps or tokens")

  /** @group setParam */
  @Since("1.4.0")
  def setGaps(value: Boolean): this.type = set(gaps, value)

  /** @group getParam */
  @Since("1.4.0")
  def getGaps: Boolean = $(gaps)

  /**
   * Regex pattern used to match delimiters if [[gaps]] is true or tokens if [[gaps]] is false.
   * Default: `"\\\\s+"`
   * @group param
   */
  @Since("1.4.0")
  val pattern: Param[String] = new Param(this, "pattern", "regex pattern used for tokenizing")

  /** @group setParam */
  @Since("1.4.0")
  def setPattern(value: String): this.type = set(pattern, value)

  /** @group getParam */
  @Since("1.4.0")
  def getPattern: String = $(pattern)

  /**
   * Indicates whether to convert all characters to lowercase before tokenizing.
   * Default: true
   * @group param
   */
  @Since("1.6.0")
  final val toLowercase: BooleanParam = new BooleanParam(this, "toLowercase",
    "whether to convert all characters to lowercase before tokenizing.")

  /** @group setParam */
  @Since("1.6.0")
  def setToLowercase(value: Boolean): this.type = set(toLowercase, value)

  /** @group getParam */
  @Since("1.6.0")
  def getToLowercase: Boolean = $(toLowercase)

  setDefault(minTokenLength -> 1, gaps -> true, pattern -> "\\\\s+", toLowercase -> true)

  override protected def createTransformFunc: String => Seq[String] = { originStr =>
    val re = $(pattern).r
    // scalastyle:off caselocale
    val str = if ($(toLowercase)) originStr.toLowerCase() else originStr
    // scalastyle:on caselocale
    // Either split on delimiter matches (gaps=true) or collect the matches
    // themselves (gaps=false), then drop tokens shorter than minTokenLength.
    val tokens = if ($(gaps)) re.split(str).toSeq else re.findAllIn(str).toSeq
    val minLength = $(minTokenLength)
    tokens.filter(_.length >= minLength)
  }

  override protected def validateInputType(inputType: DataType): Unit = {
    require(inputType == StringType, s"Input type must be string type but got $inputType.")
  }

  override protected def outputDataType: DataType = new ArrayType(StringType, true)

  @Since("1.4.1")
  override def copy(extra: ParamMap): RegexTokenizer = defaultCopy(extra)

  @Since("3.0.0")
  override def toString: String = {
    s"RegexTokenizer: uid=$uid, minTokenLength=${$(minTokenLength)}, gaps=${$(gaps)}, " +
      s"pattern=${$(pattern)}, toLowercase=${$(toLowercase)}"
  }
}
@Since("1.6.0")
object RegexTokenizer extends DefaultParamsReadable[RegexTokenizer] {

  // Companion enabling RegexTokenizer.load(...) for ML pipeline persistence.
  @Since("1.6.0")
  override def load(path: String): RegexTokenizer = super.load(path)
}
| maropu/spark | mllib/src/main/scala/org/apache/spark/ml/feature/Tokenizer.scala | Scala | apache-2.0 | 5,868 |
package jigg.nlp.ccg.lexicon
/*
Copyright 2013-2015 Hiroshi Noji
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import scala.io.Source
/**
 * Pairs CCG-tagged sentences with CaboCha dependency output read from a file,
 * producing bunsetsu-segmented sentences annotated with head indices.
 *
 * @param ccgSentences sentences aligned 1:1 with those in the CaboCha file;
 *                     their word/POS sequences are re-segmented into bunsetsu
 */
class CabochaReader[S<:TaggedSentence](ccgSentences: Seq[S]) {

  def readSentences(path: String): Seq[ParsedBunsetsuSentence] = {
    // Matches a bunsetsu header line, capturing the bunsetsu id and head index.
    val bunsetsuStart = """\\* (\\d+) (-?\\d+)[A-Z].*""".r

    // Flushes the currently accumulated bunsetsu (stored reversed, with the
    // head index as its first element after reversing) onto the sentence.
    def addBunsetsuTo(curSent: List[(String, Int)], curBunsetsu: List[String]) = curBunsetsu.reverse match {
      case Nil => curSent
      case headIdx :: tail => (tail.mkString(""), headIdx.toInt) :: curSent
    }

    // Fold over the file's non-empty lines, building per-sentence lists of
    // (bunsetsu surface form, head index). The source is now closed in a
    // finally block — the original leaked the underlying file handle.
    val source = scala.io.Source.fromFile(path)
    val bunsetsuSegedSentences: List[List[(String, Int)]] =
      try {
        source.getLines.filter(_ != "").foldLeft(
          (List[List[(String, Int)]](), List[(String, Int)](), List[String]())) {
          case ((processed, curSent, curBunsetsu), line) => line match {
            case bunsetsuStart(_, nextHeadIdx) =>
              (processed, addBunsetsuTo(curSent, curBunsetsu), nextHeadIdx :: Nil) // use first elem as the head idx
            case "EOS" => (addBunsetsuTo(curSent, curBunsetsu).reverse :: processed, Nil, Nil)
            case word => (processed, curSent, word.split("\\t")(0) :: curBunsetsu)
          }
        }._1.reverse
      } finally source.close()

    ccgSentences.zip(bunsetsuSegedSentences).map { case (ccgSentence, bunsetsuSentence) =>
      // Cumulative character offsets of bunsetsu boundaries and of CCG word
      // boundaries; both segmentations must cover the same character span.
      val bunsetsuSegCharIdxs: List[Int] = bunsetsuSentence.map { _._1.size }.scanLeft(0)(_+_).tail // 5 10 ...
      val ccgWordSegCharIdxs: List[Int] = ccgSentence.wordSeq.toList.map { _.v.size }.scanLeft(0)(_+_).tail // 2 5 7 10 ...
      assert(bunsetsuSegCharIdxs.last == ccgWordSegCharIdxs.last)
      // Word indices at which each bunsetsu ends.
      val bunsetsuSegWordIdxs: List[Int] = ccgWordSegCharIdxs.zipWithIndex.foldLeft((List[Int](), 0)) { // 1 3 ...
        case ((segWordIdxs, curBunsetsuIdx), (wordIdx, i)) =>
          if (wordIdx >= bunsetsuSegCharIdxs(curBunsetsuIdx)) (i :: segWordIdxs, curBunsetsuIdx + 1)
          else (segWordIdxs, curBunsetsuIdx) // wait until wordIdx exceeds the next bunsetsu segment
      }._1.reverse
      // Slice the CCG word/POS sequences into Bunsetsu spans using consecutive
      // end-of-bunsetsu word indices.
      val bunsetsuSeq = bunsetsuSegWordIdxs.zip(-1 :: bunsetsuSegWordIdxs).map { case (bunsetsuIdx, prevIdx) =>
        val offset = prevIdx + 1
        Bunsetsu(offset,
          ccgSentence.wordSeq.slice(offset, bunsetsuIdx + 1),
          ccgSentence.posSeq.slice(offset, bunsetsuIdx + 1))
      }
      ParsedBunsetsuSentence(bunsetsuSeq, bunsetsuSentence.map { _._2 })
    }
  }
}
| sakabar/jigg | src/main/scala/jigg/nlp/ccg/lexicon/CabochaReader.scala | Scala | apache-2.0 | 2,860 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package integration.interpreter.scala
import java.io.{ByteArrayOutputStream, OutputStream}
import org.apache.toree.annotations.SbtForked
import org.apache.toree.global.StreamState
import org.apache.toree.interpreter._
import org.apache.toree.kernel.api.KernelLike
import org.apache.toree.kernel.interpreter.scala.ScalaInterpreter
import org.apache.toree.utils.{MultiOutputStream, TaskManager}
import org.scalatest.mock.MockitoSugar
import org.scalatest.{BeforeAndAfter, FunSpec, Matchers}
/**
 * Integration tests verifying that ScalaInterpreter#addJars makes classes from
 * external jars importable and runnable, both for single and multiple jars,
 * and that interpreter output is routed through the redirected stream.
 */
@SbtForked
class AddExternalJarMagicSpecForIntegration
  extends FunSpec with Matchers with MockitoSugar with BeforeAndAfter
{
  // Captures everything the interpreter prints so tests can assert on it.
  private val outputResult = new ByteArrayOutputStream()
  private var interpreter: Interpreter = _
  before {
    interpreter = new ScalaInterpreter {
      override protected def bindKernelVariable(kernel: KernelLike): Unit = { }
    }
    // interpreter.start()
    interpreter.init(mock[KernelLike])
    StreamState.setStreams(outputStream = outputResult)
  }
  after {
    interpreter.stop()
    outputResult.reset()
  }
  describe("ScalaInterpreter") {
    describe("#addJars") {
      it("should be able to load an external jar") {
        val testJarUrl = this.getClass.getClassLoader.getResource("TestJar.jar")
        //
        // NOTE: This can be done with any jar. I have tested it previously by
        // downloading jgoodies, placing it in /tmp/... and loading it.
        //
        // Should fail since jar was not added to paths
        interpreter.interpret(
          "import com.ibm.testjar.TestClass")._1 should be (Results.Error)
        // Add jar to paths
        interpreter.addJars(testJarUrl)
        // Should now succeed
        interpreter.interpret(
          "import com.ibm.testjar.TestClass")._1 should be (Results.Success)
        // Should now run
        interpreter.interpret(
          """println(new TestClass().sayHello("Chip"))"""
        ) should be ((Results.Success, Left("")))
        outputResult.toString should be ("Hello, Chip\n")
      }
      it("should support Scala jars") {
        val testJarUrl = this.getClass.getClassLoader.getResource("ScalaTestJar.jar")
        // Should fail since jar was not added to paths
        interpreter.interpret(
          "import com.ibm.scalatestjar.TestClass")._1 should be (Results.Error)
        // Add jar to paths
        interpreter.addJars(testJarUrl)
        // Should now succeed
        interpreter.interpret(
          "import com.ibm.scalatestjar.TestClass")._1 should be (Results.Success)
        // Should now run
        interpreter.interpret(
          """println(new TestClass().runMe())"""
        ) should be ((Results.Success, Left("")))
        outputResult.toString should be ("You ran me!\n")
      }
      it("should be able to add multiple jars at once") {
        val testJar1Url =
          this.getClass.getClassLoader.getResource("TestJar.jar")
        val testJar2Url =
          this.getClass.getClassLoader.getResource("TestJar2.jar")
        // val interpreter = new ScalaInterpreter(List(), mock[OutputStream])
        //   with StandardSparkIMainProducer
        //   with StandardTaskManagerProducer
        //   with StandardSettingsProducer
        // interpreter.start()
        // Should fail since jars were not added to paths
        interpreter.interpret(
          "import com.ibm.testjar.TestClass")._1 should be (Results.Error)
        interpreter.interpret(
          "import com.ibm.testjar2.TestClass")._1 should be (Results.Error)
        // Add jars to paths
        interpreter.addJars(testJar1Url, testJar2Url)
        // Should now succeed
        interpreter.interpret(
          "import com.ibm.testjar.TestClass")._1 should be (Results.Success)
        interpreter.interpret(
          "import com.ibm.testjar2.TestClass")._1 should be (Results.Success)
        // Should now run
        interpreter.interpret(
          """println(new com.ibm.testjar.TestClass().sayHello("Chip"))"""
        ) should be ((Results.Success, Left("")))
        outputResult.toString should be ("Hello, Chip\n")
        outputResult.reset()
        interpreter.interpret(
          """println(new com.ibm.testjar2.TestClass().CallMe())"""
        ) should be ((Results.Success, Left("")))
        outputResult.toString should be ("3\n")
      }
      it("should be able to add multiple jars in consecutive calls to addjar") {
        val testJar1Url =
          this.getClass.getClassLoader.getResource("TestJar.jar")
        val testJar2Url =
          this.getClass.getClassLoader.getResource("TestJar2.jar")
        // val interpreter = new ScalaInterpreter(List(), mock[OutputStream])
        //   with StandardSparkIMainProducer
        //   with StandardTaskManagerProducer
        //   with StandardSettingsProducer
        // interpreter.start()
        // Should fail since jars were not added to paths
        interpreter.interpret(
          "import com.ibm.testjar.TestClass")._1 should be (Results.Error)
        interpreter.interpret(
          "import com.ibm.testjar2.TestClass")._1 should be (Results.Error)
        // Add jars to paths
        interpreter.addJars(testJar1Url)
        interpreter.addJars(testJar2Url)
        // Should now succeed
        interpreter.interpret(
          "import com.ibm.testjar.TestClass")._1 should be (Results.Success)
        interpreter.interpret(
          "import com.ibm.testjar2.TestClass")._1 should be (Results.Success)
        // Should now run
        interpreter.interpret(
          """println(new com.ibm.testjar.TestClass().sayHello("Chip"))"""
        ) should be ((Results.Success, Left("")))
        outputResult.toString should be ("Hello, Chip\n")
        outputResult.reset()
        interpreter.interpret(
          """println(new com.ibm.testjar2.TestClass().CallMe())"""
        ) should be ((Results.Success, Left("")))
        outputResult.toString should be ("3\n")
      }
      // Todo: rebinding is kinda finicky in Scala 2.11
      ignore("should not have issues with previous variables") {
        val testJar1Url =
          this.getClass.getClassLoader.getResource("TestJar.jar")
        val testJar2Url =
          this.getClass.getClassLoader.getResource("TestJar2.jar")
        // Add a jar, which reinitializes the symbols
        interpreter.addJars(testJar1Url)
        interpreter.interpret(
          """
            |val t = new com.ibm.testjar.TestClass()
          """.stripMargin)._1 should be (Results.Success)
        // Add a second jar, which reinitializes the symbols and breaks the
        // above variable
        interpreter.addJars(testJar2Url)
        interpreter.interpret(
          """
            |def runMe(testClass: com.ibm.testjar.TestClass) =
            |testClass.sayHello("Hello")
          """.stripMargin)._1 should be (Results.Success)
        // This line should NOT explode if variable is rebound correctly
        // otherwise you get the error of
        //
        // Message: <console>:16: error: type mismatch;
        // found   : com.ibm.testjar.com.ibm.testjar.com.ibm.testjar.com.ibm.
        //           testjar.com.ibm.testjar.TestClass
        // required: com.ibm.testjar.com.ibm.testjar.com.ibm.testjar.com.ibm.
        //           testjar.com.ibm.testjar.TestClass
        // runMe(t)
        //       ^
        val ans = interpreter.interpret(
          """
            |runMe(t)
          """.stripMargin)
        ans._1 should be (Results.Success)
      }
    }
  }
}
| chipsenkbeil/incubator-toree | scala-interpreter/src/test/scala/integration/interpreter/scala/AddExternalJarMagicSpecForIntegration.scala | Scala | apache-2.0 | 8,311 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.contrib.std
import java.util.UUID
import scalaz.{Order, Ordering, Show}
object uuid {

  /** Total order for UUIDs, delegating to `java.util.UUID#compareTo`. */
  implicit val uuidOrder: Order[UUID] =
    new Order[UUID] {
      def order(x: UUID, y: UUID): Ordering = Ordering.fromInt(x compareTo y)
    }

  /** Renders a UUID using its canonical hyphenated string form. */
  implicit val showUUID: Show[UUID] = Show.showFromToString
}
| slamdata/quasar | foundation/src/main/scala/quasar/contrib/std/uuid.scala | Scala | apache-2.0 | 918 |
/* __ *\
** ________ ___ / / ___ __ ____ Scala.js Test Suite **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \ http://scala-js.org/ **
** /____/\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\* */
package org.scalajs.testsuite.javalib
import org.scalajs.jasminetest.JasmineTest
import scala.scalajs.js
/**
 * Tests for the `java.lang.Character` implementation of Scala.js.
 *
 * The large positive/negative character samples were originally generated
 * against the JDK (Unicode 7.0.0). They are kept here as plain `Seq[Char]`
 * data and exercised in loops, instead of hundreds of near-identical
 * `expect(...)` lines, so the lists stay easy to scan and extend. The
 * characters themselves are unchanged from the generated originals.
 */
object CharacterTest extends JasmineTest {

  describe("java.lang.Character") {

    it("should provide `isISOControl`") {
      // The ISO control characters are U+0000..U+001F and U+007F..U+009F.
      val isoControlChars =
        (('\u0000' to '\u001F') ++ ('\u007F' to '\u009F')).map(_.toInt).toSet
      isoControlChars foreach { c =>
        expect(Character.isISOControl(c)).toEqual(true)
      }

      // Any other code point (including negative ints and values outside the
      // Char range) must not be reported as an ISO control character.
      val randomInts = List.fill(100)(scala.util.Random.nextInt)
      ((-1000 to 1000) ++ randomInts).filterNot(isoControlChars) foreach { c =>
        expect(Character.isISOControl(c)).toEqual(false)
      }
    }

    it("should provide `digit`") {
      expect(Character.digit('a', 16)).toEqual(10)
      expect(Character.digit('}', 5)).toEqual(-1)  // not a digit in any radix
      expect(Character.digit('1', 50)).toEqual(-1) // radix out of range (max 36)
      expect(Character.digit('1', 36)).toEqual(1)
      expect(Character.digit('Z', 36)).toEqual(35)
      expect(Character.digit('\uFF22', 20)).toEqual(11) // fullwidth Latin 'B'
    }

    it("should provide `forDigit`") {
      // Ported from https://github.com/gwtproject/gwt/blob/master/user/test/com/google/gwt/emultest/java/lang/CharacterTest.java
      // `forDigit` and `digit` must be inverses for every valid digit value.
      for (i <- 0 until 36) {
        expect(Character.digit(Character.forDigit(i, 36), 36)).toEqual(i)
      }
      expect(Character.forDigit(9, 10) == '9').toBeTruthy
    }

    it("should provide isDigit") {
      Seq('0', '5', '9') foreach { c =>
        expect(Character.isDigit(c)).toBeTruthy
      }
      Seq('a', 'z', ' ') foreach { c =>
        expect(Character.isDigit(c)).toBeFalsy
      }
    }

    it("should provide `compareTo`") {
      def compare(x: Char, y: Char): Int =
        new Character(x).compareTo(new Character(y))

      expect(compare('0', '5')).toBeLessThan(0)
      expect(compare('o', 'g')).toBeGreaterThan(0)
      expect(compare('A', 'a')).toBeLessThan(0)
      expect(compare('b', 'b')).toEqual(0)
    }

    it("should be a Comparable") {
      // Same expectations as `compareTo`, but routed through the erased
      // Comparable interface.
      def compare(x: Any, y: Any): Int =
        x.asInstanceOf[Comparable[Any]].compareTo(y)

      expect(compare('0', '5')).toBeLessThan(0)
      expect(compare('o', 'g')).toBeGreaterThan(0)
      expect(compare('A', 'a')).toBeLessThan(0)
      expect(compare('b', 'b')).toEqual(0)
    }

    it("should provide isIdentifierIgnorable") {
      // Ignorable control characters: U+0000..U+0008, U+000E..U+001B and
      // U+007F..U+009F.
      for (c <- ('\u0000' to '\u0008') ++ ('\u000E' to '\u001B') ++ ('\u007F' to '\u009F'))
        expect(Character.isIdentifierIgnorable(c)).toBeTruthy

      // Exhaustive list of Cf category. Unicode 7.0.0
      val formatChars = Seq(
          '\u00AD', '\u0600', '\u0601', '\u0602', '\u0603', '\u0604', '\u0605',
          '\u061C', '\u06DD', '\u070F', '\u180E', '\u200B', '\u200C', '\u200D',
          '\u200E', '\u200F', '\u202A', '\u202B', '\u202C', '\u202D', '\u202E',
          '\u2060', '\u2061', '\u2062', '\u2063', '\u2064', '\u2066', '\u2067',
          '\u2068', '\u2069', '\u206A', '\u206B', '\u206C', '\u206D', '\u206E',
          '\u206F', '\uFEFF', '\uFFF9', '\uFFFA', '\uFFFB')
      formatChars foreach { c =>
        expect(Character.isIdentifierIgnorable(c)).toBeTruthy
      }
      // BUG in JDK? 17B4 should be "Mn", Java says "Cf" (U+17B4 excluded above)

      // Randomly generated negatives.
      val notIgnorable = Seq(
          '\u745a', '\ub445', '\ub23a', '\ub029', '\ufb5c', '\u1b67', '\u943b',
          '\ue766', '\uad12', '\ub80b', '\u7341', '\ubc73', '\uabb9', '\ub34b',
          '\u1063', '\u272f', '\u3801', '\u53a6', '\u2ec2', '\u540c', '\uc85f',
          '\ud2c8', '\u551b', '\uc0a1', '\ud25a', '\u2b98', '\u398b', '\ubc77',
          '\u54cc', '\uc9a0', '\ud10f', '\uf7e1', '\u0f29', '\uafcd', '\uf187',
          '\u6287', '\uacb6', '\uff99', '\ub59e', '\uf630', '\ufaec', '\ua7d7',
          '\u3eab', '\u54a5', '\u393a', '\uc621', '\u766c', '\ud64c', '\u8beb',
          '\u44e2', '\ub6f6', '\u58b6', '\u3bad', '\u3c28', '\ufbfd', '\u585f',
          '\u7227', '\ucea7', '\u2c82', '\u686d', '\u120d', '\uf3db', '\u320a',
          '\ud96e', '\u85eb', '\u9648', '\u08a4', '\u9db7', '\u82c7', '\ufe12',
          '\u0eaf', '\u96dc', '\u3a2a', '\uc72e', '\u3745', '\ubcf9', '\u5f66',
          '\u9be1', '\ud81d', '\u3ca3', '\u3e82', '\u7ce4', '\u33ca', '\ue725',
          '\uef49', '\ue2cf', '\udcf0', '\u5f2e', '\u2a63', '\ud2d2', '\u8023',
          '\ua957', '\u10ba', '\uf85f', '\uc40d', '\u2509', '\u0d8e', '\u9db8',
          '\u824d', '\u5670', '\u6005', '\ub8de', '\uff5c', '\ub36d', '\u0cf2',
          '\u82f6', '\u9206', '\u95e1', '\u990f', '\u9fc7', '\udffb', '\u0ecb',
          '\u7563', '\uf0ff', '\u6b2e', '\u894c', '\u8f06', '\uffa9', '\u37b0',
          '\u3e04')
      notIgnorable foreach { c =>
        expect(Character.isIdentifierIgnorable(c)).toBeFalsy
      }
    }

    it("should provide isUnicodeIdentifierStart") {
      // 100 randomly generated positives.
      val identifierStarts = Seq(
          '\ud6d5', '\u3f9c', '\u3a40', '\u53af', '\u1636', '\u4884', '\ucba4',
          '\u1ee4', '\u6dec', '\u10d4', '\u631f', '\u3661', '\u55f8', '\ub4ef',
          '\ud509', '\u65b5', '\u316b', '\ub270', '\u7f0f', '\uff84', '\u11cc',
          '\u0294', '\u51b1', '\u9ae2', '\u304a', '\ud5c7', '\u3b4b', '\u5e42',
          '\u51fc', '\uc148', '\uc1ae', '\u7372', '\uc116', '\u5d29', '\u8753',
          '\u50f8', '\u3f9d', '\u1f44', '\ucd43', '\u9126', '\u8d2e', '\u4f5c',
          '\u66d7', '\ua30b', '\u140b', '\ub264', '\u7b35', '\u15e4', '\ubb37',
          '\u34e3', '\uac3e', '\ubd0e', '\ub641', '\u1580', '\u30c1', '\ub0c8',
          '\u8681', '\u7f14', '\u4142', '\u56c1', '\u0444', '\u9964', '\ub5c0',
          '\u43d8', '\u479e', '\u0853', '\ube08', '\u9346', '\uf9c1', '\u0e8a',
          '\u212c', '\u810c', '\u8089', '\u1331', '\ua5f7', '\u5e5e', '\u613b',
          '\u34a7', '\ud15b', '\uc1fc', '\u92f1', '\u3ae6', '\ufceb', '\u7584',
          '\ufe98', '\ubb23', '\u7961', '\u4445', '\u4d5f', '\u61cb', '\u5176',
          '\ub987', '\u906a', '\u4317', '\u93ad', '\u825a', '\u7ff8', '\u533a',
          '\u5617', '\ufcc6')
      identifierStarts foreach { c =>
        expect(Character.isUnicodeIdentifierStart(c)).toBeTruthy
      }

      // Randomly generated negatives.
      val nonStarts = Seq(
          '\ue398', '\ueab6', '\ue7bc', '\uf8ab', '\ue27f', '\uebea', '\ueedc',
          '\uf091', '\u2785', '\u287b', '\uf042', '\u20f9', '\u23d6', '\udc5b',
          '\ued16', '\u1b6b', '\ue7ba', '\uf7fa', '\u2125', '\uea97', '\ue624',
          '\ufbb8', '\u2730', '\udb89', '\ue30d', '\u2e24', '\uf03e', '\uda27',
          '\u28fc', '\u9ffe', '\ude19', '\u0b70', '\uddfc', '\ued53', '\ue8cb',
          '\udccc', '\u00a3', '\u0bed', '\u0c68', '\uf47b', '\u0f96', '\ue9c3',
          '\uf784', '\uef4b', '\udee1', '\u2f61', '\uf622', '\u19f9', '\ud86a',
          '\ued83', '\uf7e4', '\uecce',
          // BUG in JDK? A699 should be "Ll", Java says "Cn" (U+A699 excluded)
          '\uaa5f', '\udf24', '\u2e0e', '\uf322', '\ue137', '\ued19', '\u21ab',
          '\ue972', '\udbf2', '\uf54c', '\u4dd3', '\u2769', '\ue363',
          // BUG in JDK? 1BBB should be "Lo", Java says "Cn" (U+1BBB excluded)
          '\ueae7', '\u2bf3', '\ue704', '\u1c7f', '\uf52b', '\ue9e3', '\u259b',
          '\uf250', '\uf42f', '\ue244', '\u20d9', '\ua881', '\u0ee6', '\u2203',
          '\u0fc7', '\u07fc', '\udb86', '\u2a70', '\u2bb7', '\uecf0', '\ude48',
          '\u0a3b', '\u20b8', '\uf898', '\u23e6', '\ud8ba', '\uda1e', '\udc12',
          '\u2a06', '\u0888', '\ud9ec', '\uf81f', '\uf817')
      nonStarts foreach { c =>
        expect(Character.isUnicodeIdentifierStart(c)).toBeFalsy
      }
    }

    it("should provide isUnicodeIdentifierPart") {
      // 100 randomly generated positives.
      val identifierParts = Seq(
          '\u48d3', '\u0905', '\u8f51', '\u9bcb', '\ud358', '\u1538', '\uffcf',
          '\u83ec', '\u3a89', '\ub63a', '\ufe24', '\u2d62', '\u15ca', '\u4fa4',
          '\u47d1', '\u831c', '\u84e6', '\u7783', '\ua03c', '\u6ecf', '\u147f',
          '\u67a9', '\u8b6c', '\u3410', '\u2cc0', '\ua332', '\u9733', '\u5df3',
          '\u3fd7', '\u6611', '\u55b4', '\u8bc8', '\u6f74', '\u6c97', '\u6a86',
          '\u6000', '\u614f', '\u206e', '\ua801', '\u9edf', '\ub42c', '\u7fcd',
          '\u8a60', '\u182f', '\u5d0a', '\uaf9c', '\u9d4b', '\u5088', '\uc1a6',
          '\ubbe4', '\uad25', '\u4653', '\u8add', '\u3d1c', '\u80a8', '\u810e',
          '\uc1d2', '\ub984', '\u9d13', '\u37c2', '\u13cd', '\u53f9', '\u98b7',
          '\u57f3', '\ub554', '\u0176', '\ua318', '\u9704', '\u8d52', '\u940a',
          '\u0fa5', '\u38d1', '\u3b33', '\u93bb', '\u03bd', '\u4c88', '\ud67d',
          '\ubcbf', '\u3867', '\u4368', '\u8f2d', '\u049a', '\u4c01', '\u5589',
          '\u5e71', '\ua1fd', '\u3a4a', '\uc111', '\ub465', '\u95af', '\ubf2c',
          '\u8488', '\u4317', '\u6b77', '\u8995', '\u7467', '\u16b7', '\u3ca0',
          '\u5332', '\u8654')
      identifierParts foreach { c =>
        expect(Character.isUnicodeIdentifierPart(c)).toBeTruthy
      }

      // Randomly generated negatives.
      val nonParts = Seq(
          '\ua8c8', '\ue3ca', '\uebee', '\u270e', '\uf0ac', '\ue9ec', '\u296a',
          '\u33fd', '\ue5f4', '\ueb01', '\uf38b', '\u2e6f', '\uea69', '\uf155',
          '\u0f0e', '\ueb80', '\ud959', '\ue25e', '\uf566', '\ue4a3', '\uec44',
          '\u3297', '\u3214', '\u1bfd', '\u4dd0', '\uea99', '\u309b', '\uf592',
          '\uf4dd', '\udfaf', '\udd38', '\uf820', '\uaacd', '\uff5b', '\ude36',
          '\ue33b', '\udbce', '\ue1f6', '\uf78a', '\ueb44', '\uebd4', '\u1df7',
          '\u2f10', '\u1cbf', '\u2362', '\uebeb', '\u2ede', '\u221d', '\u2021',
          '\udf41', '\u05f5', '\u24ab', '\uee15', '\uf175', '\uf35c', '\udc7b',
          '\ud883', '\uf341', '\ueec6', '\u2f57', '\uff64', '\ue6a4', '\uec34',
          '\u22a5', '\uf5ac', '\u3360', '\u28b0', '\uf678', '\ue0e4', '\u233f',
          '\u0afa', '\u2013', '\ud7af', '\ud98e', '\ud8a5',
          // BUG in JDK? A79E should be "Lu", Java says "Cn" (U+A79E excluded)
          '\u1806', '\ue07a', '\u2748', '\uabad', '\uec5c', '\ue832',
          // BUG in JDK? 08A9 should be "Lo", Java says "Cn" (U+08A9 excluded)
          '\ue4bd', '\u208a', '\uf840', '\uf570', '\uef1e', '\u2bd4', '\ue385',
          '\udc18', '\u0af0', '\u244a', '\uf01e', '\uf114', '\ue9c4',
          // BUG in JDK? AAF4 should be "Lm", Java says "Cn" (U+AAF4 excluded)
          '\uf7b9', '\udd2f', '\u2d2c')
      nonParts foreach { c =>
        expect(Character.isUnicodeIdentifierPart(c)).toBeFalsy
      }
    }
  }
}
| matthughes/scala-js | test-suite/src/test/scala/org/scalajs/testsuite/javalib/CharacterTest.scala | Scala | bsd-3-clause | 42,234 |
/*
* RobolabSim
* Copyright (C) 2014 Max Leuthaeuser
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see [http://www.gnu.org/licenses/].
*/
package tud.robolab.controller
import tud.robolab.{Config, Boot}
import tud.robolab.view.{Interface, SimulationView}
import tud.robolab.model._
import tud.robolab.utils.TimeUtils
import scala.concurrent._
import ExecutionContext.Implicits.global
import tud.robolab.testing.TestRunner
/** Handles incoming requests and sessions.
*
* See `handleQueryRequest`, `handlePathRequest` and `handleMapRequest`.
*/
object SessionController
{
  /** Pool of all known sessions, mapping each [[tud.robolab.model.Session]]
    * to its optional [[tud.robolab.view.SimulationView]].
    */
  private val sessions = new SessionPool()

  /** If true, run headless: no Swing view is created for new sessions.
    * Initialized from [[tud.robolab.Config]]; mutable so it can be changed at runtime.
    */
  var hide_swing = Config.HIDE_SWING

  /**
   * See [[tud.robolab.model.SessionPool]] for doc.
   * @return all sessions stored via the [[tud.robolab.model.SessionPool]].
   */
  def getSessions: SessionPool = sessions

  /**
   * @return all sessions as a list, taken from the [[tud.robolab.model.SessionPool]].
   */
  def getSessionsAsList: List[Session] = getSessions.all.keys.toList

  /**
   * @return the number of sessions stored.
   */
  def numberOfSessions(): Int = sessions.all.size

  /**
   * NOTE(review): the method name suggests "true if at least one session is stored",
   * but the implementation returns `isEmpty`, i.e. true when NO session is stored.
   * Looks inverted -- confirm against callers before changing either name or body.
   *
   * @return true if no [[tud.robolab.model.Session]] is stored (see note above).
   */
  def hasSessions: Boolean = sessions.all.isEmpty

  /**
   * @param s the [[tud.robolab.model.Session]] you want to get the associated [[tud.robolab.view.SimulationView]] for.
   * @return the [[tud.robolab.view.SimulationView]] the [[tud.robolab.model.Session]] `s` is associated with,
   *         or `None` if the session is unknown or has no view (headless mode).
   */
  def getView(s: Session): Option[SimulationView] = sessions.get(s)

  /**
   * Store (or overwrite) the [[tud.robolab.model.Session]] `s` together with its
   * optional [[tud.robolab.view.SimulationView]] `v`.
   * @param s the new [[tud.robolab.model.Session]] to set
   * @param v the new [[tud.robolab.view.SimulationView]] to set
   */
  def set(
    s: Session,
    v: Option[SimulationView]
  )
  {
    sessions.set(s, v)
  }

  /**
   * @param id the group ID
   * @return the [[tud.robolab.model.Session]] whose client has the given ID `id`, if any.
   */
  def getSession(id: String): Option[Session] = sessions.all.keys.find(_.client.id == id)

  /**
   * @param i the index; must satisfy `0 <= i < numberOfSessions()`.
   * @return the `i`th [[tud.robolab.model.Session]].
   */
  def getSession(i: Int): Session =
  {
    assert(i >= 0 && i < sessions.all.size)
    sessions.all.keys.toSeq(i)
  }

  /**
   * @param id the group ID
   * @return true if a [[tud.robolab.model.Session]] with the given ID `id` is stored, false otherwise.
   */
  def hasSession(id: String): Boolean = sessions.all.keys.exists(_.client.id == id)

  /**
   * @param id the group ID
   * @return true if the [[tud.robolab.model.Session]] with the given ID `id` is blocked;
   *         false if it is not blocked or does not exist.
   */
  private def sessionBlocked(id: String): Boolean = if (hasSession(id)) getSession(id).get.client.blocked else false

  /**
   * Remove the [[tud.robolab.model.Session]] with the given ID `id`, if present.
   * @param id the group ID
   */
  def removeSession(id: String)
  {
    if (hasSession(id)) {
      val s = getSession(id).get
      sessions.remove(s)
    }
  }

  /**
   * @param s the [[tud.robolab.model.Session]] to remove.
   */
  def removeSession(s: Session)
  {
    sessions.remove(s)
  }

  /**
   * Add the given [[tud.robolab.model.Session]] unless one with the same client ID
   * already exists. In headless mode (`hide_swing`) no view is created.
   * @param s the [[tud.robolab.model.Session]] to add.
   */
  def addSession(s: Session)
  {
    if (!hasSession(s.client.id)) {
      val v: Option[SimulationView] = hide_swing match {
        case true => Option.empty
        case false => Option(new SimulationView(s, false))
      }
      sessions.set(s, v)
    }
  }

  /**
   * Create a new [[tud.robolab.model.Session]] for the given group ID and add it
   * to the [[tud.robolab.model.SessionPool]].
   * @param id the group ID
   * @return true if the session was created and stored; false if it already exists,
   *         is blocked, or (with a UI) the new simulation tab was not accepted.
   */
  def addSession(id: String): Boolean =
  {
    if (!hasSession(id) && !sessionBlocked(id)) {
      val s = Session(Client(id))
      hide_swing match {
        case false =>
          val v = new SimulationView(s)
          // Only store the session if the UI accepted the new simulation tab.
          if (Interface.addSimTab(v, id)) {
            sessions.set(s, Option(v))
            return true
          }
        case true =>
          sessions.set(s, Option.empty)
          return true
      }
    }
    false
  }

  /**
   * Block or unblock the [[tud.robolab.model.Session]] for the given group ID,
   * creating a hidden session first if none exists yet.
   * @param id the group ID of the [[tud.robolab.model.Session]] to block or unblock.
   * @param block `true` means block the [[tud.robolab.model.Session]] `s`, false means unblock it.
   */
  def blockSession(
    id: String,
    block: Boolean = true
  )
  {
    if (!hasSession(id)) {
      val s = Session(Client(id))
      val v = new SimulationView(s)
      v.isShown = false
      sessions.set(s, Option(v))
    }
    sessions.block(getSession(id).get, block)
  }

  /**
   * Handle the incoming request, calculating the new robot position
   * and return the appropriate result.
   *
   * @param id the group ID
   * @param r the [[tud.robolab.model.Request]]
   * @return a [[tud.robolab.model.Message]] regarding the result of this call.
   */
  def handleQueryRequest(
    id: String,
    r: Request
  ): Message =
  {
    if (sessionBlocked(id)) return ErrorType.BLOCKED
    if (!hasSession(id)) {
      if (!addSession(id)) {
        return ErrorType.DENIED
      }
    }
    val s = getSession(id).get
    // setRobot returns false for invalid coordinates.
    val err = !s.maze.setRobot(Coordinate(r.x, r.y))
    val token = err match {
      case true => false
      case false => s.maze.getPoint(Coordinate(r.x, r.y)).get.token
    }
    val wayElememt = WayElement(r.x, r.y, token, TimeUtils.nowAsString)
    // The history records every attempt; the way only records valid moves.
    s.addHistoryElement(wayElememt)
    if (err) {
      return ErrorType.INVALID
    }
    else {
      s.addWayElement(wayElememt)
    }
    val n = s.maze.getPoint(Coordinate(r.x, r.y)).get
    val v = sessions.get(s)
    v.foreach(view => {
      view.updateSession()
      // Re-attach the simulation tab if it had been hidden so far.
      if (!view.isShown) {
        view.isShown = true
        Interface.addSimTab(view, s.client.id, ask = false)
      }
    })
    QueryResponseFactory.fromPoint(n)
  }

  /**
   * Handle the incoming request, calculating the full robot path that is known until now
   * and return the appropriate result.
   *
   * @param id the group ID
   * @return a [[tud.robolab.model.Message]] containing the path regarding the result of this call.
   */
  def handlePathRequest(id: String): Message =
  {
    if (!hasSession(id)) return ErrorType.NO_PATH
    if (sessionBlocked(id)) return ErrorType.BLOCKED
    val s = getSession(id).get
    PathResponse(
      s.path.map(p => {
        s.maze.getPoint(Coordinate(p.x, p.y)) match {
          case Some(point) => (Request(p.x, p.y), QueryResponseFactory.fromPoint(point))
          // Way elements are only added for valid coordinates (see handleQueryRequest),
          // so reaching this case indicates an internal inconsistency.
          case None => throw new IllegalArgumentException
        }
      }))
  }

  /**
   * Handle the incoming request, set the new map if possible and return the appropriate result.
   *
   * @param id the group ID
   * @param r the [[tud.robolab.model.MapRequest]]
   * @return a [[tud.robolab.model.Message]] regarding the result of this call.
   */
  def handleMapRequest(
    id: String,
    r: MapRequest
  ): Message =
  {
    if (!hasSession(id)) return ErrorType.NO_PATH
    if (sessionBlocked(id)) return ErrorType.BLOCKED
    val s = getSession(id).get
    MapController.changeMap(r.map, s, getView(s)) match {
      case true => Ok()
      case false => ErrorType.NO_MAP
    }
  }

  /**
   * Handle the incoming request, returning the current history if possible.
   *
   * @param id the group ID
   * @return a [[tud.robolab.model.Message]] containing the history regarding the result of this call.
   */
  def handleHistoryRequest(id: String): Message =
  {
    if (!hasSession(id)) return ErrorType.NO_PATH
    if (sessionBlocked(id)) return ErrorType.BLOCKED
    val s = getSession(id).get
    PathResponse(s.history.map(p => {
      // History may contain invalid coordinates; map those to an empty point.
      val point = s.maze.isValid(Coordinate(p.x, p.y)) match {
        case true => s.maze.getPoint(Coordinate(p.x, p.y)).get
        case false => Point(Seq.empty)
      }
      (Request(p.x, p.y), QueryResponseFactory.fromPoint(point))
    }))
  }

  /**
   * Handle the incoming request, return the appropriate test result.
   *
   * @param id the group ID
   * @return a [[tud.robolab.model.Message]] regarding the result of this call.
   */
  def handleTestRequest(id: String): Message =
  {
    if (!hasSession(id)) return ErrorType.NO_ID
    if (sessionBlocked(id)) return ErrorType.BLOCKED
    val s = getSession(id).get
    val st = s.test.status
    TestMessage(s.test.result, st match {
      case TestResult.SUCCESS => true
      case TestResult.FAILED => false
    })
  }

  /**
   * Start a test run for the given group ID asynchronously (on the implicit global
   * execution context) and acknowledge the request immediately.
   *
   * @param id the group ID
   * @return `Ok()`, or an error if the ID is unknown or blocked.
   */
  def handleRunTestRequest(
    id: String
  ): Message =
  {
    if (!hasSession(id)) return ErrorType.NO_ID
    if (sessionBlocked(id)) return ErrorType.BLOCKED
    Future {
      blocking {
        TestRunner.run(id)
        Boot.log.info("Done [Test] run for ID [%s]".format(id))
      }
    }
    Ok()
  }

  /**
   * Reset the session for the given group ID: clear history and way, restore the
   * default maze and a fresh test state.
   *
   * @param id the group ID
   * @return `Ok()`, or an error if the ID is unknown or blocked.
   */
  def handleResetRequest(
    id: String
  ): Message =
  {
    if (!hasSession(id)) return ErrorType.NO_ID
    if (sessionBlocked(id)) return ErrorType.BLOCKED
    val s = getSession(id).get
    s.clearHistory()
    s.clearWay()
    s.maze = Maze.default
    s.test = Test()
    Ok()
  }

  /**
   * Remove the session for the given group ID.
   *
   * @param id the group ID
   * @return `Ok()`, or an error if the ID is unknown.
   */
  def handleRemoveIDRequest(
    id: String
  ): Message =
  {
    if (!hasSession(id)) return ErrorType.NO_ID
    removeSession(id)
    Ok()
  }
}
| max-leuthaeuser/RobolabSim | server/src/main/scala/tud/robolab/controller/SessionController.scala | Scala | gpl-3.0 | 9,778 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.schedulers
import java.util.concurrent.{ScheduledExecutorService, TimeUnit}
import monix.execution.{Cancelable, Features, Scheduler, UncaughtExceptionReporter, ExecutionModel => ExecModel}
import scala.concurrent.ExecutionContext
import monix.execution.internal.{InterceptRunnable, ScheduledExecutors}
/** An `AsyncScheduler` schedules tasks to happen in the future with the
* given `ScheduledExecutorService` and the tasks themselves are executed on
* the given `ExecutionContext`.
*/
final class AsyncScheduler private (
  scheduler: ScheduledExecutorService,
  ec: ExecutionContext,
  val executionModel: ExecModel,
  r: UncaughtExceptionReporter)
  extends ReferenceScheduler with BatchingScheduler {

  protected def executeAsync(runnable: Runnable): Unit = {
    // If no dedicated reporter was given (`r eq null`), or the reporter is the
    // very same object as the execution context (reference equality), then `ec`
    // already handles failure reporting and the runnable can be submitted as-is;
    // otherwise wrap it so uncaught exceptions are routed to this.reportFailure.
    if (((r: AnyRef) eq ec) || (r eq null)) ec.execute(runnable)
    else ec.execute(InterceptRunnable(runnable, this))
  }

  // Delayed execution is delegated to the underlying ScheduledExecutorService.
  override def scheduleOnce(initialDelay: Long, unit: TimeUnit, r: Runnable): Cancelable =
    ScheduledExecutors.scheduleOnce(this, scheduler)(initialDelay, unit, r)

  // Falls back to the execution context's reporter when no explicit one was given.
  override def reportFailure(t: Throwable): Unit =
    if (r eq null) ec.reportFailure(t)
    else r.reportFailure(t)

  // Returns a copy that only differs in its ExecutionModel.
  override def withExecutionModel(em: ExecModel): AsyncScheduler =
    new AsyncScheduler(scheduler, ec, em, r)

  override val features: Features =
    Features(Scheduler.BATCHING)
}
object AsyncScheduler {
  /** Builder for [[AsyncScheduler]].
    *
    * @param schedulerService is the Java `ScheduledExecutorService` that will take
    *        care of scheduling tasks for execution with a delay.
    * @param ec is the execution context that will execute all runnables
    * @param executionModel is the preferred
    *        [[monix.execution.ExecutionModel ExecutionModel]],
    *        a guideline for run-loops and producers of data.
    * @param reporter is the [[UncaughtExceptionReporter]] that logs uncaught
    *        exceptions; may be `null`, in which case the scheduler falls back
    *        to `ec.reportFailure` (see `AsyncScheduler.reportFailure`).
    */
  def apply(
    schedulerService: ScheduledExecutorService,
    ec: ExecutionContext,
    executionModel: ExecModel,
    reporter: UncaughtExceptionReporter = null): AsyncScheduler =
    new AsyncScheduler(schedulerService, ec, executionModel, reporter)
}
| alexandru/monifu | monix-execution/jvm/src/main/scala/monix/execution/schedulers/AsyncScheduler.scala | Scala | apache-2.0 | 2,887 |
/*
* Copyright 1998-2017 Linux.org.ru
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ru.org.linux.user
import java.sql.Timestamp
import java.util.Date
import com.sksamuel.elastic4s.ElasticDsl._
import com.sksamuel.elastic4s.TcpClient
import com.sksamuel.elastic4s.searches.RichSearchResponse
import com.sksamuel.elastic4s.searches.queries.BoolQueryDefinition
import com.typesafe.scalalogging.StrictLogging
import org.elasticsearch.ElasticsearchException
import org.elasticsearch.search.aggregations.bucket.terms.Terms
import org.elasticsearch.search.aggregations.metrics.stats.Stats
import org.joda.time.DateTime
import org.springframework.stereotype.Service
import ru.org.linux.search.ElasticsearchIndexService.MessageIndexTypes
import ru.org.linux.section.{Section, SectionService}
import ru.org.linux.user.UserStatisticsService._
import scala.beans.BeanProperty
import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent._
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}
@Service
class UserStatisticsService(
  userDao: UserDao,
  ignoreListDao: IgnoreListDao,
  sectionService: SectionService,
  elastic: TcpClient
) extends StrictLogging {
  /** Builds the complete statistics block for a user profile.
    *
    * Comment and topic lookups run against Elasticsearch concurrently and are
    * awaited for at most [[UserStatisticsService.ElasticTimeout]]; if a lookup
    * fails or times out, the returned [[UserStats]] is flagged `incomplete`
    * instead of failing the whole request.
    */
  def getStats(user:User): UserStats = {
    val commentCountFuture = countComments(user)
    val topicsFuture = topicStats(user)

    val ignoreCount = ignoreListDao.getIgnoreStat(user)
    val (firstComment, lastComment) = userDao.getFirstAndLastCommentDate(user)

    try {
      // Best effort: wait for both ES futures, but never longer than the timeout.
      Await.ready(Future.sequence(Seq(commentCountFuture, topicsFuture)), ElasticTimeout)
    } catch {
      case _:TimeoutException =>
        logger.warn("Stat lookup timed out")
    }

    // Extract the completed futures' values (None if still pending or failed),
    // logging any failure.
    val commentCount = extractValue(commentCountFuture.value) {
      logger.warn("Unable to count comments", _)
    }

    val topicStat = extractValue(topicsFuture.value) {
      logger.warn("Unable to count topics", _)
    }

    // Resolve the raw (section name, count) pairs into Section objects,
    // sorted by section id for stable display.
    val topicsBySection = topicStat.map(_.sectionCount).getOrElse(Seq()).map(
      e => PreparedUsersSectionStatEntry(sectionService.getSectionByName(e._1), e._2)
    ).sortBy(_.section.getId)

    UserStats(
      ignoreCount,
      commentCount.getOrElse(0L),
      commentCount.isEmpty || topicStat.isEmpty,
      firstComment,
      lastComment,
      topicStat.flatMap(_.firstTopic).map(_.toDate).orNull,
      topicStat.flatMap(_.lastTopic).map(_.toDate).orNull,
      topicsBySection.asJava
    )
  }

  // Converts an Elasticsearch-side timeout into a failed future so callers
  // never see silently truncated results.
  private def timeoutHandler(response: RichSearchResponse): Future[RichSearchResponse] = {
    if (response.isTimedOut) {
      Future failed new RuntimeException("ES Request timed out")
    } else {
      Future successful response
    }
  }

  // Base search over all message indices: aggregations/counts only (size 0).
  private def statSearch = search(MessageIndexTypes) size 0 timeout ElasticTimeout

  /** Counts all comments authored by the given user. */
  private def countComments(user: User): Future[Long] = {
    try {
      elastic execute {
        val root = boolQuery() filter (
          termQuery("author", user.getNick),
          termQuery("is_comment", true))

        statSearch query root
      } flatMap timeoutHandler map { _.totalHits }
    } catch {
      // The elastic4s client may throw synchronously; surface it as a failed future.
      case ex: ElasticsearchException => Future.failed(ex)
    }
  }

  /** Fetches first/last topic dates and per-section topic counts for the user. */
  private def topicStats(user: User): Future[TopicStats] = {
    try {
      elastic execute {
        val root = new BoolQueryDefinition filter (
          termQuery("author", user.getNick),
          termQuery("is_comment", false))

        statSearch query root aggs(
          statsAggregation("topic_stats") field "postdate",
          termsAggregation("sections") field "section")
      } flatMap timeoutHandler map { response ⇒
        val topicStatsResult = response.aggregations.getAs[Stats]("topic_stats")
        val sectionsResult = response.aggregations.getAs[Terms]("sections")

        // The min/max of the stats aggregation are meaningless when no topics matched.
        val (firstTopic, lastTopic) = if (topicStatsResult.getCount > 0) {
          (Some(new DateTime(topicStatsResult.getMin.toLong)), Some(new DateTime(topicStatsResult.getMax.toLong)))
        } else {
          (None, None)
        }

        val sections = sectionsResult.getBuckets.asScala.map { bucket =>
          (bucket.getKeyAsString, bucket.getDocCount)
        }

        TopicStats(firstTopic, lastTopic, sections)
      }
    } catch {
      case ex: ElasticsearchException => Future.failed(ex)
    }
  }
}
object UserStatisticsService {
  /** Upper bound for waiting on the Elasticsearch statistic lookups. */
  val ElasticTimeout = 1.second

  /** Unwraps an optional computation result: successes become `Some`, failures
    * are reported through `f` and collapse to `None`, and a missing value
    * (future not completed) stays `None`.
    */
  private def extractValue[T](value: Option[Try[T]])(f: (Throwable => Unit)): Option[T] =
    value.flatMap {
      case Success(result) =>
        Some(result)
      case Failure(ex) =>
        f(ex)
        None
    }

  /** Aggregated topic statistics: the date range of the user's topics plus
    * per-section topic counts as (section name, count) pairs.
    */
  case class TopicStats(firstTopic: Option[DateTime], lastTopic: Option[DateTime], sectionCount: Seq[(String, Long)])
}
/** Aggregated, view-ready statistics for a single user.
  *
  * `incomplete` is true when the comment or topic lookup failed or timed out,
  * i.e. `commentCount` and the topic fields may be missing or zero.
  * `firstTopic`/`lastTopic` are `null` when unknown (fields are exposed as
  * bean properties for Java-side view templates).
  */
case class UserStats (
  @BeanProperty ignoreCount: Int,
  @BeanProperty commentCount: Long,
  @BeanProperty incomplete: Boolean,
  @BeanProperty firstComment: Timestamp,
  @BeanProperty lastComment: Timestamp,
  @BeanProperty firstTopic: Date,
  @BeanProperty lastTopic: Date,
  @BeanProperty topicsBySection: java.util.List[PreparedUsersSectionStatEntry]
)
/** Topic count for one section, resolved from the raw (section name, count)
  * pair returned by the Elasticsearch terms aggregation.
  */
case class PreparedUsersSectionStatEntry (
  @BeanProperty section: Section,
  @BeanProperty count: Long
)
| kloun/lorsource | src/main/scala/ru/org/linux/user/UserStatisticsService.scala | Scala | apache-2.0 | 5,711 |
package x7c1.wheat.harvest
import sbt.{file, richFile}
import x7c1.wheat.harvest.layout.LayoutLocations
import x7c1.wheat.harvest.values.ValuesLocations
/** Sample wiring for the harvest code generators: fixed package names and
  * project directories for the sample starter/glue projects, combined into
  * [[LayoutLocations]] and [[ValuesLocations]].
  */
object SampleLocations {
  def packages = Packages(
    starter = "x7c1.wheat.sample",
    starterLayout = "x7c1.wheat.sample.res.layout",
    starterValues = "x7c1.wheat.sample.res.values",
    glueLayout = "x7c1.wheat.sample.glue.res.layout",
    glueValues = "x7c1.wheat.sample.glue.res.values"
  )
  def directories = Directories(
    starter = file("sample-project") / "sample-starter",
    glue = file("sample-project") / "sample-glue"
  )
  // Layout and values resource locations derived from the same packages/directories.
  def layout = LayoutLocations(HarvestLocations(
    packages = packages,
    directories = directories
  ))
  def values = ValuesLocations(HarvestLocations(
    packages = packages,
    directories = directories
  ))
}
| x7c1/Wheat | wheat-harvest/src/test/scala/x7c1/wheat/harvest/SampleLocations.scala | Scala | mit | 825 |
/*
*
* * Copyright 2020 Lenses.io.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package com.datamountaineer.streamreactor.common.config.base.traits
import com.datamountaineer.streamreactor.common.config.base.const.TraitConfigConst._
/**
* Created by andrew@datamountaineer.com on 31/07/2017.
* stream-reactor
*/
/** Mixin exposing the SSL-related configuration keys and value accessors
  * for a connector, namespaced under the concrete connector's prefix.
  */
trait SSLSettings extends BaseSettings {
  // Fully-qualified configuration key names, built from the connector prefix
  // and the shared suffix constants.
  val trustStorePath: String = s"$connectorPrefix.$TRUSTSTORE_PATH_SUFFIX"
  val trustStorePass: String = s"$connectorPrefix.$TRUSTSTORE_PASS_SUFFIX"
  val keyStorePath: String = s"$connectorPrefix.$KEYSTORE_PATH_SUFFIX"
  val keyStorePass: String = s"$connectorPrefix.$KEYSTORE_PASS_SUFFIX"
  val certificates: String = s"$connectorPrefix.$CERTIFICATES_SUFFIX"
  val certificateKeyChain: String = s"$connectorPrefix.$CERTIFICATE_KEY_CHAIN_SUFFIX"

  // Accessors delegating to the BaseSettings lookup methods for the keys above.
  def getTrustStorePath = getString(trustStorePath)

  def getTrustStorePass = getPassword(trustStorePass)

  def getKeyStorePath = getString(keyStorePath)

  def getKeyStorePass = getPassword(keyStorePass)

  def getCertificates = getList(certificates)

  def getCertificateKeyChain = getString(certificateKeyChain)
}
| datamountaineer/stream-reactor | kafka-connect-common/src/main/scala/com/datamountaineer/streamreactor/common/config/base/traits/SSLSettings.scala | Scala | apache-2.0 | 1,685 |
/*
* Copyright 2012 Eligotech BV.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.eligosource.eventsourced.example.service
import scala.concurrent.stm.Ref
import akka.actor.Actor
import org.eligosource.eventsourced.core._
import org.eligosource.eventsourced.example.domain._
/** Read-side service exposing a snapshot of the invoice-item statistics. */
class StatisticsService(statisticsRef: Ref[Map[String, Int]]) {
  // Atomic single-operation read of the current map (invoice id -> item count).
  def statistics = statisticsRef.single.get
}
/** Actor that maintains per-invoice item counts.
  *
  * Every [[InvoiceItemAdded]] event increments the counter for the
  * corresponding invoice id inside an STM transaction.
  */
class StatisticsProcessor(statisticsRef: Ref[Map[String, Int]]) extends Actor {
  def receive = {
    case InvoiceItemAdded(id, _) =>
      statisticsRef.single.transform { statistics =>
        // Missing entries start at 0, so a first event stores 1.
        statistics.updated(id, statistics.getOrElse(id, 0) + 1)
      }
  }
}
| eligosource/eventsourced-example | src/main/scala/org/eligosource/eventsourced/example/service/StatisticsService.scala | Scala | apache-2.0 | 1,259 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import scala.annotation.tailrec
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst._
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.encoders.OuterScopes
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.SubExprUtils._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.expressions.objects.{LambdaVariable, MapObjects, NewInstance, UnresolvedMapObjects}
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, _}
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.catalyst.trees.TreeNodeRef
import org.apache.spark.sql.catalyst.util.toPrettySQL
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
/**
* A trivial [[Analyzer]] with a dummy [[SessionCatalog]] and [[EmptyFunctionRegistry]].
* Used for testing when all relations are already filled in and the analyzer needs only
* to resolve attribute references.
*/
object SimpleAnalyzer extends Analyzer(
  new SessionCatalog(
    new InMemoryCatalog,
    EmptyFunctionRegistry,
    new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true)) {
    // No-op: the dummy catalog never needs to persist databases in tests.
    override def createDatabase(dbDefinition: CatalogDatabase, ignoreIfExists: Boolean) {}
  },
  new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true))
/**
* Provides a way to keep state during the analysis, this enables us to decouple the concerns
* of analysis environment from the catalog.
*
* Note this is thread local.
*
* @param defaultDatabase The default database used in the view resolution, this overrules the
* current catalog database.
* @param nestedViewDepth The nested depth in the view resolution, this enables us to limit the
* depth of nested views.
*/
case class AnalysisContext(
defaultDatabase: Option[String] = None,
nestedViewDepth: Int = 0)
object AnalysisContext {
  // Thread-local so concurrent analyses do not see each other's context.
  private val value = new ThreadLocal[AnalysisContext]() {
    override def initialValue: AnalysisContext = AnalysisContext()
  }

  def get: AnalysisContext = value.get()
  private def set(context: AnalysisContext): Unit = value.set(context)

  /** Runs `f` with the given default database and an incremented view-nesting
    * depth, restoring the previous context afterwards even if `f` throws.
    */
  def withAnalysisContext[A](database: Option[String])(f: => A): A = {
    val originContext = value.get()
    val context = AnalysisContext(defaultDatabase = database,
      nestedViewDepth = originContext.nestedViewDepth + 1)
    set(context)
    try f finally { set(originContext) }
  }
}
/**
* Provides a logical query plan analyzer, which translates [[UnresolvedAttribute]]s and
* [[UnresolvedRelation]]s into fully typed objects using information in a [[SessionCatalog]].
*/
class Analyzer(
catalog: SessionCatalog,
conf: SQLConf,
maxIterations: Int)
extends RuleExecutor[LogicalPlan] with CheckAnalysis {
def this(catalog: SessionCatalog, conf: SQLConf) = {
this(catalog, conf, conf.optimizerMaxIterations)
}
def resolver: Resolver = conf.resolver
protected val fixedPoint = FixedPoint(maxIterations)
/**
* Override to provide additional rules for the "Resolution" batch.
*/
val extendedResolutionRules: Seq[Rule[LogicalPlan]] = Nil
/**
* Override to provide rules to do post-hoc resolution. Note that these rules will be executed
* in an individual batch. This batch is to run right after the normal resolution batch and
* execute its rules in one pass.
*/
val postHocResolutionRules: Seq[Rule[LogicalPlan]] = Nil
lazy val batches: Seq[Batch] = Seq(
Batch("Hints", fixedPoint,
new ResolveHints.ResolveBroadcastHints(conf),
ResolveHints.RemoveAllHints),
Batch("Simple Sanity Check", Once,
LookupFunctions),
Batch("Substitution", fixedPoint,
CTESubstitution,
WindowsSubstitution,
EliminateUnions,
new SubstituteUnresolvedOrdinals(conf)),
Batch("Resolution", fixedPoint,
ResolveTableValuedFunctions ::
ResolveRelations ::
ResolveReferences ::
ResolveCreateNamedStruct ::
ResolveDeserializer ::
ResolveNewInstance ::
ResolveUpCast ::
ResolveGroupingAnalytics ::
ResolvePivot ::
ResolveOrdinalInOrderByAndGroupBy ::
ResolveAggAliasInGroupBy ::
ResolveMissingReferences ::
ExtractGenerator ::
ResolveGenerate ::
ResolveFunctions ::
ResolveAliases ::
ResolveSubquery ::
ResolveSubqueryColumnAliases ::
ResolveWindowOrder ::
ResolveWindowFrame ::
ResolveNaturalAndUsingJoin ::
ExtractWindowExpressions ::
GlobalAggregates ::
ResolveAggregateFunctions ::
TimeWindowing ::
ResolveInlineTables(conf) ::
ResolveTimeZone(conf) ::
TypeCoercion.typeCoercionRules ++
extendedResolutionRules : _*),
Batch("Post-Hoc Resolution", Once, postHocResolutionRules: _*),
Batch("View", Once,
AliasViewChild(conf)),
Batch("Nondeterministic", Once,
PullOutNondeterministic),
Batch("UDF", Once,
HandleNullInputsForUDF),
Batch("FixNullability", Once,
FixNullability),
Batch("Subquery", Once,
UpdateOuterReferences),
Batch("Cleanup", fixedPoint,
CleanupAliases)
)
/**
* Analyze cte definitions and substitute child plan with analyzed cte definitions.
*/
  object CTESubstitution extends Rule[LogicalPlan] {
    def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
      case With(child, relations) =>
        // Resolve the CTE definitions left to right so later definitions can
        // refer to earlier ones; each definition is fully analyzed (execute)
        // before becoming visible to the rest.
        substituteCTE(child, relations.foldLeft(Seq.empty[(String, LogicalPlan)]) {
          case (resolved, (name, relation)) =>
            resolved :+ name -> execute(substituteCTE(relation, resolved))
        })
      case other => other
    }

    /** Replaces unresolved relations whose name matches a CTE definition with
      * the analyzed CTE plan, also descending into subquery expressions.
      */
    def substituteCTE(plan: LogicalPlan, cteRelations: Seq[(String, LogicalPlan)]): LogicalPlan = {
      plan transformDown {
        case u : UnresolvedRelation =>
          cteRelations.find(x => resolver(x._1, u.tableIdentifier.table))
            .map(_._2).getOrElse(u)

        case other =>
          // This cannot be done in ResolveSubquery because ResolveSubquery does not know the CTE.
          other transformExpressions {
            case e: SubqueryExpression =>
              e.withNewPlan(substituteCTE(e.plan, cteRelations))
          }
      }
    }
  }
/**
* Substitute child plan with WindowSpecDefinitions.
*/
  object WindowsSubstitution extends Rule[LogicalPlan] {
    def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
      // Lookup WindowSpecDefinitions. This rule works with unresolved children.
      case WithWindowDefinition(windowDefinitions, child) =>
        child.transform {
          case p => p.transformExpressions {
            // Replace each named window reference with its full specification;
            // fail analysis if the name was never defined in a WINDOW clause.
            case UnresolvedWindowExpression(c, WindowSpecReference(windowName)) =>
              val errorMessage =
                s"Window specification $windowName is not defined in the WINDOW clause."
              val windowSpecDefinition =
                windowDefinitions.getOrElse(windowName, failAnalysis(errorMessage))
              WindowExpression(c, windowSpecDefinition)
          }
        }
    }
  }
/**
* Replaces [[UnresolvedAlias]]s with concrete aliases.
*/
  object ResolveAliases extends Rule[LogicalPlan] {
    /** Turns each [[UnresolvedAlias]] into a concrete [[NamedExpression]],
      * deriving the alias name from the child (pretty-printed SQL) when no
      * explicit name-generation function was supplied.
      */
    private def assignAliases(exprs: Seq[NamedExpression]) = {
      exprs.zipWithIndex.map {
        case (expr, i) =>
          expr.transformUp { case u @ UnresolvedAlias(child, optGenAliasFunc) =>
            child match {
              case ne: NamedExpression => ne
              case go @ GeneratorOuter(g: Generator) if g.resolved => MultiAlias(go, Nil)
              // Leave unresolved children alone; a later fixed-point pass retries.
              case e if !e.resolved => u
              case g: Generator => MultiAlias(g, Nil)
              // Preserve the original column name when only a cast was added.
              case c @ Cast(ne: NamedExpression, _, _) => Alias(c, ne.name)()
              case e: ExtractValue => Alias(e, toPrettySQL(e))()
              case e if optGenAliasFunc.isDefined =>
                Alias(child, optGenAliasFunc.get.apply(e))()
              case e => Alias(e, toPrettySQL(e))()
            }
          }
      }.asInstanceOf[Seq[NamedExpression]]
    }

    private def hasUnresolvedAlias(exprs: Seq[NamedExpression]) =
      exprs.exists(_.find(_.isInstanceOf[UnresolvedAlias]).isDefined)

    // Only rewrites operators whose children are already resolved and that
    // actually contain an UnresolvedAlias, so the rule is idempotent.
    def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
      case Aggregate(groups, aggs, child) if child.resolved && hasUnresolvedAlias(aggs) =>
        Aggregate(groups, assignAliases(aggs), child)

      case g: GroupingSets if g.child.resolved && hasUnresolvedAlias(g.aggregations) =>
        g.copy(aggregations = assignAliases(g.aggregations))

      case Pivot(groupByExprs, pivotColumn, pivotValues, aggregates, child)
        if child.resolved && hasUnresolvedAlias(groupByExprs) =>
        Pivot(assignAliases(groupByExprs), pivotColumn, pivotValues, aggregates, child)

      case Project(projectList, child) if child.resolved && hasUnresolvedAlias(projectList) =>
        Project(assignAliases(projectList), child)
    }
  }
object ResolveGroupingAnalytics extends Rule[LogicalPlan] {
  /*
   * GROUP BY a, b, c WITH ROLLUP
   * is equivalent to
   * GROUP BY a, b, c GROUPING SETS ( (a, b, c), (a, b), (a), ( ) ).
   * Group Count: N + 1 (N is the number of group expressions)
   *
   * We need to get all of its subsets for the rule described above, the subset is
   * represented as sequence of expressions.
   */
  def rollupExprs(exprs: Seq[Expression]): Seq[Seq[Expression]] = exprs.inits.toSeq

  /*
   * GROUP BY a, b, c WITH CUBE
   * is equivalent to
   * GROUP BY a, b, c GROUPING SETS ( (a, b, c), (a, b), (b, c), (a, c), (a), (b), (c), ( ) ).
   * Group Count: 2 ^ N (N is the number of group expressions)
   *
   * We need to get all of its subsets for a given GROUPBY expression, the subsets are
   * represented as sequence of expressions.
   */
  def cubeExprs(exprs: Seq[Expression]): Seq[Seq[Expression]] = exprs.toList match {
    case x :: xs =>
      // Power set: every subset of the tail, once with `x` prepended and once without.
      val initial = cubeExprs(xs)
      initial.map(x +: _) ++ initial
    case Nil =>
      Seq(Seq.empty)
  }

  // True if `expr` references the legacy Hive virtual "grouping__id" column,
  // which is rejected below in favor of grouping_id().
  private def hasGroupingAttribute(expr: Expression): Boolean = {
    expr.collectFirst {
      case u: UnresolvedAttribute if resolver(u.name, VirtualColumn.hiveGroupingIdName) => u
    }.isDefined
  }

  // True if `e` contains a grouping() or grouping_id() function call.
  private[analysis] def hasGroupingFunction(e: Expression): Boolean = {
    e.collectFirst {
      case g: Grouping => g
      case g: GroupingID => g
    }.isDefined
  }

  // Rewrites grouping()/grouping_id() calls in `expr` into bit operations on
  // the grouping id expression `gid` produced by Expand. Fails analysis when
  // the columns named in the call don't match the actual grouping columns.
  private def replaceGroupingFunc(
      expr: Expression,
      groupByExprs: Seq[Expression],
      gid: Expression): Expression = {
    expr transform {
      case e: GroupingID =>
        if (e.groupByExprs.isEmpty || e.groupByExprs == groupByExprs) {
          Alias(gid, toPrettySQL(e))()
        } else {
          throw new AnalysisException(
            s"Columns of grouping_id (${e.groupByExprs.mkString(",")}) does not match " +
              s"grouping columns (${groupByExprs.mkString(",")})")
        }
      case e @ Grouping(col: Expression) =>
        val idx = groupByExprs.indexOf(col)
        if (idx >= 0) {
          // grouping(col) is bit (N - 1 - idx) of gid, extracted with shift + mask.
          Alias(Cast(BitwiseAnd(ShiftRight(gid, Literal(groupByExprs.length - 1 - idx)),
            Literal(1)), ByteType), toPrettySQL(e))()
        } else {
          throw new AnalysisException(s"Column of grouping ($col) can't be found " +
            s"in grouping columns ${groupByExprs.mkString(",")}")
        }
    }
  }

  /*
   * Create new alias for all group by expressions for `Expand` operator.
   */
  private def constructGroupByAlias(groupByExprs: Seq[Expression]): Seq[Alias] = {
    groupByExprs.map {
      case e: NamedExpression => Alias(e, e.name)()
      case other => Alias(other, other.toString)()
    }
  }

  /*
   * Construct [[Expand]] operator with grouping sets.
   */
  private def constructExpand(
      selectedGroupByExprs: Seq[Seq[Expression]],
      child: LogicalPlan,
      groupByAliases: Seq[Alias],
      gid: Attribute): LogicalPlan = {
    // Change the nullability of group by aliases if necessary. For example, if we have
    // GROUPING SETS ((a,b), a), we do not need to change the nullability of a, but we
    // should change the nullabilty of b to be TRUE.
    // TODO: For Cube/Rollup just set nullability to be `true`.
    val expandedAttributes = groupByAliases.map { alias =>
      // A column absent from at least one grouping set will be null-filled there.
      if (selectedGroupByExprs.exists(!_.contains(alias.child))) {
        alias.toAttribute.withNullability(true)
      } else {
        alias.toAttribute
      }
    }
    val groupingSetsAttributes = selectedGroupByExprs.map { groupingSetExprs =>
      groupingSetExprs.map { expr =>
        val alias = groupByAliases.find(_.child.semanticEquals(expr)).getOrElse(
          failAnalysis(s"$expr doesn't show up in the GROUP BY list $groupByAliases"))
        // Map alias to expanded attribute.
        expandedAttributes.find(_.semanticEquals(alias.toAttribute)).getOrElse(
          alias.toAttribute)
      }
    }
    Expand(groupingSetsAttributes, groupByAliases, expandedAttributes, gid, child)
  }

  /*
   * Construct new aggregate expressions by replacing grouping functions.
   */
  private def constructAggregateExprs(
      groupByExprs: Seq[Expression],
      aggregations: Seq[NamedExpression],
      groupByAliases: Seq[Alias],
      groupingAttrs: Seq[Expression],
      gid: Attribute): Seq[NamedExpression] = aggregations.map {
    // collect all the found AggregateExpression, so we can check an expression is part of
    // any AggregateExpression or not.
    val aggsBuffer = ArrayBuffer[Expression]()
    // Returns whether the expression belongs to any expressions in `aggsBuffer` or not.
    def isPartOfAggregation(e: Expression): Boolean = {
      aggsBuffer.exists(a => a.find(_ eq e).isDefined)
    }
    replaceGroupingFunc(_, groupByExprs, gid).transformDown {
      // AggregateExpression should be computed on the unmodified value of its argument
      // expressions, so we should not replace any references to grouping expression
      // inside it.
      case e: AggregateExpression =>
        aggsBuffer += e
        e
      case e if isPartOfAggregation(e) => e
      case e =>
        // Replace expression by expand output attribute.
        val index = groupByAliases.indexWhere(_.child.semanticEquals(e))
        if (index == -1) {
          e
        } else {
          groupingAttrs(index)
        }
    }.asInstanceOf[NamedExpression]
  }

  /*
   * Construct [[Aggregate]] operator from Cube/Rollup/GroupingSets.
   */
  private def constructAggregate(
      selectedGroupByExprs: Seq[Seq[Expression]],
      groupByExprs: Seq[Expression],
      aggregationExprs: Seq[NamedExpression],
      child: LogicalPlan): LogicalPlan = {
    val gid = AttributeReference(VirtualColumn.groupingIdName, IntegerType, false)()
    // Expand works by setting grouping expressions to null as determined by the
    // `selectedGroupByExprs`. To prevent these null values from being used in an aggregate
    // instead of the original value we need to create new aliases for all group by expressions
    // that will only be used for the intended purpose.
    val groupByAliases = constructGroupByAlias(groupByExprs)
    val expand = constructExpand(selectedGroupByExprs, child, groupByAliases, gid)
    // Expand appends the grouping columns (and gid) after the child's output.
    val groupingAttrs = expand.output.drop(child.output.length)
    val aggregations = constructAggregateExprs(
      groupByExprs, aggregationExprs, groupByAliases, groupingAttrs, gid)
    Aggregate(groupingAttrs, aggregations, expand)
  }

  // Finds the grouping expressions of the first Aggregate under `plan`,
  // excluding the trailing grouping-id key; fails analysis when the plan was
  // not produced by GroupingSets/Cube/Rollup.
  private def findGroupingExprs(plan: LogicalPlan): Seq[Expression] = {
    plan.collectFirst {
      case a: Aggregate =>
        // this Aggregate should have grouping id as the last grouping key.
        val gid = a.groupingExpressions.last
        if (!gid.isInstanceOf[AttributeReference]
          || gid.asInstanceOf[AttributeReference].name != VirtualColumn.groupingIdName) {
          failAnalysis(s"grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup")
        }
        a.groupingExpressions.take(a.groupingExpressions.length - 1)
    }.getOrElse {
      failAnalysis(s"grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup")
    }
  }

  // This require transformUp to replace grouping()/grouping_id() in resolved Filter/Sort
  def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
    case a if !a.childrenResolved => a // be sure all of the children are resolved.
    case p if p.expressions.exists(hasGroupingAttribute) =>
      failAnalysis(
        s"${VirtualColumn.hiveGroupingIdName} is deprecated; use grouping_id() instead")
    // Ensure group by expressions and aggregate expressions have been resolved.
    case Aggregate(Seq(c @ Cube(groupByExprs)), aggregateExpressions, child)
      if (groupByExprs ++ aggregateExpressions).forall(_.resolved) =>
      constructAggregate(cubeExprs(groupByExprs), groupByExprs, aggregateExpressions, child)
    case Aggregate(Seq(r @ Rollup(groupByExprs)), aggregateExpressions, child)
      if (groupByExprs ++ aggregateExpressions).forall(_.resolved) =>
      constructAggregate(rollupExprs(groupByExprs), groupByExprs, aggregateExpressions, child)
    // Ensure all the expressions have been resolved.
    case x: GroupingSets if x.expressions.forall(_.resolved) =>
      constructAggregate(x.selectedGroupByExprs, x.groupByExprs, x.aggregations, x.child)
    // We should make sure all expressions in condition have been resolved.
    case f @ Filter(cond, child) if hasGroupingFunction(cond) && cond.resolved =>
      val groupingExprs = findGroupingExprs(child)
      // The unresolved grouping id will be resolved by ResolveMissingReferences
      val newCond = replaceGroupingFunc(cond, groupingExprs, VirtualColumn.groupingIdAttribute)
      f.copy(condition = newCond)
    // We should make sure all [[SortOrder]]s have been resolved.
    case s @ Sort(order, _, child)
      if order.exists(hasGroupingFunction) && order.forall(_.resolved) =>
      val groupingExprs = findGroupingExprs(child)
      val gid = VirtualColumn.groupingIdAttribute
      // The unresolved grouping id will be resolved by ResolveMissingReferences
      val newOrder = order.map(replaceGroupingFunc(_, groupingExprs, gid).asInstanceOf[SortOrder])
      s.copy(order = newOrder)
  }
}
/**
 * Resolves [[Pivot]] by rewriting it into aggregations.
 *
 * When every aggregate's data type is supported by [[PivotFirst]], the pivot is
 * planned as two aggregation steps (group + pivot column first, then PivotFirst),
 * avoiding the evaluation of |pivotValues| conditionals per input row. Otherwise
 * it falls back to one aggregate per (pivot value, aggregate) pair, each guarded
 * by an If on the pivot column.
 */
object ResolvePivot extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    // Wait until the children, aggregates, group-by expressions and pivot column
    // are all resolved. FIX: use short-circuiting || instead of the bitwise-style
    // Boolean | so that later predicates are not evaluated once one already holds.
    case p: Pivot if !p.childrenResolved || !p.aggregates.forall(_.resolved)
      || !p.groupByExprs.forall(_.resolved) || !p.pivotColumn.resolved => p
    case Pivot(groupByExprs, pivotColumn, pivotValues, aggregates, child) =>
      val singleAgg = aggregates.size == 1
      // Builds the output column name for a (pivot value, aggregate) pair: just
      // the stringified value when there is a single aggregate, otherwise
      // "<value>_<aggName>".
      def outputName(value: Literal, aggregate: Expression): String = {
        val utf8Value = Cast(value, StringType, Some(conf.sessionLocalTimeZone)).eval(EmptyRow)
        val stringValue: String = Option(utf8Value).map(_.toString).getOrElse("null")
        if (singleAgg) {
          stringValue
        } else {
          val suffix = aggregate match {
            case n: NamedExpression => n.name
            case _ => toPrettySQL(aggregate)
          }
          stringValue + "_" + suffix
        }
      }
      if (aggregates.forall(a => PivotFirst.supportsDataType(a.dataType))) {
        // Since evaluating |pivotValues| if statements for each input row can get slow this is an
        // alternate plan that instead uses two steps of aggregation.
        val namedAggExps: Seq[NamedExpression] = aggregates.map(a => Alias(a, a.sql)())
        val namedPivotCol = pivotColumn match {
          case n: NamedExpression => n
          case _ => Alias(pivotColumn, "__pivot_col")()
        }
        val bigGroup = groupByExprs :+ namedPivotCol
        val firstAgg = Aggregate(bigGroup, bigGroup ++ namedAggExps, child)
        // Evaluate the pivot values once, in the pivot column's type.
        val castPivotValues = pivotValues.map(Cast(_, pivotColumn.dataType).eval(EmptyRow))
        val pivotAggs = namedAggExps.map { a =>
          Alias(PivotFirst(namedPivotCol.toAttribute, a.toAttribute, castPivotValues)
            .toAggregateExpression()
          , "__pivot_" + a.sql)()
        }
        val groupByExprsAttr = groupByExprs.map(_.toAttribute)
        val secondAgg = Aggregate(groupByExprsAttr, groupByExprsAttr ++ pivotAggs, firstAgg)
        val pivotAggAttribute = pivotAggs.map(_.toAttribute)
        // Unpack each PivotFirst result array into one output column per
        // (pivot value, aggregate) pair.
        val pivotOutputs = pivotValues.zipWithIndex.flatMap { case (value, i) =>
          aggregates.zip(pivotAggAttribute).map { case (aggregate, pivotAtt) =>
            Alias(ExtractValue(pivotAtt, Literal(i), resolver), outputName(value, aggregate))()
          }
        }
        Project(groupByExprsAttr ++ pivotOutputs, secondAgg)
      } else {
        // Fallback: one filtered aggregate per (pivot value, aggregate) pair.
        val pivotAggregates: Seq[NamedExpression] = pivotValues.flatMap { value =>
          def ifExpr(expr: Expression) = {
            If(EqualNullSafe(pivotColumn, value), expr, Literal(null))
          }
          aggregates.map { aggregate =>
            val filteredAggregate = aggregate.transformDown {
              // Assumption is the aggregate function ignores nulls. This is true for all current
              // AggregateFunction's with the exception of First and Last in their default mode
              // (which we handle) and possibly some Hive UDAF's.
              case First(expr, _) =>
                First(ifExpr(expr), Literal(true))
              case Last(expr, _) =>
                Last(ifExpr(expr), Literal(true))
              case a: AggregateFunction =>
                a.withNewChildren(a.children.map(ifExpr))
            }.transform {
              // We are duplicating aggregates that are now computing a different value for each
              // pivot value.
              // TODO: Don't construct the physical container until after analysis.
              case ae: AggregateExpression => ae.copy(resultId = NamedExpression.newExprId)
            }
            // If nothing was rewritten the expression contained no aggregate at all.
            if (filteredAggregate.fastEquals(aggregate)) {
              throw new AnalysisException(
                s"Aggregate expression required for pivot, found '$aggregate'")
            }
            Alias(filteredAggregate, outputName(value, aggregate))()
          }
        }
        Aggregate(groupByExprs, groupByExprs ++ pivotAggregates, child)
      }
  }
}
/**
* Replaces [[UnresolvedRelation]]s with concrete relations from the catalog.
*/
object ResolveRelations extends Rule[LogicalPlan] {
  // If the unresolved relation is running directly on files, we just return the original
  // UnresolvedRelation, the plan will get resolved later. Else we look up the table from catalog
  // and change the default database name (in AnalysisContext) if it is a view.
  // We usually look up a table from the default database if the table identifier has an empty
  // database part; for a view the default database should be the currentDb when the view was
  // created. When the case comes to resolving a nested view, the view may have a different
  // default database than the referenced view has, so we need to use
  // `AnalysisContext.defaultDatabase` to track the current default database.
  // When the relation we resolve is a view, we fetch the view.desc (which is a CatalogTable), and
  // then set the value of `CatalogTable.viewDefaultDatabase` to
  // `AnalysisContext.defaultDatabase`; we look up the relations that the view references using
  // the default database.
  // For example:
  // |- view1 (defaultDatabase = db1)
  //   |- operator
  //     |- table2 (defaultDatabase = db1)
  //     |- view2 (defaultDatabase = db2)
  //        |- view3 (defaultDatabase = db3)
  //   |- view4 (defaultDatabase = db4)
  // In this case, the view `view1` is a nested view, it directly references `table2`, `view2`
  // and `view4`, the view `view2` references `view3`. On resolving the table, we look up the
  // relations `table2`, `view2`, `view4` using the default database `db1`, and look up the
  // relation `view3` using the default database `db2`.
  //
  // Note this is compatible with the views defined by older versions of Spark (before 2.2),
  // which have empty defaultDatabase and all the relations in viewText have the database part
  // defined.
  def resolveRelation(plan: LogicalPlan): LogicalPlan = plan match {
    case u: UnresolvedRelation if !isRunningDirectlyOnFiles(u.tableIdentifier) =>
      val defaultDatabase = AnalysisContext.get.defaultDatabase
      val foundRelation = lookupTableFromCatalog(u, defaultDatabase)
      // Recurse: the looked-up relation may itself be (or wrap) a view.
      resolveRelation(foundRelation)
    // The view's child should be a logical plan parsed from the `desc.viewText`, the variable
    // `viewText` should be defined, or else we throw an error on the generation of the View
    // operator.
    case view @ View(desc, _, child) if !child.resolved =>
      // Resolve all the UnresolvedRelations and Views in the child.
      val newChild = AnalysisContext.withAnalysisContext(desc.viewDefaultDatabase) {
        // Guard against view-definition cycles / pathologically deep nesting.
        if (AnalysisContext.get.nestedViewDepth > conf.maxNestedViewDepth) {
          view.failAnalysis(s"The depth of view ${view.desc.identifier} exceeds the maximum " +
            s"view resolution depth (${conf.maxNestedViewDepth}). Analysis is aborted to " +
            "avoid errors. Increase the value of spark.sql.view.maxNestedViewDepth to work " +
            "around this.")
        }
        execute(child)
      }
      view.copy(child = newChild)
    case p @ SubqueryAlias(_, view: View) =>
      val newChild = resolveRelation(view)
      p.copy(child = newChild)
    case _ => plan
  }

  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    case i @ InsertIntoTable(u: UnresolvedRelation, parts, child, _, _) if child.resolved =>
      EliminateSubqueryAliases(lookupTableFromCatalog(u)) match {
        case v: View =>
          u.failAnalysis(s"Inserting into a view is not allowed. View: ${v.desc.identifier}.")
        case other => i.copy(table = other)
      }
    case u: UnresolvedRelation => resolveRelation(u)
  }

  // Look up the table with the given name from catalog. The database we use is decided by the
  // precedence:
  // 1. Use the database part of the table identifier, if it is defined;
  // 2. Use defaultDatabase, if it is defined (in this case, no temporary objects can be used,
  //    and the default database is only used to look up a view);
  // 3. Use the currentDb of the SessionCatalog.
  private def lookupTableFromCatalog(
      u: UnresolvedRelation,
      defaultDatabase: Option[String] = None): LogicalPlan = {
    val tableIdentWithDb = u.tableIdentifier.copy(
      database = u.tableIdentifier.database.orElse(defaultDatabase))
    try {
      catalog.lookupRelation(tableIdentWithDb)
    } catch {
      case _: NoSuchTableException =>
        u.failAnalysis(s"Table or view not found: ${tableIdentWithDb.unquotedString}")
      // If the database is defined and that database is not found, throw an AnalysisException.
      // Note that if the database is not defined, it is possible we are looking up a temp view.
      case e: NoSuchDatabaseException =>
        // FIX: "exsits" -> "exist" in the user-facing error message.
        u.failAnalysis(s"Table or view not found: ${tableIdentWithDb.unquotedString}, the " +
          s"database ${e.db} doesn't exist.")
    }
  }

  // If the database part is specified, and we support running SQL directly on files, and
  // it's not a temporary view, and the table does not exist, then let's just return the
  // original UnresolvedRelation. It is possible we are matching a query like "select *
  // from parquet.`/path/to/query`". The plan will get resolved in the rule `ResolveDataSource`.
  // Note that we are testing (!db_exists || !table_exists) because the catalog throws
  // an exception from tableExists if the database does not exist.
  private def isRunningDirectlyOnFiles(table: TableIdentifier): Boolean = {
    table.database.isDefined && conf.runSQLonFile && !catalog.isTemporaryTable(table) &&
      (!catalog.databaseExists(table.database.get) || !catalog.tableExists(table))
  }
}
/**
* Replaces [[UnresolvedAttribute]]s with concrete [[AttributeReference]]s from
* a logical plan node's children.
*/
object ResolveReferences extends Rule[LogicalPlan] {
  /**
   * Generate a new logical plan for the right child with different expression IDs
   * for all conflicting attributes.
   *
   * Needed for self-joins / self-intersects, where the same base relation (and
   * therefore the same attribute IDs) appears on both sides.
   */
  private def dedupRight (left: LogicalPlan, right: LogicalPlan): LogicalPlan = {
    val conflictingAttributes = left.outputSet.intersect(right.outputSet)
    logDebug(s"Conflicting attributes ${conflictingAttributes.mkString(",")} " +
      s"between $left and $right")
    right.collect {
      // Handle base relations that might appear more than once.
      case oldVersion: MultiInstanceRelation
        if oldVersion.outputSet.intersect(conflictingAttributes).nonEmpty =>
        val newVersion = oldVersion.newInstance()
        (oldVersion, newVersion)
      case oldVersion: SerializeFromObject
        if oldVersion.outputSet.intersect(conflictingAttributes).nonEmpty =>
        (oldVersion, oldVersion.copy(serializer = oldVersion.serializer.map(_.newInstance())))
      // Handle projects that create conflicting aliases.
      case oldVersion @ Project(projectList, _)
        if findAliases(projectList).intersect(conflictingAttributes).nonEmpty =>
        (oldVersion, oldVersion.copy(projectList = newAliases(projectList)))
      case oldVersion @ Aggregate(_, aggregateExpressions, _)
        if findAliases(aggregateExpressions).intersect(conflictingAttributes).nonEmpty =>
        (oldVersion, oldVersion.copy(aggregateExpressions = newAliases(aggregateExpressions)))
      case oldVersion: Generate
        if oldVersion.generatedSet.intersect(conflictingAttributes).nonEmpty =>
        val newOutput = oldVersion.generatorOutput.map(_.newInstance())
        (oldVersion, oldVersion.copy(generatorOutput = newOutput))
      case oldVersion @ Window(windowExpressions, _, _, child)
        if AttributeSet(windowExpressions.map(_.toAttribute)).intersect(conflictingAttributes)
          .nonEmpty =>
        (oldVersion, oldVersion.copy(windowExpressions = newAliases(windowExpressions)))
    }
    // Only handle first case, others will be fixed on the next pass.
    .headOption match {
      case None =>
        /*
         * No result implies that there is a logical plan node that produces new references
         * that this rule cannot handle. When that is the case, there must be another rule
         * that resolves these conflicts. Otherwise, the analysis will fail.
         */
        right
      case Some((oldRelation, newRelation)) =>
        // Rewrite every reference (including those inside subqueries) from the
        // old attribute IDs to the freshly generated ones.
        val attributeRewrites = AttributeMap(oldRelation.output.zip(newRelation.output))
        val newRight = right transformUp {
          case r if r == oldRelation => newRelation
        } transformUp {
          case other => other transformExpressions {
            case a: Attribute =>
              dedupAttr(a, attributeRewrites)
            case s: SubqueryExpression =>
              s.withNewPlan(dedupOuterReferencesInSubquery(s.plan, attributeRewrites))
          }
        }
        newRight
    }
  }

  // Rewrites `attr` through `attrMap` while preserving its original qualifier.
  private def dedupAttr(attr: Attribute, attrMap: AttributeMap[Attribute]): Attribute = {
    attrMap.get(attr).getOrElse(attr).withQualifier(attr.qualifier)
  }

  /**
   * The outer plan may have been de-duplicated and the function below updates the
   * outer references to refer to the de-duplicated attributes.
   *
   * For example (SQL):
   * {{{
   *   SELECT * FROM t1
   *   INTERSECT
   *   SELECT * FROM t1
   *   WHERE EXISTS (SELECT 1
   *                 FROM t2
   *                 WHERE t1.c1 = t2.c1)
   * }}}
   * Plan before resolveReference rule.
   *    'Intersect
   *    :- Project [c1#245, c2#246]
   *    :  +- SubqueryAlias t1
   *    :     +- Relation[c1#245,c2#246] parquet
   *    +- 'Project [*]
   *       +- Filter exists#257 [c1#245]
   *          :  +- Project [1 AS 1#258]
   *          :     +- Filter (outer(c1#245) = c1#251)
   *          :        +- SubqueryAlias t2
   *          :           +- Relation[c1#251,c2#252] parquet
   *          +- SubqueryAlias t1
   *             +- Relation[c1#245,c2#246] parquet
   * Plan after the resolveReference rule.
   *    Intersect
   *    :- Project [c1#245, c2#246]
   *    :  +- SubqueryAlias t1
   *    :     +- Relation[c1#245,c2#246] parquet
   *    +- Project [c1#259, c2#260]
   *       +- Filter exists#257 [c1#259]
   *          :  +- Project [1 AS 1#258]
   *          :     +- Filter (outer(c1#259) = c1#251) => Updated
   *          :        +- SubqueryAlias t2
   *          :           +- Relation[c1#251,c2#252] parquet
   *          +- SubqueryAlias t1
   *             +- Relation[c1#259,c2#260] parquet  => Outer plan's attributes are de-duplicated.
   */
  private def dedupOuterReferencesInSubquery(
      plan: LogicalPlan,
      attrMap: AttributeMap[Attribute]): LogicalPlan = {
    plan transformDown { case currentFragment =>
      currentFragment transformExpressions {
        case OuterReference(a: Attribute) =>
          OuterReference(dedupAttr(a, attrMap))
        case s: SubqueryExpression =>
          // Nested subqueries may also hold outer references to the rewritten plan.
          s.withNewPlan(dedupOuterReferencesInSubquery(s.plan, attrMap))
      }
    }
  }

  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    case p: LogicalPlan if !p.childrenResolved => p
    // If the projection list contains Stars, expand it.
    case p: Project if containsStar(p.projectList) =>
      p.copy(projectList = buildExpandedProjectList(p.projectList, p.child))
    // If the aggregate function argument contains Stars, expand it.
    case a: Aggregate if containsStar(a.aggregateExpressions) =>
      if (a.groupingExpressions.exists(_.isInstanceOf[UnresolvedOrdinal])) {
        failAnalysis(
          "Star (*) is not allowed in select list when GROUP BY ordinal position is used")
      } else {
        a.copy(aggregateExpressions = buildExpandedProjectList(a.aggregateExpressions, a.child))
      }
    // If the script transformation input contains Stars, expand it.
    case t: ScriptTransformation if containsStar(t.input) =>
      t.copy(
        input = t.input.flatMap {
          case s: Star => s.expand(t.child, resolver)
          case o => o :: Nil
        }
      )
    case g: Generate if containsStar(g.generator.children) =>
      failAnalysis("Invalid usage of '*' in explode/json_tuple/UDTF")
    // To resolve duplicate expression IDs for Join and Intersect
    case j @ Join(left, right, _, _) if !j.duplicateResolved =>
      j.copy(right = dedupRight(left, right))
    case i @ Intersect(left, right) if !i.duplicateResolved =>
      i.copy(right = dedupRight(left, right))
    case i @ Except(left, right) if !i.duplicateResolved =>
      i.copy(right = dedupRight(left, right))
    // When resolve `SortOrder`s in Sort based on child, don't report errors as
    // we still have chance to resolve it based on its descendants
    case s @ Sort(ordering, global, child) if child.resolved && !s.resolved =>
      val newOrdering =
        ordering.map(order => resolveExpression(order, child).asInstanceOf[SortOrder])
      Sort(newOrdering, global, child)
    // A special case for Generate, because the output of Generate should not be resolved by
    // ResolveReferences. Attributes in the output will be resolved by ResolveGenerate.
    case g @ Generate(generator, _, _, _, _, _) if generator.resolved => g
    case g @ Generate(generator, join, outer, qualifier, output, child) =>
      val newG = resolveExpression(generator, child, throws = true)
      if (newG.fastEquals(generator)) {
        g
      } else {
        Generate(newG.asInstanceOf[Generator], join, outer, qualifier, output, child)
      }
    // Skips plan which contains deserializer expressions, as they should be resolved by another
    // rule: ResolveDeserializer.
    case plan if containsDeserializer(plan.expressions) => plan
    case q: LogicalPlan =>
      logTrace(s"Attempting to resolve ${q.simpleString}")
      q.transformExpressionsUp {
        case u @ UnresolvedAttribute(nameParts) =>
          // Leave unchanged if resolution fails. Hopefully will be resolved next round.
          val result = withPosition(u) { q.resolveChildren(nameParts, resolver).getOrElse(u) }
          logDebug(s"Resolving $u to $result")
          result
        case UnresolvedExtractValue(child, fieldExpr) if child.resolved =>
          ExtractValue(child, fieldExpr, resolver)
      }
  }

  // Re-creates every Alias with a fresh expression ID, leaving non-aliases as-is.
  def newAliases(expressions: Seq[NamedExpression]): Seq[NamedExpression] = {
    expressions.map {
      case a: Alias => Alias(a.child, a.name)()
      case other => other
    }
  }

  // Collects the attributes produced by the Alias entries of a project list.
  def findAliases(projectList: Seq[NamedExpression]): AttributeSet = {
    AttributeSet(projectList.collect { case a: Alias => a.toAttribute })
  }

  /**
   * Build a project list for Project/Aggregate and expand the star if possible
   */
  private def buildExpandedProjectList(
      exprs: Seq[NamedExpression],
      child: LogicalPlan): Seq[NamedExpression] = {
    exprs.flatMap {
      // Using Dataframe/Dataset API: testData2.groupBy($"a", $"b").agg($"*")
      case s: Star => s.expand(child, resolver)
      // Using SQL API without running ResolveAlias: SELECT * FROM testData2 group by a, b
      case UnresolvedAlias(s: Star, _) => s.expand(child, resolver)
      case o if containsStar(o :: Nil) => expandStarExpression(o, child) :: Nil
      case o => o :: Nil
    }.map(_.asInstanceOf[NamedExpression])
  }

  /**
   * Returns true if `exprs` contains a [[Star]].
   */
  def containsStar(exprs: Seq[Expression]): Boolean =
    exprs.exists(_.collect { case _: Star => true }.nonEmpty)

  /**
   * Expands the matching attribute.*'s in `child`'s output.
   */
  def expandStarExpression(expr: Expression, child: LogicalPlan): Expression = {
    expr.transformUp {
      case f1: UnresolvedFunction if containsStar(f1.children) =>
        f1.copy(children = f1.children.flatMap {
          case s: Star => s.expand(child, resolver)
          case o => o :: Nil
        })
      case c: CreateNamedStruct if containsStar(c.valExprs) =>
        // children alternate (name, value); expand a star value into
        // name/value pairs for every expanded column.
        val newChildren = c.children.grouped(2).flatMap {
          case Seq(k, s : Star) => CreateStruct(s.expand(child, resolver)).children
          case kv => kv
        }
        c.copy(children = newChildren.toList )
      case c: CreateArray if containsStar(c.children) =>
        c.copy(children = c.children.flatMap {
          case s: Star => s.expand(child, resolver)
          case o => o :: Nil
        })
      case p: Murmur3Hash if containsStar(p.children) =>
        p.copy(children = p.children.flatMap {
          case s: Star => s.expand(child, resolver)
          case o => o :: Nil
        })
      // count(*) has been replaced by count(1)
      case o if containsStar(o.children) =>
        failAnalysis(s"Invalid usage of '*' in expression '${o.prettyName}'")
    }
  }
}
// Returns true when any expression tree in `exprs` still holds an
// [[UnresolvedDeserializer]] node, i.e. deserializer resolution has not run yet.
private def containsDeserializer(exprs: Seq[Expression]): Boolean = {
  def hasDeserializer(expr: Expression): Boolean =
    expr.find(_.isInstanceOf[UnresolvedDeserializer]).isDefined
  exprs.exists(hasDeserializer)
}
/**
 * Resolves `expr` against `plan` in a single bottom-up pass.
 *
 * Ordinals become the corresponding output attribute, unresolved attributes
 * are looked up in `plan`, and field extractions are resolved once their
 * child is resolved.
 *
 * If resolution raises an [[AnalysisException]] (e.g. resolving `a.b` when `a`
 * does not exist) the original expression is returned unchanged — unless
 * `throws` is true, in which case the exception propagates to the caller.
 */
protected[sql] def resolveExpression(
    expr: Expression,
    plan: LogicalPlan,
    throws: Boolean = false) = {
  def resolveOnce(e: Expression): Expression = e transformUp {
    case GetColumnByOrdinal(ordinal, _) => plan.output(ordinal)
    case u @ UnresolvedAttribute(nameParts) =>
      withPosition(u) { plan.resolve(nameParts, resolver).getOrElse(u) }
    case UnresolvedExtractValue(child, fieldName) if child.resolved =>
      ExtractValue(child, fieldName, resolver)
  }
  try {
    resolveOnce(expr)
  } catch {
    case _: AnalysisException if !throws => expr
  }
}
/**
* In many dialects of SQL it is valid to use ordinal positions in order/sort by and group by
* clauses. This rule is to convert ordinal positions to the corresponding expressions in the
* select list. This support is introduced in Spark 2.0.
*
* - When the sort references or group by expressions are not integer but foldable expressions,
* just ignore them.
* - When spark.sql.orderByOrdinal/spark.sql.groupByOrdinal is set to false, ignore the position
* numbers too.
*
* Before the release of Spark 2.0, the literals in order/sort by and group by clauses
* have no effect on the results.
*/
object ResolveOrdinalInOrderByAndGroupBy extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    case p if !p.childrenResolved => p
    // Replace the index with the related attribute for ORDER BY,
    // which is a 1-base position of the projection list.
    case Sort(orders, global, child)
      if orders.exists(_.child.isInstanceOf[UnresolvedOrdinal]) =>
      val newOrders = orders map {
        case s @ SortOrder(UnresolvedOrdinal(index), direction, nullOrdering, _) =>
          if (index > 0 && index <= child.output.size) {
            // Valid ordinal: substitute with the child's output attribute.
            SortOrder(child.output(index - 1), direction, nullOrdering, Set.empty)
          } else {
            // Out-of-range ordinal: fail with the sort order's position info.
            s.failAnalysis(
              s"ORDER BY position $index is not in select list " +
                s"(valid range is [1, ${child.output.size}])")
          }
        case o => o
      }
      Sort(newOrders, global, child)
    // Replace the index with the corresponding expression in aggregateExpressions. The index is
    // a 1-base position of aggregateExpressions, which is output columns (select expression)
    case Aggregate(groups, aggs, child) if aggs.forall(_.resolved) &&
      groups.exists(_.isInstanceOf[UnresolvedOrdinal]) =>
      val newGroups = groups.map {
        case u @ UnresolvedOrdinal(index) if index > 0 && index <= aggs.size =>
          aggs(index - 1)
        case ordinal @ UnresolvedOrdinal(index) =>
          // Out-of-range ordinal: fail with the ordinal's position info.
          ordinal.failAnalysis(
            s"GROUP BY position $index is not in select list " +
              s"(valid range is [1, ${aggs.size}])")
        case o => o
      }
      Aggregate(newGroups, aggs, child)
  }
}
/**
* Replace unresolved expressions in grouping keys with resolved ones in SELECT clauses.
* This rule is expected to run after [[ResolveReferences]] applied.
*/
/**
 * Replaces unresolved grouping expressions with matching resolved aliases from
 * the aggregate (SELECT) list, e.g. `SELECT a + b AS c ... GROUP BY c`.
 */
object ResolveAggAliasInGroupBy extends Rule[LogicalPlan] {
  // An attribute the child can resolve on its own must NOT be rewritten here;
  // normal column resolution takes precedence over SELECT-list aliases.
  private def isResolvableByChild(attrName: String, child: LogicalPlan): Boolean =
    child.output.exists(a => resolver(a.name, attrName))

  // Substitutes, inside every grouping expression, each unresolved attribute
  // that matches an aggregate-list alias (and is not a column of the child).
  private def substituteAggAliases(
      exprs: Seq[Expression],
      aggs: Seq[NamedExpression],
      child: LogicalPlan): Seq[Expression] = {
    exprs.map { expr =>
      expr.transform {
        case u: UnresolvedAttribute if !isResolvableByChild(u.name, child) =>
          aggs.find(agg => resolver(agg.name, u.name)).getOrElse(u)
      }
    }
  }

  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    case agg @ Aggregate(groups, aggs, child)
      if conf.groupByAliases && child.resolved && aggs.forall(_.resolved) &&
        groups.exists(!_.resolved) =>
      agg.copy(groupingExpressions = substituteAggAliases(groups, aggs, child))
    case gs @ GroupingSets(selectedGroups, groups, child, aggs)
      if conf.groupByAliases && child.resolved && aggs.forall(_.resolved) &&
        groups.exists(_.isInstanceOf[UnresolvedAttribute]) =>
      gs.copy(
        selectedGroupByExprs = selectedGroups.map(substituteAggAliases(_, aggs, child)),
        groupByExprs = substituteAggAliases(groups, aggs, child))
  }
}
/**
* In many dialects of SQL it is valid to sort by attributes that are not present in the SELECT
* clause. This rule detects such queries and adds the required attributes to the original
* projection, so that they will be available during sorting. Another projection is added to
* remove these attributes after sorting.
*
* The HAVING clause could also use grouping columns that are not presented in the SELECT.
*/
object ResolveMissingReferences extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    // Skip sort with aggregate. This will be handled in ResolveAggregateFunctions
    case sa @ Sort(_, _, child: Aggregate) => sa
    case s @ Sort(order, _, child) if !s.resolved && child.resolved =>
      try {
        val newOrder = order.map(resolveExpressionRecursively(_, child).asInstanceOf[SortOrder])
        val requiredAttrs = AttributeSet(newOrder).filter(_.resolved)
        val missingAttrs = requiredAttrs -- child.outputSet
        if (missingAttrs.nonEmpty) {
          // Add missing attributes and then project them away after the sort.
          Project(child.output,
            Sort(newOrder, s.global, addMissingAttr(child, missingAttrs)))
        } else if (newOrder != order) {
          s.copy(order = newOrder)
        } else {
          s
        }
      } catch {
        // Attempting to resolve it might fail. When this happens, return the original plan.
        // Users will see an AnalysisException for resolution failure of missing attributes
        // in Sort
        case ae: AnalysisException => s
      }
    case f @ Filter(cond, child) if !f.resolved && child.resolved =>
      try {
        val newCond = resolveExpressionRecursively(cond, child)
        val requiredAttrs = newCond.references.filter(_.resolved)
        val missingAttrs = requiredAttrs -- child.outputSet
        if (missingAttrs.nonEmpty) {
          // Add missing attributes and then project them away.
          Project(child.output,
            Filter(newCond, addMissingAttr(child, missingAttrs)))
        } else if (newCond != cond) {
          f.copy(condition = newCond)
        } else {
          f
        }
      } catch {
        // Attempting to resolve it might fail. When this happens, return the original plan.
        // Users will see an AnalysisException for resolution failure of missing attributes
        case ae: AnalysisException => f
      }
  }

  /**
   * Add the missing attributes into projectList of Project/Window or aggregateExpressions of
   * Aggregate.
   */
  private def addMissingAttr(plan: LogicalPlan, missingAttrs: AttributeSet): LogicalPlan = {
    if (missingAttrs.isEmpty) {
      return plan
    }
    plan match {
      case p: Project =>
        // Only push further down the attributes the child does not already produce.
        val missing = missingAttrs -- p.child.outputSet
        Project(p.projectList ++ missingAttrs, addMissingAttr(p.child, missing))
      case a: Aggregate =>
        // all the missing attributes should be grouping expressions
        // TODO: push down AggregateExpression
        missingAttrs.foreach { attr =>
          if (!a.groupingExpressions.exists(_.semanticEquals(attr))) {
            throw new AnalysisException(s"Can't add $attr to ${a.simpleString}")
          }
        }
        val newAggregateExpressions = a.aggregateExpressions ++ missingAttrs
        a.copy(aggregateExpressions = newAggregateExpressions)
      case g: Generate =>
        // If join is false, we will convert it to true for getting from the child the missing
        // attributes that its child might have or could have.
        val missing = missingAttrs -- g.child.outputSet
        g.copy(join = true, child = addMissingAttr(g.child, missing))
      case d: Distinct =>
        // Adding columns through a Distinct would change its result; give up.
        throw new AnalysisException(s"Can't add $missingAttrs to $d")
      case u: UnaryNode =>
        // Other unary nodes are transparent: push the request into the child.
        u.withNewChildren(addMissingAttr(u.child, missingAttrs) :: Nil)
      case other =>
        throw new AnalysisException(s"Can't add $missingAttrs to $other")
    }
  }

  /**
   * Resolve the expression on a specified logical plan and it's child (recursively), until
   * the expression is resolved or meet a non-unary node or Subquery.
   */
  @tailrec
  private def resolveExpressionRecursively(expr: Expression, plan: LogicalPlan): Expression = {
    val resolved = resolveExpression(expr, plan)
    if (resolved.resolved) {
      resolved
    } else {
      plan match {
        case u: UnaryNode if !u.isInstanceOf[SubqueryAlias] =>
          resolveExpressionRecursively(resolved, u.child)
        case other => resolved
      }
    }
  }
}
/**
* Checks whether a function identifier referenced by an [[UnresolvedFunction]] is defined in the
* function registry. Note that this rule doesn't try to resolve the [[UnresolvedFunction]]. It
* only performs simple existence check according to the function identifier to quickly identify
* undefined functions without triggering relation resolution, which may incur potentially
* expensive partition/schema discovery process in some cases.
*
* @see [[ResolveFunctions]]
* @see https://issues.apache.org/jira/browse/SPARK-19737
*/
object LookupFunctions extends Rule[LogicalPlan] {
  /** Fails fast on any [[UnresolvedFunction]] whose identifier is unknown to the catalog. */
  override def apply(plan: LogicalPlan): LogicalPlan = plan.transformAllExpressions {
    case unresolved: UnresolvedFunction if !catalog.functionExists(unresolved.name) =>
      // Attach the expression's position to the error so the user can see where
      // the undefined function was referenced.
      withPosition(unresolved) {
        val identifier = unresolved.name
        throw new NoSuchFunctionException(
          identifier.database.getOrElse("default"), identifier.funcName)
      }
  }
}
/**
* Replaces [[UnresolvedFunction]]s with concrete [[Expression]]s.
*/
object ResolveFunctions extends Rule[LogicalPlan] {
  /**
   * Looks up every [[UnresolvedGenerator]] and [[UnresolvedFunction]] in the catalog and
   * replaces it with the concrete expression the catalog returns. Aggregate functions get
   * wrapped in an [[AggregateExpression]]; window aggregate functions are used as-is.
   */
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    case operator: LogicalPlan =>
      operator transformExpressions {
        // Wait until all children of the expression are resolved.
        case expr if !expr.childrenResolved => expr

        case unresolved @ UnresolvedGenerator(name, children) =>
          withPosition(unresolved) {
            catalog.lookupFunction(name, children) match {
              case generator: Generator => generator
              case other =>
                failAnalysis(s"$name is expected to be a generator. However, " +
                  s"its class is ${other.getClass.getCanonicalName}, which is not a generator.")
            }
          }

        case unresolved @ UnresolvedFunction(funcId, children, isDistinct) =>
          withPosition(unresolved) {
            catalog.lookupFunction(funcId, children) match {
              // AggregateWindowFunctions are AggregateFunctions that can only be evaluated
              // within the context of a Window clause; they never support DISTINCT and do
              // not need to be wrapped in an AggregateExpression.
              case wf: AggregateWindowFunction if isDistinct =>
                failAnalysis(s"${wf.prettyName} does not support the modifier DISTINCT")
              case wf: AggregateWindowFunction =>
                wf
              // A plain aggregate function must be wrapped in an AggregateExpression.
              case agg: AggregateFunction =>
                AggregateExpression(agg, Complete, isDistinct)
              // Anything else is a regular function: DISTINCT is meaningless for it,
              // otherwise return the resolved expression directly.
              case other if isDistinct =>
                failAnalysis(s"${other.prettyName} does not support the modifier DISTINCT")
              case other =>
                other
            }
          }
      }
  }
}
/**
* This rule resolves and rewrites subqueries inside expressions.
*
* Note: CTEs are handled in CTESubstitution.
*/
object ResolveSubquery extends Rule[LogicalPlan] with PredicateHelper {
  /**
   * Resolve the correlated expressions in a subquery by using the outer plans' references.
   * All resolved outer references are wrapped in an [[OuterReference]].
   */
  private def resolveOuterReferences(plan: LogicalPlan, outer: LogicalPlan): LogicalPlan = {
    plan transformDown {
      case q: LogicalPlan if q.childrenResolved && !q.resolved =>
        q transformExpressions {
          case u @ UnresolvedAttribute(nameParts) =>
            withPosition(u) {
              try {
                // If the outer plan can resolve the name, mark it as an outer reference;
                // otherwise leave the attribute unresolved for a later pass.
                outer.resolve(nameParts, resolver) match {
                  case Some(outerAttr) => OuterReference(outerAttr)
                  case None => u
                }
              } catch {
                // Ambiguous resolution against the outer plan: keep the attribute as-is.
                case _: AnalysisException => u
              }
            }
        }
    }
  }

  /**
   * Resolves the subquery plan that is referenced in a subquery expression. The normal
   * attribute references are resolved using the regular analyzer and the outer references are
   * resolved from the outer plans using the resolveOuterReferences method.
   *
   * Outer references from the correlated predicates are updated as children of the
   * Subquery expression.
   */
  private def resolveSubQuery(
      e: SubqueryExpression,
      plans: Seq[LogicalPlan])(
      f: (LogicalPlan, Seq[Expression]) => SubqueryExpression): SubqueryExpression = {
    // Step 1: Resolve the outer expressions. Iterate until the plan is fully resolved or a
    // whole pass makes no progress (fastEquals against the previous iteration's plan).
    var previous: LogicalPlan = null
    var current = e.plan
    do {
      // Try to resolve the subquery plan using the regular analyzer.
      previous = current
      current = execute(current)

      // Use the outer references to resolve the subquery plan if it isn't resolved yet.
      // Stop as soon as one outer plan changes the subquery (fastEquals check) so the
      // regular analyzer gets a chance to run again before trying the next outer plan.
      val i = plans.iterator
      val afterResolve = current
      while (!current.resolved && current.fastEquals(afterResolve) && i.hasNext) {
        current = resolveOuterReferences(current, i.next())
      }
    } while (!current.resolved && !current.fastEquals(previous))

    // Step 2: If the subquery plan is fully resolved, pull the outer references and record
    // them as children of SubqueryExpression.
    if (current.resolved) {
      // Record the outer references as children of subquery expression.
      f(current, SubExprUtils.getOuterReferences(current))
    } else {
      e.withNewPlan(current)
    }
  }

  /**
   * Resolves the subquery. Apart from resolving the subquery and outer references (if any)
   * in the subquery plan, the children of the subquery expression are updated to record the
   * outer references. This is needed to make sure
   * (1) The column(s) referred from the outer query are not pruned from the plan during
   *     optimization.
   * (2) Any aggregate expression(s) that reference outer attributes are pushed down to the
   *     outer plan to get evaluated.
   */
  private def resolveSubQueries(plan: LogicalPlan, plans: Seq[LogicalPlan]): LogicalPlan = {
    plan transformExpressions {
      case s @ ScalarSubquery(sub, _, exprId) if !sub.resolved =>
        resolveSubQuery(s, plans)(ScalarSubquery(_, _, exprId))
      case e @ Exists(sub, _, exprId) if !sub.resolved =>
        resolveSubQuery(e, plans)(Exists(_, _, exprId))
      case In(value, Seq(l @ ListQuery(sub, _, exprId))) if value.resolved && !sub.resolved =>
        // IN (subquery): resolve the ListQuery plan while keeping the IN wrapper intact.
        val expr = resolveSubQuery(l, plans)(ListQuery(_, _, exprId))
        In(value, Seq(expr))
    }
  }

  /**
   * Resolve and rewrite all subqueries in an operator tree.
   */
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    // In case of HAVING (a filter after an aggregate) we use both the aggregate and
    // its child for resolution.
    case f @ Filter(_, a: Aggregate) if f.childrenResolved =>
      resolveSubQueries(f, Seq(a, a.child))
    // Only a few unary nodes (Project/Filter/Aggregate) can contain subqueries.
    case q: UnaryNode if q.childrenResolved =>
      resolveSubQueries(q, q.children)
  }
}
/**
* Replaces unresolved column aliases for a subquery with projections.
*/
object ResolveSubqueryColumnAliases extends Rule[LogicalPlan] {
  /**
   * Rewrites `UnresolvedSubqueryColumnAliases` into a [[Project]] that renames the subquery
   * output, e.g. `SELECT * FROM (SELECT 1 AS a, 1 AS b) t(col1, col2)`.
   */
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    case u @ UnresolvedSubqueryColumnAliases(columnNames, child) if child.resolved =>
      val childAttrs = child.output
      // The alias list must cover the subquery output exactly: one alias per column.
      if (columnNames.size != childAttrs.size) {
        u.failAnalysis("Number of column aliases does not match number of columns. " +
          s"Number of column aliases: ${columnNames.size}; " +
          s"number of columns: ${childAttrs.size}.")
      }
      // Rename each output attribute with its positional alias.
      val renamed = columnNames.zip(childAttrs).map { case (aliasName, attr) =>
        Alias(attr, aliasName)()
      }
      Project(renamed, child)
  }
}
/**
* Turns projections that contain aggregate expressions into aggregations.
*/
object GlobalAggregates extends Rule[LogicalPlan] {
  /** Converts a projection containing (non-windowed) aggregates into a global Aggregate. */
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    case Project(projectList, child) if containsAggregates(projectList) =>
      Aggregate(Nil, projectList, child)
  }

  /**
   * Returns true when some expression contains an aggregate that is NOT inside a window
   * expression — windowed aggregates are evaluated by the Window operator and therefore do
   * not make the projection a global aggregation.
   */
  def containsAggregates(exprs: Seq[Expression]): Boolean = {
    // First collect every aggregate that appears under a WindowExpression.
    val windowedAggExprs: Set[AggregateExpression] = (for {
      expr <- exprs
      windowed <- expr.collect { case WindowExpression(ae: AggregateExpression, _) => ae }
    } yield windowed).toSet

    // Then look for any aggregate expression outside that set.
    exprs.exists { expr =>
      expr.collectFirst {
        case ae: AggregateExpression if !windowedAggExprs.contains(ae) => ae
      }.nonEmpty
    }
  }
}
/**
* This rule finds aggregate expressions that are not in an aggregate operator. For example,
* those in a HAVING clause or ORDER BY clause. These expressions are pushed down to the
* underlying aggregate operator and then projected away after the original operator.
*/
object ResolveAggregateFunctions extends Rule[LogicalPlan] {
  /**
   * Pushes aggregate expressions appearing in a HAVING clause or an ORDER BY on top of an
   * Aggregate down into that Aggregate, then projects the helper columns away again.
   */
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    // HAVING: a Filter directly on top of a resolved Aggregate.
    case filter @ Filter(havingCondition,
        aggregate @ Aggregate(grouping, originalAggExprs, child))
        if aggregate.resolved =>
      // Try resolving the condition of the filter as though it is in the aggregate clause
      try {
        val aggregatedCondition =
          Aggregate(
            grouping,
            Alias(havingCondition, "havingCondition")() :: Nil,
            child)
        val resolvedOperator = execute(aggregatedCondition)
        def resolvedAggregateFilter =
          resolvedOperator
            .asInstanceOf[Aggregate]
            .aggregateExpressions.head

        // If resolution was successful and we see the filter has an aggregate in it, add it to
        // the original aggregate operator.
        if (resolvedOperator.resolved) {
          // Try to replace all aggregate expressions in the filter by an alias.
          val aggregateExpressions = ArrayBuffer.empty[NamedExpression]
          val transformedAggregateFilter = resolvedAggregateFilter.transform {
            case ae: AggregateExpression =>
              val alias = Alias(ae, ae.toString)()
              aggregateExpressions += alias
              alias.toAttribute
            // Grouping functions are handled in the rule [[ResolveGroupingAnalytics]].
            case e: Expression if grouping.exists(_.semanticEquals(e)) &&
                !ResolveGroupingAnalytics.hasGroupingFunction(e) &&
                !aggregate.output.exists(_.semanticEquals(e)) =>
              e match {
                case ne: NamedExpression =>
                  aggregateExpressions += ne
                  ne.toAttribute
                case _ =>
                  val alias = Alias(e, e.toString)()
                  aggregateExpressions += alias
                  alias.toAttribute
              }
          }

          // Push the aggregate expressions into the aggregate (if any).
          if (aggregateExpressions.nonEmpty) {
            Project(aggregate.output,
              Filter(transformedAggregateFilter,
                aggregate.copy(aggregateExpressions = originalAggExprs ++ aggregateExpressions)))
          } else {
            filter
          }
        } else {
          filter
        }
      } catch {
        // Attempting to resolve in the aggregate can result in ambiguity. When this happens,
        // just return the original plan.
        case ae: AnalysisException => filter
      }

    // ORDER BY referencing aggregates: a Sort directly on top of a resolved Aggregate.
    case sort @ Sort(sortOrder, global, aggregate: Aggregate) if aggregate.resolved =>
      // Try resolving the ordering as though it is in the aggregate clause.
      try {
        // Only the sort keys that are unresolved or contain an aggregate need pushing down.
        val unresolvedSortOrders = sortOrder.filter(s => !s.resolved || containsAggregate(s))
        val aliasedOrdering =
          unresolvedSortOrders.map(o => Alias(o.child, "aggOrder")())
        val aggregatedOrdering = aggregate.copy(aggregateExpressions = aliasedOrdering)
        val resolvedAggregate: Aggregate = execute(aggregatedOrdering).asInstanceOf[Aggregate]
        val resolvedAliasedOrdering: Seq[Alias] =
          resolvedAggregate.aggregateExpressions.asInstanceOf[Seq[Alias]]

        // If we pass the analysis check, then the ordering expressions should only reference
        // to aggregate expressions or grouping expressions, and it's safe to push them down
        // to Aggregate.
        checkAnalysis(resolvedAggregate)

        val originalAggExprs = aggregate.aggregateExpressions.map(
          CleanupAliases.trimNonTopLevelAliases(_).asInstanceOf[NamedExpression])

        // If the ordering expression is the same as an original aggregate expression, we
        // don't need to push down this ordering expression and can reference the original
        // aggregate expression instead.
        // BUG FIX: `resolvedAliasedOrdering` has one entry per element of
        // `unresolvedSortOrders` (the filtered subset), so it must be zipped with
        // `unresolvedSortOrders`, not with the full `sortOrder` list. Zipping with
        // `sortOrder` paired each evaluated ordering with the wrong SortOrder (wrong
        // direction / null ordering) whenever some sort keys were already resolved, and
        // `sortOrdersMap` below keys the results by `unresolvedSortOrders`.
        val needsPushDown = ArrayBuffer.empty[NamedExpression]
        val evaluatedOrderings = resolvedAliasedOrdering.zip(unresolvedSortOrders).map {
          case (evaluated, order) =>
            val index = originalAggExprs.indexWhere {
              case Alias(child, _) => child semanticEquals evaluated.child
              case other => other semanticEquals evaluated.child
            }
            if (index == -1) {
              needsPushDown += evaluated
              order.copy(child = evaluated.toAttribute)
            } else {
              order.copy(child = originalAggExprs(index).toAttribute)
            }
        }

        // Splice the rewritten orderings back into the full sort-key list, keyed by
        // object identity (TreeNodeRef) so equal-but-distinct keys stay separate.
        val sortOrdersMap = unresolvedSortOrders
          .map(new TreeNodeRef(_))
          .zip(evaluatedOrderings)
          .toMap
        val finalSortOrders = sortOrder.map(s => sortOrdersMap.getOrElse(new TreeNodeRef(s), s))

        // Since we don't rely on sort.resolved as the stop condition for this rule,
        // we need to check this and prevent applying this rule multiple times
        if (sortOrder == finalSortOrders) {
          sort
        } else {
          Project(aggregate.output,
            Sort(finalSortOrders, global,
              aggregate.copy(aggregateExpressions = originalAggExprs ++ needsPushDown)))
        }
      } catch {
        // Attempting to resolve in the aggregate can result in ambiguity. When this happens,
        // just return the original plan.
        case ae: AnalysisException => sort
      }
  }

  /** Returns true iff the expression contains an [[AggregateExpression]] anywhere in it. */
  def containsAggregate(condition: Expression): Boolean = {
    condition.find(_.isInstanceOf[AggregateExpression]).isDefined
  }
}
/**
* Extracts [[Generator]] from the projectList of a [[Project]] operator and create [[Generate]]
* operator under [[Project]].
*
* This rule will throw [[AnalysisException]] for following cases:
* 1. [[Generator]] is nested in expressions, e.g. `SELECT explode(list) + 1 FROM tbl`
* 2. more than one [[Generator]] is found in projectList,
* e.g. `SELECT explode(list), explode(list) FROM tbl`
* 3. [[Generator]] is found in other operators that are not [[Project]] or [[Generate]],
* e.g. `SELECT * FROM tbl SORT BY explode(list)`
*/
object ExtractGenerator extends Rule[LogicalPlan] {
  /** Returns true if a [[Generator]] appears anywhere inside the expression. */
  private def hasGenerator(expr: Expression): Boolean = {
    expr.find(_.isInstanceOf[Generator]).isDefined
  }

  /**
   * Returns true if the expression contains a [[Generator]] that is NOT at the top level,
   * i.e. not directly under an alias — e.g. `explode(list) + 1`.
   */
  private def hasNestedGenerator(expr: NamedExpression): Boolean = expr match {
    case UnresolvedAlias(_: Generator, _) => false
    case Alias(_: Generator, _) => false
    case MultiAlias(_: Generator, _) => false
    case other => hasGenerator(other)
  }

  /** Strips a single top-level alias wrapper, if any, returning the wrapped child. */
  private def trimAlias(expr: NamedExpression): Expression = expr match {
    case UnresolvedAlias(child, _) => child
    case Alias(child, _) => child
    case MultiAlias(child, _) => child
    case _ => expr
  }

  private object AliasedGenerator {
    /**
     * Extracts a [[Generator]] expression, any names assigned by aliases to the outputs
     * and the outer flag. The outer flag is used when joining the generator output.
     * @param e the [[Expression]]
     * @return (the [[Generator]], seq of output names, outer flag)
     */
    def unapply(e: Expression): Option[(Generator, Seq[String], Boolean)] = e match {
      case Alias(GeneratorOuter(g: Generator), name) if g.resolved => Some((g, name :: Nil, true))
      case MultiAlias(GeneratorOuter(g: Generator), names) if g.resolved => Some((g, names, true))
      case Alias(g: Generator, name) if g.resolved => Some((g, name :: Nil, false))
      case MultiAlias(g: Generator, names) if g.resolved => Some((g, names, false))
      case _ => None
    }
  }

  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    // Error case 1 (see object scaladoc): generator nested inside a larger expression.
    case Project(projectList, _) if projectList.exists(hasNestedGenerator) =>
      val nestedGenerator = projectList.find(hasNestedGenerator).get
      throw new AnalysisException("Generators are not supported when it's nested in " +
        "expressions, but got: " + toPrettySQL(trimAlias(nestedGenerator)))

    // Error case 2: more than one generator in the same SELECT clause.
    case Project(projectList, _) if projectList.count(hasGenerator) > 1 =>
      val generators = projectList.filter(hasGenerator).map(trimAlias)
      throw new AnalysisException("Only one generator allowed per select clause but found " +
        generators.size + ": " + generators.map(toPrettySQL).mkString(", "))

    case p @ Project(projectList, child) =>
      // Holds the resolved generator, if one exists in the project list.
      var resolvedGenerator: Generate = null

      val newProjectList = projectList.flatMap {
        case AliasedGenerator(generator, names, outer) if generator.childrenResolved =>
          // It's a sanity check, this should not happen as the previous case will throw
          // exception earlier.
          assert(resolvedGenerator == null, "More than one generator found in SELECT.")

          resolvedGenerator =
            Generate(
              generator,
              join = projectList.size > 1, // Only join if there are other expressions in SELECT.
              outer = outer,
              qualifier = None,
              generatorOutput = ResolveGenerate.makeGeneratorOutput(generator, names),
              child)

          // Replace the generator in the project list by the attributes it generates.
          resolvedGenerator.generatorOutput
        case other => other :: Nil
      }

      if (resolvedGenerator != null) {
        Project(newProjectList, resolvedGenerator)
      } else {
        p
      }

    case g: Generate => g

    // Error case 3: a generator anywhere outside SELECT (e.g. SORT BY) is unsupported.
    case p if p.expressions.exists(hasGenerator) =>
      throw new AnalysisException("Generators are not supported outside the SELECT clause, but " +
        "got: " + p.simpleString)
  }
}
/**
* Rewrites table generating expressions that either need one or more of the following in order
* to be resolved:
* - concrete attribute references for their output.
* - to be relocated from a SELECT clause (i.e. from a [[Project]]) into a [[Generate]]).
*
* Names for the output [[Attribute]]s are extracted from [[Alias]] or [[MultiAlias]] expressions
* that wrap the [[Generator]].
*/
object ResolveGenerate extends Rule[LogicalPlan] {
  /** Fills in the output attributes of a [[Generate]] once its generator is resolved. */
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    // Wait until the child and the generator itself are resolved.
    case g: Generate if !g.child.resolved || !g.generator.resolved => g
    case g: Generate if !g.resolved =>
      g.copy(generatorOutput = makeGeneratorOutput(g.generator, g.generatorOutput.map(_.name)))
  }

  /**
   * Construct the output attributes for a [[Generator]], given a list of names. If the list
   * of names is empty, names are taken from the generator's element schema.
   */
  private[analysis] def makeGeneratorOutput(
      generator: Generator,
      names: Seq[String]): Seq[Attribute] = {
    val defaults = generator.elementSchema.toAttributes
    if (names.isEmpty) {
      // No aliases supplied: use the schema's own field names.
      defaults
    } else if (names.length == defaults.length) {
      // Rename each schema attribute with its positional alias.
      defaults.zip(names).map { case (attr, aliasName) => attr.withName(aliasName) }
    } else {
      failAnalysis(
        "The number of aliases supplied in the AS clause does not match the number of columns " +
        s"output by the UDTF expected ${defaults.size} aliases but got " +
        s"${names.mkString(",")} ")
    }
  }
}
/**
* Fixes nullability of Attributes in a resolved LogicalPlan by using the nullability of
* corresponding Attributes of its children output Attributes. This step is needed because
* users can use a resolved AttributeReference in the Dataset API and outer joins
 * can change the nullability of an AttributeReference. Without the fix, a nullable column's
* nullable field can be actually set as non-nullable, which cause illegal optimization
* (e.g., NULL propagation) and wrong answers.
* See SPARK-13484 and SPARK-13801 for the concrete queries of this case.
*/
object FixNullability extends Rule[LogicalPlan] {
  /**
   * Re-derives the nullability of every [[Attribute]] in a resolved plan, bottom-up, from
   * the attributes actually produced by each node's children, so that stale nullability
   * (e.g. an attribute captured before an outer join made it nullable) is corrected.
   */
  def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
    case unresolvedNode if !unresolvedNode.resolved => unresolvedNode // Skip unresolved nodes.
    case node: LogicalPlan if node.resolved =>
      // Collect every attribute emitted by the children. When several attributes share the
      // same ExprId (not really expected to happen), treat the attribute as nullable if any
      // one of them is.
      val childrenOutput = node.children.flatMap(_.output).groupBy(_.exprId).toSeq.flatMap {
        case (_, attrs) =>
          val anyNullable = attrs.exists(_.nullable)
          attrs.map(_.withNullability(anyNullable))
      }
      // AttributeMap compares only ExprIds for lookup, which is exactly what we need here
      // to find the child-side counterpart of each attribute this node references.
      val byExprId = AttributeMap[Attribute](childrenOutput.map(attr => attr -> attr))
      // Overwrite the nullability of every attribute that comes from a child with the
      // nullability the child actually produces.
      node.transformExpressions {
        case attr: Attribute if byExprId.contains(attr) =>
          attr.withNullability(byExprId(attr).nullable)
      }
  }
}
/**
* Extracts [[WindowExpression]]s from the projectList of a [[Project]] operator and
* aggregateExpressions of an [[Aggregate]] operator and creates individual [[Window]]
* operators for every distinct [[WindowSpecDefinition]].
*
* This rule handles three cases:
* - A [[Project]] having [[WindowExpression]]s in its projectList;
* - An [[Aggregate]] having [[WindowExpression]]s in its aggregateExpressions.
* - A [[Filter]]->[[Aggregate]] pattern representing GROUP BY with a HAVING
* clause and the [[Aggregate]] has [[WindowExpression]]s in its aggregateExpressions.
* Note: If there is a GROUP BY clause in the query, aggregations and corresponding
* filters (expressions in the HAVING clause) should be evaluated before any
* [[WindowExpression]]. If a query has SELECT DISTINCT, the DISTINCT part should be
* evaluated after all [[WindowExpression]]s.
*
* For every case, the transformation works as follows:
 * 1. For a list of [[Expression]]s (a projectList or an aggregateExpressions), partitions
 * it into two lists of [[Expression]]s, one for all [[WindowExpression]]s and another for
* all regular expressions.
* 2. For all [[WindowExpression]]s, groups them based on their [[WindowSpecDefinition]]s.
* 3. For every distinct [[WindowSpecDefinition]], creates a [[Window]] operator and inserts
* it into the plan tree.
*/
object ExtractWindowExpressions extends Rule[LogicalPlan] {
  /** Returns true if any expression in the list contains a [[WindowExpression]]. */
  private def hasWindowFunction(projectList: Seq[NamedExpression]): Boolean =
    projectList.exists(hasWindowFunction)

  /** Returns true if the expression contains a [[WindowExpression]] anywhere inside it. */
  private def hasWindowFunction(expr: NamedExpression): Boolean = {
    expr.find {
      case window: WindowExpression => true
      case _ => false
    }.isDefined
  }

  /**
   * From a Seq of [[NamedExpression]]s, extract expressions containing window expressions
   * and other regular expressions that do not contain any window expression. For example,
   * for `col1, Sum(col2 + col3) OVER (PARTITION BY col4 ORDER BY col5)`, we will extract
   * `col1`, `col2 + col3`, `col4`, and `col5` out and replace their appearances in
   * the window expression as attribute references. So, the first returned value will be
   * `[Sum(_w0) OVER (PARTITION BY _w1 ORDER BY _w2)]` and the second returned value will be
   * [col1, col2 + col3 as _w0, col4 as _w1, col5 as _w2].
   *
   * @return (seq of expressions containing at least one window expression,
   *          seq of non-window expressions)
   */
  private def extract(
      expressions: Seq[NamedExpression]): (Seq[NamedExpression], Seq[NamedExpression]) = {
    // First, we partition the input expressions into two parts. For the first part,
    // every expression in it contains at least one WindowExpression.
    // Expressions in the second part do not have any WindowExpression.
    val (expressionsWithWindowFunctions, regularExpressions) =
      expressions.partition(hasWindowFunction)

    // Then, we need to extract those regular expressions used in the WindowExpression.
    // For example, when we have col1 - Sum(col2 + col3) OVER (PARTITION BY col4 ORDER BY col5),
    // we need to make sure that col1 to col5 are all projected from the child of the Window
    // operator.
    val extractedExprBuffer = new ArrayBuffer[NamedExpression]()
    def extractExpr(expr: Expression): Expression = expr match {
      case ne: NamedExpression =>
        // If a named expression is not in regularExpressions, add it to
        // extractedExprBuffer and replace it with an AttributeReference.
        val missingExpr =
          AttributeSet(Seq(expr)) -- (regularExpressions ++ extractedExprBuffer)
        if (missingExpr.nonEmpty) {
          extractedExprBuffer += ne
        }
        // alias will be cleaned in the rule CleanupAliases
        ne
      case e: Expression if e.foldable =>
        e // No need to create an attribute reference if it will be evaluated as a Literal.
      case e: Expression =>
        // For other expressions, we extract it and replace it with an AttributeReference (with
        // an internal column name, e.g. "_w0").
        val withName = Alias(e, s"_w${extractedExprBuffer.length}")()
        extractedExprBuffer += withName
        withName.toAttribute
    }

    // Now, we extract regular expressions from expressionsWithWindowFunctions
    // by using extractExpr.
    val seenWindowAggregates = new ArrayBuffer[AggregateExpression]
    val newExpressionsWithWindowFunctions = expressionsWithWindowFunctions.map {
      _.transform {
        // Extracts children expressions of a WindowFunction (input parameters of
        // a WindowFunction).
        case wf: WindowFunction =>
          val newChildren = wf.children.map(extractExpr)
          wf.withNewChildren(newChildren)

        // Extracts expressions from the partition spec and order spec.
        case wsc @ WindowSpecDefinition(partitionSpec, orderSpec, _) =>
          val newPartitionSpec = partitionSpec.map(extractExpr)
          val newOrderSpec = orderSpec.map { so =>
            val newChild = extractExpr(so.child)
            so.copy(child = newChild)
          }
          wsc.copy(partitionSpec = newPartitionSpec, orderSpec = newOrderSpec)

        // Extract Windowed AggregateExpression. Remembered in seenWindowAggregates so the
        // next case does not also extract it as a plain aggregate.
        case we @ WindowExpression(
            ae @ AggregateExpression(function, _, _, _),
            spec: WindowSpecDefinition) =>
          val newChildren = function.children.map(extractExpr)
          val newFunction = function.withNewChildren(newChildren).asInstanceOf[AggregateFunction]
          val newAgg = ae.copy(aggregateFunction = newFunction)
          seenWindowAggregates += newAgg
          WindowExpression(newAgg, spec)

        // Extracts AggregateExpression. For example, for SUM(x) - Sum(y) OVER (...),
        // we need to extract SUM(x).
        case agg: AggregateExpression if !seenWindowAggregates.contains(agg) =>
          val withName = Alias(agg, s"_w${extractedExprBuffer.length}")()
          extractedExprBuffer += withName
          withName.toAttribute

        // Extracts other attributes
        case attr: Attribute => extractExpr(attr)

      }.asInstanceOf[NamedExpression]
    }

    (newExpressionsWithWindowFunctions, regularExpressions ++ extractedExprBuffer)
  } // end of extract

  /**
   * Adds operators for Window Expressions. Every Window operator handles a single Window Spec.
   */
  private def addWindow(
      expressionsWithWindowFunctions: Seq[NamedExpression],
      child: LogicalPlan): LogicalPlan = {
    // First, we need to extract all WindowExpressions from expressionsWithWindowFunctions
    // and put those extracted WindowExpressions to extractedWindowExprBuffer.
    // This step is needed because it is possible that an expression contains multiple
    // WindowExpressions with different Window Specs.
    // After extracting WindowExpressions, we need to construct a project list to generate
    // expressionsWithWindowFunctions based on extractedWindowExprBuffer.
    // For example, for "sum(a) over (...) / sum(b) over (...)", we will first extract
    // "sum(a) over (...)" and "sum(b) over (...)" out, and assign "_we0" as the alias to
    // "sum(a) over (...)" and "_we1" as the alias to "sum(b) over (...)".
    // Then, the projectList will be [_we0/_we1].
    val extractedWindowExprBuffer = new ArrayBuffer[NamedExpression]()
    val newExpressionsWithWindowFunctions = expressionsWithWindowFunctions.map {
      // We need to use transformDown because we want to trigger
      // "case alias @ Alias(window: WindowExpression, _)" first.
      _.transformDown {
        case alias @ Alias(window: WindowExpression, _) =>
          // If a WindowExpression has an assigned alias, just use it.
          extractedWindowExprBuffer += alias
          alias.toAttribute
        case window: WindowExpression =>
          // If there is no alias assigned to the WindowExpressions. We create an
          // internal column.
          val withName = Alias(window, s"_we${extractedWindowExprBuffer.length}")()
          extractedWindowExprBuffer += withName
          withName.toAttribute
      }.asInstanceOf[NamedExpression]
    }

    // Second, we group extractedWindowExprBuffer based on their Partition and Order Specs.
    val groupedWindowExpressions = extractedWindowExprBuffer.groupBy { expr =>
      val distinctWindowSpec = expr.collect {
        case window: WindowExpression => window.windowSpec
      }.distinct

      // We do a final check and see if we only have a single Window Spec defined in an
      // expression.
      if (distinctWindowSpec.isEmpty) {
        failAnalysis(s"$expr does not have any WindowExpression.")
      } else if (distinctWindowSpec.length > 1) {
        // newExpressionsWithWindowFunctions only have expressions with a single
        // WindowExpression. If we reach here, we have a bug.
        failAnalysis(s"$expr has multiple Window Specifications ($distinctWindowSpec)." +
          s"Please file a bug report with this error message, stack trace, and the query.")
      } else {
        val spec = distinctWindowSpec.head
        (spec.partitionSpec, spec.orderSpec)
      }
    }.toSeq

    // Third, we aggregate them by adding each Window operator for each Window Spec and then
    // setting this to the child of the next Window operator.
    val windowOps =
      groupedWindowExpressions.foldLeft(child) {
        case (last, ((partitionSpec, orderSpec), windowExpressions)) =>
          Window(windowExpressions, partitionSpec, orderSpec, last)
      }

    // Finally, we create a Project to output windowOps's output
    // newExpressionsWithWindowFunctions.
    Project(windowOps.output ++ newExpressionsWithWindowFunctions, windowOps)
  } // end of addWindow

  // We have to use transformDown at here to make sure the rule of
  // "Aggregate with Having clause" will be triggered.
  def apply(plan: LogicalPlan): LogicalPlan = plan transformDown {

    // Aggregate with Having clause. This rule works with an unresolved Aggregate because
    // a resolved Aggregate will not have Window Functions.
    case f @ Filter(condition, a @ Aggregate(groupingExprs, aggregateExprs, child))
      if child.resolved &&
         hasWindowFunction(aggregateExprs) &&
         a.expressions.forall(_.resolved) =>
      val (windowExpressions, aggregateExpressions) = extract(aggregateExprs)
      // Create an Aggregate operator to evaluate aggregation functions.
      val withAggregate = Aggregate(groupingExprs, aggregateExpressions, child)
      // Add a Filter operator for conditions in the Having clause.
      val withFilter = Filter(condition, withAggregate)
      val withWindow = addWindow(windowExpressions, withFilter)

      // Finally, generate output columns according to the original projectList.
      val finalProjectList = aggregateExprs.map(_.toAttribute)
      Project(finalProjectList, withWindow)

    case p: LogicalPlan if !p.childrenResolved => p

    // Aggregate without Having clause.
    case a @ Aggregate(groupingExprs, aggregateExprs, child)
      if hasWindowFunction(aggregateExprs) &&
         a.expressions.forall(_.resolved) =>
      val (windowExpressions, aggregateExpressions) = extract(aggregateExprs)
      // Create an Aggregate operator to evaluate aggregation functions.
      val withAggregate = Aggregate(groupingExprs, aggregateExpressions, child)
      // Add Window operators.
      val withWindow = addWindow(windowExpressions, withAggregate)

      // Finally, generate output columns according to the original projectList.
      val finalProjectList = aggregateExprs.map(_.toAttribute)
      Project(finalProjectList, withWindow)

    // We only extract Window Expressions after all expressions of the Project
    // have been resolved.
    case p @ Project(projectList, child)
      if hasWindowFunction(projectList) && !p.expressions.exists(!_.resolved) =>
      val (windowExpressions, regularExpressions) = extract(projectList)
      // We add a project to get all needed expressions for window expressions from the child
      // of the original Project operator.
      val withProject = Project(regularExpressions, child)
      // Add Window operators.
      val withWindow = addWindow(windowExpressions, withProject)

      // Finally, generate output columns according to the original projectList.
      val finalProjectList = projectList.map(_.toAttribute)
      Project(finalProjectList, withWindow)
  }
}
/**
* Pulls out nondeterministic expressions from LogicalPlan which is not Project or Filter,
* put them into an inner Project and finally project them away at the outer Project.
*/
object PullOutNondeterministic extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    case p if !p.resolved => p // Skip unresolved nodes.
    // Project and Filter may legitimately host nondeterministic expressions, so
    // they are left untouched.
    case p: Project => p
    case f: Filter => f

    case a: Aggregate if a.groupingExpressions.exists(!_.deterministic) =>
      // Evaluate each nondeterministic grouping expression exactly once in a
      // Project below the Aggregate, then reference it by attribute above.
      val nondeterToAttr = getNondeterToAttr(a.groupingExpressions)
      val newChild = Project(a.child.output ++ nondeterToAttr.values, a.child)
      a.transformExpressions { case e =>
        nondeterToAttr.get(e).map(_.toAttribute).getOrElse(e)
      }.copy(child = newChild)

    // todo: It's hard to write a general rule to pull out nondeterministic expressions
    // from LogicalPlan, currently we only do it for UnaryNode which has same output
    // schema with its child.
    case p: UnaryNode if p.output == p.child.output && p.expressions.exists(!_.deterministic) =>
      val nondeterToAttr = getNondeterToAttr(p.expressions)
      val newPlan = p.transformExpressions { case e =>
        nondeterToAttr.get(e).map(_.toAttribute).getOrElse(e)
      }
      val newChild = Project(p.child.output ++ nondeterToAttr.values, p.child)
      // The outer Project restores the operator's original output, projecting away
      // the helper attributes introduced for the nondeterministic expressions.
      Project(p.output, newPlan.withNewChildren(newChild :: Nil))
  }

  /**
   * Maps every distinct leaf [[Nondeterministic]] expression found in `exprs` to a
   * named expression (the expression itself when already named, otherwise a fresh
   * Alias) so it can be evaluated once in a child Project and referenced above.
   */
  private def getNondeterToAttr(exprs: Seq[Expression]): Map[Expression, NamedExpression] = {
    exprs.filterNot(_.deterministic).flatMap { expr =>
      val leafNondeterministic = expr.collect { case n: Nondeterministic => n }
      leafNondeterministic.distinct.map { e =>
        val ne = e match {
          case n: NamedExpression => n
          case _ => Alias(e, "_nondeterministic")()
        }
        e -> ne
      }
    }.toMap
  }
}
/**
* Correctly handle null primitive inputs for UDF by adding extra [[If]] expression to do the
* null check. When user defines a UDF with primitive parameters, there is no way to tell if the
* primitive parameter is null or not, so here we assume the primitive input is null-propagatable
* and we should return null if the input is null.
*/
object HandleNullInputsForUDF extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    case p if !p.resolved => p // Skip unresolved nodes.

    case p => p transformExpressionsUp {

      case udf @ ScalaUDF(func, _, inputs, _, _, _, _) =>
        // Reflect on the UDF's Scala function to learn its declared parameter types;
        // they must line up one-to-one with the actual inputs.
        val parameterTypes = ScalaReflection.getParameterTypes(func)
        assert(parameterTypes.length == inputs.length)

        // Build an OR of IsNull checks over every primitive-typed input; if any of
        // them is null the whole UDF result is forced to null instead of being
        // evaluated (primitives cannot represent null inside the function).
        val inputsNullCheck = parameterTypes.zip(inputs)
          // TODO: skip null handling for not-nullable primitive inputs after we can completely
          // trust the `nullable` information.
          // .filter { case (cls, expr) => cls.isPrimitive && expr.nullable }
          .filter { case (cls, _) => cls.isPrimitive }
          .map { case (_, expr) => IsNull(expr) }
          .reduceLeftOption[Expression]((e1, e2) => Or(e1, e2))
        // No primitive inputs => no guard needed; keep the UDF as-is.
        inputsNullCheck.map(If(_, Literal.create(null, udf.dataType), udf)).getOrElse(udf)
    }
  }
}
/**
* Check and add proper window frames for all window functions.
*/
object ResolveWindowFrame extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    case logical: LogicalPlan => logical transformExpressions {
      // The window function demands a specific frame but the query supplied a
      // different explicit frame: reject the query.
      case WindowExpression(wf: WindowFunction,
      WindowSpecDefinition(_, _, f: SpecifiedWindowFrame))
        if wf.frame != UnspecifiedFrame && wf.frame != f =>
        failAnalysis(s"Window Frame $f must match the required frame ${wf.frame}")
      // The function demands a frame and none was written: adopt the function's frame.
      case WindowExpression(wf: WindowFunction,
      s @ WindowSpecDefinition(_, o, UnspecifiedFrame))
        if wf.frame != UnspecifiedFrame =>
        WindowExpression(wf, s.copy(frameSpecification = wf.frame))
      // Neither the function nor the query specified a frame: use the default,
      // which depends on whether an ORDER BY (`o`) is present.
      case we @ WindowExpression(e, s @ WindowSpecDefinition(_, o, UnspecifiedFrame))
        if e.resolved =>
        val frame = SpecifiedWindowFrame.defaultWindowFrame(o.nonEmpty, acceptWindowFrame = true)
        we.copy(windowSpec = s.copy(frameSpecification = frame))
    }
  }
}
/**
* Check and add order to [[AggregateWindowFunction]]s.
*/
object ResolveWindowOrder extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    case logical: LogicalPlan => logical transformExpressions {
      // Window functions are meaningless without an ordering: fail with a hint.
      case WindowExpression(wf: WindowFunction, spec) if spec.orderSpec.isEmpty =>
        failAnalysis(s"Window function $wf requires window to be ordered, please add ORDER BY " +
          s"clause. For example SELECT $wf(value_expr) OVER (PARTITION BY window_partition " +
          s"ORDER BY window_ordering) from table")
      // Rank-like functions need the ordering expressions themselves (the children
      // of the SortOrders) to compute ties; push them into the function.
      case WindowExpression(rank: RankLike, spec) if spec.resolved =>
        val order = spec.orderSpec.map(_.child)
        WindowExpression(rank.withOrder(order), spec)
    }
  }
}
/**
* Removes natural or using joins by calculating output columns based on output from two sides,
* Then apply a Project on a normal Join to eliminate natural or using join.
*/
object ResolveNaturalAndUsingJoin extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    // USING join: the join column names are given explicitly by the query.
    case j @ Join(left, right, UsingJoin(joinType, usingCols), condition)
      if left.resolved && right.resolved && j.duplicateResolved =>
      commonNaturalJoinProcessing(left, right, joinType, usingCols, None)
    // NATURAL join: join on every column name common to both sides.
    case j @ Join(left, right, NaturalJoin(joinType), condition) if j.resolvedExceptNatural =>
      // find common column names from both sides
      val joinNames = left.output.map(_.name).intersect(right.output.map(_.name))
      commonNaturalJoinProcessing(left, right, joinType, joinNames, condition)
  }
}
/**
 * Rewrites a natural/USING join into a plain Join topped by a Project.
 *
 * The join keys named in `joinNames` are resolved on both sides (failing analysis
 * when a name is missing), equality predicates over the key pairs are ANDed into
 * any pre-existing `condition`, and the output columns are arranged per join type:
 * join keys first, then the remaining left columns, then the remaining right ones.
 */
private def commonNaturalJoinProcessing(
    left: LogicalPlan,
    right: LogicalPlan,
    joinType: JoinType,
    joinNames: Seq[String],
    condition: Option[Expression]) = {
  // Resolve each USING column on the left side, respecting the session resolver
  // (case sensitivity); unresolvable names are an analysis error.
  val leftKeys = joinNames.map { keyName =>
    left.output.find(attr => resolver(attr.name, keyName)).getOrElse {
      throw new AnalysisException(s"USING column `$keyName` cannot be resolved on the left " +
        s"side of the join. The left-side columns: [${left.output.map(_.name).mkString(", ")}]")
    }
  }
  val rightKeys = joinNames.map { keyName =>
    right.output.find(attr => resolver(attr.name, keyName)).getOrElse {
      throw new AnalysisException(s"USING column `$keyName` cannot be resolved on the right " +
        s"side of the join. The right-side columns: [${right.output.map(_.name).mkString(", ")}]")
    }
  }
  val joinPairs = leftKeys.zip(rightKeys)

  // Combine the implicit key-equality predicates with any explicit condition.
  val newCondition = (condition ++ joinPairs.map(EqualTo.tupled)).reduceOption(And)

  // columns not in joinPairs
  val lUniqueOutput = left.output.filterNot(att => leftKeys.contains(att))
  val rUniqueOutput = right.output.filterNot(att => rightKeys.contains(att))

  // the output list looks like: join keys, columns from left, columns from right
  // The side that can produce nulls under the given outer join type has its
  // columns marked nullable.
  val projectList = joinType match {
    case LeftOuter =>
      leftKeys ++ lUniqueOutput ++ rUniqueOutput.map(_.withNullability(true))
    case LeftExistence(_) =>
      leftKeys ++ lUniqueOutput
    case RightOuter =>
      rightKeys ++ lUniqueOutput.map(_.withNullability(true)) ++ rUniqueOutput
    case FullOuter =>
      // in full outer join, joinCols should be non-null if there is.
      val joinedCols = joinPairs.map { case (l, r) => Alias(Coalesce(Seq(l, r)), l.name)() }
      joinedCols ++
        lUniqueOutput.map(_.withNullability(true)) ++
        rUniqueOutput.map(_.withNullability(true))
    case _ : InnerLike =>
      leftKeys ++ lUniqueOutput ++ rUniqueOutput
    case _ =>
      sys.error("Unsupported natural join type " + joinType)
  }
  // use Project to trim unnecessary fields
  Project(projectList, Join(left, right, joinType, newCondition))
}
/**
* Replaces [[UnresolvedDeserializer]] with the deserialization expression that has been resolved
* to the given input attributes.
*/
object ResolveDeserializer extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    case p if !p.childrenResolved => p
    case p if p.resolved => p

    case p => p transformExpressions {
      case UnresolvedDeserializer(deserializer, inputAttributes) =>
        // When the deserializer did not pin its inputs, resolve against the
        // operator's full child output.
        val inputs = if (inputAttributes.isEmpty) {
          p.children.flatMap(_.output)
        } else {
          inputAttributes
        }

        validateTopLevelTupleFields(deserializer, inputs)
        val resolved = resolveExpression(
          deserializer, LocalRelation(inputs), throws = true)
        val result = resolved transformDown {
          // Now that the input data is resolved we know its element type, so the
          // placeholder map can be materialized into a concrete MapObjects.
          case UnresolvedMapObjects(func, inputData, cls) if inputData.resolved =>
            inputData.dataType match {
              case ArrayType(et, cn) =>
                val expr = MapObjects(func, inputData, et, cn, cls) transformUp {
                  // Field accesses inside the lambda become resolvable once the
                  // element type is known.
                  case UnresolvedExtractValue(child, fieldName) if child.resolved =>
                    ExtractValue(child, fieldName, resolver)
                }
                expr
              case other =>
                throw new AnalysisException("need an array field but got " + other.simpleString)
            }
        }
        validateNestedTupleFields(result)
        result
    }
  }

  /** Raises the standard "field count mismatch" analysis error. */
  private def fail(schema: StructType, maxOrdinal: Int): Unit = {
    throw new AnalysisException(s"Try to map ${schema.simpleString} to Tuple${maxOrdinal + 1}, " +
      "but failed as the number of fields does not line up.")
  }

  /**
   * For each top-level Tuple field, we use [[GetColumnByOrdinal]] to get its corresponding column
   * by position. However, the actual number of columns may be different from the number of Tuple
   * fields. This method is used to check the number of columns and fields, and throw an
   * exception if they do not match.
   */
  private def validateTopLevelTupleFields(
      deserializer: Expression, inputs: Seq[Attribute]): Unit = {
    val ordinals = deserializer.collect {
      case GetColumnByOrdinal(ordinal, _) => ordinal
    }.distinct.sorted

    // The sorted distinct ordinals must be exactly 0..inputs.size-1 to line up.
    if (ordinals.nonEmpty && ordinals != inputs.indices) {
      fail(inputs.toStructType, ordinals.last)
    }
  }

  /**
   * For each nested Tuple field, we use [[GetStructField]] to get its corresponding struct field
   * by position. However, the actual number of struct fields may be different from the number
   * of nested Tuple fields. This method is used to check the number of struct fields and nested
   * Tuple fields, and throw an exception if they do not match.
   */
  private def validateNestedTupleFields(deserializer: Expression): Unit = {
    val structChildToOrdinals = deserializer
      // There are 2 kinds of `GetStructField`:
      // 1. resolved from `UnresolvedExtractValue`, and it will have a `name` property.
      // 2. created when we build deserializer expression for nested tuple, no `name` property.
      // Here we want to validate the ordinals of nested tuple, so we should only catch
      // `GetStructField` without the name property.
      .collect { case g: GetStructField if g.name.isEmpty => g }
      .groupBy(_.child)
      .mapValues(_.map(_.ordinal).distinct.sorted)

    structChildToOrdinals.foreach { case (expr, ordinals) =>
      val schema = expr.dataType.asInstanceOf[StructType]
      if (ordinals != schema.indices) {
        fail(schema, ordinals.last)
      }
    }
  }
}
/**
* Resolves [[NewInstance]] by finding and adding the outer scope to it if the object being
* constructed is an inner class.
*/
object ResolveNewInstance extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    case p if !p.childrenResolved => p
    case p if p.resolved => p

    case p => p transformExpressions {
      case n: NewInstance if n.childrenResolved && !n.resolved =>
        // A NewInstance that is still unresolved despite resolved children must be
        // constructing an inner class: look up its registered outer scope.
        val outer = OuterScopes.getOuterScope(n.cls)
        if (outer == null) {
          throw new AnalysisException(
            s"Unable to generate an encoder for inner class `${n.cls.getName}` without " +
              "access to the scope that this class was defined in.\\n" +
              "Try moving this class out of its parent class.")
        }
        n.copy(outerPointer = Some(outer))
    }
  }
}
/**
* Replace the [[UpCast]] expression by [[Cast]], and throw exceptions if the cast may truncate.
*/
object ResolveUpCast extends Rule[LogicalPlan] {
  /** Raises the "cannot up cast" error, describing the walked type path for context. */
  private def fail(from: Expression, to: DataType, walkedTypePath: Seq[String]) = {
    // Lambda variables have no meaningful SQL form; describe them generically.
    val fromStr = from match {
      case l: LambdaVariable => "array element"
      case e => e.sql
    }

    throw new AnalysisException(s"Cannot up cast $fromStr from " +
      s"${from.dataType.simpleString} to ${to.simpleString} as it may truncate\\n" +
      "The type path of the target object is:\\n" + walkedTypePath.mkString("", "\\n", "\\n") +
      "You can either add an explicit cast to the input data or choose a higher precision " +
      "type of the field in the target object")
  }

  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    case p if !p.childrenResolved => p
    case p if p.resolved => p

    case p => p transformExpressions {
      // Leave untouched until the child resolves so dataType is available below.
      case u @ UpCast(child, _, _) if !child.resolved => u

      // Up-casts that could lose information are analysis errors.
      case UpCast(child, dataType, walkedTypePath)
        if Cast.mayTruncate(child.dataType, dataType) =>
        fail(child, dataType, walkedTypePath)

      // Safe up-cast: replace with a regular (nullable-target) Cast.
      case UpCast(child, dataType, walkedTypePath) => Cast(child, dataType.asNullable)
    }
  }
}
}
/**
* Removes [[SubqueryAlias]] operators from the plan. Subqueries are only required to provide
* scoping information for attributes and can be removed once analysis is complete.
*/
object EliminateSubqueryAliases extends Rule[LogicalPlan] {
  /** Strips every [[SubqueryAlias]], replacing it with its child plan. */
  def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
    case alias: SubqueryAlias => alias.child
  }
}
/**
* Removes [[Union]] operators from the plan if it just has one child.
*/
object EliminateUnions extends Rule[LogicalPlan] {
  /** A Union over a single child is a no-op; collapse it to that child. */
  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    case Union(Seq(onlyChild)) => onlyChild
  }
}
/**
* Cleans up unnecessary Aliases inside the plan. Basically we only need Alias as a top level
* expression in Project(project list) or Aggregate(aggregate expressions) or
* Window(window expressions). Notice that if an expression has other expression parameters which
* are not in its `children`, e.g. `RuntimeReplaceable`, the transformation for Aliases in this
* rule can't work for those parameters.
*/
object CleanupAliases extends Rule[LogicalPlan] {
  /** Removes every Alias anywhere inside `e` (including `e` itself). */
  private def trimAliases(e: Expression): Expression = {
    e.transformDown {
      case Alias(child, _) => child
    }
  }

  /** Keeps a top-level Alias (it names the output column) but strips nested ones. */
  def trimNonTopLevelAliases(e: Expression): Expression = e match {
    case a: Alias =>
      a.withNewChildren(trimAliases(a.child) :: Nil)
    case other => trimAliases(other)
  }

  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    // Project/Aggregate/Window lists need top-level Aliases for column naming;
    // everything deeper is noise.
    case Project(projectList, child) =>
      val cleanedProjectList =
        projectList.map(trimNonTopLevelAliases(_).asInstanceOf[NamedExpression])
      Project(cleanedProjectList, child)

    case Aggregate(grouping, aggs, child) =>
      val cleanedAggs = aggs.map(trimNonTopLevelAliases(_).asInstanceOf[NamedExpression])
      // Grouping expressions never name output columns, so strip all aliases there.
      Aggregate(grouping.map(trimAliases), cleanedAggs, child)

    case w @ Window(windowExprs, partitionSpec, orderSpec, child) =>
      val cleanedWindowExprs =
        windowExprs.map(e => trimNonTopLevelAliases(e).asInstanceOf[NamedExpression])
      Window(cleanedWindowExprs, partitionSpec.map(trimAliases),
        orderSpec.map(trimAliases(_).asInstanceOf[SortOrder]), child)

    // Operators that operate on objects should only have expressions from encoders, which should
    // never have extra aliases.
    case o: ObjectConsumer => o
    case o: ObjectProducer => o
    case a: AppendColumns => a

    // Any other operator: aliases serve no purpose, strip them all.
    case other =>
      other transformExpressionsDown {
        case Alias(child, _) => child
      }
  }
}
/**
* Ignore event time watermark in batch query, which is only supported in Structured Streaming.
* TODO: add this rule into analyzer rule list.
*/
object EliminateEventTimeWatermark extends Rule[LogicalPlan] {
  /** Watermarks only make sense on streaming children; drop them in batch plans. */
  override def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    case watermark: EventTimeWatermark if !watermark.child.isStreaming =>
      watermark.child
  }
}
/**
* Maps a time column to multiple time windows using the Expand operator. Since it's non-trivial to
* figure out how many windows a time column can map to, we over-estimate the number of windows and
* filter out the rows where the time column is not inside the time window.
*/
object TimeWindowing extends Rule[LogicalPlan] {
  import org.apache.spark.sql.catalyst.dsl.expressions._

  private final val WINDOW_COL_NAME = "window"
  private final val WINDOW_START = "start"
  private final val WINDOW_END = "end"

  /**
   * Generates the logical plan for generating window ranges on a timestamp column. Without
   * knowing what the timestamp value is, it's non-trivial to figure out deterministically how many
   * window ranges a timestamp will map to given all possible combinations of a window duration,
   * slide duration and start time (offset). Therefore, we express and over-estimate the number of
   * windows there may be, and filter the valid windows. We use last Project operator to group
   * the window columns into a struct so they can be accessed as `window.start` and `window.end`.
   *
   * The windows are calculated as below:
   * maxNumOverlapping <- ceil(windowDuration / slideDuration)
   * for (i <- 0 until maxNumOverlapping)
   *   windowId <- ceil((timestamp - startTime) / slideDuration)
   *   windowStart <- windowId * slideDuration + (i - maxNumOverlapping) * slideDuration + startTime
   *   windowEnd <- windowStart + windowDuration
   *   return windowStart, windowEnd
   *
   * This behaves as follows for the given parameters for the time: 12:05. The valid windows are
   * marked with a +, and invalid ones are marked with a x. The invalid ones are filtered using the
   * Filter operator.
   * window: 12m, slide: 5m, start: 0m :: window: 12m, slide: 5m, start: 2m
   *     11:55 - 12:07 +                      11:52 - 12:04 x
   *     12:00 - 12:12 +                      11:57 - 12:09 +
   *     12:05 - 12:17 +                      12:02 - 12:14 +
   *
   * @param plan The logical plan
   * @return the logical plan that will generate the time windows using the Expand operator, with
   *         the Filter operator for correctness and Project for usability.
   */
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
    case p: LogicalPlan if p.children.size == 1 =>
      val child = p.children.head
      // All distinct TimeWindow expressions appearing anywhere in p's expressions.
      val windowExpressions =
        p.expressions.flatMap(_.collect { case t: TimeWindow => t }).toSet

      val numWindowExpr = windowExpressions.size
      // Only support a single window expression for now
      if (numWindowExpr == 1 &&
        windowExpressions.head.timeColumn.resolved &&
        windowExpressions.head.checkInputDataTypes().isSuccess) {

        val window = windowExpressions.head

        // Preserve the time column's metadata on the generated window attribute.
        val metadata = window.timeColumn match {
          case a: Attribute => a.metadata
          case _ => Metadata.empty
        }

        // Builds the (start, end) struct for the i-th of `overlappingWindows`
        // candidate windows that might contain the timestamp (see scaladoc above).
        def getWindow(i: Int, overlappingWindows: Int): Expression = {
          val division = (PreciseTimestampConversion(
            window.timeColumn, TimestampType, LongType) - window.startTime) / window.slideDuration
          val ceil = Ceil(division)
          // if the division is equal to the ceiling, our record is the start of a window
          val windowId = CaseWhen(Seq((ceil === division, ceil + 1)), Some(ceil))
          val windowStart = (windowId + i - overlappingWindows) *
            window.slideDuration + window.startTime
          val windowEnd = windowStart + window.windowDuration

          CreateNamedStruct(
            Literal(WINDOW_START) ::
              PreciseTimestampConversion(windowStart, LongType, TimestampType) ::
              Literal(WINDOW_END) ::
              PreciseTimestampConversion(windowEnd, LongType, TimestampType) ::
              Nil)
        }

        val windowAttr = AttributeReference(
          WINDOW_COL_NAME, window.dataType, metadata = metadata)()

        if (window.windowDuration == window.slideDuration) {
          // Tumbling windows: each timestamp maps to exactly one window, so a
          // single Project (no Expand) suffices.
          val windowStruct = Alias(getWindow(0, 1), WINDOW_COL_NAME)(
            exprId = windowAttr.exprId)

          val replacedPlan = p transformExpressions {
            case t: TimeWindow => windowAttr
          }

          // For backwards compatibility we add a filter to filter out nulls
          val filterExpr = IsNotNull(window.timeColumn)

          replacedPlan.withNewChildren(
            Filter(filterExpr,
              Project(windowStruct +: child.output, child)) :: Nil)
        } else {
          // Sliding windows: over-generate `overlappingWindows` candidate rows per
          // input row with Expand, then filter to the windows that truly contain
          // the timestamp.
          val overlappingWindows =
            math.ceil(window.windowDuration * 1.0 / window.slideDuration).toInt
          val windows =
            Seq.tabulate(overlappingWindows)(i => getWindow(i, overlappingWindows))

          val projections = windows.map(_ +: child.output)

          val filterExpr =
            window.timeColumn >= windowAttr.getField(WINDOW_START) &&
              window.timeColumn < windowAttr.getField(WINDOW_END)

          val substitutedPlan = Filter(filterExpr,
            Expand(projections, windowAttr +: child.output, child))

          val renamedPlan = p transformExpressions {
            case t: TimeWindow => windowAttr
          }

          renamedPlan.withNewChildren(substitutedPlan :: Nil)
        }
      } else if (numWindowExpr > 1) {
        p.failAnalysis("Multiple time window expressions would result in a cartesian product " +
          "of rows, therefore they are currently not supported.")
      } else {
        p // Return unchanged. Analyzer will throw exception later
      }
  }
}
/**
* Resolve a [[CreateNamedStruct]] if it contains [[NamePlaceholder]]s.
*/
object ResolveCreateNamedStruct extends Rule[LogicalPlan] {
  /**
   * Walks the struct's (name, value) pairs and replaces each NamePlaceholder whose
   * value is a resolved named expression with a literal carrying that name.
   */
  override def apply(plan: LogicalPlan): LogicalPlan = plan.transformAllExpressions {
    case struct: CreateNamedStruct if !struct.resolved =>
      val filledChildren = struct.children.grouped(2).toList.flatMap {
        case Seq(NamePlaceholder, value: NamedExpression) if value.resolved =>
          Literal(value.name) :: value :: Nil
        case pair =>
          pair
      }
      CreateNamedStruct(filledChildren)
  }
}
/**
* The aggregate expressions from subquery referencing outer query block are pushed
* down to the outer query block for evaluation. This rule below updates such outer references
* as AttributeReference referring attributes from the parent/outer query block.
*
* For example (SQL):
* {{{
* SELECT l.a FROM l GROUP BY 1 HAVING EXISTS (SELECT 1 FROM r WHERE r.d < min(l.b))
* }}}
* Plan before the rule.
* Project [a#226]
* +- Filter exists#245 [min(b#227)#249]
* : +- Project [1 AS 1#247]
* : +- Filter (d#238 < min(outer(b#227))) <-----
* : +- SubqueryAlias r
* : +- Project [_1#234 AS c#237, _2#235 AS d#238]
* : +- LocalRelation [_1#234, _2#235]
* +- Aggregate [a#226], [a#226, min(b#227) AS min(b#227)#249]
* +- SubqueryAlias l
* +- Project [_1#223 AS a#226, _2#224 AS b#227]
* +- LocalRelation [_1#223, _2#224]
* Plan after the rule.
* Project [a#226]
* +- Filter exists#245 [min(b#227)#249]
* : +- Project [1 AS 1#247]
* : +- Filter (d#238 < outer(min(b#227)#249)) <-----
* : +- SubqueryAlias r
* : +- Project [_1#234 AS c#237, _2#235 AS d#238]
* : +- LocalRelation [_1#234, _2#235]
* +- Aggregate [a#226], [a#226, min(b#227) AS min(b#227)#249]
* +- SubqueryAlias l
* +- Project [_1#223 AS a#226, _2#224 AS b#227]
* +- LocalRelation [_1#223, _2#224]
*/
object UpdateOuterReferences extends Rule[LogicalPlan] {
  // NOTE: partial match — only ever called on expressions drawn from the Alias
  // collection built in apply(), so the Alias case always applies.
  private def stripAlias(expr: Expression): Expression = expr match { case a: Alias => a.child }

  /**
   * Rewrites expressions in the subquery plan: any expression that semantically
   * matches one of the aggregate's output aliases (`refExprs`) is replaced by an
   * OuterReference to that alias's attribute.
   */
  private def updateOuterReferenceInSubquery(
      plan: LogicalPlan,
      refExprs: Seq[Expression]): LogicalPlan = {
    plan transformAllExpressions { case e =>
      val outerAlias =
        refExprs.find(stripAlias(_).semanticEquals(stripOuterReference(e)))
      outerAlias match {
        case Some(a: Alias) => OuterReference(a.toAttribute)
        case _ => e
      }
    }
  }

  def apply(plan: LogicalPlan): LogicalPlan = {
    plan transform {
      // Only HAVING-style Filter-over-Aggregate shapes are targeted (see scaladoc).
      case f @ Filter(_, a: Aggregate) if f.resolved =>
        f transformExpressions {
          case s: SubqueryExpression if s.children.nonEmpty =>
            // Collect the aliases from output of aggregate.
            val outerAliases = a.aggregateExpressions collect { case a: Alias => a }
            // Update the subquery plan to record the OuterReference to point to outer query plan.
            s.withNewPlan(updateOuterReferenceInSubquery(s.plan, outerAliases))
        }
    }
  }
}
| UndeadBaneGitHub/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala | Scala | apache-2.0 | 112,161 |
package skinny.filter
import scala.language.implicitConversions
import skinny.micro.context.SkinnyContext
import skinny.micro.contrib.csrf.CSRFTokenGenerator
import skinny.micro.contrib.{ AsyncCSRFTokenSupport, CSRFTokenSupport }
import skinny.controller.feature._
/**
* Enables replacing Servlet session with Skinny's session shared among several Servlet apps.
*
* Mounting skinny.session.SkinnySessionInitializer on the top of Bootstrap.scala is required.
*
* {{{
* ctx.mount(classOf[SkinnySessionInitializer], "/\*")
* }}}
*/
trait AsyncSkinnySessionFilter extends SkinnySessionFilterBase with AsyncBeforeAfterActionFeature {

  self: FlashFeature with AsyncCSRFTokenSupport with LocaleFeature =>

  // --------------------------------------
  // SkinnySession by using Skinny beforeAction/afterAction

  // Load the shared Skinny session before every action and persist it afterwards.
  beforeAction()(implicit ctx => initializeSkinnySession)
  afterAction()(implicit ctx => saveCurrentSkinnySession)

  // --------------------------------------
  // override CsrfTokenSupport

  /**
   * Considers a request forged when it uses a non-safe HTTP method and the CSRF
   * token held in the Skinny session matches neither the request parameter nor
   * any of the recognized CSRF headers.
   */
  override protected def isForged(implicit ctx: SkinnyContext): Boolean = {
    // Lazily create a token on first check so the comparisons below have a value.
    if (skinnySession(context).getAttribute(csrfKey).isEmpty) {
      prepareCsrfToken()
    }
    !request.requestMethod.isSafe &&
      skinnySession.getAttribute(csrfKey) != params.get(csrfKey) &&
      !CSRFTokenSupport.HeaderNames.map(request.headers.get).contains(skinnySession.getAttribute(csrfKey))
  }

  /** Stores a freshly generated CSRF token in the Skinny session when absent. */
  override protected def prepareCsrfToken()(implicit ctx: SkinnyContext) = {
    skinnySession.getAttributeOrElseUpdate(csrfKey, CSRFTokenGenerator())
  }
}
| seratch/skinny-framework | framework/src/main/scala/skinny/filter/AsyncSkinnySessionFilter.scala | Scala | mit | 1,564 |
package colossus
package service
import core.WorkerCommand
import colossus.testkit.{ColossusSpec, FakeIOSystem}
import org.scalatest.{WordSpec, MustMatchers}
import org.scalatest.mock.MockitoSugar
import org.mockito.Mockito._
import scala.concurrent.{ExecutionContext, Future, Promise}
import ExecutionContext.Implicits.global
import scala.util.{Try, Success, Failure}
import java.net.InetSocketAddress
import scala.concurrent.duration._
class LoadBalancingClientSpec extends ColossusSpec with MockitoSugar{

  type C = ServiceClient[String,Int]

  /**
   * Builds a mocked client bound to `address`. Unless `customReturn` overrides it,
   * sending "hey" completes with the address's port number, which lets tests tell
   * which client served a given request.
   */
  def mockClient(address: InetSocketAddress, customReturn: Option[Try[Int]]): C = {
    val config = ClientConfig(
      address = address,
      name = "/mock",
      requestTimeout = 1.second
    )
    val r = customReturn.getOrElse(Success(address.getPort))
    val c = mock[ServiceClient[String, Int]]
    when(c.send("hey")).thenReturn(Callback.complete(r))
    when(c.config).thenReturn(config)
    c
  }

  // Convenience overload: client on 0.0.0.0 with the given port.
  def mockClient(port: Int, customReturn: Option[Try[Int]] = None): C = mockClient(new InetSocketAddress("0.0.0.0", port), customReturn)

  // Returns a stateful factory that hands out the supplied clients one at a time,
  // ignoring the requested address; used to control exactly which mock is created.
  def staticClients(l: List[C]): InetSocketAddress => C = {
    var clients = l
    (address) => {
      val next = clients.head
      clients = clients.tail
      next
    }
  }

  // Default factory: a fresh mock whose "hey" reply echoes its port.
  val mockGenerator = (address: InetSocketAddress) => mockClient(address, None)

  // Addresses with ports 1..num, so replies identify clients by port.
  def addrs(num: Int) = (1 to num).map{i => new InetSocketAddress("0.0.0.0", i)}

  "LoadBalancingClient" must {
    "send two consecutive commands to different clients" in {
      val clients = addrs(3)
      val (probe, worker) = FakeIOSystem.fakeWorkerRef
      val l = new LoadBalancingClient[String,Int](worker, mockGenerator, initialClients = clients)
      // Replies cycle through ports 2, 3, 1, 2 — round-robin across the pool.
      l.send("hey").execute{_ must equal(Success(2))}
      l.send("hey").execute{_ must equal(Success(3))}
      l.send("hey").execute{_ must equal(Success(1))}
      l.send("hey").execute{_ must equal(Success(2))}
    }
    "evenly divide requests among clients" in {
      (1 to 5).foreach{num =>
        val ops = (1 to num).permutations.toList.size //lazy factorial
        val clients = addrs(num)
        val (probe, worker) = FakeIOSystem.fakeWorkerRef
        val l = new LoadBalancingClient[String,Int](worker, mockGenerator, maxTries = 2, initialClients = clients)
        (1 to ops).foreach{i =>
          l.send("hey").execute()
        }
        // ops is a multiple of num, so each client must have seen ops/num sends.
        l.currentClients.foreach{c =>
          verify(c, times(ops / num)).send("hey")
        }
      }
    }
    "properly failover requests" in {
      val bad = mockClient(1, Some(Failure(new Exception("I fucked up :("))))
      val good = mockClient(2, Some(Success(123)))
      val (probe, worker) = FakeIOSystem.fakeWorkerRef
      val l = new LoadBalancingClient[String,Int](worker, staticClients(List(good, bad)), maxTries = 2, initialClients = addrs(2))
      //sending a bunch of commands ensures both clients are attempted as the first try at least once
      //the test succeeds if no exception is thrown
      (1 to 10).foreach{i =>
        l.send("hey").execute{
          case Success(_) => {}
          case Failure(wat) => throw wat
        }
      }
    }

    "close removed connection on update" in {
      val (probe, worker) = FakeIOSystem.fakeWorkerRef
      val l = new LoadBalancingClient[String, Int](worker, mockGenerator, maxTries = 2, initialClients = addrs(3))
      val clients = l.currentClients
      val removed = clients(0)
      val newAddrs = clients.drop(1).map{_.config.address}
      l.update(newAddrs)
      // The client whose address disappeared must be gracefully disconnected.
      verify(removed).gracefulDisconnect()
    }
  }
}
| noikiy/colossus | colossus-tests/src/test/scala/colossus/service/LoadBalancingClientSpec.scala | Scala | apache-2.0 | 3,609 |
package com.coveo.blitz.server
package system
import akka.actor._
import com.coveo.blitz.server.user.User
import play.api.libs.iteratee._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Promise
import scala.concurrent.duration._
import scala.util.{Failure, Success}
/**
 * Actor driving a single game round: registers human and bot clients, applies
 * their moves through the Arbiter, persists a replay, and broadcasts every new
 * game state to spectators.
 */
final class Round(val initGame: Game) extends Actor with CustomLogging {

  // Connected player clients, keyed by their secret token.
  val clients = collection.mutable.Map[Token, ActorRef]()
  // Every move played so far, in order; used to rebuild the game for late spectators.
  val moves = collection.mutable.ArrayBuffer[Dir]()
  // Snapshot of the game as it was at start; replays are recomputed from it.
  var gameAtStart = initGame
  // Current, live game state.
  var game = initGame

  // Broadcast channel pushing each new game state to subscribed spectators.
  val (enumerator, channel) = Concurrent.broadcast[Game]

  import Round._

  // After a minute of silence, ReceiveTimeout fires and we report ourselves inactive.
  context setReceiveTimeout 1.minute

  context.system.eventStream.publish(game)

  def receive = {

    // A spectator asks for a game stream: replay past moves from the start
    // snapshot, interleaved with live updates from the broadcast channel.
    case SendEnumerator(to) => to ! Some {
      (enumerator interleave {
        (Enumerator enumerate moves) &> Enumeratee.scanLeft(gameAtStart)(Arbiter.replay)
      }) &> StreamUnfucker()
    }

    // External play request: route it to the client actor owning the token.
    case msg@Play(token, _) => clients get token match {
      case None =>
        log.warning(s"No client for ${game.id}/$token")
        sender ! notFound("Wrong or expired token")
      case Some(client) => client ! ClientPlay(msg, sender)
    }

    // The client actor confirms a play: apply the move through the Arbiter,
    // reply with the failure or advance the game.
    case ClientPlay(Play(token, d), replyTo) => {
      val client = sender
      val dir = Dir(d)
      Arbiter.move(game, token, dir) match {
        case Failure(e) =>
          log.info(s"Play fail ${game.id}/$token: ${e.getMessage}")
          replyTo ! Status.Failure(e)
        case Success(g) =>
          client ! Client.WorkDone(inputPromise(replyTo))
          saveMove(dir)
          step(g)
      }
    }

    // A human user joins (or re-joins) over HTTP.
    case Join(user, promise) => {
      val heroId = game.heroes.find(hero => hero.userId == Some(user.id)) match {
        // This team already registered before. Simply update the hero.
        case Some(hero) => hero.id
        // New team
        case _ => clients.size + 1
      }
      game = game.withHero(heroId, user.blame)
      // FIXME
      val token = game.hero(heroId).token
      log.info(s"[game ${game.id}] add user ${user.name} #$heroId ($token)")
      if (!clients.contains(token)) {
        log.info(s"Registering new client associated with token $token")
        addClient(token, Props(new HttpClient(token, promise)))
      }
    }

    // A scripted bot joins with its driving strategy.
    case JoinBot(name, driver) => {
      val heroId = clients.size + 1
      game = game.withHero(heroId, _ withName name)
      // FIXME
      val token = game.hero(heroId).token
      log.info(s"[game ${game.id}] add bot $name ($token)")
      addClient(token, Props(new BotClient(token, driver)))
    }

    case Start() => {
      // TODO: Fill missing slots with bots?
      startGame()
    }

    case Stop() => {
      // TODO: Kill clients? Other cleanup?
      game = game.copy(status = com.coveo.blitz.server.Status.Aborted)
      context.system.eventStream.publish(game)
    }

    // A client missed its turn deadline: record a Crash move on its behalf.
    case Client.Timeout(token) => {
      log.info(s"${game.id}/$token timeout")
      val dir = Dir.Crash
      Arbiter.move(game, token, dir) match {
        case Failure(e) => log.warning(s"Crash fail ${game.id}/$token: ${e.getMessage}")
        case Success(g) =>
          saveMove(dir)
          step(g)
      }
    }

    // A client actor died: forget every token that pointed at it.
    case Terminated(client) => {
      context unwatch client
      clients filter (_._2 == client) foreach { case (id, _) => clients -= id }
    }

    // Idle too long: let the parent reap this round.
    case ReceiveTimeout => context.parent ! Inactive(game.id)
  }

  /** Spawns and registers the client actor for `token` and marks its hero ready. */
  def addClient(token: Token, props: Props) {
    val client = context.actorOf(props, name = token)
    clients += (token -> client)
    game = game.withHero(game.hero(clients.size).setReady)
    context watch client
    // Auto-start once the table is full (4 players) for training/autostart games.
    if (clients.size == 4 && (game.training || game.autostart)) {
      startGame()
    }
    context.system.eventStream.publish(game)
  }

  /** Starts the game when every hero has a client, then prompts the current hero. */
  def startGame(): Unit = {
    if (!game.started) {
      if (game.heroes.size != clients.size) {
        log.info(s"Unable to start game ${game.id} as it only has ${clients.size} clients")
      } else {
        log.info(s"[game ${game.id}] start")
        game = game.start
        gameAtStart = game
        game.hero map (_.token) flatMap clients.get match {
          case None => throw UtterFailException(s"Game ${game.id} started without a hero client")
          case Some(client) =>
            Replay insert game
            client ! game
        }
      }
    }
  }

  /** A crashed hero cannot act anymore: force a Stay move on its behalf. */
  def stayCrashed(token: String) = Arbiter.move(game, token, Dir.Stay) match {
    case Failure(e) => log.info(s"Crashed stay fail ${game.id}/$token: ${e.getMessage}")
    case Success(g) =>
      saveMove(Dir.Stay)
      step(g)
  }

  /** Records a move both in memory (for live replays) and in persistent storage. */
  def saveMove(dir: Dir) {
    moves += dir
    Replay.addMove(game.id, dir)
  }

  /**
   * Advances to the new game state: broadcasts it, finalizes the replay and closes
   * the stream when the game is over, otherwise prompts the next hero (auto-playing
   * Stay for crashed heroes).
   */
  def step(g: Game) {
    game = g
    channel push game
    context.system.eventStream publish game
    if (game.finished) {
      Replay.finish(game.id, moves, game)
      clients.values foreach (_ ! game)
      channel.eofAndEnd
    }
    else game.hero foreach {
      case h if h.crashed => stayCrashed(h.token)
      case h => clients get h.token foreach (_ ! game)
    }
  }
}
/** Protocol messages understood by the [[Round]] actor. */
object Round {

  /** External play request carrying the raw direction string. */
  case class Play(token: Token, dir: String)

  /** A [[Play]] routed through a client actor, keeping the original sender. */
  case class ClientPlay(play: Play, replyTo: ActorRef)

  /** A human user joining; the promise is fulfilled with the player's input. */
  case class Join(user: User, promise: Promise[PlayerInput])

  /** A bot joining with its driving strategy. */
  case class JoinBot(name: String, driver: Driver)

  /** Request to start the round immediately. */
  case class Start()

  /** Request to abort the round. */
  case class Stop()

  /** Reported to the parent when the round has been idle too long. */
  case class Inactive(id: GameId)

  /** Request for the spectator stream; answered with Some(enumerator). */
  case class SendEnumerator(to: ActorRef)
}
| coveord/Blitz2016-Server | server/app/system/Round.scala | Scala | mit | 5,393 |
package com.softwaremill.codebrag.common
import com.typesafe.scalalogging.slf4j.Logging
/** Mixin providing simple wall-clock timing helpers. */
trait Timed extends Logging {

  /**
   * Evaluates `block` and returns its result together with the elapsed time.
   *
   * @return the block's result paired with the wall-clock duration in milliseconds
   */
  def timed[T](block: => T): (T, Long) = {
    val startedAt = System.currentTimeMillis()
    val result = block
    val elapsedMillis = System.currentTimeMillis() - startedAt
    (result, elapsedMillis)
  }

  /**
   * Evaluates `block`, logs `msg` with the elapsed milliseconds at debug level,
   * and returns the block's result.
   */
  def timedAndLogged[T](msg: String)(block: => T): T = {
    val (result, took) = timed(block)
    logger.debug(s"$msg took ${took}ms")
    result
  }
}

/** Ready-made instance for call sites that don't want to mix the trait in. */
object Timed extends Timed
| softwaremill/codebrag | codebrag-common/src/main/scala/com/softwaremill/codebrag/common/Timed.scala | Scala | agpl-3.0 | 470 |
package com.overviewdocs.models
import java.time.Instant
/**
 * Persistent record of a document-set clone operation.
 *
 * @param id                       database identifier
 * @param sourceDocumentSetId      document set being copied
 * @param destinationDocumentSetId document set being written
 * @param stepNumber               progress marker of the clone pipeline
 * @param cancelled                whether a cancellation was requested
 * @param createdAt                when the job was created
 */
case class CloneJob(
  id: Int,
  sourceDocumentSetId: Long,
  destinationDocumentSetId: Long,
  stepNumber: Short,
  cancelled: Boolean,
  createdAt: Instant
)

object CloneJob {
  /** Attributes for inserting a fresh job: starts at step 0, not cancelled, created now. */
  case class CreateAttributes(
    sourceDocumentSetId: Long,
    destinationDocumentSetId: Long,
    stepNumber: Short = 0,
    cancelled: Boolean = false,
    createdAt: Instant = Instant.now
  )
}
| overview/overview-server | common/src/main/scala/com/overviewdocs/models/CloneJob.scala | Scala | agpl-3.0 | 447 |
package com.gilt.thehand.rules.comparison
import com.gilt.thehand.rules.conversions.ConvertsTo
import com.gilt.thehand.rules.SeqRule
/**
 * Membership rule shared by every typed `In` implementation. Implementations mix
 * in [[ConvertsTo]] so that, primarily at parse time, In[String] can be told
 * apart from In[Long] and the other typed variants.
 */
trait In extends SeqRule { self: ConvertsTo =>

  /**
   * Succeeds exactly when `value` appears among the rule's configured `values`.
   *
   * @param value the candidate value to match against
   */
  def matchInnerType(value: InnerType): Boolean = values contains value
}
| gilt/the-hand | src/main/scala/com/gilt/thehand/rules/comparison/In.scala | Scala | apache-2.0 | 568 |
package test
import com.acework.js.components.bootstrap._
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom._
import japgolly.scalajs.react.vdom.prefix_<^._
import scala.scalajs.js, js.{Array => JArray}
import utest._
import TestUtil._
// utest suite for the scalajs-react bootstrap wrappers: each case renders a
// component to static HTML and compares it against the exact expected markup
// via TestUtil.shouldRender, so the expected strings must stay byte-identical.
object CoreTest extends TestSuite {
  val tests = TestSuite {
    'alerts {
      'ExampleAlert {
        // A warning-styled Alert renders a div carrying both bootstrap alert classes.
        Alert(Alert.Alert(bsStyle = Styles.warning),
          <.strong("Holy guacamole!"), " Best check yo self, you're not looking too good.") shouldRender
          "<div class=\"alert-warning alert\"><strong>Holy guacamole!</strong> Best check yo self, you're not looking too good.</div>"
      }
    }
    'buttons {
      // A Button with only a label renders with type="button" and the default btn classes.
      Button("Hi") shouldRender "<button type=\"button\" class=\"btn-default btn\">Hi</button>"
    }
  }
}
| lvitaly/scalajs-react-bootstrap | test/src/test/scala/test/CoreTest.scala | Scala | mit | 796 |
package com.global.snapshot.actors
import akka.actor.{ActorRef, Cancellable, Props}
import akka.pattern.ask
import com.global.snapshot.Config
import com.global.snapshot.actors.CargoScheduler.{StartScheduler, StopScheduler}
import com.global.snapshot.actors.CargoStation.{GetOutgoingChannels, Unload}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
/**
 * Actor that, while started, periodically (every 3 seconds after a 1 second
 * delay) asks its parent station for its outgoing channels and tells it to
 * unload a random amount of cargo onto a randomly chosen channel.
 *
 * Fixed: `getRandom(lowInclusive, highInclusive)` previously returned
 * `lowInclusive + nextInt(highInclusive)`, so the cargo amount ranged over
 * [min, min + max - 1] instead of the documented inclusive [min, max]. The
 * formula now honors both bounds, and the channel-index call site was adjusted
 * to keep producing valid indices 0 .. size-1.
 */
class CargoScheduler
  extends CargoActor {

  val parent = context.parent
  val random = new scala.util.Random

  // Handle on the currently running periodic schedule, if any.
  var cancellable: Option[Cancellable] = None

  override def receive = {
    case StartScheduler =>
      cancellable match {
        case Some(_) => // already scheduled; ignore duplicate start requests
        case None =>
          cancellable = Some(
            context.system.scheduler.schedule(1 second, 3 seconds) {
              (parent ? GetOutgoingChannels) (1 second).mapTo[Set[ActorRef]].map { outgoingChannels =>
                parent ! Unload(getRandomCargo, getRandomOutgoingChannel(outgoingChannels.toSeq))
              }
            }
          )
      }
    case StopScheduler =>
      cancellable match {
        case Some(scheduler) =>
          if (!scheduler.isCancelled) {
            scheduler.cancel()
          }
          cancellable = None
        case None => // not running; nothing to stop
      }
    case event =>
      super.receive(event)
  }

  /** Random cargo amount in the inclusive range [cargoUnloadMin, cargoUnloadMax]. */
  private def getRandomCargo: Long =
    getRandom(Config.cargoUnloadMin, Config.cargoUnloadMax)

  /** Picks a uniformly random channel; still fails on an empty Seq, as before. */
  private def getRandomOutgoingChannel(outgoingChannels: Seq[ActorRef]): ActorRef =
    outgoingChannels(getRandom(0, outgoingChannels.size - 1))

  /** Uniform random Int in the inclusive range [lowInclusive, highInclusive]. */
  private def getRandom(lowInclusive: Int, highInclusive: Int): Int =
    lowInclusive + random.nextInt(highInclusive - lowInclusive + 1)
}
object CargoScheduler {

  /** Props factory so callers never instantiate the actor class directly. */
  def props: Props =
    Props(new CargoScheduler)

  // Message protocol understood by CargoScheduler.
  sealed trait CargoSchedulerOperations
  case object StartScheduler extends CargoSchedulerOperations
  case object StopScheduler extends CargoSchedulerOperations
}
| NorthernDemon/Cargo-Global-Snapshot | src/main/scala/com/global/snapshot/actors/CargoScheduler.scala | Scala | mit | 1,913 |
package org.infinispan.spark.examples.util
import java.io.IOException
import java.lang.System.{getProperty => sys}
import java.util.concurrent.LinkedBlockingQueue
import com.twitter.hbc._
import com.twitter.hbc.core.Constants
import com.twitter.hbc.core.endpoint.StatusesSampleEndpoint
import com.twitter.hbc.core.processor.StringDelimitedProcessor
import com.twitter.hbc.httpclient.auth.OAuth1
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.api.java.{JavaReceiverInputDStream, JavaStreamingContext}
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.receiver.Receiver
import org.infinispan.spark.examples.twitter.Tweet
import org.slf4j.{Logger, LoggerFactory}
import play.api.libs.json._
import scala.concurrent.duration._
import scala.language.postfixOps
/**
 * Simple DStream connector to Twitter.
 *
 * OAuth credentials are read from JVM system properties (`sys` is an alias for
 * `System.getProperty`, imported above); a missing property yields null here
 * and will surface when the receiver authenticates.
 */
class TwitterDStream(@transient val ssc_ : StreamingContext,
                     storage: StorageLevel = StorageLevel.MEMORY_ONLY)
  extends ReceiverInputDStream[Tweet](ssc_) {

  val ConsumerKeyProp = "twitter.oauth.consumerKey"
  val ConsumerSecretProp = "twitter.oauth.consumerSecret"
  val AccessTokenProp = "twitter.oauth.accessToken"
  val AccessTokenSecretProp = "twitter.oauth.accessTokenSecret"

  override def getReceiver() = {
    val consumerKey = sys(ConsumerKeyProp)
    val consumerSecret = sys(ConsumerSecretProp)
    val accessToken = sys(AccessTokenProp)
    val accessTokenSecret = sys(AccessTokenSecretProp)
    new TwitterReceiver(storage, consumerKey, consumerSecret, accessToken, accessTokenSecret)
  }
}
/**
 * Spark receiver that pumps statuses from Twitter's sample stream (via the
 * Hosebird client) into the DStream. A daemon thread drains the client's
 * blocking queue, parses each status as JSON and stores it as a [[Tweet]];
 * deletion notices (messages carrying a "delete" field) are skipped.
 *
 * Fixed: `BlockingQueue.poll(timeout, unit)` returns null when the timeout
 * elapses with no message; previously that null flowed straight into
 * `Json.parse`, killing the receiver thread with a NullPointerException.
 * Timeouts now simply re-check the stop flag and poll again.
 */
private[util] class TwitterReceiver(storageLevel: StorageLevel, consumerKey: String,
                                    consumerSecret: String, accessToken: String, accessTokenSecret: String)
  extends Receiver[Tweet](storageLevel) {

  private lazy val logger: Logger = LoggerFactory.getLogger(classOf[TwitterDStream])

  val ClientName = "Hosebird-client"
  val QueueSize = 10000
  val Timeout = 1 minute

  // Transient + lazy so these are rebuilt on the executor after deserialization.
  @transient lazy val OAuth = new OAuth1(consumerKey, consumerSecret, accessToken, accessTokenSecret)
  @transient lazy val blockingQueue = new LinkedBlockingQueue[String](QueueSize)

  @transient lazy val Endpoint = {
    val sampleEndpoint = new StatusesSampleEndpoint()
    sampleEndpoint.stallWarnings(false)
    sampleEndpoint
  }

  @transient lazy val client = new ClientBuilder()
    .name(ClientName)
    .hosts(Constants.STREAM_HOST)
    .endpoint(Endpoint)
    .authentication(OAuth)
    .processor(new StringDelimitedProcessor(blockingQueue))
    .build()

  // Cooperative shutdown flag for the drain thread; volatile for cross-thread visibility.
  @volatile var stopRequested = false

  override def onStart() = {
    logger.info("Starting receiver")
    client.connect()
    new Thread {
      setDaemon(true)
      override def run() = {
        while (!stopRequested) {
          if (client.isDone) {
            throw new IOException("Client disconnected")
          }
          val message = blockingQueue.poll(Timeout.toSeconds, SECONDS)
          // poll returns null on timeout -- loop around instead of parsing it.
          if (message != null) {
            logger.info(s"Received message $message")
            val json = Json.parse(message)
            val delete = (json \\ "delete").toOption
            if (delete.isEmpty) {
              val id = json \\ "id"
              val retweet = json \\ "retweet_count"
              val text = json \\ "text"
              val user = json \\ "user" \\ "screen_name"
              val country = json \\ "place" \\ "country"
              store(new Tweet(id.as[Long], user.as[String], country.asOpt[String].getOrElse("N/A"), retweet.as[Int], text.as[String]))
            }
          }
        }
      }
    }.start()
  }

  override def onStop() = {
    stopRequested = true
    client.stop()
  }
}
object TwitterDStream {
  // Factory overloads for Scala and Java callers, with and without an explicit storage level.
  def create(ssc: StreamingContext, storageLevel: StorageLevel) = new TwitterDStream(ssc, storageLevel)
  def create(jssc: JavaStreamingContext, storageLevel: StorageLevel) = new TwitterDStream(jssc.ssc, storageLevel)
  def create(ssc: StreamingContext) = new TwitterDStream(ssc)
  // Explicit Java return type so the implicit DStream -> Java DStream conversion is applied here.
  def create(jssc: JavaStreamingContext): JavaReceiverInputDStream[Tweet] = new TwitterDStream(jssc.ssc)
}
| galderz/infinispan-spark | examples/twitter/src/main/scala/org/infinispan/spark/examples/util/TwitterDStream.scala | Scala | apache-2.0 | 4,173 |
package chess
/**
 * Utility methods for helping to determine whether a situation is a draw or a draw
 * on a player flagging.
 *
 * See http://www.e4ec.org/immr.html
 */
object InsufficientMatingMaterial {

  // All pieces on the board except the kings.
  def nonKingPieces(board: Board) = board.pieces filter (_._2.role != King)

  // True when bishops occupy both square colors: the set of distinct
  // light/dark values over all bishops has size exactly 2.
  def bishopsOnOppositeColors(board: Board) =
    (board.pieces collect { case (pos, Piece(_, Bishop)) => pos.isLight } toList).distinct
      .lengthCompare(2) == 0

  /*
   * Returns true if a pawn cannot progress forward because it is blocked by a pawn
   */
  def pawnBlockedByPawn(pawn: Actor, board: Board) =
    pawn.moves.isEmpty && {
      val blockingPosition = Actor.posAheadOfPawn(pawn.pos, pawn.piece.color)
      blockingPosition.flatMap(board.apply).exists(_.is(Pawn))
    }

  /*
   * Determines whether a board position is an automatic draw due to neither player
   * being able to mate the other as informed by the traditional chess rules.
   */
  def apply(board: Board) = {
    lazy val kingsAndBishopsOnly = board.pieces forall { p =>
      (p._2 is King) || (p._2 is Bishop)
    }
    val kingsAndMinorsOnly = board.pieces forall { p =>
      (p._2 is King) || (p._2 is Bishop) || (p._2 is Knight)
    }
    // Draw when only kings and minor pieces remain AND either at most one minor
    // piece is left (<= 3 pieces including both kings) or all remaining minors
    // are bishops confined to a single square color.
    kingsAndMinorsOnly && (board.pieces.size <= 3 || (kingsAndBishopsOnly && !bishopsOnOppositeColors(board)))
  }

  /*
   * Determines whether a color does not have mating material. In general:
   * King by itself is not mating material
   * King + knight mates against king + any(rook, bishop, knight, pawn)
   * King + bishop mates against king + any(bishop, knight, pawn)
   * King + bishop(s) versus king + bishop(s) depends upon bishop square colors
   */
  def apply(board: Board, color: Color) = {
    val kingsAndMinorsOnlyOfColor = board.piecesOf(color) forall { p =>
      (p._2 is King) || (p._2 is Bishop) || (p._2 is Knight)
    }
    // Point-free partial application on `!=`:
    //   `filter (King !=)`  keeps roles other than King;
    //   `exists (Queen !=)` tests for any role other than Queen.
    lazy val nonKingRolesOfColor = board rolesOf color filter (King !=)
    lazy val rolesOfOpponentColor = board rolesOf !color

    kingsAndMinorsOnlyOfColor && (nonKingRolesOfColor.distinct match {
      // A bare king never has mating material.
      case Nil => true
      case List(Knight) =>
        // A single knight is insufficient only when the opponent's non-king
        // material consists solely of queens (per the table above, K+N mates
        // against K + rook/bishop/knight/pawn).
        nonKingRolesOfColor.lengthCompare(
          1
        ) == 0 && !(rolesOfOpponentColor filter (King !=) exists (Queen !=))
      case List(Bishop) =>
        // Bishop-only material is insufficient unless the opponent has knights
        // or pawns, or our bishops cover both square colors.
        !(rolesOfOpponentColor.exists(r => r == Knight || r == Pawn) || bishopsOnOppositeColors(board))
      case _ => false
    })
  }
}
| niklasf/scalachess | src/main/scala/InsufficientMatingMaterial.scala | Scala | mit | 2,413 |
// scalastyle:off
/* NSC -- new Scala compiler
* Copyright 2005-2013 LAMP/EPFL
* @author Martin Odersky
*/
package org.apache.spark.repl
import scala.tools.nsc._
import scala.tools.nsc.interpreter._
import Predef.{ println => _, _ }
import util.stringFromWriter
import scala.reflect.internal.util._
import java.net.URL
import scala.sys.BooleanProp
import io.{AbstractFile, PlainFile, VirtualDirectory}
import reporters._
import symtab.Flags
import scala.reflect.internal.Names
import scala.tools.util.PathResolver
import scala.tools.nsc.util.ScalaClassLoader
import ScalaClassLoader.URLClassLoader
import scala.tools.nsc.util.Exceptional.unwrap
import scala.collection.{ mutable, immutable }
import scala.util.control.Exception.{ ultimately }
import SparkIMain._
import java.util.concurrent.Future
import typechecker.Analyzer
import scala.language.implicitConversions
import scala.reflect.runtime.{ universe => ru }
import scala.reflect.{ ClassTag, classTag }
import scala.tools.reflect.StdRuntimeTags._
import scala.util.control.ControlThrowable
import util.stackTraceString
import org.apache.spark.{HttpServer, SparkConf, Logging}
import org.apache.spark.util.Utils
// /** directory to save .class files to */
// private class ReplVirtualDirectory(out: JPrintWriter) extends VirtualDirectory("((memory))", None) {
// private def pp(root: AbstractFile, indentLevel: Int) {
// val spaces = " " * indentLevel
// out.println(spaces + root.name)
// if (root.isDirectory)
// root.toList sortBy (_.name) foreach (x => pp(x, indentLevel + 1))
// }
// // print the contents hierarchically
// def show() = pp(this, 0)
// }
/** An interpreter for Scala code.
*
* The main public entry points are compile(), interpret(), and bind().
* The compile() method loads a complete Scala file. The interpret() method
* executes one line of Scala code at the request of the user. The bind()
* method binds an object to a variable that can then be used by later
* interpreted code.
*
* The overall approach is based on compiling the requested code and then
* using a Java classloader and Java reflection to run the code
* and access its results.
*
* In more detail, a single compiler instance is used
* to accumulate all successfully compiled or interpreted Scala code. To
* "interpret" a line of code, the compiler generates a fresh object that
* includes the line of code and which has public member(s) to export
* all variables defined by that code. To extract the result of an
* interpreted line to show the user, a second "result object" is created
* which imports the variables exported by the above object and then
* exports members called "$eval" and "$print". To accomodate user expressions
* that read from variables or methods defined in previous statements, "import"
* statements are used.
*
* This interpreter shares the strengths and weaknesses of using the
* full compiler-to-Java. The main strength is that interpreted code
* behaves exactly as does compiled code, including running at full speed.
* The main weakness is that redefining classes and methods is not handled
* properly, because rebinding at the Java level is technically difficult.
*
* @author Moez A. Abdel-Gawad
* @author Lex Spoon
*/
class SparkIMain(initialSettings: Settings, val out: JPrintWriter) extends SparkImports with Logging {
imain =>
// Verbose REPL debugging is toggled with the SPARK_DEBUG_REPL environment variable.
val SPARK_DEBUG_REPL: Boolean = (System.getenv("SPARK_DEBUG_REPL") == "1")

/** Local directory to save .class files too */
val outputDir = {
  val tmp = System.getProperty("java.io.tmpdir")
  val rootDir = new SparkConf().get("spark.repl.classdir", tmp)
  Utils.createTempDir(rootDir)
}
if (SPARK_DEBUG_REPL) {
  echo("Output directory: " + outputDir)
}

val virtualDirectory = new PlainFile(outputDir) // "directory" for classfiles
val classServer = new HttpServer(outputDir) // Jetty server that will serve our classes to worker nodes

private var currentSettings: Settings = initialSettings
var printResults = true // whether to print result lines
var totalSilence = false // whether to print anything
private var _initializeComplete = false // compiler is initialized
private var _isInitialized: Future[Boolean] = null // set up initialization future
private var bindExceptions = true // whether to bind the lastException variable
private var _executionWrapper = "" // code to be wrapped around all lines

// Start the classServer and store its URI in a spark system property
// (which will be passed to executors so that they can connect to it)
classServer.start()
if (SPARK_DEBUG_REPL) {
  echo("Class server started, URI = " + classServer.uri)
}
/** We're going to go to some trouble to initialize the compiler asynchronously.
 * It's critical that nothing call into it until it's been initialized or we will
 * run into unrecoverable issues, but the perceived repl startup time goes
 * through the roof if we wait for it. So we initialize it with a future and
 * use a lazy val to ensure that any attempt to use the compiler object waits
 * on the future.
 */
private var _classLoader: AbstractFileClassLoader = null // active classloader
private val _compiler: Global = newCompiler(settings, reporter) // our private compiler

// Monotonically increasing request-id generator; the first call returns 1.
private val nextReqId = {
  var counter = 0
  () => { counter += 1 ; counter }
}

// Before initialization completes, fall back to resolving the classpath from settings.
def compilerClasspath: Seq[URL] = (
  if (isInitializeComplete) global.classPath.asURLs
  else new PathResolver(settings).result.asURLs // the compiler's classpath
)
def settings = currentSettings
// Original text of the most recent user request, or "" before any request.
def mostRecentLine = prevRequestList match {
  case Nil => ""
  case req :: _ => req.originalLine
}
// Run the code body with the given boolean settings flipped to true.
// Restores the previous nowarn value afterwards (only if we changed it).
def withoutWarnings[T](body: => T): T = beQuietDuring {
  val saved = settings.nowarn.value
  if (!saved)
    settings.nowarn.value = true

  try body
  finally if (!saved) settings.nowarn.value = false
}

/** construct an interpreter that reports to Console */
def this(settings: Settings) = this(settings, new NewLinePrintWriter(new ConsoleWriter, true))
def this() = this(new Settings())

// Logger whose verbosity is driven by the scala.repl.{info,debug,trace} system properties.
lazy val repllog: Logger = new Logger {
  val out: JPrintWriter = imain.out
  val isInfo: Boolean = BooleanProp keyExists "scala.repl.info"
  val isDebug: Boolean = BooleanProp keyExists "scala.repl.debug"
  val isTrace: Boolean = BooleanProp keyExists "scala.repl.trace"
}
lazy val formatting: Formatting = new Formatting {
  val prompt = Properties.shellPromptString
}
lazy val reporter: ConsoleReporter = new SparkIMain.ReplReporter(this)
import formatting._
import reporter.{ printMessage, withoutTruncating }

// This exists mostly because using the reporter too early leads to deadlock.
private def echo(msg: String) { Console println msg }
private def _initSources = List(new BatchSourceFile("<init>", "class $repl_$init { }"))
// Warms up the compiler on a trivial source; returns true on success and
// marks initialization complete.
private def _initialize() = {
  try {
    // todo. if this crashes, REPL will hang
    new _compiler.Run() compileSources _initSources
    _initializeComplete = true
    true
  }
  catch AbstractOrMissingHandler()
}
// Wraps s in triple quotes.
private def tquoted(s: String) = "\\"\\"\\"" + s + "\\"\\"\\""
// argument is a thunk to execute after init is done
// Idempotent: only the first call spawns the background initialization.
def initialize(postInitSignal: => Unit) {
  synchronized {
    if (_isInitialized == null) {
      _isInitialized = io.spawn {
        try _initialize()
        finally postInitSignal
      }
    }
  }
}
// Blocking variant of initialize, used when async startup is not wanted.
def initializeSynchronous(): Unit = {
  if (!isInitializeComplete) {
    _initialize()
    assert(global != null, global)
  }
}
def isInitializeComplete = _initializeComplete

/** the public, go through the future compiler */
lazy val global: Global = {
  if (isInitializeComplete) _compiler
  else {
    // If init hasn't been called yet you're on your own.
    if (_isInitialized == null) {
      logWarning("Warning: compiler accessed before init set up. Assuming no postInit code.")
      initialize(())
    }
    // // blocks until it is ; false means catastrophic failure
    if (_isInitialized.get()) _compiler
    else null
  }
}
@deprecated("Use `global` for access to the compiler instance.", "2.9.0")
lazy val compiler: global.type = global
import global._
import definitions.{ScalaPackage, JavaLangPackage, termMember, typeMember}
import rootMirror.{RootClass, getClassIfDefined, getModuleIfDefined, getRequiredModule, getRequiredClass}

// Pimped Type operations: orElse/andAlso over the NoType sentinel.
implicit class ReplTypeOps(tp: Type) {
  def orElse(other: => Type): Type = if (tp ne NoType) tp else other
  def andAlso(fn: Type => Type): Type = if (tp eq NoType) tp else fn(tp)
}

// TODO: If we try to make naming a lazy val, we run into big time
// scalac unhappiness with what look like cycles. It has not been easy to
// reduce, but name resolution clearly takes different paths.
object naming extends {
  val global: imain.global.type = imain.global
} with Naming {
  // make sure we don't overwrite their unwisely named res3 etc.
  def freshUserTermName(): TermName = {
    val name = newTermName(freshUserVarName())
    if (definedNameMap contains name) freshUserTermName()
    else name
  }
  def isUserTermName(name: Name) = isUserVarName("" + name)
  def isInternalTermName(name: Name) = isInternalVarName("" + name)
}
import naming._

object deconstruct extends {
  val global: imain.global.type = imain.global
} with StructuredTypeStrings

// Handlers that categorize each member of an interpreted request.
lazy val memberHandlers = new {
  val intp: imain.type = imain
} with SparkMemberHandlers
import memberHandlers._
/** Temporarily be quiet */
// Suppresses result printing for the duration of body, then restores the flag.
def beQuietDuring[T](body: => T): T = {
  val saved = printResults
  printResults = false
  try body
  finally printResults = saved
}
// Suppresses all output for the duration of operation, then restores the flag.
def beSilentDuring[T](operation: => T): T = {
  val saved = totalSilence
  totalSilence = true
  try operation
  finally totalSilence = saved
}

def quietRun[T](code: String) = beQuietDuring(interpret(code))

// Builds a handler that rethrows control throwables, logs anything else at
// debug level, and yields the fallback value.
private def logAndDiscard[T](label: String, alt: => T): PartialFunction[Throwable, T] = {
  case t: ControlThrowable => throw t
  case t: Throwable =>
    logDebug(label + ": " + unwrap(t))
    logDebug(stackTraceString(unwrap(t)))
    alt
}

/** takes AnyRef because it may be binding a Throwable or an Exceptional */
private def withLastExceptionLock[T](body: => T, alt: => T): T = {
  assert(bindExceptions, "withLastExceptionLock called incorrectly.")
  bindExceptions = false

  try beQuietDuring(body)
  catch logAndDiscard("withLastExceptionLock", alt)
  finally bindExceptions = true
}

def executionWrapper = _executionWrapper
def setExecutionWrapper(code: String) = _executionWrapper = code
def clearExecutionWrapper() = _executionWrapper = ""
/** interpreter settings */
lazy val isettings = new SparkISettings(this)

/** Instantiate a compiler. Overridable. */
// Points compiler output at our virtual directory so generated classes can be served.
protected def newCompiler(settings: Settings, reporter: Reporter): ReplGlobal = {
  settings.outputDirs setSingleOutput virtualDirectory
  settings.exposeEmptyPackage.value = true
  new Global(settings, reporter) with ReplGlobal {
    override def toString: String = "<global>"
  }
}

/** Parent classloader. Overridable. */
protected def parentClassLoader: ClassLoader =
  SparkHelper.explicitParentLoader(settings).getOrElse( this.getClass.getClassLoader() )

/* A single class loader is used for all commands interpreted by this Interpreter.
 It would also be possible to create a new class loader for each command
 to interpret. The advantages of the current approach are:

 - Expressions are only evaluated one time. This is especially
 significant for I/O, e.g. "val x = Console.readLine"

 The main disadvantage is:

 - Objects, classes, and methods cannot be rebound. Instead, definitions
 shadow the old ones, and old code objects refer to the old
 definitions.
 */
def resetClassLoader() = {
  logDebug("Setting new classloader: was " + _classLoader)
  _classLoader = null
  ensureClassLoader()
}
// Lazily (re)creates the classloader on first use after a reset.
final def ensureClassLoader() {
  if (_classLoader == null)
    _classLoader = makeClassLoader()
}
def classLoader: AbstractFileClassLoader = {
  ensureClassLoader()
  _classLoader
}
// Classloader over the REPL's output directory that also understands the
// mangled names ($lineNN.$read$...) generated for user-defined classes.
private class TranslatingClassLoader(parent: ClassLoader) extends AbstractFileClassLoader(virtualDirectory, parent) {
  /** Overridden here to try translating a simple name to the generated
   * class name if the original attempt fails. This method is used by
   * getResourceAsStream as well as findClass.
   */
  override protected def findAbstractFile(name: String): AbstractFile = {
    super.findAbstractFile(name) match {
      // deadlocks on startup if we try to translate names too early
      case null if isInitializeComplete =>
        generatedName(name) map (x => super.findAbstractFile(x)) orNull
      case file =>
        file
    }
  }
}
private def makeClassLoader(): AbstractFileClassLoader =
  new TranslatingClassLoader(parentClassLoader match {
    case null => ScalaClassLoader fromURLs compilerClasspath
    case p => new URLClassLoader(compilerClasspath, p)
  })

def getInterpreterClassLoader() = classLoader

// Set the current Java "context" class loader to this interpreter's class loader
def setContextClassLoader() = classLoader.setAsContext()
/** Given a simple repl-defined name, returns the real name of
 * the class representing it, e.g. for "Bippy" it may return
 * {{{
 * $line19.$read$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$Bippy
 * }}}
 */
def generatedName(simpleName: String): Option[String] = {
  // Preserve a trailing module suffix ("$") by translating the base name only.
  if (simpleName endsWith nme.MODULE_SUFFIX_STRING) optFlatName(simpleName.init) map (_ + nme.MODULE_SUFFIX_STRING)
  else optFlatName(simpleName)
}
def flatName(id: String) = optFlatName(id) getOrElse id
def optFlatName(id: String) = requestForIdent(id) map (_ fullFlatName id)
def allDefinedNames = definedNameMap.keys.toList.sorted
def pathToType(id: String): String = pathToName(newTypeName(id))
def pathToTerm(id: String): String = pathToName(newTermName(id))
// Full dotted path for a name the REPL defined; the plain name otherwise.
def pathToName(name: Name): String = {
  if (definedNameMap contains name)
    definedNameMap(name) fullPath name
  else name.toString
}

/** Most recent tree handled which wasn't wholly synthetic. */
private def mostRecentlyHandledTree: Option[Tree] = {
  prevRequests.reverse foreach { req =>
    req.handlers.reverse foreach {
      case x: MemberDefHandler if x.definesValue && !isInternalTermName(x.name) => return Some(x.member)
      case _ => ()
    }
  }
  None
}
/** Stubs for work in progress. */
def handleTypeRedefinition(name: TypeName, old: Request, req: Request) = {
  for (t1 <- old.simpleNameOfType(name) ; t2 <- req.simpleNameOfType(name)) {
    logDebug("Redefining type '%s'\\n %s -> %s".format(name, t1, t2))
  }
}
def handleTermRedefinition(name: TermName, old: Request, req: Request) = {
  for (t1 <- old.compilerTypeOf get name ; t2 <- req.compilerTypeOf get name) {
    // Printing the types here has a tendency to cause assertion errors, like
    // assertion failed: fatal: <refinement> has owner value x, but a class owner is required
    // so DBG is by-name now to keep it in the family. (It also traps the assertion error,
    // but we don't want to unnecessarily risk hosing the compiler's internal state.)
    logDebug("Redefining term '%s'\\n %s -> %s".format(name, t1, t2))
  }
}
// Book-keeping after a successful request: updates history, referenced-name and
// defined-name maps, and warns about companions defined in separate requests.
def recordRequest(req: Request) {
  if (req == null || referencedNameMap == null)
    return

  prevRequests += req
  req.referencedNames foreach (x => referencedNameMap(x) = req)

  // warning about serially defining companions. It'd be easy
  // enough to just redefine them together but that may not always
  // be what people want so I'm waiting until I can do it better.
  for {
    name <- req.definedNames filterNot (x => req.definedNames contains x.companionName)
    oldReq <- definedNameMap get name.companionName
    newSym <- req.definedSymbols get name
    oldSym <- oldReq.definedSymbols get name.companionName
    if Seq(oldSym, newSym).permutations exists { case Seq(s1, s2) => s1.isClass && s2.isModule }
  } {
    afterTyper(replwarn(s"warning: previously defined $oldSym is not a companion to $newSym."))
    replwarn("Companions must be defined together; you may wish to use :paste mode for this.")
  }

  // Updating the defined name map
  req.definedNames foreach { name =>
    if (definedNameMap contains name) {
      if (name.isTypeName) handleTypeRedefinition(name.toTypeName, definedNameMap(name), req)
      else handleTermRedefinition(name.toTermName, definedNameMap(name), req)
    }
    definedNameMap(name) = req
  }
}
// Message is by-name so it is not built when warnings are disabled.
def replwarn(msg: => String) {
  if (!settings.nowarnings.value)
    printMessage(msg)
}
// True when line parses (or is a plausible incomplete prefix); compiler crashes count as false.
def isParseable(line: String): Boolean = {
  beSilentDuring {
    try parse(line) match {
      case Some(xs) => xs.nonEmpty // parses as-is
      case None => true // incomplete
    }
    catch { case x: Exception => // crashed the compiler
      replwarn("Exception in isParseable(\\"" + line + "\\"): " + x)
      false
    }
  }
}
// Compiles and also exposes the Run, so callers can inspect compilation units.
def compileSourcesKeepingRun(sources: SourceFile*) = {
  val run = new Run()
  reporter.reset()
  run compileSources sources.toList
  (!reporter.hasErrors, run)
}

/** Compile an nsc SourceFile. Returns true if there are
 * no compilation errors, or false otherwise.
 */
def compileSources(sources: SourceFile*): Boolean =
  compileSourcesKeepingRun(sources: _*)._1

/** Compile a string. Returns true if there are no
 * compilation errors, or false otherwise.
 */
def compileString(code: String): Boolean =
  compileSources(new BatchSourceFile("<script>", code))

/** Build a request from the user. `trees` is `line` after being parsed.
 */
private def buildRequest(line: String, trees: List[Tree]): Request = {
  executingRequest = new Request(line, trees)
  executingRequest
}
// rewriting "5 // foo" to "val x = { 5 // foo }" creates broken code because
// the close brace is commented out. Strip single-line comments.
// ... but for error message output reasons this is not used, and rather than
// enclosing in braces it is constructed like "val x =\\n5 // foo".
private def removeComments(line: String): String = {
  showCodeIfDebugging(line) // as we're about to lose our // show
  line.lines map (s => s indexOf "//" match {
    case -1 => s
    case idx => s take idx
  }) mkString "\\n"
}

// Position of t, or alt when the tree carries no usable position.
private def safePos(t: Tree, alt: Int): Int =
  try t.pos.startOrPoint
  catch { case _: UnsupportedOperationException => alt }

// Given an expression like 10 * 10 * 10 we receive the parent tree positioned
// at a '*'. So look at each subtree and find the earliest of all positions.
private def earliestPosition(tree: Tree): Int = {
  var pos = Int.MaxValue
  tree foreach { t =>
    pos = math.min(pos, safePos(t, Int.MaxValue))
  }
  pos
}
// Parses the user's line and builds a Request. A trailing bare expression is
// rewritten into a "val resNN = ..." definition so its value can be captured.
// Returns Left on incomplete input or parse error.
private def requestFromLine(line: String, synthetic: Boolean): Either[IR.Result, Request] = {
  val content = indentCode(line)
  val trees = parse(content) match {
    case None => return Left(IR.Incomplete)
    case Some(Nil) => return Left(IR.Error) // parse error or empty input
    case Some(trees) => trees
  }
  logDebug(
    trees map (t => {
      // [Eugene to Paul] previously it just said `t map ...`
      // because there was an implicit conversion from Tree to a list of Trees
      // however Martin and I have removed the conversion
      // (it was conflicting with the new reflection API),
      // so I had to rewrite this a bit
      val subs = t collect { case sub => sub }
      subs map (t0 =>
        " " + safePos(t0, -1) + ": " + t0.shortClass + "\\n"
      ) mkString ""
    }) mkString "\\n"
  )
  // If the last tree is a bare expression, pinpoint where it begins using the
  // AST node position and snap the line off there. Rewrite the code embodied
  // by the last tree as a ValDef instead, so we can access the value.
  trees.last match {
    case _:Assign => // we don't want to include assignments
    case _:TermTree | _:Ident | _:Select => // ... but do want other unnamed terms.
      val varName = if (synthetic) freshInternalVarName() else freshUserVarName()
      val rewrittenLine = (
        // In theory this would come out the same without the 1-specific test, but
        // it's a cushion against any more sneaky parse-tree position vs. code mismatches:
        // this way such issues will only arise on multiple-statement repl input lines,
        // which most people don't use.
        if (trees.size == 1) "val " + varName + " =\\n" + content
        else {
          // The position of the last tree
          val lastpos0 = earliestPosition(trees.last)
          // Oh boy, the parser throws away parens so "(2+2)" is mispositioned,
          // with increasingly hard to decipher positions as we move on to "() => 5",
          // (x: Int) => x + 1, and more. So I abandon attempts to finesse and just
          // look for semicolons and newlines, which I'm sure is also buggy.
          val (raw1, raw2) = content splitAt lastpos0
          logDebug("[raw] " + raw1 + " <---> " + raw2)

          val adjustment = (raw1.reverse takeWhile (ch => (ch != ';') && (ch != '\\n'))).size
          val lastpos = lastpos0 - adjustment

          // the source code split at the laboriously determined position.
          val (l1, l2) = content splitAt lastpos
          logDebug("[adj] " + l1 + " <---> " + l2)

          val prefix = if (l1.trim == "") "" else l1 + ";\\n"
          // Note to self: val source needs to have this precise structure so that
          // error messages print the user-submitted part without the "val res0 = " part.
          val combined = prefix + "val " + varName + " =\\n" + l2

          logDebug(List(
            " line" -> line,
            " content" -> content,
            " was" -> l2,
            "combined" -> combined) map {
              case (label, s) => label + ": '" + s + "'"
            } mkString "\\n"
          )
          combined
        }
      )
      // Rewriting "foo ; bar ; 123"
      // to "foo ; bar ; val resXX = 123"
      requestFromLine(rewrittenLine, synthetic) match {
        case Right(req) => return Right(req withOriginalLine line)
        case x => return x
      }
    case _ =>
  }
  Right(buildRequest(line, trees))
}
// normalize non-public types so we don't see protected aliases like Self
// (dealias only non-public type aliases; everything else passes through unchanged)
def normalizeNonPublic(tp: Type) = tp match {
  case TypeRef(_, sym, _) if sym.isAliasType && !sym.isPublic => tp.dealias
  case _ => tp
}
/**
 * Interpret one line of input. All feedback, including parse errors
 * and evaluation results, are printed via the supplied compiler's
 * reporter. Values defined are available for future interpreted strings.
 *
 * The return value is whether the line was interpreter successfully,
 * e.g. that there were no parse errors.
 */
def interpret(line: String): IR.Result = interpret(line, false)
def interpretSynthetic(line: String): IR.Result = interpret(line, true)
def interpret(line: String, synthetic: Boolean): IR.Result = {
  // Compiles, loads and executes the request on the REPL classloader,
  // printing its result and recording it in the request history on success.
  def loadAndRunReq(req: Request) = {
    classLoader.setAsContext()
    val (result, succeeded) = req.loadAndRun

    /** To our displeasure, ConsoleReporter offers only printMessage,
     * which tacks a newline on the end. Since that breaks all the
     * output checking, we have to take one off to balance.
     */
    if (succeeded) {
      if (printResults && result != "")
        printMessage(result stripSuffix "\\n")
      else if (isReplDebug) // show quiet-mode activity
        printMessage(result.trim.lines map ("[quiet] " + _) mkString "\\n")

      // Book-keeping. Have to record synthetic requests too,
      // as they may have been issued for information, e.g. :type
      recordRequest(req)
      IR.Success
    }
    else {
      // don't truncate stack traces
      withoutTruncating(printMessage(result))
      IR.Error
    }
  }

  if (global == null) IR.Error
  else requestFromLine(line, synthetic) match {
    case Left(result) => result
    case Right(req) =>
      // null indicates a disallowed statement type; otherwise compile and
      // fail if false (implying e.g. a type error)
      if (req == null || !req.compile) IR.Error
      else loadAndRunReq(req)
  }
}
/** Bind a specified name to a specified value. The name may
 *  later be used by expressions passed to interpret.
 *
 *  @param name the variable name to bind
 *  @param boundType the type of the variable, as a string
 *  @param value the object value to bind to it
 *  @param modifiers modifiers (e.g. "implicit") prepended to the generated val
 *  @return an indication of whether the binding succeeded
 */
def bind(name: String, boundType: String, value: Any, modifiers: List[String] = Nil): IR.Result = {
  val bindRep = new ReadEvalPrint()
  // Compile a holder object with a typed slot we can poke the value into.
  // NOTE(review): the compile result is discarded here; a failed compile
  // presumably surfaces as a Left from callEither below — confirm.
  val run = bindRep.compile("""
  |object %s {
  | var value: %s = _
  | def set(x: Any) = value = x.asInstanceOf[%s]
  |}
  """.stripMargin.format(bindRep.evalName, boundType, boundType)
  )
  bindRep.callEither("set", value) match {
    case Left(ex) =>
      logDebug("Set failed in bind(%s, %s, %s)".format(name, boundType, value))
      logDebug(util.stackTraceString(ex))
      IR.Error
    case Right(_) =>
      // Interpret a val definition that reads the holder back out, making
      // `name` visible to subsequent lines.
      val line = "%sval %s = %s.value".format(modifiers map (_ + " ") mkString, name, bindRep.evalPath)
      logDebug("Interpreting: " + line)
      interpret(line)
  }
}
/** Like bind, but additionally records the name as directly bound so it is
 *  excluded from the user-defined-term listings.
 */
def directBind(name: String, boundType: String, value: Any): IR.Result =
  bind(name, boundType, value) match {
    case IR.Success =>
      directlyBoundNames += newTermName(name)
      IR.Success
    case failure =>
      failure
  }
def directBind(p: NamedParam): IR.Result = directBind(p.name, p.tpe, p.value)
def directBind[T: ru.TypeTag : ClassTag](name: String, value: T): IR.Result = directBind((name, value))
// Redefine an existing binding under a (possibly) new type, preserving its value.
def rebind(p: NamedParam): IR.Result = {
  val name = p.name
  // Validates that `name` resolves; the by-name orElse argument performs a
  // nonlocal return of IR.Error when it does not. `oldType` itself is unused.
  val oldType = typeOfTerm(name) orElse { return IR.Error }
  val newType = p.tpe
  val tempName = freshInternalVarName()
  // Stash the old value under a temp name, then redefine `name` with a cast
  // to the new type.
  quietRun("val %s = %s".format(tempName, name))
  quietRun("val %s = %s.asInstanceOf[%s]".format(name, tempName, newType))
}
// Import the given identifiers without echoing output.
def quietImport(ids: String*): IR.Result = beQuietDuring(addImports(ids: _*))
// Interpret a single combined import statement for all given identifiers.
def addImports(ids: String*): IR.Result =
  if (ids.isEmpty) IR.Success
  else interpret("import " + ids.mkString(", "))
def quietBind(p: NamedParam): IR.Result = beQuietDuring(bind(p))
def bind(p: NamedParam): IR.Result = bind(p.name, p.tpe, p.value)
def bind[T: ru.TypeTag : ClassTag](name: String, value: T): IR.Result = bind((name, value))
// Bind under a fresh internal (hidden) name vs. a fresh user-visible resN name.
def bindSyntheticValue(x: Any): IR.Result = bindValue(freshInternalVarName(), x)
def bindValue(x: Any): IR.Result = bindValue(freshUserVarName(), x)
def bindValue(name: String, x: Any): IR.Result = bind(name, TypeStrings.fromValue(x), x)
/** Reset this interpreter, forgetting all user-specified requests. */
def reset() {
  clearExecutionWrapper()
  resetClassLoader()
  resetAllCreators()
  prevRequests.clear()
  referencedNameMap.clear()
  definedNameMap.clear()
  // Recreate the in-memory directory holding compiled repl classes.
  virtualDirectory.delete()
  virtualDirectory.create()
}
/** This instance is no longer needed, so release any resources
 *  it is using. The reporter's output gets flushed.
 */
def close() {
  reporter.flush()
  classServer.stop()
}
/** Here is where we:
 *
 *  1) Read some source code, and put it in the "read" object.
 *  2) Evaluate the read object, and put the result in the "eval" object.
 *  3) Create a String for human consumption, and put it in the "print" object.
 *
 *  Read! Eval! Print! Some of that not yet centralized here.
 */
class ReadEvalPrint(lineId: Int) {
  def this() = this(freshLineId())

  private var lastRun: Run = _                       // most recent compiler run (kept for warnings)
  private var evalCaught: Option[Throwable] = None   // exception captured while forcing evalValue
  private var conditionalWarnings: List[ConditionalWarning] = Nil

  // Names of the generated wrapper package and its objects for this line.
  val packageName = sessionNames.line + lineId
  val readName = sessionNames.read
  val evalName = sessionNames.eval
  val printName = sessionNames.print
  val resultName = sessionNames.result

  // Record `t` as lastException in the repl (guarded against recursive
  // binding) and return its stack trace as a string.
  def bindError(t: Throwable) = {
    if (!bindExceptions) // avoid looping if already binding
      throw t
    val unwrapped = unwrap(t)
    withLastExceptionLock[String]({
      directBind[Throwable]("lastException", unwrapped)(tagOfThrowable, classTag[Throwable])
      util.stackTraceString(unwrapped)
    }, util.stackTraceString(unwrapped))
  }

  // TODO: split it out into a package object and a regular
  // object and we can do that much less wrapping.
  def packageDecl = "package " + packageName
  def pathTo(name: String) = packageName + "." + name
  def packaged(code: String) = packageDecl + "\\n\\n" + code

  def readPath = pathTo(readName)
  def evalPath = pathTo(evalName)
  def printPath = pathTo(printName)

  // Reflectively invoke the uniquely-named method on the compiled eval object.
  def call(name: String, args: Any*): AnyRef = {
    val m = evalMethod(name)
    logDebug("Invoking: " + m)
    if (args.nonEmpty)
      logDebug(" with args: " + args.mkString(", "))
    m.invoke(evalClass, args.map(_.asInstanceOf[AnyRef]): _*)
  }

  def callEither(name: String, args: Any*): Either[Throwable, AnyRef] =
    try Right(call(name, args: _*))
    catch { case ex: Throwable => Left(ex) }

  // As call, but binds any thrown exception as lastException and yields None.
  def callOpt(name: String, args: Any*): Option[AnyRef] =
    try Some(call(name, args: _*))
    catch { case ex: Throwable => bindError(ex) ; None }

  class EvalException(msg: String, cause: Throwable) extends RuntimeException(msg, cause) { }

  private def evalError(path: String, ex: Throwable) =
    throw new EvalException("Failed to load '" + path + "': " + ex.getMessage, ex)

  private def load(path: String): Class[_] = {
    try Class.forName(path, true, classLoader)
    catch { case ex: Throwable => evalError(path, unwrap(ex)) }
  }

  lazy val evalClass = load(evalPath)
  // Result of invoking the generated result accessor; failures are remembered
  // in evalCaught and surface as None.
  lazy val evalValue = callEither(resultName) match {
    case Left(ex) => evalCaught = Some(ex) ; None
    case Right(result) => Some(result)
  }

  def compile(source: String): Boolean = compileAndSaveRun("<console>", source)

  /** The innermost object inside the wrapper, found by
   *  following accessPath into the outer one.
   */
  def resolvePathToSymbol(accessPath: String): Symbol = {
    // val readRoot = getRequiredModule(readPath) // the outermost wrapper
    // MATEI: Changed this to getClass because the root object is no longer a module (Scala singleton object)
    val readRoot = rootMirror.getClassByName(newTypeName(readPath)) // the outermost wrapper
    (accessPath split '.').foldLeft(readRoot: Symbol) {
      case (sym, "") => sym
      case (sym, name) => afterTyper(termMember(sym, name))
    }
  }

  /** We get a bunch of repeated warnings for reasons I haven't
   *  entirely figured out yet. For now, squash.
   */
  private def updateRecentWarnings(run: Run) {
    // Deduplicate (position, message) pairs that repeat the same message on
    // identically-trimmed line content, keeping order of first occurrence.
    def loop(xs: List[(Position, String)]): List[(Position, String)] = xs match {
      case Nil => Nil
      case ((pos, msg)) :: rest =>
        val filtered = rest filter { case (pos0, msg0) =>
          (msg != msg0) || (pos.lineContent.trim != pos0.lineContent.trim) || {
            // same messages and same line content after whitespace removal
            // but we want to let through multiple warnings on the same line
            // from the same run. The untrimmed line will be the same since
            // there's no whitespace indenting blowing it.
            (pos.lineContent == pos0.lineContent)
          }
        }
        ((pos, msg)) :: loop(filtered)
    }
    //PRASHANT: This leads to a NoSuchMethodError for _.warnings. Yet to figure out its purpose.
    // val warnings = loop(run.allConditionalWarnings flatMap (_.warnings))
    // if (warnings.nonEmpty)
    //   mostRecentWarnings = warnings
  }

  // The generated method with this name; errors out unless exactly one matches.
  private def evalMethod(name: String) = evalClass.getMethods filter (_.getName == name) match {
    case Array(method) => method
    case xs => sys.error("Internal error: eval object " + evalClass + ", " + xs.mkString("\\n", "\\n", ""))
  }

  private def compileAndSaveRun(label: String, code: String) = {
    showCodeIfDebugging(code)
    val (success, run) = compileSourcesKeepingRun(new BatchSourceFile(label, packaged(code)))
    updateRecentWarnings(run)
    lastRun = run
    success
  }
}
/** One line of code submitted by the user for interpretation */
// private
class Request(val line: String, val trees: List[Tree]) {
  val reqId = nextReqId()
  val lineRep = new ReadEvalPrint()

  // The line as the user typed it, before any rewriting (e.g. "val resN = ...").
  private var _originalLine: String = null
  def withOriginalLine(s: String): this.type = { _originalLine = s ; this }
  def originalLine = if (_originalLine == null) line else _originalLine

  /** handlers for each tree in this request */
  val handlers: List[MemberHandler] = trees map (memberHandlers chooseHandler _)
  def defHandlers = handlers collect { case x: MemberDefHandler => x }

  /** all (public) names defined by these statements */
  val definedNames = handlers flatMap (_.definedNames)

  /** list of names used by this expression */
  val referencedNames: List[Name] = handlers flatMap (_.referencedNames)

  /** def and val names */
  def termNames = handlers flatMap (_.definesTerm)
  def typeNames = handlers flatMap (_.definesType)
  def definedOrImported = handlers flatMap (_.definedOrImported)
  def definedSymbolList = defHandlers flatMap (_.definedSymbols)

  def definedTypeSymbol(name: String) = definedSymbols(newTypeName(name))
  def definedTermSymbol(name: String) = definedSymbols(newTermName(name))

  /** Code to import bound names from previous lines - accessPath is code to
   *  append to objectName to access anything bound by request.
   */
  val SparkComputedImports(importsPreamble, importsTrailer, accessPath) =
    importsCode(referencedNames.toSet)

  /** Code to access a variable with the specified name */
  def fullPath(vname: String) = {
    // lineRep.readPath + accessPath + ".`%s`".format(vname)
    lineRep.readPath + ".INSTANCE" + accessPath + ".`%s`".format(vname)
  }

  /** Same as fullpath, but after it has been flattened, so:
   *  $line5.$iw.$iw.$iw.Bippy // fullPath
   *  $line5.$iw$$iw$$iw$Bippy // fullFlatName
   */
  def fullFlatName(name: String) =
    // lineRep.readPath + accessPath.replace('.', '$') + nme.NAME_JOIN_STRING + name
    lineRep.readPath + ".INSTANCE" + accessPath.replace('.', '$') + nme.NAME_JOIN_STRING + name

  /** The unmangled symbol name, but supplemented with line info. */
  def disambiguated(name: Name): String = name + " (in " + lineRep + ")"

  /** Code to access a variable with the specified name */
  def fullPath(vname: Name): String = fullPath(vname.toString)

  /** the line of code to compute */
  def toCompute = line

  /** generate the source code for the object that computes this request */
  private object ObjectSourceCode extends CodeAssembler[MemberHandler] {
    def path = pathToTerm("$intp")
    def envLines = {
      if (!isReplPower) Nil // power mode only for now
      // $intp is not bound; punt, but include the line.
      else if (path == "$intp") List(
        "def $line = " + tquoted(originalLine),
        "def $trees = Nil"
      )
      else List(
        "def $line = " + tquoted(originalLine),
        "def $req = %s.requestForReqId(%s).orNull".format(path, reqId),
        // NOTE(review): this format string has no %s placeholders, so the
        // arguments are ignored and the string is returned unchanged.
        "def $trees = if ($req eq null) Nil else $req.trees".format(lineRep.readName, path, reqId)
      )
    }
    val preamble = """
    |class %s extends Serializable {
    | %s%s%s
    """.stripMargin.format(lineRep.readName, envLines.map(" " + _ + ";\\n").mkString, importsPreamble, indentCode(toCompute))
    // The wrapper class is instantiated once via a companion's INSTANCE val,
    // so user code runs in a serializable instance rather than a singleton.
    val postamble = importsTrailer + "\\n}" + "\\n" +
      "object " + lineRep.readName + " {\\n" +
      " val INSTANCE = new " + lineRep.readName + "();\\n" +
      "}\\n"
    val generate = (m: MemberHandler) => m extraCodeToEvaluate Request.this
    /*
    val preamble = """
    |object %s extends Serializable {
    |%s%s%s
    """.stripMargin.format(lineRep.readName, envLines.map(" " + _ + ";\\n").mkString, importsPreamble, indentCode(toCompute))
    val postamble = importsTrailer + "\\n}"
    val generate = (m: MemberHandler) => m extraCodeToEvaluate Request.this
    */
  }

  private object ResultObjectSourceCode extends CodeAssembler[MemberHandler] {
    /** We only want to generate this code when the result
     *  is a value which can be referred to as-is.
     */
    val evalResult =
      if (!handlers.last.definesValue) ""
      else handlers.last.definesTerm match {
        case Some(vname) if typeOf contains vname =>
          "lazy val %s = %s".format(lineRep.resultName, fullPath(vname))
        case _ => ""
      }
    // first line evaluates object to make sure constructor is run
    // initial "" so later code can uniformly be: + etc
    val preamble = """
    |object %s {
    | %s
    | val %s: String = %s {
    | %s
    | (""
    """.stripMargin.format(
      lineRep.evalName, evalResult, lineRep.printName,
      executionWrapper, lineRep.readName + ".INSTANCE" + accessPath
    )
    val postamble = """
    | )
    | }
    |}
    """.stripMargin
    val generate = (m: MemberHandler) => m resultExtractionCode Request.this
  }

  // get it
  def getEvalTyped[T] : Option[T] = getEval map (_.asInstanceOf[T])
  def getEval: Option[AnyRef] = {
    // ensure it has been compiled
    compile
    // try to load it and call the value method
    lineRep.evalValue filterNot (_ == null)
  }

  /** Compile the object file. Returns whether the compilation succeeded.
   *  If all goes well, the "types" map is computed. */
  lazy val compile: Boolean = {
    // error counting is wrong, hence interpreter may overlook failure - so we reset
    reporter.reset()
    // compile the object containing the user's code
    lineRep.compile(ObjectSourceCode(handlers)) && {
      // extract and remember types
      typeOf
      typesOfDefinedTerms
      // Assign symbols to the original trees
      // TODO - just use the new trees.
      defHandlers foreach { dh =>
        val name = dh.member.name
        definedSymbols get name foreach { sym =>
          dh.member setSymbol sym
          logDebug("Set symbol of " + name + " to " + sym.defString)
        }
      }
      // compile the result-extraction object
      withoutWarnings(lineRep compile ResultObjectSourceCode(handlers))
    }
  }

  lazy val resultSymbol = lineRep.resolvePathToSymbol(accessPath)
  // Apply `f` to the named non-private member of the compiled result object.
  def applyToResultMember[T](name: Name, f: Symbol => T) = afterTyper(f(resultSymbol.info.nonPrivateDecl(name)))

  /* typeOf lookup with encoding */
  def lookupTypeOf(name: Name) = typeOf.getOrElse(name, typeOf(global.encode(name.toString)))
  def simpleNameOfType(name: TypeName) = (compilerTypeOf get name) map (_.typeSymbol.simpleName)

  private def typeMap[T](f: Type => T) =
    mapFrom[Name, Name, T](termNames ++ typeNames)(x => f(cleanMemberDecl(resultSymbol, x)))

  /** Types of variables defined by this request. */
  lazy val compilerTypeOf = typeMap[Type](x => x) withDefaultValue NoType
  /** String representations of same. */
  lazy val typeOf = typeMap[String](tp => afterTyper(tp.toString))

  // lazy val definedTypes: Map[Name, Type] = {
  //   typeNames map (x => x -> afterTyper(resultSymbol.info.nonPrivateDecl(x).tpe)) toMap
  // }

  // Symbols for every name this request defines; terms via the result object,
  // types via the compiler-known type's direct symbol.
  lazy val definedSymbols = (
    termNames.map(x => x -> applyToResultMember(x, x => x)) ++
    typeNames.map(x => x -> compilerTypeOf(x).typeSymbolDirect)
  ).toMap[Name, Symbol] withDefaultValue NoSymbol

  lazy val typesOfDefinedTerms = mapFrom[Name, Name, Type](termNames)(x => applyToResultMember(x, _.tpe))

  /** load and run the code using reflection */
  def loadAndRun: (String, Boolean) = {
    try { ("" + (lineRep call sessionNames.print), true) }
    catch { case ex: Throwable => (lineRep.bindError(ex), false) }
  }

  override def toString = "Request(line=%s, %s trees)".format(line, trees.size)
}
/** Returns the name of the most recent interpreter result.
 *  Mostly this exists so you can conveniently invoke methods on
 *  the previous result.
 */
def mostRecentVar: String =
  mostRecentlyHandledTree.fold("") { tree =>
    val resultName = tree match {
      case defn: ValOrDefDef          => defn.name
      case Assign(Ident(target), _)   => target
      case ModuleDef(_, modName, _)   => modName
      case _                          => naming.mostRecentVar
    }
    "" + resultName
  }
private var mostRecentWarnings: List[(global.Position, String)] = Nil
def lastWarnings = mostRecentWarnings

def treesForRequestId(id: Int): List[Tree] =
  requestForReqId(id).toList flatMap (_.trees)

// Checks the currently-executing request first, then the history.
def requestForReqId(id: Int): Option[Request] =
  if (executingRequest != null && executingRequest.reqId == id) Some(executingRequest)
  else prevRequests find (_.reqId == id)

def requestForName(name: Name): Option[Request] = {
  assert(definedNameMap != null, "definedNameMap is null")
  definedNameMap get name
}

// Tries the identifier first as a term name, then as a type name.
def requestForIdent(line: String): Option[Request] =
  requestForName(newTermName(line)) orElse requestForName(newTypeName(line))

// All requests that defined `name`, most recent first.
def requestHistoryForName(name: Name): List[Request] =
  prevRequests.toList.reverse filter (_.definedNames contains name)

def definitionForName(name: Name): Option[MemberHandler] =
  requestForName(name) flatMap { req =>
    req.handlers find (_.definedNames contains name)
  }
// The runtime value most recently bound to `id`, if any.
def valueOfTerm(id: String): Option[AnyRef] =
  requestForName(newTermName(id)) flatMap (_.getEval)

def classOfTerm(id: String): Option[JClass] =
  valueOfTerm(id) map (_.getClass)

// The static (compiler-inferred) type of the named term; NoType when unknown.
def typeOfTerm(id: String): Type = newTermName(id) match {
  case nme.ROOTPKG => RootClass.tpe
  case name => requestForName(name).fold(NoType: Type)(_ compilerTypeOf name)
}

def symbolOfType(id: String): Symbol =
  requestForName(newTypeName(id)).fold(NoSymbol: Symbol)(_ definedTypeSymbol id)

// NOTE(review): requestForIdent is declared to take a String; passing a
// TermName here relies on an implicit Name-to-String conversion being in
// scope — confirm.
def symbolOfTerm(id: String): Symbol =
  requestForIdent(newTermName(id)).fold(NoSymbol: Symbol)(_ definedTermSymbol id)

// Pairs the first non-anonymous superclass of the term's runtime class with
// the term's runtime type.
def runtimeClassAndTypeOfTerm(id: String): Option[(JClass, Type)] = {
  classOfTerm(id) flatMap { clazz =>
    new RichClass(clazz).supers find(c => !(new RichClass(c).isScalaAnonymous)) map { nonAnon =>
      (nonAnon, runtimeTypeOfTerm(id))
    }
  }
}
// The runtime type of the term, when it strictly refines the static type;
// NoType otherwise.
def runtimeTypeOfTerm(id: String): Type = {
  typeOfTerm(id) andAlso { tpe =>
    // Nonlocal return: bail out with NoType when no runtime value exists.
    val clazz = classOfTerm(id) getOrElse { return NoType }
    val staticSym = tpe.typeSymbol
    val runtimeSym = getClassIfDefined(clazz.getName)
    if ((runtimeSym != NoSymbol) && (runtimeSym != staticSym) && (runtimeSym isSubClass staticSym))
      runtimeSym.info
    else NoType
  }
}
/** The type of `member` as declared on `owner`, with any nullary-method
 *  wrapper peeled off and non-public aliases dealiased.
 */
def cleanMemberDecl(owner: Symbol, member: Name): Type = afterTyper {
  val declared = owner.info.nonPrivateDecl(member).tpe
  val unwrapped = declared match {
    case NullaryMethodType(tp) => tp
    case other => other
  }
  normalizeNonPublic(unwrapped)
}
// Early-initializer wiring gives SparkExprTyper its back-reference to this
// interpreter before the trait body runs.
object exprTyper extends {
  val repl: SparkIMain.this.type = imain
} with SparkExprTyper { }

def parse(line: String): Option[List[Tree]] = exprTyper.parse(line)

def symbolOfLine(code: String): Symbol =
  exprTyper.symbolOfLine(code)

def typeOfExpression(expr: String, silent: Boolean = true): Type =
  exprTyper.typeOfExpression(expr, silent)
protected def onlyTerms(xs: List[Name]) = xs collect { case x: TermName => x }
protected def onlyTypes(xs: List[Name]) = xs collect { case x: TypeName => x }

def definedTerms = onlyTerms(allDefinedNames) filterNot isInternalTermName
def definedTypes = onlyTypes(allDefinedNames)
def definedSymbols = prevRequestList.flatMap(_.definedSymbols.values).toSet[Symbol]
def definedSymbolList = prevRequestList flatMap (_.definedSymbolList) filterNot (s => isInternalTermName(s.name))

// Terms with user-given names (i.e. not res0 and not synthetic)
def namedDefinedTerms = definedTerms filterNot (x => isUserVarName("" + x) || directlyBoundNames(x))

// First repl-defined symbol with the given name, or NoSymbol.
private def findName(name: Name) = definedSymbols find (_.name == name) getOrElse NoSymbol
/** Translate a repl-defined identifier into a Symbol.
 */
def apply(name: String): Symbol =
  types(name) orElse terms(name)

/** Look up a repl-defined type (class/trait/type alias) by name. */
def types(name: String): Symbol = {
  val tpname = newTypeName(name)
  findName(tpname) orElse getClassIfDefined(tpname)
}

/** Look up a repl-defined term (val/def/object) by name.
 *
 *  Fix: this previously constructed a TypeName, so `findName` could never
 *  match a term symbol (a TermName never equals a TypeName) and the module
 *  lookup searched the wrong name space; use a TermName, matching the
 *  corresponding fix in scalac's IMain.
 */
def terms(name: String): Symbol = {
  val termname = newTermName(name)
  findName(termname) orElse getModuleIfDefined(termname)
}
// [Eugene to Paul] possibly you could make use of TypeTags here
// ClassTag-driven variants: look up by the runtime class's name.
def types[T: ClassTag] : Symbol = types(classTag[T].runtimeClass.getName)
def terms[T: ClassTag] : Symbol = terms(classTag[T].runtimeClass.getName)
def apply[T: ClassTag] : Symbol = apply(classTag[T].runtimeClass.getName)

def classSymbols = allDefSymbols collect { case x: ClassSymbol => x }
def methodSymbols = allDefSymbols collect { case x: MethodSymbol => x }
/** the previous requests this interpreter has processed */
private var executingRequest: Request = _
private val prevRequests = mutable.ListBuffer[Request]()
private val referencedNameMap = mutable.Map[Name, Request]()
private val definedNameMap = mutable.Map[Name, Request]()
private val directlyBoundNames = mutable.Set[Name]()

def allHandlers = prevRequestList flatMap (_.handlers)
def allDefHandlers = allHandlers collect { case x: MemberDefHandler => x }
def allDefSymbols = allDefHandlers map (_.symbol) filter (_ ne NoSymbol)

def lastRequest = if (prevRequests.isEmpty) null else prevRequests.last
def prevRequestList = prevRequests.toList
def allSeenTypes = prevRequestList flatMap (_.typeOf.values.toList) distinct
def allImplicits = allHandlers filter (_.definesImplicit) flatMap (_.definedNames)
def importHandlers = allHandlers collect { case x: ImportHandler => x }

def visibleTermNames: List[Name] = definedTerms ++ importedTerms distinct

/** Another entry point for tab-completion, ids in scope */
def unqualifiedIds = visibleTermNames map (_.toString) filterNot (_ contains "$") sorted

/** Parse the ScalaSig to find type aliases */
def aliasForType(path: String) = ByteCode.aliasForType(path)
// Run `op` with string unwrapping temporarily disabled, restoring the
// previous setting afterwards even on exception.
def withoutUnwrapping(op: => Unit): Unit = {
  val saved = isettings.unwrapStrings
  isettings.unwrapStrings = false
  try op
  finally isettings.unwrapStrings = saved
}
// Render a symbol's definition string with owner-prefix noise quieted.
def symbolDefString(sym: Symbol) = {
  TypeStrings.quieter(
    afterTyper(sym.defString),
    sym.owner.name + ".this.",
    sym.owner.fullName + "."
  )
}
// Log the parsed trees of generated code for repl debugging.
def showCodeIfDebugging(code: String) {
  /** Secret bookcase entrance for repl debuggers: end the line
   *  with "// show" and see what's going on.
   */
  // NOTE(review): isShow/isShowRaw are currently unused — the "// show"
  // trigger is never consulted below; trees are always logged via logDebug.
  def isShow = code.lines exists (_.trim endsWith "// show")
  def isShowRaw = code.lines exists (_.trim endsWith "// raw")
  // old style
  beSilentDuring(parse(code)) foreach { ts =>
    ts foreach { t =>
      withoutUnwrapping(logDebug(asCompactString(t)))
    }
  }
}
/** Log `msg` alongside `res`, then return `res` unchanged — a pass-through
 *  for inspecting a value in the middle of an expression.
 */
def debugging[T](msg: String)(res: T) = {
  logDebug(s"$msg $res")
  res
}
}
/** Utility methods for the Interpreter. */
object SparkIMain {
  // The two name forms this is catching are the two sides of this assignment:
  //
  // $line3.$read.$iw.$iw.Bippy =
  // $line3.$read$$iw$$iw$Bippy@4a6a00ca
  private def removeLineWrapper(s: String) = s.replaceAll("""\\$line\\d+[./]\\$(read|eval|print)[$.]""", "")
  private def removeIWPackages(s: String) = s.replaceAll("""\\$(iw|iwC|read|eval|print)[$.]""", "")
  private def removeSparkVals(s: String) = s.replaceAll("""\\$VAL[0-9]+[$.]""", "")

  // Strip every kind of repl-generated wrapper name from a user-visible string.
  def stripString(s: String) = removeSparkVals(removeIWPackages(removeLineWrapper(s)))

  // Assembles generated source: preamble, per-contributor code, postamble.
  trait CodeAssembler[T] {
    def preamble: String
    def generate: T => String
    def postamble: String

    def apply(contributors: List[T]): String = stringFromWriter { code =>
      code println preamble
      contributors map generate foreach (code println _)
      code println postamble
    }
  }

  // Optionally rewrites strings (e.g. unmangling repl names) before output.
  trait StrippingWriter {
    def isStripping: Boolean
    def stripImpl(str: String): String
    def strip(str: String): String = if (isStripping) stripImpl(str) else str
  }

  // Optionally truncates long strings, ending them with "..." at the limit.
  trait TruncatingWriter {
    def maxStringLength: Int
    def isTruncating: Boolean
    def truncate(str: String): String = {
      if (isTruncating && (maxStringLength != 0 && str.length > maxStringLength))
        (str take maxStringLength - 3) + "..."
      else str
    }
  }

  // A PrintWriter that strips, then truncates, everything written through it.
  abstract class StrippingTruncatingWriter(out: JPrintWriter)
    extends JPrintWriter(out)
    with StrippingWriter
    with TruncatingWriter {
    self =>
    def clean(str: String): String = truncate(strip(str))
    override def write(str: String) = super.write(clean(str))
  }

  // Writer wired to a SparkIMain's settings for stripping/truncation decisions.
  class ReplStrippingWriter(intp: SparkIMain) extends StrippingTruncatingWriter(intp.out) {
    import intp._
    def maxStringLength = isettings.maxPrintString
    def isStripping = isettings.unwrapStrings
    def isTruncating = reporter.truncationOK
    def stripImpl(str: String): String = naming.unmangle(str)
  }

  class ReplReporter(intp: SparkIMain) extends ConsoleReporter(intp.settings, null, new ReplStrippingWriter(intp)) {
    override def printMessage(msg: String) {
      // Avoiding deadlock when the compiler starts logging before
      // the lazy val is done.
      if (intp.isInitializeComplete) {
        if (intp.totalSilence) ()
        else super.printMessage(msg)
      }
      else Console.println(msg)
    }
  }
}
/** User-tweakable settings for the interpreter, plus a bridge to the
 *  compiler's -deprecation flag.
 */
class SparkISettings(intp: SparkIMain) extends Logging {
  /** A list of paths where :load should look */
  var loadPath = List(".")

  /** Set this to true to see repl machinery under -Yrich-exceptions.
   */
  var showInternalStackTraces = false

  /** The maximum length of toString to use when printing the result
   *  of an evaluation. 0 means no maximum. If a printout requires
   *  more than this number of characters, then the printout is
   *  truncated.
   */
  var maxPrintString = 800

  /** The maximum number of completion candidates to print for tab
   *  completion without requiring confirmation.
   */
  var maxAutoprintCompletion = 250

  /** String unwrapping can be disabled if it is causing issues.
   *  Setting this to false means you will see Strings like "$iw.$iw.".
   */
  var unwrapStrings = true

  // Mirrors the compiler's -deprecation setting, logging transitions.
  def deprecation_=(x: Boolean) = {
    val old = intp.settings.deprecation.value
    intp.settings.deprecation.value = x
    if (!old && x) logDebug("Enabled -deprecation output.")
    else if (old && !x) logDebug("Disabled -deprecation output.")
  }
  def deprecation: Boolean = intp.settings.deprecation.value

  def allSettings = Map(
    "maxPrintString" -> maxPrintString,
    "maxAutoprintCompletion" -> maxAutoprintCompletion,
    "unwrapStrings" -> unwrapStrings,
    "deprecation" -> deprecation
  )

  private def allSettingsString =
    allSettings.toList sortBy (_._1) map { case (k, v) => " " + k + " = " + v + "\\n" } mkString

  override def toString = """
  | SparkISettings {
  | %s
  | }""".stripMargin.format(allSettingsString)
}
| sryza/spark | repl/src/main/scala/org/apache/spark/repl/SparkIMain.scala | Scala | apache-2.0 | 53,908 |
/*
* Copyright (C) 2016 University of Basel, Graphics and Vision Research Group
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package scalismo.ui.view.action.popup
import java.io.File
import scalismo.ui.model.{LandmarkNode, LandmarksNode, SceneNode}
import scalismo.ui.resources.icons.BundledIcon
import scalismo.ui.util.FileIoMetadata
import scalismo.ui.view.ScalismoFrame
import scalismo.ui.view.action.SaveAction
import scala.util.Try
object SaveLandmarksAction extends PopupAction.Factory {
  override def apply(nodes: List[SceneNode])(implicit frame: ScalismoFrame): List[PopupAction] = {
    allMatch[LandmarkNode](nodes) match {
      case Nil =>
        // No individual landmark nodes selected: the selection may still be a
        // LandmarksNode (the collection), whose children we can save instead.
        singleMatch[LandmarksNode](nodes) match {
          case Some(collection) if collection.children.nonEmpty =>
            List(new SaveLandmarksAction(collection.children),
                 new SaveLandmarksAction(collection.children, false))
          case _ => Nil
        }
      case landmarks =>
        // Saving landmarks from more than one group at once is unsupported.
        landmarks.map(_.group).distinct match {
          case _ :: Nil => List(new SaveLandmarksAction(landmarks), new SaveLandmarksAction(landmarks, false))
          case _        => Nil
        }
    }
  }
}
// The companion object already performed the safety checks (non-empty node
// list, all landmarks in a single group), so this constructor stays private.
class SaveLandmarksAction private (nodes: List[LandmarkNode], transformedFlag: Boolean = true)(implicit
  val frame: ScalismoFrame)
  extends PopupAction(
    s"Save${if (transformedFlag) " transformed" else " original"} ${FileIoMetadata.Landmarks.description} ...",
    BundledIcon.Save
  ) {
  // All nodes share the same parent collection node; any element's parent works.
  private val parentNode = nodes.head.parent

  // Delegate the actual serialization to the parent collection node.
  def doSave(file: File): Try[Unit] =
    parentNode.saveNodes(nodes, file, transformedFlag)

  override def apply(): Unit =
    new SaveAction(doSave, FileIoMetadata.Landmarks, title).apply()
}
| unibas-gravis/scalismo-ui | src/main/scala/scalismo/ui/view/action/popup/SaveLandmarksAction.scala | Scala | gpl-3.0 | 2,780 |
package wom.graph
import wom.expression.WomExpression
import wom.graph.GraphNodePort.GraphNodeOutputPort
import wom.types.{WomOptionalType, WomType}
/** A graph node that introduces a value into the graph: it has no input
 *  ports and exposes the value through a single output port.
 */
sealed trait GraphInputNode extends GraphNode {
  def womType: WomType
  // The one port through which this input's value flows into the graph.
  lazy val singleOutputPort: GraphNodeOutputPort = GraphNodeOutputPort(localName, womType, this)

  override val inputPorts: Set[GraphNodePort.InputPort] = Set.empty
  override val outputPorts: Set[GraphNodePort.OutputPort] = Set(singleOutputPort)
}
/** An input supplied from outside the workflow (i.e. from an input file). */
sealed trait ExternalGraphInputNode extends GraphInputNode {
  /**
   * The fully qualified name should be the same as the one we expect the key in the input file to have.
   * e.g in WDL:
   * workflow.wdl:
   *   workflow w {
   *     String s # "name" = "s", "fullyQualifiedIdentifier" = "w.s"
   *   }
   *
   * input.json:
   *   {
   *     "w.s": "hi!"
   *   }
   *
   * e.g in CWL:
   * workflow.cwl:
   *   class: Workflow
   *   inputs:
   *     s: string # "name" = "s", "fullyQualifiedIdentifier" = "s"
   *
   * inputs.yml:
   *   s: "hi !"
   *
   */
  // Uses the full identifier (rather than just localName) so the port name
  // lines up with the keys expected in the input file.
  override lazy val singleOutputPort: GraphNodeOutputPort = GraphNodeOutputPort(identifier, womType, this)
}
/** External input that must be provided for the workflow to run. */
final case class RequiredGraphInputNode(override val identifier: WomIdentifier,
                                        womType: WomType) extends ExternalGraphInputNode

/** External input that may be omitted; its type is explicitly optional. */
final case class OptionalGraphInputNode(override val identifier: WomIdentifier,
                                        womType: WomOptionalType) extends ExternalGraphInputNode

// If we want to allow defaults to be "complex" expressions with dependencies we may need to make it an InstantiatedExpression here instead
/** External input with a default expression used when no value is supplied. */
final case class OptionalGraphInputNodeWithDefault(override val identifier: WomIdentifier,
                                                   womType: WomType,
                                                   default: WomExpression) extends ExternalGraphInputNode
object OuterGraphInputNode {
  /** Build an OuterGraphInputNode, marking the identifier's fully qualified
   *  name with a "^" prefix to distinguish it from ordinary inputs.
   */
  def apply(forIdentifier: WomIdentifier, linkToOuterGraph: GraphNodePort.OutputPort, preserveScatterIndex: Boolean): OuterGraphInputNode = {
    val markedIdentifier = forIdentifier.copy(fullyQualifiedName = forIdentifier.fullyQualifiedName.prefixWith("^"))
    new OuterGraphInputNode(markedIdentifier, linkToOuterGraph, preserveScatterIndex)
  }
}
/**
 * Used to represent an input to any GraphNode's inner graph which is a link to a value somewhere in the outer graph.
 */
class OuterGraphInputNode protected(override val identifier: WomIdentifier, val linkToOuterGraph: GraphNodePort.OutputPort, val preserveScatterIndex: Boolean) extends GraphInputNode {
  // The type is dictated by whatever the linked outer-graph port produces.
  override def womType: WomType = linkToOuterGraph.womType
  override lazy val singleOutputPort: GraphNodeOutputPort = GraphNodeOutputPort(identifier, womType, this)
  lazy val linkToOuterGraphNode = linkToOuterGraph.graphNode

  // Convenience pairing of this node's local name with its output port.
  lazy val nameToPortMapping: (String, GraphNodeOutputPort) = localName -> singleOutputPort
}

// The variable introduced by a scatter. womType is supplied explicitly rather
// than derived from the linked port (which carries the scattered expression).
final case class ScatterVariableNode(override val identifier: WomIdentifier,
                                     scatterExpressionOutputPort: GraphNodePort.OutputPort,
                                     override val womType: WomType) extends OuterGraphInputNode(identifier, scatterExpressionOutputPort, preserveScatterIndex = true)
| ohsu-comp-bio/cromwell | wom/src/main/scala/wom/graph/GraphInputNode.scala | Scala | bsd-3-clause | 3,241 |
/************************************************************************\
** Project **
** ______ ______ __ ______ ____ **
** / ____/ / __ / / / / __ / / __/ (c) 2011-2014 **
** / /__ / /_/ / / / / /_/ / / /_ **
** /___ / / ____/ / / / __ / / __/ Erik Osheim, Tom Switzer **
** ____/ / / / / / / / | | / /__ **
** /_____/ /_/ /_/ /_/ |_| /____/ All rights reserved. **
** **
** Redistribution and use permitted under the MIT license. **
** **
\************************************************************************/
package spire
package random
package rng
/**
 * Index cache shared by the Well44497a and Well44497b implementations.
 *
 * Each table maps a pool index i to (i + offset) mod R for one of the
 * generator's fixed offsets, so the generators avoid recomputing the
 * modular arithmetic on every step.
 *
 * <p>The acronym WELL stands for Well Equidistributed Long-period Linear.
 *
 * <p><b>Reference: </b>
 * François Panneton, Pierre L'Ecuyer and Makoto Matsumoto:
 * "Improved Long-Period Generators Based on Linear Recurrences Modulo 2",
 * <i>ACM Transactions on Mathematical Software,</i> Vol. 32, No. 1, January 2006, pp 1--16.
 *
 * @see <a href="http://www.iro.umontreal.ca/~panneton/well/WELL44497a.c">WELL44497a.c</a>
 * @see <a href="http://www.iro.umontreal.ca/~panneton/WELLRNG.html">Well PRNG Home Page</a>
 * @see <a href="http://en.wikipedia.org/wiki/Well_Equidistributed_Long-period_Linear">WELL @ Wikipedia</a>
 * @author <a href="mailto:dusan.kysel@gmail.com">Dušan Kysel</a>
 */
private[random] object Well44497abIndexCache {

  /** Number of bits in the pool. */
  @inline private final val K: Int = 44497

  /** Length of the pool in ints. */
  @inline private final val R: Int = (K + 31) / 32

  /** Length of the pool in ints - 1. */
  @inline private final val R_1: Int = R - 1

  /** Length of the pool in ints - 2. */
  @inline private final val R_2: Int = R - 2

  /** First parameter of the algorithm. */
  @inline private final val M1: Int = 23

  /** Second parameter of the algorithm. */
  @inline private final val M2: Int = 481

  /** Third parameter of the algorithm. */
  @inline private final val M3: Int = 229

  /** Table mapping each index i in [0, R) to (i + offset) mod R. */
  private def rotatedIndices(offset: Int): Array[Int] =
    Array.tabulate(R)(i => (i + offset) % R)

  val vm1 = rotatedIndices(M1)
  val vm2 = rotatedIndices(M2)
  val vm3 = rotatedIndices(M3)
  val vrm1 = rotatedIndices(R_1)
  val vrm2 = rotatedIndices(R_2)
}
| lrytz/spire | core/src/main/scala/spire/random/rng/Well44497abIndexCache.scala | Scala | mit | 2,647 |
package lila.round
package actorApi
import scala.concurrent.Promise
import scala.concurrent.duration._
import chess.format.Uci
import chess.{ Color, MoveMetrics }
import lila.common.IpAddress
import lila.game.Game.PlayerId
import lila.socket.Socket.SocketVersion
/** Signals that the given player has left the game. */
case class ByePlayer(playerId: PlayerId)
/** Asks the round socket for its current [[SocketStatus]]; answered via the promise. */
case class GetSocketStatus(promise: Promise[SocketStatus])
/** Connection snapshot for the two players of a game socket.
  * NOTE(review): the precise meaning of "OnGame" vs "IsGone" (page presence
  * vs. prolonged disconnection) is inferred from the names — confirm against
  * the socket actor that populates this. */
case class SocketStatus(
version: SocketVersion,
whiteOnGame: Boolean,
whiteIsGone: Boolean,
blackOnGame: Boolean,
blackIsGone: Boolean
) {
// Selects the per-color flag: fold(whiteValue, blackValue).
def onGame(color: Color) = color.fold(whiteOnGame, blackOnGame)
def isGone(color: Color) = color.fold(whiteIsGone, blackIsGone)
// The set of colors currently flagged as on the game (empty, one or both).
def colorsOnGame: Set[Color] = Color.all.filter(onGame).toSet
}
/** Per-color presence flags for the game room crowd. */
case class RoomCrowd(white: Boolean, black: Boolean)
/** Connection state change for the bot playing `color` (`v` = connected). */
case class BotConnected(color: Color, v: Boolean)
// Messages handled by the round actor itself.
package round {
// A move/drop submitted by a human player; `promise` (if present) completes
// when the play has been processed.
case class HumanPlay(
playerId: PlayerId,
uci: Uci,
blur: Boolean,
moveMetrics: MoveMetrics = MoveMetrics(),
promise: Option[Promise[Unit]] = None
)
// Result of applying a play: generated events, resulting FEN and last move.
case class PlayResult(events: Events, fen: String, lastMove: Option[String])
case object AbortForce
case object Threefold
case object ResignAi
case class ResignForce(playerId: PlayerId)
case class DrawForce(playerId: PlayerId)
// Draw negotiation messages, keyed by the requesting player.
case class DrawClaim(playerId: PlayerId)
case class DrawYes(playerId: PlayerId)
case class DrawNo(playerId: PlayerId)
// Takeback negotiation messages.
case class TakebackYes(playerId: PlayerId)
case class TakebackNo(playerId: PlayerId)
object Moretime { val defaultDuration = 15.seconds }
// Grants the opponent of `playerId` extra clock time (default 15 seconds).
case class Moretime(playerId: PlayerId, seconds: FiniteDuration = Moretime.defaultDuration)
case object QuietFlag
// A client-reported time flag for `color`, possibly reported by the opponent.
case class ClientFlag(color: Color, fromPlayerId: Option[PlayerId])
case object Abandon
case class ForecastPlay(lastMove: chess.Move)
case class Cheat(color: Color)
// Mouse/board hold statistics used for cheat detection.
case class HoldAlert(playerId: PlayerId, mean: Int, sd: Int, ip: IpAddress)
case class GoBerserk(color: Color, promise: Promise[Boolean])
case object NoStart
case object StartClock
case object TooManyPlies
}
| luanlv/lila | modules/round/src/main/actorApi.scala | Scala | mit | 2,055 |
package ui.shader.builder.value
import ui.shader.builder.types.{GlFloatType, GlVec2Type, GlVec3Type, GlVec4Type}
/** A GLSL `vec3`-typed value with component accessors.
  *
  * Accessors do not evaluate anything; they wrap this value in a
  * component-extraction node whose GLSL text is produced later by `toGlsl`.
  */
abstract class GlVec3Val extends GlValue[GlVec3Type] {

  /** The `.xy` swizzle as a `vec2` value. */
  def xy: GlValue[GlVec2Type] = GlVec2Val(this)

  /** The `.x` component. */
  def x: GlValue[GlFloatType] = new GlVec3ValX(this)

  /** The `.y` component. */
  def y: GlValue[GlFloatType] = new GlVec3ValY(this)

  /** The `.z` component. */
  def z: GlValue[GlFloatType] = new GlVec3ValZ(this)
}
/** Factories for the concrete `vec3` constructor nodes. */
object GlVec3Val {

  /** `vec3(x, y, z)` from three float values. */
  def apply(x: GlValue[GlFloatType],
            y: GlValue[GlFloatType],
            z: GlValue[GlFloatType]): GlVec3Val = new GlVec3ValF(x, y, z)

  /** `vec3(vec2, z)` from a vec2 followed by a float. */
  def apply(vec2: GlValue[GlVec2Type],
            z: GlValue[GlFloatType]): GlVec3Val = new GlVec3ValV2F(vec2, z)

  /** `vec3(x, vec2)` from a float followed by a vec2. */
  def xv2(x: GlValue[GlFloatType],
          vec2: GlValue[GlVec2Type]): GlVec3Val = new GlVec3ValFV2(x, vec2)

  /** `vec3(v)` — wraps an existing vec3 in a constructor call. */
  def apply(vec3: GlValue[GlVec3Type]): GlVec3Val = new GlVec3ValV3F(vec3)

  /** A reference to a named `vec3` variable. */
  def apply(name: String): GlVec3Val = new GlVec3ValVar(name)
}
/** Emits `<vec3>.x`. */
class GlVec3ValX(val vec3: GlValue[GlVec3Type]) extends GlValue[GlFloatType] {
  override def toGlsl: String = vec3.toGlsl + ".x"
}

/** Emits `<vec3>.y`. */
class GlVec3ValY(val vec3: GlValue[GlVec3Type]) extends GlValue[GlFloatType] {
  override def toGlsl: String = vec3.toGlsl + ".y"
}

/** Emits `<vec3>.z`. */
class GlVec3ValZ(val vec3: GlValue[GlVec3Type]) extends GlValue[GlFloatType] {
  override def toGlsl: String = vec3.toGlsl + ".z"
}

/** A named `vec3` variable reference; emits the bare name. */
class GlVec3ValVar(val name: String) extends GlVec3Val {
  override def toGlsl: String = name
}

/** Truncates a `vec4` to a `vec3`; emits `vec3(v.x, v.y, v.z)`.
  * The wrapped value's `toGlsl` is rendered once per component, preserving
  * the original emission order. */
class GlVec3ValVec4(val vec4: GlValue[GlVec4Type]) extends GlVec3Val {
  override def toGlsl: String =
    "vec3(" + vec4.toGlsl + ".x, " + vec4.toGlsl + ".y, " + vec4.toGlsl + ".z)"
}
/** `vec3` built from three float components. */
class GlVec3ValF(val xVal: GlValue[GlFloatType],
                 val yVal: GlValue[GlFloatType],
                 val zVal: GlValue[GlFloatType]) extends GlVec3Val {
  override def toGlsl: String =
    s"vec3(${xVal.toGlsl}, ${yVal.toGlsl}, ${zVal.toGlsl})"
}

/** `vec3` built from a vec2 followed by a float. */
class GlVec3ValV2F(val vec2: GlValue[GlVec2Type],
                   val zVal: GlValue[GlFloatType]) extends GlVec3Val {
  override def toGlsl: String =
    s"vec3(${vec2.toGlsl}, ${zVal.toGlsl})"
}

/** `vec3` built from a float followed by a vec2. */
class GlVec3ValFV2(val xVal: GlValue[GlFloatType],
                   val vec2: GlValue[GlVec2Type]) extends GlVec3Val {
  override def toGlsl: String =
    s"vec3(${xVal.toGlsl}, ${vec2.toGlsl})"
}

/** Wraps an existing vec3 in an explicit `vec3(...)` constructor call. */
class GlVec3ValV3F(val vec3: GlValue[GlVec3Type]) extends GlVec3Val {
  override def toGlsl: String =
    s"vec3(${vec3.toGlsl})"
}
| gvatn/play-scalajs-webgl-spark | client/src/main/scala/ui/shader/builder/value/GlVec3Val.scala | Scala | mit | 2,704 |
/*
* Licensed to The Apereo Foundation under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* The Apereo Foundation licenses this file to you under the Apache License,
* Version 2.0, (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tle.web.settings
/** Descriptor for one entry in the settings listing.
  *
  * Implementations supply display metadata (`name`, `group`, `description`),
  * the link target (`uri`, `uriType`) and whether the current context allows
  * editing (`isEditable`).
  */
trait EditableSettings {
def id: String
def name: String
def group: String
def description: String
def uri: String
def uriType: String
def isEditable: Boolean
}
| equella/Equella | Source/Plugins/Core/com.equella.core/scalasrc/com/tle/web/settings/EditableSettings.scala | Scala | apache-2.0 | 1,010 |
package io.getquill.context.cassandra.monix
import io.getquill.context.cassandra.EncodingSpecHelper
import io.getquill.Query
/** Round-trip encoding tests for the Cassandra + Monix context:
  * delete, bulk-insert the fixture rows, read them back, and verify. */
class EncodingSpec extends EncodingSpecHelper {
"encodes and decodes types" - {
"stream" in {
import monix.execution.Scheduler.Implicits.global
import testMonixDB._
// Clear the table, insert the fixture rows, then read everything back.
val result =
for {
_ <- testMonixDB.run(query[EncodingTestEntity].delete)
_ <- testMonixDB.run(liftQuery(insertValues).foreach(e => query[EncodingTestEntity].insert(e)))
result <- testMonixDB.run(query[EncodingTestEntity])
} yield {
result
}
val f = result.runToFuture
verify(await(f))
}
}
"encodes collections" - {
"stream" in {
import monix.execution.Scheduler.Implicits.global
import testMonixDB._
// Filter by a lifted collection of ids to exercise collection encoding.
val q = quote {
(list: Query[Int]) =>
query[EncodingTestEntity].filter(t => list.contains(t.id))
}
val result =
for {
_ <- testMonixDB.run(query[EncodingTestEntity].delete)
_ <- testMonixDB.run(liftQuery(insertValues).foreach(e => query[EncodingTestEntity].insert(e)))
result <- testMonixDB.run(q(liftQuery(insertValues.map(_.id))))
} yield {
result
}
val f = result.runToFuture
verify(await(f))
}
}
}
| getquill/quill | quill-cassandra-monix/src/test/scala/io/getquill/context/cassandra/monix/EncodingSpec.scala | Scala | apache-2.0 | 1,338 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Changes for SnappyData data platform.
*
* Portions Copyright (c) 2017-2019 TIBCO Software Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.codegen.{GenerateSafeProjection, GenerateUnsafeProjection}
import org.apache.spark.sql.types._
/**
* A [[Projection]] that is calculated by calling the `eval` of each of the specified expressions.
*
* @param expressions a sequence of expressions that determine the value of each column of the
* output row.
*/
class InterpretedProjection(expressions: Seq[Expression]) extends Projection {
// Convenience constructor: binds the expressions to the given input schema.
def this(expressions: Seq[Expression], inputSchema: Seq[Attribute]) =
this(expressions.map(BindReferences.bindReference(_, inputSchema)))
// Propagates the partition index to any non-deterministic sub-expressions
// (e.g. rand()) so they can seed themselves per partition.
override def initialize(partitionIndex: Int): Unit = {
expressions.foreach(_.foreach {
case n: Nondeterministic => n.initialize(partitionIndex)
case _ =>
})
}
// null check is required for when Kryo invokes the no-arg constructor.
protected val exprArray = if (expressions != null) expressions.toArray else null
// Evaluates every expression against the input and packs the results into a
// freshly allocated row. The while loop (rather than map) keeps this
// allocation-light on the per-row hot path.
def apply(input: InternalRow): InternalRow = {
val outputArray = new Array[Any](exprArray.length)
var i = 0
while (i < exprArray.length) {
outputArray(i) = exprArray(i).eval(input)
i += 1
}
new GenericInternalRow(outputArray)
}
override def toString(): String = s"Row => [${exprArray.mkString(",")}]"
}
/**
* A [[MutableProjection]] that is calculated by calling `eval` on each of the specified
* expressions.
*
* @param expressions a sequence of expressions that determine the value of each column of the
* output row.
*/
case class InterpretedMutableProjection(expressions: Seq[Expression]) extends MutableProjection {
// Convenience constructor: binds the expressions to the given input schema.
def this(expressions: Seq[Expression], inputSchema: Seq[Attribute]) =
this(expressions.map(BindReferences.bindReference(_, inputSchema)))
// Scratch space: results are staged here before being copied into the
// target row, so the projection is applied atomically (see apply below).
private[this] val buffer = new Array[Any](expressions.size)
// Propagates the partition index to non-deterministic sub-expressions.
override def initialize(partitionIndex: Int): Unit = {
expressions.foreach(_.foreach {
case n: Nondeterministic => n.initialize(partitionIndex)
case _ =>
})
}
// True when the current target row is an UnsafeRow, in which case writes go
// through the type-specific setters below instead of the generic update.
private var targetUnsafe = false
type UnsafeSetter = (UnsafeRow, Any) => Unit
// Lazily built on the first UnsafeRow target; one setter per output column.
private var setters: Array[UnsafeSetter] = _
private[this] val exprArray = expressions.toArray
private[this] var mutableRow: InternalRow = new GenericInternalRow(exprArray.length)
def currentValue: InternalRow = mutableRow
// Redirects subsequent apply() calls to write into `row` in place.
override def target(row: InternalRow): MutableProjection = {
mutableRow = row
targetUnsafe = row match {
case _: UnsafeRow =>
if (setters == null) {
setters = Array.ofDim[UnsafeSetter](exprArray.length)
for (i <- exprArray.indices) {
// NOTE(review): this match covers only fixed-width primitive types;
// an expression with any other dataType (e.g. StringType) would
// throw a MatchError here — presumably callers only target
// UnsafeRows for all-primitive schemas. Confirm at call sites.
setters(i) = exprArray(i).dataType match {
case IntegerType => (target: UnsafeRow, value: Any) =>
target.setInt(i, value.asInstanceOf[Int])
case LongType => (target: UnsafeRow, value: Any) =>
target.setLong(i, value.asInstanceOf[Long])
case DoubleType => (target: UnsafeRow, value: Any) =>
target.setDouble(i, value.asInstanceOf[Double])
case FloatType => (target: UnsafeRow, value: Any) =>
target.setFloat(i, value.asInstanceOf[Float])
case NullType => (target: UnsafeRow, value: Any) =>
target.setNullAt(i)
case BooleanType => (target: UnsafeRow, value: Any) =>
target.setBoolean(i, value.asInstanceOf[Boolean])
case ByteType => (target: UnsafeRow, value: Any) =>
target.setByte(i, value.asInstanceOf[Byte])
case ShortType => (target: UnsafeRow, value: Any) =>
target.setShort(i, value.asInstanceOf[Short])
}
}
}
true
case _ => false
}
this
}
override def apply(input: InternalRow): InternalRow = {
var i = 0
while (i < exprArray.length) {
// Store the result into buffer first, to make the projection atomic (needed by aggregation)
buffer(i) = exprArray(i).eval(input)
i += 1
}
// Second pass: flush the staged values into the target row.
i = 0
while (i < exprArray.length) {
if (targetUnsafe) {
setters(i)(mutableRow.asInstanceOf[UnsafeRow], buffer(i))
} else {
mutableRow(i) = buffer(i)
}
i += 1
}
mutableRow
}
}
/**
* A projection whose result rows are always in the binary [[UnsafeRow]] format.
*/
abstract class UnsafeProjection extends Projection {
override def apply(row: InternalRow): UnsafeRow
}
object UnsafeProjection {
/**
* Returns an UnsafeProjection for given StructType.
*/
def create(schema: StructType): UnsafeProjection = create(schema.fields.map(_.dataType))
/**
* Returns an UnsafeProjection for given Array of DataTypes.
* Columns are bound positionally and treated as nullable.
*/
def create(fields: Array[DataType]): UnsafeProjection = {
create(fields.zipWithIndex.map(x => new BoundReference(x._2, x._1, true)))
}
/**
* Returns an UnsafeProjection for given sequence of Expressions (bounded).
* CreateNamedStruct nodes are swapped for their unsafe variant before
* code generation.
*/
def create(exprs: Seq[Expression]): UnsafeProjection = {
val unsafeExprs = exprs.map(_ transform {
case CreateNamedStruct(children) => CreateNamedStructUnsafe(children)
})
GenerateUnsafeProjection.generate(unsafeExprs)
}
def create(expr: Expression): UnsafeProjection = create(Seq(expr))
/**
* Returns an UnsafeProjection for given sequence of Expressions, which will be bound to
* `inputSchema`.
*/
def create(exprs: Seq[Expression], inputSchema: Seq[Attribute]): UnsafeProjection = {
create(exprs.map(BindReferences.bindReference(_, inputSchema)))
}
/**
* Same as other create()'s but allowing enabling/disabling subexpression elimination.
* TODO: refactor the plumbing and clean this up.
*/
def create(
exprs: Seq[Expression],
inputSchema: Seq[Attribute],
subexpressionEliminationEnabled: Boolean): UnsafeProjection = {
val e = exprs.map(BindReferences.bindReference(_, inputSchema))
.map(_ transform {
case CreateNamedStruct(children) => CreateNamedStructUnsafe(children)
})
GenerateUnsafeProjection.generate(e, subexpressionEliminationEnabled)
}
}
/**
* A projection that converts [[UnsafeRow]]s back into GenericInternalRow
* (i.e. "safe") representation.
*/
object FromUnsafeProjection {
/**
* Returns a Projection for given StructType.
*/
def apply(schema: StructType): Projection = {
apply(schema.fields.map(_.dataType))
}
/**
* Returns a Projection for given sequence of DataTypes, bound positionally
* and treated as nullable.
*/
def apply(fields: Seq[DataType]): Projection = {
create(fields.zipWithIndex.map(x => new BoundReference(x._2, x._1, true)))
}
/**
* Returns a Projection for given sequence of Expressions (bounded).
*/
private def create(exprs: Seq[Expression]): Projection = {
GenerateSafeProjection.generate(exprs)
}
}
| SnappyDataInc/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Projection.scala | Scala | apache-2.0 | 8,364 |
/*
* @author Philip Stutz
* @author Sara Magliacane
*
* Copyright 2014 University of Zurich & VU University Amsterdam
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.admm.graph
import com.signalcollect.GraphEditor
import com.signalcollect.MemoryEfficientDataGraphVertex
import com.signalcollect.admm.optimizers.OptimizableFunction
/**
* In the ADMM algorithm there are two types of nodes: consensus variable nodes and subproblem nodes.
* Each subproblem node represents a piece of the big problem that can be solved independently, which is an optimizable function.
* For example, in PSL this node represents the truth value of a grounded rule, e.g. "friend(bob, anna) AND votes(anna, DP) => votes (bob, DP)".
* Each subproblem node is connected to all the consensus variables of the variables it uses.
* For example, in PSL the subproblem "friend(bob, anna) AND votes(anna, DP) => votes (bob, DP)" is connected to
* the consensus variables "votes(anna, DP)", "votes(bob, DP)" and "friend(bob, anna)".
*/
class AsyncSubproblemVertex(
subproblemId: Int, // The id of the subproblem.
optimizableFunction: OptimizableFunction) // The function that is contained in the subproblem.
extends SubproblemVertex(subproblemId, optimizableFunction) {
// Count of signals received since the last collect; when it reaches the
// number of connected consensus variables, the vertex collects.
var signalsReceivedSinceCollect = 0
// After signaling, stay quiet until the next full round of signals arrives.
override def executeSignalOperation(graphEditor: GraphEditor[Int, Double]) {
super.executeSignalOperation(graphEditor)
shouldSignal = false
}
var shouldSignal = true
/**
* Overriding the internal S/C signal implementation.
* Collects (recomputes state) only once a signal has arrived from every
* connected consensus variable, then re-enables signaling.
* NOTE(review): the counter increments per delivery, not per distinct
* source — two deliveries from the same source before a collect would
* trigger an early collect. Presumably each source signals exactly once
* per round in this async schedule; confirm against the scheduler.
*/
override def deliverSignalWithSourceId(signal: Double, sourceId: Int, graphEditor: GraphEditor[Int, Double]): Boolean = {
signalsReceivedSinceCollect += 1
mostRecentSignalMap.put(sourceId, signal)
if (signalsReceivedSinceCollect == _targetIds.size) {
signalsReceivedSinceCollect = 0
state = collect
shouldSignal = true
true
} else {
false
}
}
// Collection is driven from deliverSignalWithSourceId, never scheduled.
override def scoreCollect = 0
// Signal exactly once per completed round of received signals.
override def scoreSignal = {
if (shouldSignal) {
1
} else {
0
}
}
}
| uzh/fox | src/main/scala/com/signalcollect/admm/graph/AsyncSubproblemVertex.scala | Scala | apache-2.0 | 2,640 |
package edu.gsu.cs.kgem.io
import java.io.{File, PrintStream}

import scala.Some
import scala.collection.JavaConversions._
import scala.util.control.NonFatal

import org.biojava3.core.sequence.DNASequence
import org.biojava3.core.sequence.io.FastaWriterHelper

import edu.gsu.cs.kgem.exec._
import edu.gsu.cs.kgem.model.Genotype
/**
* Created with IntelliJ IDEA.
* User: aartyomenko
* Date: 3/30/13
* Time: 3:23 PM
*/
/**
 * Serialization helpers for KGEM results: writes haplotypes and expanded
 * (frequency-weighted) read sets as FASTA, plus small collection/statistics
 * utilities used by the output pipeline.
 */
object OutputHandler {
  /** Default output file name used when no explicit file is given. */
  val DEFAULT_FILENAME = "haplotypes.fa"

  /**
   * Output corrected reads into specified {@see PrintStream}
   * @param out
   * { @see PrintStream} object, either file or stdout
   * @param gens
   * Collection of haplotypes (Result)
   */
  @deprecated
  def outputResult(out: PrintStream, gens: Iterable[Genotype]) = {
    outputHaplotypes(out, gens)
  }

  /**
   * Expand each haplotype into `freq * n` identical reads (frequency
   * truncated to an integer count) and write them as FASTA.
   * Leading/trailing 'N's are stripped from every sequence.
   *
   * @param out   { @see PrintStream} object, either file or stdout
   * @param gens  Collection of haplotypes (Result)
   * @param n     Number of reads to distribute among haplotypes
   * @param clean Optional post-processing applied to each consensus string
   */
  def outputResult(out: PrintStream, gens: Iterable[Genotype], n: Int, clean: (String => String) = s => s) = {
    var i = 0
    val haplSeqs = gens.flatMap(g => {
      // Copies of this haplotype, proportional to its frequency (truncated,
      // matching historical behavior — counts may not sum exactly to n).
      val fn = (g.freq * n).toInt
      val cleanedSeq = trim(clean(g.toIntegralString), 'N')
      (0 until fn).map(_ => {
        val dna = new DNASequence(cleanedSeq)
        dna.setOriginalHeader("read%d".format(i))
        i += 1
        dna
      })
    })
    writeFasta(out, haplSeqs)
  }

  /**
   * Output haplotypes into specified {@see PrintStream}: one FASTA record per
   * genotype, with its id and frequency encoded in the header line.
   *
   * @param out   { @see PrintStream} object, either file or stdout
   * @param gens  Collection of haplotypes (Result)
   * @param clean Optional post-processing applied to each consensus string
   */
  def outputHaplotypes(out: PrintStream, gens: Iterable[Genotype], clean: (String => String) = s => s) = {
    val haplSeqs = gens.map(g => {
      val seq = new DNASequence(trim(clean(g.toIntegralString), 'N'))
      seq.setOriginalHeader("haplotype%d_freq_%.10f".format(g.ID, g.freq))
      seq
    })
    writeFasta(out, haplSeqs)
  }

  /** Strips leading and trailing occurrences of `char` from `str`. */
  private def trim(str: String, char: Char): String =
    str.dropWhile(c => c == char).reverse.dropWhile(c => c == char).reverse

  /** Writes the sequences as nucleotide FASTA, always closing the stream. */
  private def writeFasta(out: PrintStream, seq: Iterable[DNASequence]) {
    try {
      FastaWriterHelper.writeNucleotideSequence(out, seq)
    } finally {
      out.close()
    }
  }

  /**
   * Setup the output directory and files.
   * @param dir
   * The output directory file. This is where the reconstructed
   * haplotypes and corrected reads will be stored.
   * @return
   * Returns None if the output directory, or output files cannot
   * be created. If they are created successfully then it returns
   * Some(out: PrintStream).
   */
  def setupOutput(dir: File): Option[(PrintStream)] = {
    // Try to make the output directory. If it fails, return None.
    val file = if (dir != null) dir else new File(DEFAULT_FILENAME)
    val parent = file.getParentFile
    val tmp = if (parent == null) new File(System.getProperty(USER_DIR)) else parent
    if (!tmp.exists()) {
      // NOTE(review): mkdir only creates the final path segment; switch to
      // mkdirs() if nested output directories should be supported.
      if (!tmp.mkdir()) {
        println("Cannot create output directory!")
        return None
      }
    }
    try {
      Some(new PrintStream(file))
    } catch {
      // Recover only from non-fatal failures (e.g. FileNotFoundException);
      // fatal errors such as OutOfMemoryError must propagate.
      case NonFatal(_) => log("Cannot create file: " + file.getAbsolutePath); None
    }
  }

  /**
   * Merge two maps with collections as
   * values. Result map will contain union
   * of keys as new keyset and joint lists of
   * values
   * @param m1 Operand 1
   * @param m2 Operand 2
   * @tparam K Generic parameter type of Key
   * @tparam V Generic parameter type of Value
   */
  def merge[K, V](m1: Map[K, Iterable[V]], m2: Map[K, Iterable[V]]): Map[K, Iterable[V]] = {
    val k1 = Set(m1.keysIterator.toList: _*)
    val k2 = Set(m2.keysIterator.toList: _*)
    val intersection = k1 & k2
    // Shared keys get concatenated values; the rest are copied through.
    val r1 = for (key <- intersection) yield key -> (m1(key) ++ m2(key))
    val r2 = m1.filterKeys(!intersection.contains(_)) ++ m2.filterKeys(!intersection.contains(_))
    r2 ++ r1
  }

  /** Arithmetic mean of the values (NaN for empty input). */
  def mean[T](vals: Iterable[T])(implicit ev1: T => Double) = {
    val N = vals.size
    vals.map(_.toDouble).sum / N
  }

  /** Population standard deviation of the values. */
  def sigma[T](vals: Iterable[T])(implicit ev1: T => Double) = {
    val meanValue = mean(vals)
    Math.sqrt(mean(vals.map(x => Math.pow(x - meanValue, 2))))
  }
}
| night-stalker/2SNV | src/main/edu/gsu/cs/2snv/io/OutputHandler.scala | Scala | gpl-2.0 | 4,610 |
package unfiltered.kit
import unfiltered.request._
import unfiltered.response._
/** Routing kits for directing requests to handlers based on paths */
object Routes {
/** Matches request paths that start with the given string to functions
* that take the request and the remaining path string as parameters */
def startsWith[A,B](
route: (String, (HttpRequest[A], String) => ResponseFunction[B])*
) =
toIntent(route) { (req: HttpRequest[A], path, k, rf) =>
if (path.startsWith(k))
Some(rf(req, path.substring(k.length)))
else None
}
/** Matches requests that fully match the given regular expression string
* to functions that take the request and the list of matching groups
* as parameters. */
def regex[A, B](
route: (String, (HttpRequest[A], List[String]) => ResponseFunction[B])*
) =
toIntent(
route.map { case (k, v) => k.r -> v }
) { (req: HttpRequest[A], path, regex, rf) =>
// unapplySeq yields Some(groups) only on a full match of the whole path.
regex.unapplySeq(path).map { groups =>
rf(req, groups)
}
}
/**
* Matches requests that match the given rails-style path specification
* to functions that take the request and a Map of path-keys to their
* values. e.g. "/thing/:thing_id" for the path "/thing/1" would call
* the corresponding function with a `Map("thing_id" -> "1")`.
*/
def specify[A, B](
route: (String, ((HttpRequest[A], Map[String,String]) =>
ResponseFunction[B]))*) =
toIntent(
// Pre-split each spec into its path segments.
route.map {
case (Seg(spec), f) => spec -> f
}
) { (req: HttpRequest[A], path, spec, rf) =>
// NOTE(review): assumes the Seg extractor matches any path string
// (irrefutable) — a MatchError here would mean it does not.
val Seg(actual) = path
if (spec.length != actual.length)
None
else {
// Fold over paired segments: ":name" segments bind into the map,
// literal segments must match exactly; None aborts the whole route.
val start: Option[Map[String,String]] = Some(Map.empty[String,String])
spec.zip(actual).foldLeft(start) {
case (None, _) => None
case (Some(m), (sp, act)) if sp.startsWith(":") =>
Some(m + (sp.substring(1) -> act))
case (opt, (sp, act)) if sp == act =>
opt
case _ => None
}.map { m =>
rf(req, m)
}
}
}
// Shared driver: lazily tries each (key, handler) pair against the request
// path and uses the first result that is not Pass; otherwise Pass.
def toIntent[A,B,K,F](route: Iterable[(K,F)])(
f: (HttpRequest[A], String, K, F) => Option[ResponseFunction[B]]
): unfiltered.Cycle.Intent[A, B] = {
case req @ Path(path) =>
route.view.flatMap { case (key, handler) =>
f(req, path, key, handler)
}.find{ _ != Pass }.getOrElse { Pass }
}
}
| omarkilani/unfiltered | library/src/main/scala/kit/routes.scala | Scala | mit | 2,435 |
package org.jetbrains.plugins.scala.lang.parser.parsing.builder
import com.intellij.lang.PsiBuilder
import com.intellij.lang.impl.PsiBuilderAdapter
import com.intellij.openapi.util.text.StringUtil
import org.jetbrains.plugins.scala.lang.TokenSets
import org.jetbrains.plugins.scala.lang.parser.util.ParserUtils
import scala.collection.mutable.Stack
/**
* @author Alexander Podkhalyuzin
*/
class ScalaPsiBuilderImpl(builder: PsiBuilder)
extends PsiBuilderAdapter(builder) with ScalaPsiBuilder {
private final val newlinesEnabled: Stack[Boolean] = new Stack[Boolean]
// True when at least one line break precedes the current token
// (and newlines are significant in the current region).
def newlineBeforeCurrentToken: Boolean = {
countNewlineBeforeCurrentToken() > 0
}
// True when a blank line (two or more breaks) precedes the current token.
def twoNewlinesBeforeCurrentToken: Boolean = {
countNewlineBeforeCurrentToken() > 1
}
/**
* Classifies the whitespace/comment run before the current token.
* @return 0 if new line is disabled here, there are no line breaks between
* tokens, at EOF, or the current token cannot start a statement;
* 1 if there are line breaks but no blank line between tokens;
* 2 if at least one fully blank line separates the tokens
*/
private def countNewlineBeforeCurrentToken(): Int = {
if (!newlinesEnabled.isEmpty && !newlinesEnabled.top) return 0
if (eof) return 0
if (!ParserUtils.elementCanStartStatement(getTokenType, this)) return 0
// Walk backwards over the contiguous run of whitespace/comment tokens.
var i = 1
while (i < getCurrentOffset && TokenSets.WHITESPACE_OR_COMMENT_SET.contains(rawLookup(-i))) i += 1
val textBefore = getOriginalText.subSequence(rawTokenTypeStart(-i + 1), rawTokenTypeStart(0)).toString
if (!textBefore.contains('\\n')) return 0
// "start"/"end" sentinels keep leading/trailing breaks from producing
// empty edge segments that would be miscounted as blank lines.
val lines = s"start $textBefore end".split('\\n')
if (lines.exists(_.forall(StringUtil.isWhiteSpace))) 2
else 1
}
// Newlines are significant by default (empty stack) or when the innermost
// region explicitly enabled them.
def isNewlinesEnabled = newlinesEnabled.isEmpty || newlinesEnabled.top
// Enters a region where newlines are ignored (e.g. inside parentheses).
def disableNewlines {
newlinesEnabled.push(false)
}
// Enters a region where newlines are significant (e.g. inside braces).
def enableNewlines {
newlinesEnabled.push(true)
}
// Leaves the innermost region; must be paired with a preceding
// disableNewlines/enableNewlines call.
def restoreNewlinesState {
assert(newlinesEnabled.size >= 1)
newlinesEnabled.pop()
}
} | LPTK/intellij-scala | src/org/jetbrains/plugins/scala/lang/parser/parsing/builder/ScalaPsiBuilderImpl.scala | Scala | apache-2.0 | 1,851 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.suiteprop
import org.scalatest._
class TwoTestsIgnoredExamples extends SuiteExamples {
trait Services {
val theTestNames = Vector("first test", "second test")
}
trait NestedTestNames extends Services {
override val theTestNames = Vector("A subject should first test", "A subject should second test")
}
trait DeeplyNestedTestNames extends Services {
override val theTestNames = Vector("A subject when created should first test", "A subject when created should second test")
}
trait NestedTestNamesWithMust extends Services {
override val theTestNames = Vector("A subject must first test", "A subject must second test")
}
trait DeeplyNestedTestNamesWithMust extends Services {
override val theTestNames = Vector("A subject when created must first test", "A subject when created must second test")
}
trait NestedTestNamesWithCan extends Services {
override val theTestNames = Vector("A subject can first test", "A subject can second test")
}
trait DeeplyNestedTestNamesWithCan extends Services {
override val theTestNames = Vector("A subject when created can first test", "A subject when created can second test")
}
type FixtureServices = Services
class SpecExample extends Spec with Services {
@Ignore def `test first` = {}
@Ignore def `test second` = {}
override val theTestNames = Vector("test first", "test second")
}
class FixtureSpecExample extends StringFixtureSpec with Services {
@Ignore def `test first`(s: String) = {}
@Ignore def `test second`(s: String) = {}
override val theTestNames = Vector("test first", "test second")
}
class FunSuiteExample extends FunSuite with Services {
ignore("first test") {}
ignore("second test") {}
}
class FixtureFunSuiteExample extends StringFixtureFunSuite with Services {
ignore("first test") { s => }
ignore("second test") { s => }
}
class FunSpecExample extends FunSpec with Services {
ignore("first test") {}
ignore("second test") {}
}
class NestedFunSpecExample extends FunSpec with NestedTestNames {
describe("A subject") {
ignore("should first test") {}
ignore("should second test") {}
}
}
class DeeplyNestedFunSpecExample extends FunSpec with DeeplyNestedTestNames {
describe("A subject") {
describe("when created") {
ignore("should first test") {}
ignore("should second test") {}
}
}
}
class FixtureFunSpecExample extends StringFixtureFunSpec with Services {
ignore("first test") { s => }
ignore("second test") { s => }
}
class NestedFixtureFunSpecExample extends StringFixtureFunSpec with NestedTestNames {
describe("A subject") {
ignore("should first test") { s => }
ignore("should second test") { s => }
}
}
class DeeplyNestedFixtureFunSpecExample extends StringFixtureFunSpec with DeeplyNestedTestNames {
describe("A subject") {
describe("when created") {
ignore("should first test") { s => }
ignore("should second test") { s => }
}
}
}
class PathFunSpecExample extends path.FunSpec with Services {
ignore("first test") {}
ignore("second test") {}
override def newInstance = new PathFunSpecExample
}
class NestedPathFunSpecExample extends path.FunSpec with NestedTestNames {
describe("A subject") {
ignore("should first test") {}
ignore("should second test") {}
}
override def newInstance = new NestedPathFunSpecExample
}
class DeeplyNestedPathFunSpecExample extends path.FunSpec with DeeplyNestedTestNames {
describe("A subject") {
describe("when created") {
ignore("should first test") {}
ignore("should second test") {}
}
}
override def newInstance = new DeeplyNestedPathFunSpecExample
}
// WordSpec variants of the shared "two ignored tests" example: each suite
// registers two ignored tests via the `ignore` DSL word, nested zero, one or
// two levels deep, with `should`/`must`/`can` subjects, in both the plain and
// the String-fixture suite forms.
class WordSpecExample extends WordSpec with Services {
  "first test" ignore {}
  "second test" ignore {}
}
class NestedWordSpecExample extends WordSpec with NestedTestNames {
  "A subject" should {
    "first test" ignore {}
    "second test" ignore {}
  }
}
class DeeplyNestedWordSpecExample extends WordSpec with DeeplyNestedTestNames {
  "A subject" when {
    "created" should {
      "first test" ignore {}
      "second test" ignore {}
    }
  }
}
class FixtureWordSpecExample extends StringFixtureWordSpec with Services {
  "first test" ignore { s => }
  "second test" ignore { s => }
}
class NestedFixtureWordSpecExample extends StringFixtureWordSpec with NestedTestNames {
  "A subject" should {
    "first test" ignore { s => }
    "second test" ignore { s => }
  }
}
class DeeplyNestedFixtureWordSpecExample extends StringFixtureWordSpec with DeeplyNestedTestNames {
  "A subject" when {
    "created" should {
      "first test" ignore { s => }
      "second test" ignore { s => }
    }
  }
}
class NestedWordSpecWithMustExample extends WordSpec with NestedTestNamesWithMust {
  "A subject" must {
    "first test" ignore {}
    "second test" ignore {}
  }
}
class DeeplyNestedWordSpecWithMustExample extends WordSpec with DeeplyNestedTestNamesWithMust {
  "A subject" when {
    "created" must {
      "first test" ignore {}
      "second test" ignore {}
    }
  }
}
class NestedFixtureWordSpecWithMustExample extends StringFixtureWordSpec with NestedTestNamesWithMust {
  "A subject" must {
    "first test" ignore { s => }
    "second test" ignore { s => }
  }
}
class DeeplyNestedFixtureWordSpecWithMustExample extends StringFixtureWordSpec with DeeplyNestedTestNamesWithMust {
  "A subject" when {
    "created" must {
      "first test" ignore { s => }
      "second test" ignore { s => }
    }
  }
}
class NestedWordSpecWithCanExample extends WordSpec with NestedTestNamesWithCan {
  "A subject" can {
    "first test" ignore {}
    "second test" ignore {}
  }
}
class DeeplyNestedWordSpecWithCanExample extends WordSpec with DeeplyNestedTestNamesWithCan {
  "A subject" when {
    "created" can {
      "first test" ignore {}
      "second test" ignore {}
    }
  }
}
class NestedFixtureWordSpecWithCanExample extends StringFixtureWordSpec with NestedTestNamesWithCan {
  "A subject" can {
    "first test" ignore { s => }
    "second test" ignore { s => }
  }
}
class DeeplyNestedFixtureWordSpecWithCanExample extends StringFixtureWordSpec with DeeplyNestedTestNamesWithCan {
  "A subject" when {
    "created" can {
      "first test" ignore { s => }
      "second test" ignore { s => }
    }
  }
}
// FlatSpec variants: tests are registered as `it <verb> "name" ignore ...`,
// optionally under a subject set via `behavior of` or via the shorthand
// `"A subject" <verb> "first test" ignore ...`.  The `theTestNames` overrides
// record the verb-prefixed names those registrations produce.
class FlatSpecExample extends FlatSpec with Services {
  it should "first test" ignore {}
  it should "second test" ignore {}
  override val theTestNames = Vector("should first test", "should second test")
}
class SubjectFlatSpecExample extends FlatSpec with NestedTestNames {
  behavior of "A subject"
  it should "first test" ignore {}
  it should "second test" ignore {}
}
class ShorthandSubjectFlatSpecExample extends FlatSpec with NestedTestNames {
  "A subject" should "first test" ignore {}
  it should "second test" ignore {}
}
class FixtureFlatSpecExample extends StringFixtureFlatSpec with Services {
  it should "first test" ignore { s => }
  it should "second test" ignore { s => }
  override val theTestNames = Vector("should first test", "should second test")
}
class SubjectFixtureFlatSpecExample extends StringFixtureFlatSpec with NestedTestNames {
  behavior of "A subject"
  it should "first test" ignore { s => }
  it should "second test" ignore { s => }
}
class ShorthandSubjectFixtureFlatSpecExample extends StringFixtureFlatSpec with NestedTestNames {
  "A subject" should "first test" ignore { s => }
  it should "second test" ignore { s => }
}
class FlatSpecWithMustExample extends FlatSpec with Services {
  it must "first test" ignore {}
  it must "second test" ignore {}
  override val theTestNames = Vector("must first test", "must second test")
}
class SubjectFlatSpecWithMustExample extends FlatSpec with NestedTestNamesWithMust {
  behavior of "A subject"
  it must "first test" ignore {}
  it must "second test" ignore {}
}
class ShorthandSubjectFlatSpecWithMustExample extends FlatSpec with NestedTestNamesWithMust {
  "A subject" must "first test" ignore {}
  it must "second test" ignore {}
}
class FixtureFlatSpecWithMustExample extends StringFixtureFlatSpec with Services {
  it must "first test" ignore { s => }
  it must "second test" ignore { s => }
  override val theTestNames = Vector("must first test", "must second test")
}
class SubjectFixtureFlatSpecWithMustExample extends StringFixtureFlatSpec with NestedTestNamesWithMust {
  behavior of "A subject"
  it must "first test" ignore { s => }
  it must "second test" ignore { s => }
}
class ShorthandSubjectFixtureFlatSpecWithMustExample extends StringFixtureFlatSpec with NestedTestNamesWithMust {
  "A subject" must "first test" ignore { s => }
  it must "second test" ignore { s => }
}
class FlatSpecWithCanExample extends FlatSpec with Services {
  it can "first test" ignore {}
  it can "second test" ignore {}
  override val theTestNames = Vector("can first test", "can second test")
}
class SubjectFlatSpecWithCanExample extends FlatSpec with NestedTestNamesWithCan {
  behavior of "A subject"
  it can "first test" ignore {}
  it can "second test" ignore {}
}
class ShorthandSubjectFlatSpecWithCanExample extends FlatSpec with NestedTestNamesWithCan {
  "A subject" can "first test" ignore {}
  it can "second test" ignore {}
}
class FixtureFlatSpecWithCanExample extends StringFixtureFlatSpec with Services {
  it can "first test" ignore { s => }
  it can "second test" ignore { s => }
  override val theTestNames = Vector("can first test", "can second test")
}
class SubjectFixtureFlatSpecWithCanExample extends StringFixtureFlatSpec with NestedTestNamesWithCan {
  behavior of "A subject"
  it can "first test" ignore { s => }
  it can "second test" ignore { s => }
}
class ShorthandSubjectFixtureFlatSpecWithCanExample extends StringFixtureFlatSpec with NestedTestNamesWithCan {
  "A subject" can "first test" ignore { s => }
  it can "second test" ignore { s => }
}
// FreeSpec (plain, fixture, and path.FreeSpec), FeatureSpec and PropSpec
// variants of the same two-ignored-tests example.  The path.* suites override
// `newInstance` so the runner can re-instantiate them per execution path.
class FreeSpecExample extends FreeSpec with Services {
  "first test" ignore {}
  "second test" ignore {}
}
class NestedFreeSpecExample extends FreeSpec with NestedTestNames {
  "A subject" - {
    "should first test" ignore {}
    "should second test" ignore {}
  }
}
class DeeplyNestedFreeSpecExample extends FreeSpec with DeeplyNestedTestNames {
  "A subject" - {
    "when created" - {
      "should first test" ignore {}
      "should second test" ignore {}
    }
  }
}
class FixtureFreeSpecExample extends StringFixtureFreeSpec with Services {
  "first test" ignore { s => }
  "second test" ignore { s => }
}
class NestedFixtureFreeSpecExample extends StringFixtureFreeSpec with NestedTestNames {
  "A subject" - {
    "should first test" ignore { s => }
    "should second test" ignore { s => }
  }
}
class DeeplyNestedFixtureFreeSpecExample extends StringFixtureFreeSpec with DeeplyNestedTestNames {
  "A subject" - {
    "when created" - {
      "should first test" ignore { s => }
      "should second test" ignore { s => }
    }
  }
}
class PathFreeSpecExample extends path.FreeSpec with Services {
  "first test" ignore {}
  "second test" ignore {}
  override def newInstance = new PathFreeSpecExample
}
class NestedPathFreeSpecExample extends path.FreeSpec with NestedTestNames {
  "A subject" - {
    "should first test" ignore {}
    "should second test" ignore {}
  }
  override def newInstance = new NestedPathFreeSpecExample
}
class DeeplyNestedPathFreeSpecExample extends path.FreeSpec with DeeplyNestedTestNames {
  "A subject" - {
    "when created" - {
      "should first test" ignore {}
      "should second test" ignore {}
    }
  }
  override def newInstance = new DeeplyNestedPathFreeSpecExample
}
// FeatureSpec prefixes registered names with "Feature: ..." / "Scenario: ...",
// as recorded in the theTestNames overrides below.
class FeatureSpecExample extends FeatureSpec with Services {
  ignore("first test") {}
  ignore("second test") {}
  override val theTestNames = Vector("Scenario: first test", "Scenario: second test")
}
class NestedFeatureSpecExample extends FeatureSpec with Services {
  feature("A feature") {
    ignore("first test") {}
    ignore("second test") {}
  }
  override val theTestNames = Vector("Feature: A feature Scenario: first test", "Feature: A feature Scenario: second test")
}
class FixtureFeatureSpecExample extends StringFixtureFeatureSpec with Services {
  ignore("first test") { s => }
  ignore("second test") { s => }
  override val theTestNames = Vector("Scenario: first test", "Scenario: second test")
}
class NestedFixtureFeatureSpecExample extends StringFixtureFeatureSpec with Services {
  feature("A feature") {
    ignore("first test") { s => }
    ignore("second test") { s => }
  }
  override val theTestNames = Vector("Feature: A feature Scenario: first test", "Feature: A feature Scenario: second test")
}
class PropSpecExample extends PropSpec with Services {
  ignore("first test") {}
  ignore("second test") {}
}
class FixturePropSpecExample extends StringFixturePropSpec with Services {
  ignore("first test") { s => }
  ignore("second test") { s => }
}
// One lazily-created instance of each example suite defined above.
lazy val spec = new SpecExample
lazy val fixtureSpec = new FixtureSpecExample
lazy val funSuite = new FunSuiteExample
lazy val fixtureFunSuite = new FixtureFunSuiteExample
lazy val funSpec = new FunSpecExample
lazy val nestedFunSpec = new NestedFunSpecExample
lazy val deeplyNestedFunSpec = new DeeplyNestedFunSpecExample
lazy val fixtureFunSpec = new FixtureFunSpecExample
lazy val nestedFixtureFunSpec = new NestedFixtureFunSpecExample
lazy val deeplyNestedFixtureFunSpec = new DeeplyNestedFixtureFunSpecExample
lazy val pathFunSpec = new PathFunSpecExample
lazy val nestedPathFunSpec = new NestedPathFunSpecExample
lazy val deeplyNestedPathFunSpec = new DeeplyNestedPathFunSpecExample
lazy val wordSpec = new WordSpecExample
lazy val nestedWordSpec = new NestedWordSpecExample
lazy val deeplyNestedWordSpec = new DeeplyNestedWordSpecExample
lazy val fixtureWordSpec = new FixtureWordSpecExample
lazy val nestedFixtureWordSpec = new NestedFixtureWordSpecExample
lazy val deeplyNestedFixtureWordSpec = new DeeplyNestedFixtureWordSpecExample
lazy val nestedWordSpecWithMust = new NestedWordSpecWithMustExample
lazy val deeplyNestedWordSpecWithMust = new DeeplyNestedWordSpecWithMustExample
lazy val nestedFixtureWordSpecWithMust = new NestedFixtureWordSpecWithMustExample
lazy val deeplyNestedFixtureWordSpecWithMust = new DeeplyNestedFixtureWordSpecWithMustExample
lazy val nestedWordSpecWithCan = new NestedWordSpecWithCanExample
lazy val deeplyNestedWordSpecWithCan = new DeeplyNestedWordSpecWithCanExample
lazy val nestedFixtureWordSpecWithCan = new NestedFixtureWordSpecWithCanExample
lazy val deeplyNestedFixtureWordSpecWithCan = new DeeplyNestedFixtureWordSpecWithCanExample
lazy val flatSpec = new FlatSpecExample
lazy val subjectFlatSpec = new SubjectFlatSpecExample
lazy val shorthandSubjectFlatSpec = new ShorthandSubjectFlatSpecExample
lazy val fixtureFlatSpec = new FixtureFlatSpecExample
lazy val subjectFixtureFlatSpec = new SubjectFixtureFlatSpecExample
lazy val shorthandSubjectFixtureFlatSpec = new ShorthandSubjectFixtureFlatSpecExample
lazy val flatSpecWithMust = new FlatSpecWithMustExample
lazy val subjectFlatSpecWithMust = new SubjectFlatSpecWithMustExample
lazy val shorthandSubjectFlatSpecWithMust = new ShorthandSubjectFlatSpecWithMustExample
lazy val fixtureFlatSpecWithMust = new FixtureFlatSpecWithMustExample
lazy val subjectFixtureFlatSpecWithMust = new SubjectFixtureFlatSpecWithMustExample
lazy val shorthandSubjectFixtureFlatSpecWithMust = new ShorthandSubjectFixtureFlatSpecWithMustExample
lazy val flatSpecWithCan = new FlatSpecWithCanExample
lazy val subjectFlatSpecWithCan = new SubjectFlatSpecWithCanExample
lazy val shorthandSubjectFlatSpecWithCan = new ShorthandSubjectFlatSpecWithCanExample
lazy val fixtureFlatSpecWithCan = new FixtureFlatSpecWithCanExample
lazy val subjectFixtureFlatSpecWithCan = new SubjectFixtureFlatSpecWithCanExample
lazy val shorthandSubjectFixtureFlatSpecWithCan = new ShorthandSubjectFixtureFlatSpecWithCanExample
lazy val freeSpec = new FreeSpecExample
lazy val nestedFreeSpec = new NestedFreeSpecExample
lazy val deeplyNestedFreeSpec = new DeeplyNestedFreeSpecExample
lazy val fixtureFreeSpec = new FixtureFreeSpecExample
lazy val nestedFixtureFreeSpec = new NestedFixtureFreeSpecExample
lazy val deeplyNestedFixtureFreeSpec = new DeeplyNestedFixtureFreeSpecExample
lazy val pathFreeSpec = new PathFreeSpecExample
lazy val nestedPathFreeSpec = new NestedPathFreeSpecExample
lazy val deeplyNestedPathFreeSpec = new DeeplyNestedPathFreeSpecExample
lazy val featureSpec = new FeatureSpecExample
lazy val nestedFeatureSpec = new NestedFeatureSpecExample
lazy val fixtureFeatureSpec = new FixtureFeatureSpecExample
lazy val nestedFixtureFeatureSpec = new NestedFixtureFeatureSpecExample
lazy val propSpec = new PropSpecExample
lazy val fixturePropSpec = new FixturePropSpecExample
// Two ways to ignore in a flat spec, so add two more examples
override def examples = super.examples ++
  List(
    new FlatSpecExample2,
    new FixtureFlatSpecExample2,
    new FlatSpecWithMustExample2,
    new FixtureFlatSpecWithMustExample2,
    new FlatSpecWithCanExample2,
    new FixtureFlatSpecWithCanExample2
  )
// Second FlatSpec ignore form: `ignore <verb> "name" in { ... }` instead of
// `it <verb> "name" ignore { ... }`.
class FlatSpecExample2 extends FlatSpec with Services {
  ignore should "first test" in {}
  ignore should "second test" in {}
  override val theTestNames = Vector("should first test", "should second test")
}
class FixtureFlatSpecExample2 extends StringFixtureFlatSpec with Services {
  ignore should "first test" in { s => }
  ignore should "second test" in { s => }
  override val theTestNames = Vector("should first test", "should second test")
}
class FlatSpecWithMustExample2 extends FlatSpec with Services {
  ignore must "first test" in {}
  ignore must "second test" in {}
  override val theTestNames = Vector("must first test", "must second test")
}
class FixtureFlatSpecWithMustExample2 extends StringFixtureFlatSpec with Services {
  ignore must "first test" in { s => }
  ignore must "second test" in { s => }
  override val theTestNames = Vector("must first test", "must second test")
}
class FlatSpecWithCanExample2 extends FlatSpec with Services {
  ignore can "first test" in {}
  ignore can "second test" in {}
  override val theTestNames = Vector("can first test", "can second test")
}
class FixtureFlatSpecWithCanExample2 extends StringFixtureFlatSpec with Services {
  ignore can "first test" in { s => }
  ignore can "second test" in { s => }
  override val theTestNames = Vector("can first test", "can second test")
}
}
| SRGOM/scalatest | scalatest-test/src/test/scala/org/scalatest/suiteprop/TwoTestsIgnoredExamples.scala | Scala | apache-2.0 | 19,984 |
package org.rembo.unxml
import io.Source
import scala.xml._
/**
* Convert a scala.io.Source to an Iterator of NodeSeq's from the
* provided XmlPath.
* Based on the Stack Overflow question:
* http://stackoverflow.com/questions/8525675/how-to-get-a-streaming-iteratornode-from-a-large-xml-document
*/
object StreamingNodeTraversable {
  // Desired external API:
  // at(path).readOne[Header] followedBy
  // at(path).readMany[TraversableOnce[Bla]]
  // Return value is a TupleN with max 1 TraversableOnce
  // When returning as a tuple or case class nothing is possible anymore after returning the traversable
  // Other option: Return value is a TraversableOnce[Any] and pattern match on output. Allows for
  // getting multiple sections as traversables but less convenient to work with.

  /** Streams the elements found at `targetPaths` in `input` as a Traversable,
    * without building the whole document tree in memory.
    */
  def readFromSource(targetPaths: List[XmlPath])(input: Source): Traversable[NodeSeq] =
    generatorToTraversable(processSource(input, targetPaths))

  /** Parses `input` with a ConstructingParser subclass that tracks the current
    * element path, invoking `f` with each element whose path equals the head of
    * `targetPaths` and advancing to the next target once matched.
    *
    * NOTE(review): `targets.head` throws NoSuchElementException for an empty
    * `targetPaths` — confirm callers always pass at least one path.
    */
  def processSource[T](input: Source, targetPaths: List[XmlPath])(f: NodeSeq ⇒ T) {
    val parser = new scala.xml.parsing.ConstructingParser(input, false) {
      // Path of the element currently being parsed.
      var currentPath = XmlPath.empty
      // Remaining target paths; the head is the one currently being matched.
      var targets = targetPaths

      // Push the element label onto the current path when an element opens.
      override def elemStart(pos: Int, pre: String, label: String,
                             attrs: MetaData, scope: NamespaceBinding) {
        super.elemStart(pos, pre, label, attrs, scope)
        if (label != null) currentPath = currentPath \ label
      }

      // Pop the label again when the element closes.
      override def elemEnd(pos: Int, pre: String, label: String) {
        if (label != null) currentPath = currentPath.init
        super.elemEnd(pos, pre, label)
      }

      override def elem(pos: Int, pre: String, label: String, attrs: MetaData,
                        pscope: NamespaceBinding, empty: Boolean, nodes: NodeSeq): NodeSeq = {
        val node = super.elem(pos, pre, label, attrs, pscope, empty, nodes)
        if (targets.head == currentPath) {
          if (targets.tail != Nil) targets = targets.tail
          // Hand the matched node to the callback and drop it from the tree
          // being constructed, so matched sections are not retained in memory.
          f(node)
          NodeSeq.Empty
          // } else if (targetPath.startsWith(currentPath)) {
          // node
        } else {
          // Replace uninteresting nodes with a placeholder instead of keeping them.
          <dummy/> // All other nodes are not interesting
          // node
        }
      }
    }
    parser.nextch // initialize per documentation
    parser.document // trigger parsing by requesting document
  }

  /** Adapts a callback-style generator into a Traversable. */
  private def generatorToTraversable[T](func: (T ⇒ Unit) ⇒ Unit) =
    new Traversable[T] {
      def foreach[X](f: T ⇒ X) {
        func(f(_))
      }
    }
}
| remcobeckers/unxml | src/main/scala/org/rembo/unxml/StreamingNodeTraversable.scala | Scala | apache-2.0 | 2,478 |
package org.jetbrains.plugins.scala.lang.resolve2
/**
* Pavel.Fatin, 02.02.2010
*/
/**
 * Resolution tests for qualifier expressions defined in project sources.
 * Each `testXxx` method delegates to `doTest`, which resolves references in
 * the fixture file of the matching name under [[folderPath]].
 *
 * (Fix: removed dataset/metadata junk that had been fused onto the closing
 * brace and made the file invalid.)
 */
class QualifierSourceTest extends ResolveTestBase {
  /** Directory containing the fixture files for these tests. */
  override def folderPath: String = {
    super.folderPath + "qualifier/source/"
  }

  def testChainLong = doTest
  def testChainDeep = doTest
  def testPackage = doTest
  //TODO getClass
  // def testPackageAsValue = doTest
  //TODO packageobject
  // def testPackageObject = doTest
  //TODO packageobject
  // def testPackageObjectAsValue = doTest
}
package moduleclass
// Fixture for annotation-expansion tests: the `@pkg.placebo` macro annotation
// is applied to the object so expansion on a module class (with a companion
// trait of the same name) can be exercised.
// NOTE(review): `placebo` is presumably a no-op annotation defined in `pkg` —
// confirm against that package.
@pkg.placebo
object Bar
trait Bar
| scalamacros/paradise | tests/src/test/scala/annotations/run/module-class/B.scala | Scala | bsd-3-clause | 56 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.Table
import com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest
import scala.util.Random
class IndexSerialTest extends ModuleSerializationTest {
  /** Round-trips an `Index` layer (selecting along dimension 1) through the
    * serialization machinery, feeding it a data tensor plus an index tensor.
    */
  override def test(): Unit = {
    // Layer under test, with a stable name for the serialized module.
    val layer = Index[Float](1).setName("index")

    // Data tensor filled with random values.
    val data = Tensor[Float](3).apply1(_ => Random.nextFloat())

    // Index tensor: 1-based positions 1, 2, 2, 3 into `data`.
    val indices = Tensor[Float](4)
    Seq(1 -> 1f, 2 -> 2f, 3 -> 2f, 4 -> 3f).foreach { case (position, value) =>
      indices(Array(position)) = value
    }

    // The layer takes a two-element Table: (data, indices).
    val input = new Table()
    input(1.toFloat) = data
    input(2.toFloat) = indices

    runSerializationTest(layer, input)
  }
}
| yiheng/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/IndexSpec.scala | Scala | apache-2.0 | 1,304 |
package com.twitter.finagle.http2.transport
import com.twitter.finagle.http2.transport.Http2ClientDowngrader.Message
import io.netty.channel.embedded.EmbeddedChannel
import io.netty.channel.{DefaultChannelPromise, ChannelHandlerContext}
import io.netty.handler.codec.http.{DefaultFullHttpRequest, HttpMethod, HttpVersion,
HttpHeaderNames}
import io.netty.handler.codec.http2.HttpConversionUtil.ExtensionHeaderNames
import io.netty.handler.codec.http2._
import org.junit.runner.RunWith
import org.mockito.Matchers.{eq => meq, _}
import org.mockito.Mockito.{verify, when, RETURNS_SMART_NULLS}
import org.scalatest.mock.MockitoSugar
import org.scalatest.{BeforeAndAfter, FunSuite}
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class RichHttpToHttp2ConnectionHandlerTest extends FunSuite with BeforeAndAfter with MockitoSugar {
  var mockCtx: ChannelHandlerContext = null
  var promise: DefaultChannelPromise = null
  var connectionHandler: HttpToHttp2ConnectionHandler = null
  var request: DefaultFullHttpRequest = null
  var mockEncoder: Http2ConnectionEncoder = null

  // Fresh mocks, handler and request before every test.
  before {
    val mockConnection = mock[DefaultHttp2Connection]
    mockEncoder = mock[Http2ConnectionEncoder](RETURNS_SMART_NULLS)
    val mockDecoder = mock[Http2ConnectionDecoder](RETURNS_SMART_NULLS)
    mockCtx = mock[ChannelHandlerContext](RETURNS_SMART_NULLS)
    val channel = new EmbeddedChannel()
    promise = new DefaultChannelPromise(channel)
    when(mockCtx.newPromise()).thenReturn(promise)
    when(mockEncoder.connection()).thenReturn(mockConnection)
    when(mockDecoder.connection()).thenReturn(mockConnection)
    val settings = new Http2Settings()
    connectionHandler = new RichHttpToHttp2ConnectionHandler(mockDecoder, mockEncoder, settings, () => ())
    request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/")
    request.headers().add(ExtensionHeaderNames.SCHEME.text(), "https")
  }

  /**
   * Asserts that the encoder wrote exactly one HEADERS frame with the given
   * stream id, stream dependency and weight (the remaining writeHeaders
   * arguments are fixed for these tests: not exclusive, no padding, end of
   * stream, written to `mockCtx` with `promise`).
   *
   * Extracted to remove the triplicated 9-argument verify call.
   */
  private def verifyHeadersWritten(streamId: Int, streamDependency: Int, weight: Short): Unit = {
    verify(mockEncoder).writeHeaders(
      meq(mockCtx),
      meq(streamId),
      anyObject(),
      meq(streamDependency),
      meq(weight),
      meq(false),
      meq(0),
      meq(true),
      meq(promise))
  }

  test("Client sets default stream-dependency and weight") {
    val message = Message(request, 1)
    connectionHandler.write(mockCtx, message, promise)
    verifyHeadersWritten(
      streamId = 1,
      streamDependency = 0,
      weight = Http2CodecUtil.DEFAULT_PRIORITY_WEIGHT)
  }

  test("Allows client to specify stream-dependency-id and weight") {
    val streamDependencyId: Int = 15
    val weight: Short = 4
    request.headers().addInt(ExtensionHeaderNames.STREAM_DEPENDENCY_ID.text(), streamDependencyId)
    request.headers().addInt(ExtensionHeaderNames.STREAM_WEIGHT.text(), weight)
    val message = Message(request, 1)
    connectionHandler.write(mockCtx, message, promise)
    verifyHeadersWritten(
      streamId = 1,
      streamDependency = streamDependencyId,
      weight = weight)
  }

  test("Client properly strips bad headers") {
    // Connection-specific headers must not be propagated to HTTP/2.
    request.headers.add(HttpHeaderNames.TE, "cool")
    request.headers.add(HttpHeaderNames.CONNECTION, "bad")
    request.headers.add("bad", "news")
    val message = Message(request, 1)
    connectionHandler.write(mockCtx, message, promise)
    verifyHeadersWritten(
      streamId = 1,
      streamDependency = 0,
      weight = Http2CodecUtil.DEFAULT_PRIORITY_WEIGHT)
  }
}
| koshelev/finagle | finagle-http2/src/test/scala/com/twitter/finagle/http2/transport/RichHttpToHttp2ConnectionHandlerTest.scala | Scala | apache-2.0 | 3,766 |
package controllers
import scala.concurrent.ExecutionContext.Implicits.global
import models.Dealer
import models.Dealer.dealerFormat
import models.Dealer.DealerBSONReader
import models.Dealer.DealerBSONWriter
import models.Address
import models.Address.addressFormat
import models.Address.AddressBSONReader
import models.Address.AddressBSONWriter
import play.api.libs.json.Json
import play.api.mvc.Action
import play.api.mvc.Controller
import play.modules.reactivemongo.MongoController
import reactivemongo.api.collections.default.BSONCollection
import reactivemongo.bson.BSONDocument
import reactivemongo.bson.BSONDocumentIdentity
import reactivemongo.bson.BSONObjectID
import reactivemongo.bson.BSONObjectIDIdentity
import reactivemongo.bson.BSONStringHandler
import reactivemongo.bson.Producer.nameValue2Producer
/*
* Author: Tariq Ansari
*/
/**
 * Asynchronous REST resource for Dealer documents stored in the "dealers"
 * Mongo collection.  All actions exchange JSON.
 */
object Dealers extends Controller with MongoController {
  val collection = db[BSONCollection]("dealers")

  /** Strips the surrounding double quotes that `JsValue.toString` leaves
    * around JSON string nodes.  Shared by create/update for consistency.
    */
  private def asPlainString(jsonNode: Any): String = jsonNode.toString().replace("\"", "")

  /** list all dealers */
  def index = Action { implicit request =>
    Async {
      val cursor = collection.find(
        BSONDocument(), BSONDocument()).cursor[Dealer] // get all the fields of all the dealers
      val futureList = cursor.toList // convert it to a list of Dealer
      futureList.map { dealers => Ok(Json.toJson(dealers)) } // convert it to a JSON and return it
    }
  }

  /** create a dealer from the given JSON */
  def create() = Action(parse.json) { request =>
    Async {
      // Fix: the name previously kept the JSON quotes (unlike update()), and a
      // leftover debug println was removed.
      val name = asPlainString(request.body \ "name")
      val addressJSON = request.body \ "address"
      // NOTE(review): `.get` throws on a malformed address — confirm whether a
      // 400 response would be preferable here.
      val address = addressFormat.reads(addressJSON).get
      val website = asPlainString(request.body \ "website")
      val dealer = Dealer(Option(BSONObjectID.generate), name, address, website) // create the dealer
      collection.insert(dealer).map(
        _ => Ok(Json.toJson(dealer))) // return the created dealer in a JSON
    }
  }

  /** retrieve the dealer for the given id as JSON */
  def show(id: String) = Action(parse.empty) { request =>
    Async {
      val objectID = new BSONObjectID(id) // get the corresponding BSONObjectID
      // get the dealer having this id (there will be 0 or 1 result)
      val futureDealer = collection.find(BSONDocument("_id" -> objectID)).one[Dealer]
      futureDealer.map { dealer => Ok(Json.toJson(dealer)) }
    }
  }

  /** update the dealer for the given id from the JSON body */
  def update(id: String) = Action(parse.json) { request =>
    Async {
      val objectID = new BSONObjectID(id) // get the corresponding BSONObjectID
      val name = asPlainString(request.body \ "name")
      val addressJSON = request.body \ "address"
      val address = addressFormat.reads(addressJSON).get
      val website = asPlainString(request.body \ "website")
      val modifier = BSONDocument( // create the modifier dealer
        "$set" -> BSONDocument(
          "name" -> name,
          "address" -> address,
          "website" -> website))
      collection.update(BSONDocument("_id" -> objectID), modifier).map(
        _ => Ok(Json.toJson(Dealer(Option(objectID), name, address, website)))) // return the modified dealer in a JSON
    }
  }

  /** delete a dealer for the given id */
  def delete(id: String) = Action(parse.empty) { request =>
    Async {
      val objectID = new BSONObjectID(id) // get the corresponding BSONObjectID
      collection.remove(BSONDocument("_id" -> objectID)).map( // remove the dealer
        _ => Ok(Json.obj())).recover { case _ => InternalServerError } // and return an empty JSON while recovering from errors if any
    }
  }
}
| taqgit/carsurebackoffice | app/controllers/Dealers.scala | Scala | gpl-3.0 | 3,668 |
package models
/**
* Created by basso on 07/04/15.
*/
package object mining {
  /** An enumeration of all the mining algorithms (mlops) we use. */
  object Algorithm extends Enumeration {
    // Alias so callers can write `Algorithm` as a type (i.e. Algorithm.Value).
    type Algorithm = Value
    val Clustering, FPM, SVM, ALS = Value
  }
}
/** Notes from Anish:
 * Scrap the oldAlgo.
 *
 * Define each algorithm in its own object.
 * Provide a single function that takes the required parameters and gives a uniform result,
 * preferably a scala.util.Either[ErrorString, Result].
 */
| ChetanBhasin/Veracious | app/models/mining/package.scala | Scala | apache-2.0 | 481 |
// Minimal program entry point: prints a greeting when run.
// (`App` defers the body to `main`, so nothing runs at class-load time.)
object Hello extends App {
  println("Hello, world")
}
| ponkotuy/ProjectEular | src/main/scala/Hello.scala | Scala | mit | 56 |
/* *\\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2015, Gary Keorkunian **
** **
\\* */
package squants.market
import squants._
/**
* Represents a price
*
* A price is an [[squants.Ratio]] between a quantity of [[squants.market.Money]]
* and some other [[squants.Quantity]]
*
* @author garyKeorkunian
* @since 0.1
*
* @param money Money
* @param quantity Quantity
* @tparam A Quantity Type
*/
case class Price[A <: Quantity[A]](money: Money, quantity: A) extends Ratio[Money, A] with Serializable {
  /** The money side of the ratio. */
  def base = money
  /** The quantity side of the ratio. */
  def counter = quantity

  // TODO Add verification that money amounts are the same OR convert
  // NOTE(review): plus/minus assume `that` is priced per the same quantity;
  // the result keeps this price's quantity and simply adds/subtracts the money.
  def plus(that: Price[A]): Price[A] = Price(money + that.money, quantity)
  def +(that: Price[A]): Price[A] = plus(that)
  def minus(that: Price[A]): Price[A] = Price(money - that.money, quantity)
  def -(that: Price[A]): Price[A] = minus(that)

  // Scaling: multiplies/divides the money side, keeping the quantity fixed.
  def times(that: Double): Price[A] = Price(money * that, quantity)
  def *(that: Double): Price[A] = Price(money * that, quantity)
  def times(that: BigDecimal): Price[A] = Price(money * that, quantity)
  def *(that: BigDecimal): Price[A] = Price(money * that, quantity)
  def divide(that: Double): Price[A] = Price(money / that, quantity)
  def /(that: Double): Price[A] = divide(that)
  def divide(that: BigDecimal): Price[A] = Price(money / that, quantity)
  def /(that: BigDecimal): Price[A] = divide(that)

  // NOTE(review): compares raw money amounts only — it ignores both the
  // currencies and the quantities of the two prices; callers must ensure the
  // prices are expressed per the same unit and currency.
  def divide(that: Price[A]): BigDecimal = money.amount / that.money.amount
  def /(that: Price[A]): BigDecimal = divide(that)

  /** This price expressed in another currency, via the implicit exchange context. */
  def in(currency: Currency)(implicit moneyContext: MoneyContext) =
    (money in currency) / quantity

  /**
   * Returns the Cost (Money) for a quantity `that` of A
   * @param that Quantity
   * @return
   */
  def *(that: A): Money = convertToBase(that)

  /**
   * Returns the Quantity that the given amount of Money will buy
   * @param that Money
   * @return
   */
  def *(that: Money): A = convertToCounter(that)

  override def toString = money.toString + "/" + quantity.toString

  def toString(unit: UnitOfMeasure[A]) = money.toString + "/" + quantity.toString(unit)

  def toString(currency: Currency, unit: UnitOfMeasure[A])(implicit moneyContext: MoneyContext) =
    (money in currency).toString + "/" + quantity.toString(unit)
}
| rmihael/squants | shared/src/main/scala/squants/market/Price.scala | Scala | apache-2.0 | 2,684 |
package org.skycastle.core.map
/**
*
*/
/**
 * A single layer of map data exposing a fixed set of named fields.
 *
 * (Fix: removed dataset/metadata junk that had been fused onto the closing
 * brace and made the file invalid.)
 */
trait Layer {
  /** The names of all fields available on this layer. */
  def fields: Set[Symbol]

  /** Returns the field registered under `name`.
    * NOTE(review): behaviour for unknown names is implementation-defined —
    * confirm whether implementations throw or return a default Field.
    */
  def field(name: Symbol): Field
}
// (removed a stray editor command ":q" that was accidentally committed and broke compilation)
import scala.io.Source
/* find most used words in a text */
/* Created by sapodaca@natera.com 3/10/16 */
object CommonWords extends App {
  /** Reads all words from `filename`, lower-cased; runs of non-letter
    * characters delimit words.
    *
    * Fixes: the Source handle is now closed even if reading fails (the
    * original leaked it), and the empty token produced when the text starts
    * with a delimiter is dropped so it can never be counted as a "word".
    */
  def words(filename: String): Array[String] = {
    val source = Source.fromFile(filename)
    try source.mkString.toLowerCase.split("\\P{L}+").filter(_.nonEmpty)
    finally source.close()
  }

  /** The `n` most frequently used words with their counts, most frequent first. */
  def topWords(file: String, n: Int): List[(String, Int)] =
    words(file)
      .groupBy(identity)
      .map { case (word, occurrences) => word -> occurrences.length }
      .toList
      .sortBy(-_._2)
      .take(n)

  println(topWords("warandpeace.txt", 10))
}
| kedarmhaswade/impatiently-j8 | CommonWords.scala | Scala | mit | 536 |
package implement.arena
import framework.arena.Blueprintable
import framework.utility.Coordinate
import org.bukkit.{Material, Location, World}
import org.bukkit.util.Vector
import implement.general.AbstractInformable
import scala.collection.JavaConverters._
import org.bukkit.block.Block
/**
* The Base implementation for Blueprint. Also, shows how awesome Scala is.
*/
/**
 * Base [[Blueprintable]] implementation: a cuboid region between two corner
 * Locations, with helpers for its bounds, layout, and boundary.
 *
 * NOTE(review): both auxiliary constructors pass null corner points, yet the
 * eager `norm1`/`norm2` fields below dereference `point1`/`point2` during the
 * primary constructor — those constructors will NPE as written; confirm
 * whether they are actually used before relying on them.
 */
class AbstractBlueprint(name: String, desc: String, private var point1: Location, private var point2: Location, var w: Option[World], var radius: Option[Int]) extends AbstractInformable(name, desc) with Blueprintable {

  /** Builds the corner points from the extreme coordinates found in `m`. */
  def this(name: String, desc: String, m: java.util.List[Coordinate], w: Option[World]) = {
    this(name, desc, null, null, w, null)
    val map = m.asScala
    val max = map.maxBy(x => x.v.length).v
    val min = map.minBy(x => x.v.length).v
    if (w.isDefined) {
      point1 = w.get.getBlockAt(max.getBlockX, max.getBlockY, max.getBlockZ).getLocation
      point2 = w.get.getBlockAt(min.getBlockX, min.getBlockY, min.getBlockZ).getLocation
    }
  }

  def this(name: String, desc: String) = this(name, desc, null, null, null, null)

  /** Upper/lower pair for a single axis. */
  private[AbstractBlueprint] class Bounds[T](val upper: T, val lower: T)

  private val norm1 = point1.toVector.normalize
  private val norm2 = point2.toVector.normalize
  var xBound = bound(norm1.getBlockX, norm2.getBlockX)
  var yBound = bound(norm1.getBlockY, norm2.getBlockY)
  var zBound = bound(norm1.getBlockZ, norm2.getBlockZ)

  override def getCurrentLayout: java.util.List[Location] = if (w.isDefined) generateArea(w.get) else List[Location]().asJava

  // Snapshot of the layout as (vector, material) coordinates, taken at construction.
  private[arena] var relativeLayout: List[Coordinate] = (for (v: Location <- getCurrentLayout.asScala) yield new Coordinate(v.toVector, v.getBlock.getType)).toList

  /**
   * Gets the Lower and Upper Bounds of the pair given
   * @param x the first number
   * @param y the second number
   * @return a Bounds pair ordered as: Upper-Bound, Lower-Bound
   */
  protected def bound(x: Int, y: Int) = if (x > y) new Bounds(x, y) else new Bounds(y, x)

  /**
   * Gets the difference between the two Ints of a Bounds pair.
   * @param t the Bounds pair
   * @return upper minus lower (always >= 0 for pairs built by `bound`)
   */
  protected def diff(t: Bounds[Int]): Int = t.upper - t.lower

  /** The construction-time layout as a (vector -> material) map. */
  override def getRelativeLayout = {
    (for {
      v <- relativeLayout
    } yield (v.v, v.m)).toMap[Vector, Material].asJava
  }

  override val getSize = diff(xBound) * diff(yBound) * diff(zBound)
  override val getArea = diff(xBound) * diff(zBound)

  /**
   * Default radius implementation just uses the volume method, getSize()
   */
  var getRadius: Int = if (radius.isDefined) radius.get else getSize

  /**
   * Creates a list of Locations covering the cuboid in world `w`.
   *
   * Fix: dropped the dead `b.getDrops.iterator().next()` binding — it was
   * unused and threw NoSuchElementException for blocks with no drops.
   */
  protected final def generateArea(w: World): java.util.List[Location] = {
    (for {
      x <- generateRelativeArea()
      b = w.getBlockAt(x.getBlockX, x.getBlockY, x.getBlockZ)
    } yield b.getLocation).asJava
  }

  /**
   * Creates a list of Vectors: every block coordinate inside the cuboid,
   * bounds inclusive.
   */
  protected final def generateRelativeArea() = {
    for {
      x <- xBound.lower to xBound.upper
      y <- yBound.lower to yBound.upper
      z <- zBound.lower to zBound.upper
    } yield new Vector(x, y, z)
  }

  // NOTE(review): the >= getRadius filter treats any axis value beyond the
  // radius as boundary — confirm this is the intended boundary definition.
  override val getRelativeBoundary = (for (relV <- relativeLayout.filter(v => v.v.getBlockX >= getRadius || v.v.getBlockY >= getRadius || v.v.getBlockZ >= getRadius)) yield relV.v).asJava

  override val getCurrentBoundary = getCurrentLayout.asScala.filter(x => getRelativeBoundary.contains(x.toVector)).asJava

  /** Re-applies the stored relative layout between the two given corners. */
  override def createReferentialLayout(southWest: Location, northEast: Location) {
    if (southWest.distance(northEast) >= getSize) {
      this.w = Option.apply(southWest.getWorld)
      val xb = bound(southWest.getBlockX, northEast.getBlockX)
      val yb = bound(southWest.getBlockY, northEast.getBlockY)
      val zb = bound(southWest.getBlockZ, northEast.getBlockZ)
      val rel = getRelativeLayout
      val w = this.w.get
      // Fix: these ranges previously ran `upper to lower`, which for distinct
      // bounds is an empty Range (positive step), so no block was ever placed.
      // They now run lower-to-upper, matching generateRelativeArea().
      val list = for {
        x <- xb.lower to xb.upper
        y <- yb.lower to yb.upper
        z <- zb.lower to zb.upper
        b = w.getBlockAt(x, y, z)
        v = b.getLocation.toVector.normalize
      } yield new Tuple2[Vector, Block](v, b)
      list.foreach(x => {
        if (rel.containsKey(x._1)) x._2.setType(rel.get(x._1))
      })
    }
  }

  override def createReferentialLayout(center: Location) {
    // NOTE(review): intentionally empty in the original — left unimplemented.
  }
}
package com.twitter.finagle.http2
import io.netty.channel.socket.SocketChannel
import io.netty.channel.{ChannelInitializer, ChannelHandler}
import io.netty.handler.codec.http.HttpServerUpgradeHandler.{UpgradeCodec, UpgradeCodecFactory}
import io.netty.handler.codec.http.{HttpServerCodec, HttpServerUpgradeHandler}
import io.netty.handler.codec.http2.{Http2ServerUpgradeCodec, Http2CodecUtil, Http2MultiplexCodec}
import io.netty.util.AsciiString;
/**
* The handler will be added to all http2 child channels, and must be Sharable.
*/
private[http2] class Http2ServerInitializer(handler: ChannelHandler)
  extends ChannelInitializer[SocketChannel] {

  // Upgrades connections advertising the h2c token to the HTTP/2 multiplex codec.
  // Returning null tells Netty to reject the upgrade for any other protocol token.
  val upgradeCodecFactory: UpgradeCodecFactory = new UpgradeCodecFactory {
    override def newUpgradeCodec(protocol: CharSequence): UpgradeCodec =
      if (!AsciiString.contentEquals(Http2CodecUtil.HTTP_UPGRADE_PROTOCOL_NAME, protocol)) null
      else new Http2ServerUpgradeCodec(new Http2MultiplexCodec(true, handler))
  }

  /** Installs the HTTP/1.1 codec plus the cleartext-upgrade handler on each new channel. */
  def initChannel(ch: SocketChannel): Unit = {
    val pipeline = ch.pipeline()
    val http1Codec = new HttpServerCodec()
    pipeline.addLast(http1Codec)
    pipeline.addLast(new HttpServerUpgradeHandler(http1Codec, upgradeCodecFactory))
  }
}
| sveinnfannar/finagle | finagle-http2/src/main/scala/com/twitter/finagle/http2/Http2ServerInitializer.scala | Scala | apache-2.0 | 1,222 |
package mesosphere.marathon.api.v2
import javax.inject.Inject
import javax.servlet.http.HttpServletRequest
import javax.ws.rs._
import javax.ws.rs.core.Response.Status._
import javax.ws.rs.core.{ Context, MediaType, Response }
import mesosphere.marathon.api.v2.json.Formats._
import mesosphere.marathon.api.{ AuthResource, MarathonMediaType }
import mesosphere.marathon.plugin.auth._
import mesosphere.marathon.state.GroupManager
import mesosphere.marathon.upgrade.DeploymentPlan
import mesosphere.marathon.{ MarathonConf, MarathonSchedulerService }
import mesosphere.util.Logging
@Path("v2/deployments")
@Consumes(Array(MediaType.APPLICATION_JSON))
@Produces(Array(MarathonMediaType.PREFERRED_APPLICATION_JSON))
class DeploymentsResource @Inject() (
    service: MarathonSchedulerService,
    groupManager: GroupManager,
    val authenticator: Authenticator,
    val authorizer: Authorizer,
    val config: MarathonConf)
    extends AuthResource
    with Logging {

  /** Lists running deployments, restricted to plans whose apps the caller may view. */
  @GET
  def running(@Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
    val allRunning = result(service.listRunningDeployments())
    val visible = allRunning.filter(_.plan.affectedApplications.exists(isAuthorized(ViewRunSpec, _)))
    ok(visible)
  }

  /**
   * Cancels deployment `id`. With force the deployment is stopped outright;
   * otherwise a new deployment is created that reverts to the previous state.
   */
  @DELETE
  @Path("{id}")
  def cancel(
    @PathParam("id") id: String,
    @DefaultValue("false")@QueryParam("force") force: Boolean,
    @Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
    val maybePlan = result(service.listRunningDeployments()).find(_.plan.id == id).map(_.plan)
    maybePlan.fold(notFound(s"DeploymentPlan $id does not exist")) { deployment =>
      deployment.affectedApplications.foreach(checkAuthorization(UpdateRunSpec, _))
      if (force) {
        // do not create a new deployment to return to the previous state
        log.info(s"Canceling deployment [$id]")
        service.cancelDeployment(id)
        status(ACCEPTED) // 202: Accepted
      } else {
        // create a new deployment to return to the previous state
        deploymentResult(result(groupManager.update(
          deployment.original.id,
          deployment.revert,
          force = true
        )))
      }
    }
  }
}
| yp-engineering/marathon | src/main/scala/mesosphere/marathon/api/v2/DeploymentsResource.scala | Scala | apache-2.0 | 2,261 |
package org.apache.spark.sql.parser
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.parser.{AbstractSqlParser, ParseException}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.execution.SparkSqlAstBuilder
import org.apache.spark.sql.internal.SQLConf

import scala.util.control.NonFatal
/**
* Created by sandeep on 4/8/17.
*/
class SanSparkSqlParser(conf: SQLConf, sparkSession: SparkSession) extends AbstractSqlParser {
  val astBuilder = new SanSqlAstBuilder(conf)

  /**
   * Parses `sqlText` with Spark's standard grammar; on a ParseException the
   * extended parser is tried as a fallback. Any other recoverable failure is
   * reported via sys.error with messages from both attempts' context.
   */
  override def parsePlan(sqlText: String): LogicalPlan = {
    try {
      super.parsePlan(sqlText)
    } catch {
      case _: ParseException =>
        // standard grammar rejected the text — retry with the extended parser
        astBuilder.parser.parse(sqlText)
      case NonFatal(ex) =>
        // FIX: only trap recoverable errors (the original caught Throwable, swallowing
        // fatal JVM errors) and the second label said "CarbonSqlParser" — a copy/paste.
        sys.error("\\n" + "BaseSqlParser>>>> " + ex.getMessage
          + "\\n" + "SanSqlParser>>>> " + ex.getMessage)
    }
  }
}
/** AST builder that carries the extended SQL parser used as the fallback path. */
class SanSqlAstBuilder(conf: SQLConf) extends SparkSqlAstBuilder(conf) {
  val parser = new SanSparkExtendedSqlParser
}
| sanjosh/scala | spark_extensions/new_command/src/main/scala/org/apache/spark/sql/parser/SanSparkSqlParser.scala | Scala | apache-2.0 | 967 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.h2o.sparkling.ml.utils
import org.apache.spark.sql.types._
object DatasetShape extends Enumeration {
  type DatasetShape = Value
  val Flat, StructsOnly, Nested = Value

  /**
   * Classifies a schema as Flat (primitives only), StructsOnly (structs but no other
   * complex types) or Nested (arrays, maps or binary fields anywhere in the tree).
   */
  def getDatasetShape(schema: StructType): DatasetShape = {
    // Declaration order doubles as severity (Flat < StructsOnly < Nested), so the
    // "worse" of two shapes is simply the larger enumeration value.
    def worseOf(a: DatasetShape, b: DatasetShape): DatasetShape = if (a >= b) a else b

    def shapeOf(field: StructField): DatasetShape = field.dataType match {
      case _: ArrayType | _: MapType | _: BinaryType => Nested
      case struct: StructType => worseOf(StructsOnly, getDatasetShape(struct))
      case _ => Flat
    }

    schema.fields.foldLeft(Flat)((worst, field) => worseOf(worst, shapeOf(field)))
  }
}
| h2oai/sparkling-water | utils/src/main/scala/ai/h2o/sparkling/ml/utils/DatasetShape.scala | Scala | apache-2.0 | 1,879 |
package org.aja.tej.examples.spark.rdd.misc
import org.aja.tej.utils.TejUtils
import org.apache.spark.SparkContext
/**
* Created by mageswaran on 12/8/15.
*/
/*
Will create a checkpoint when the RDD is computed next. Checkpointed RDDs are
stored as a binary file within the checkpoint directory which can be specified using the
Spark context. (Warning: Spark applies lazy evaluation. Checkpointing will not occur
until an action is invoked.)
Important note: the directory ”my directory name” should exist in all slaves. As an
alternative you could use an HDFS directory URL as well.
getCheckpointFile
Returns the path to the checkpoint file or null if RDD has not yet been checkpointed.
*/
object CheckPointExample extends App {

  /** Shows that checkpointing only happens after an action runs on the checkpointed RDD. */
  def useCases(sc: SparkContext): Unit = {
    println(this.getClass.getSimpleName)
    // checkpoint files are written below this directory
    sc.setCheckpointDir("data/CheckPointExampleDir")

    val base = sc.parallelize(1 to 500, 5)
    val unioned = base ++ base ++ base ++ base ++ base

    println("a.isCheckpointed : " + base.isCheckpointed)
    // mark for checkpointing — nothing happens until an action runs
    println("a.checkpoint : " + base.checkpoint)
    println("a.count : " + base.count)
    println("a.isCheckpointed : " + base.isCheckpointed)
    println("a.getCheckpointFile : " + base.getCheckpointFile)
    println("----------------------------------------------------------")
    // the union was never marked, so it never checkpoints
    println("b.isCheckpointed : " + unioned.isCheckpointed)
    println("b.collect : " + unioned.collect)
    println("b.isCheckpointed : " + unioned.isCheckpointed)
    println("b.getCheckpointFile : " + unioned.getCheckpointFile)
  }

  useCases(TejUtils.getSparkContext(this.getClass.getSimpleName))
}
| Mageswaran1989/aja | src/examples/scala/org/aja/tej/examples/spark/rdd/misc/CheckPointExample.scala | Scala | apache-2.0 | 1,948 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeMap, Expression}
import org.apache.spark.sql.catalyst.plans.physical
import org.apache.spark.sql.sources.v2.reader.partitioning.{ClusteredDistribution, Partitioning}
/**
* An adapter from public data source partitioning to catalyst internal `Partitioning`.
*/
class DataSourcePartitioning(
    partitioning: Partitioning,
    colNames: AttributeMap[String]) extends physical.Partitioning {

  override val numPartitions: Int = partitioning.numPartitions()

  override def satisfies(required: physical.Distribution): Boolean = {
    if (super.satisfies(required)) {
      true
    } else {
      required match {
        case d: physical.ClusteredDistribution if isCandidate(d.clustering) =>
          // translate catalyst attributes to the data source's column names
          val columnNames = d.clustering.map { expr =>
            val attr = expr.asInstanceOf[Attribute]
            val name = colNames.get(attr)
            assert(name.isDefined, s"Attribute ${attr.name} is not found in the data source output")
            name.get
          }
          partitioning.satisfy(new ClusteredDistribution(columnNames.toArray))
        case _ => false
      }
    }
  }

  // the clustering can only be translated when every expression is a known attribute
  private def isCandidate(clustering: Seq[Expression]): Boolean =
    clustering.forall {
      case attr: Attribute => colNames.contains(attr)
      case _ => false
    }
}
| brad-kaiser/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourcePartitioning.scala | Scala | apache-2.0 | 2,154 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.stream.table.validation
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.planner.runtime.utils.TestData
import org.apache.flink.table.planner.utils.TableTestUtil
import org.apache.flink.test.util.AbstractTestBase
import org.junit.Test
class UnsupportedOpsValidationTest extends AbstractTestBase {

  /** Builds one streaming table over the shared small tuple data set. */
  private def newTable(): Table = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tEnv = StreamTableEnvironment.create(env, TableTestUtil.STREAM_SETTING)
    env.fromCollection(TestData.smallTupleData3).toTable(tEnv)
  }

  /** Builds two tables registered in the SAME environment, as binary ops require. */
  private def twoTables(): (Table, Table) = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    val tEnv = StreamTableEnvironment.create(env, TableTestUtil.STREAM_SETTING)
    val t1 = env.fromCollection(TestData.smallTupleData3).toTable(tEnv)
    val t2 = env.fromCollection(TestData.smallTupleData3).toTable(tEnv)
    (t1, t2)
  }

  @Test(expected = classOf[ValidationException])
  def testSort(): Unit = {
    newTable().orderBy('_1.desc)
  }

  @Test(expected = classOf[ValidationException])
  def testJoin(): Unit = {
    val (t1, t2) = twoTables()
    t1.join(t2)
  }

  @Test(expected = classOf[ValidationException])
  def testUnion(): Unit = {
    val (t1, t2) = twoTables()
    t1.union(t2)
  }

  @Test(expected = classOf[ValidationException])
  def testIntersect(): Unit = {
    val (t1, t2) = twoTables()
    t1.intersect(t2)
  }

  @Test(expected = classOf[ValidationException])
  def testIntersectAll(): Unit = {
    val (t1, t2) = twoTables()
    t1.intersectAll(t2)
  }

  @Test(expected = classOf[ValidationException])
  def testMinus(): Unit = {
    val (t1, t2) = twoTables()
    t1.minus(t2)
  }

  @Test(expected = classOf[ValidationException])
  def testMinusAll(): Unit = {
    val (t1, t2) = twoTables()
    t1.minusAll(t2)
  }

  @Test(expected = classOf[ValidationException])
  def testOffset(): Unit = {
    newTable().offset(5)
  }

  @Test(expected = classOf[ValidationException])
  def testFetch(): Unit = {
    newTable().fetch(5)
  }
}
| GJL/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/stream/table/validation/UnsupportedOpsValidationTest.scala | Scala | apache-2.0 | 4,620 |
package controllers
import scalaz._
import Scalaz._
import scalaz.Validation._
import net.liftweb.json._
import io.megam.auth.funnel._
import io.megam.auth.funnel.FunnelErrors._
import play.api.mvc._
import controllers.stack.Results
/**
 * REST endpoints for SSH keys: create (post), list by organisation (list),
 * and fetch by name (show). Every action runs through the API auth stack
 * and converts failures into FunnelResponse JSON.
 */
object SshKeys extends Controller with controllers.stack.APIAuthElement {

  /** Creates an SSH key from the funneled request body; 201 Created on success. */
  def post = StackAction(parse.tolerantText) { implicit request =>
    (Validation.fromTryCatchThrowable[Result, Throwable] {
      reqFunneled match {
        case Success(succ) => {
          val freq = succ.getOrElse(throw new Error("Invalid header."))
          // `email` is never read below — evaluating it validates the header (throws when absent)
          val email = freq.maybeEmail.getOrElse(throw new Error("Email not found (or) invalid."))
          val clientAPIBody = freq.clientAPIBody.getOrElse(throw new Error("Body not found (or) invalid."))
          models.base.SshKeys.create(grabAuthBag, clientAPIBody) match {
            case Success(succ) =>
              Status(CREATED)(
                FunnelResponse(CREATED, """SSHKey created successfully.""", "Megam::SshKey").toJson(true))
            case Failure(err) =>
              // HttpReturningError is usable as a FunnelResponse (subtype or implicit view — TODO confirm)
              val rn: FunnelResponse = new HttpReturningError(err)
              Status(rn.code)(rn.toJson(true))
          }
        }
        case Failure(err) => {
          val rn: FunnelResponse = new HttpReturningError(err)
          Status(rn.code)(rn.toJson(true))
        }
      }
    }).fold(succ = { a: Result => a }, fail = { t: Throwable => Status(BAD_REQUEST)(t.getMessage) })
  }

  /** Lists every SSH key in the caller's organisation. */
  def list = StackAction(parse.tolerantText) { implicit request =>
    (Validation.fromTryCatchThrowable[Result, Throwable] {
      reqFunneled match {
        case Success(succ) => {
          val freq = succ.getOrElse(throw new Error("Invalid header."))
          // header validation side effect only (see post)
          val email = freq.maybeEmail.getOrElse(throw new Error("Email not found (or) invalid."))
          models.base.SshKeys.findByOrgId(grabAuthBag) match {
            case Success(succ) =>
              Ok(Results.resultset(models.Constants.SSHKEYCOLLECTIONCLAZ, compactRender(Extraction.decompose(succ))))
            case Failure(err) =>
              val rn: FunnelResponse = new HttpReturningError(err)
              Status(rn.code)(rn.toJson(true))
          }
        }
        case Failure(err) => {
          val rn: FunnelResponse = new HttpReturningError(err)
          Status(rn.code)(rn.toJson(true))
        }
      }
    }).fold(succ = { a: Result => a }, fail = { t: Throwable => Status(BAD_REQUEST)(t.getMessage) })
  }

  /** Shows the SSH key(s) with the given name. */
  def show(id: String) = StackAction(parse.tolerantText) { implicit request =>
    (Validation.fromTryCatchThrowable[Result, Throwable] {
      reqFunneled match {
        case Success(succ) => {
          val freq = succ.getOrElse(throw new Error("Invalid header."))
          // header validation side effect only (see post)
          val email = freq.maybeEmail.getOrElse(throw new Error("Email not found (or) invalid."))
          models.base.SshKeys.findByName(List(id).some) match {
            case Success(succ) =>
              Ok(Results.resultset(models.Constants.SSHKEYCOLLECTIONCLAZ, compactRender(Extraction.decompose(succ))))
            case Failure(err) =>
              val rn: FunnelResponse = new HttpReturningError(err)
              Status(rn.code)(rn.toJson(true))
          }
        }
        case Failure(err) => {
          val rn: FunnelResponse = new HttpReturningError(err)
          Status(rn.code)(rn.toJson(true))
        }
      }
    }).fold(succ = { a: Result => a }, fail = { t: Throwable => Status(BAD_REQUEST)(t.getMessage) })
  }
}
| indykish/vertice_gateway | app/controllers/SshKeys.scala | Scala | mit | 3,502 |
package chapter.twelve
object ExerciseThree {

  /**
   * Computes n! for a non-negative n.
   *
   * Generalized to accept 0 (0! == 1), which the previous `require(n > 0)` rejected
   * and whose empty range would have made `reduceLeft` throw; positive inputs
   * behave exactly as before.
   *
   * @param n non-negative integer (note: Int overflows for n > 12)
   * @return n factorial
   * @throws IllegalArgumentException if n is negative
   */
  def factorial(n: Int): Int = {
    require(n >= 0, s"factorial is undefined for negative n: $n")
    // product of the empty range (n == 0) is 1, the correct value of 0!
    (1 to n).product
  }
}
| deekim/impatient-scala | src/main/scala/chapter/twelve/ExerciseThree.scala | Scala | apache-2.0 | 139 |
package org.s3q
import com.aboutus.auctors.kernel.reactor.{DefaultCompletableFutureResult, FutureTimeoutException}
import org.mortbay.jetty.client.ContentExchange
import org.mortbay.io.Buffer
import java.util.concurrent._
import net.lag.configgy.Configgy
import net.lag.logging.Logger
/**
 * Policy applied when the client's bounded request queue is full.
 *
 * FIX: the original `abstract case class Eviction()` gave every policy the
 * field-free case-class equality it inherited, so `DiscardPolicy == AppendPolicy`
 * could evaluate to true (and case-to-case inheritance is rejected by modern
 * compilers). A sealed plain class restores identity-based case-object equality.
 */
sealed abstract class Eviction {
  val name: String
}

object Eviction {
  /** Parses "discard"/"append"; throws IllegalArgumentException for anything else. */
  implicit def string2eviction(name: String): Eviction = {
    name match {
      case "discard" => DiscardPolicy
      case "append" => AppendPolicy
      case _ => throw new IllegalArgumentException("Invalid eviction policy")
    }
  }
}

/** Drop the evicted exchange silently. */
case object DiscardPolicy extends Eviction {
  override val name = "discard"
}

/** Retry (re-append) the evicted exchange. */
case object AppendPolicy extends Eviction {
  override val name = "append"
}
/**
 * Connection settings for the S3 client.
 *
 * The telescoping auxiliary constructors were replaced by default arguments with
 * the same values, so all previous positional `new S3Config(...)` call shapes
 * still compile and produce identical configurations.
 *
 * @param accessKeyId     AWS access key id
 * @param secretAccessKey AWS secret access key
 * @param maxConcurrency  capacity of the in-flight request queue
 * @param timeout         per-request future timeout (ms)
 * @param hostname        S3 endpoint host
 * @param evictionPolicy  what to do with the queue head when the queue is full
 */
case class S3Config(
  accessKeyId: String,
  secretAccessKey: String,
  maxConcurrency: Int = 500,
  timeout: Int = 6000,
  hostname: String = "s3.amazonaws.com",
  evictionPolicy: Eviction = AppendPolicy
)
/**
 * Asynchronous S3 client on top of Jetty's HttpClient (select-channel connector).
 * In-flight exchanges are tracked in a bounded queue of `config.maxConcurrency`
 * slots; when the queue is full its head is evicted per `config.evictionPolicy`.
 */
class S3Client(val config:S3Config) {
  private val log = Logger.get

  // one slot per in-flight exchange; capacity enforces maxConcurrency
  val activeRequests = new ArrayBlockingQueue[S3Exchange](config.maxConcurrency)

  val client = new org.mortbay.jetty.client.HttpClient
  client.setConnectorType(org.mortbay.jetty.client.HttpClient.CONNECTOR_SELECT_CHANNEL)
  client.start

  /** Queues the request and returns its response wrapper (built lazily by the request). */
  def execute(request: S3Request): S3Response = {
    val exchange = new S3Exchange(this, request, activeRequests)
    log.debug("Queuing request... %s slots remaining", activeRequests.remainingCapacity())
    executeOnQueue(exchange).response
  }

  /** List variant: assumes an S3List request produces an S3ListResponse. */
  def execute(request: S3List): S3ListResponse = {
    execute(request.asInstanceOf[S3Request]).asInstanceOf[S3ListResponse]
  }

  def queueFull = activeRequests.remainingCapacity() == 0

  /**
   * Executes the exchange; when the queue is full, evicts the head first.
   * AppendPolicy retries the evicted exchange, DiscardPolicy drops it.
   * NOTE(review): the queueFull check and the eviction are not atomic, so
   * concurrent callers can race between poll and put.
   */
  def executeOnQueue(exchange: S3Exchange): S3Exchange = {
    if (queueFull) {
      val evicted = evictHeadFromQueue
      executeExchange(exchange)
      config.evictionPolicy match {
        case DiscardPolicy =>
        case AppendPolicy => {
          evicted match {
            case Some(ex) => ex.response.retry(new Exception)
            case None =>
          }
        }
      }
    } else {
      executeExchange(exchange)
    }
    exchange
  }

  /** Registers the exchange on the queue (blocks when full) and hands it to Jetty. */
  def executeExchange(exchange: S3Exchange): S3Exchange = {
    activeRequests.put(exchange)
    client.send(exchange)
    exchange
  }

  /** Removes and returns the oldest in-flight exchange, logging the eviction. */
  def evictHeadFromQueue: Option[S3Exchange] = {
    activeRequests.poll match {
      case ex: S3Exchange => {
        log.warning("Eviction on full queue (Policy: " + config.evictionPolicy.name + "): " + ex.request.bucket + " " + ex.request.path)
        Some(ex)
      }
      case null => None
    }
  }
}
/**
 * One in-flight HTTP exchange against S3. Configures itself from the request,
 * tracks completion through a future, and frees its slot on the client's
 * bounded queue when it finishes (successfully or not).
 */
class S3Exchange(val client: S3Client, val request: S3Request,
  activeRequests: BlockingQueue[S3Exchange]) extends ContentExchange {

  setMethod(request.verb)
  setURL(request.url)

  // only requests that carry a body set request content
  request.body match {
    case Some(string) => setRequestContent(string)
    case None => ()
  }

  for ((key, value) <- request.headers) {
    setRequestHeader(key, value)
  }

  // built lazily: the request decides how to wrap this exchange
  lazy val response: S3Response = {
    request.response(this)
  }

  // response headers with lower-cased keys, filled in as Jetty parses the response
  var responseHeaders = new scala.collection.mutable.HashMap[String, String]

  override def onResponseHeader(key: Buffer, value: Buffer) = {
    super.onResponseHeader(key, value)
    responseHeaders += key.toString.toLowerCase -> value.toString
  }

  // completed from Jetty's callback thread; awaiting enforces config.timeout
  val future = new DefaultCompletableFutureResult(client.config.timeout)

  def status = getResponseStatus

  /**
   * Blocks until the exchange completes: Left(TimeoutException) if the wait
   * times out, Left(cause) if the exchange failed, Right(this) on success.
   * The queue slot is always released via the finally block.
   */
  def get: Either[Throwable, S3Exchange] = {
    try {
      future.await
    }
    catch {
      case e: FutureTimeoutException => return Left(new TimeoutException)
    } finally {
      markAsFinished
    }
    if (future.exception.isDefined) {
      future.exception.get match {case (blame, exception) => return Left(exception)}
    }
    Right(future.result.get.asInstanceOf[S3Exchange])
  }

  // frees this exchange's slot on the client's bounded queue
  def markAsFinished = {
    activeRequests.remove(this)
  }

  override def onResponseContent(content: Buffer) {
    super.onResponseContent(content)
  }

  // success path: complete the future, release the slot, verify, then notify the caller
  override def onResponseComplete {
    future.completeWithResult(this)
    markAsFinished
    response.verify
    request.callback(Some(response))
  }

  // failure path: callers awaiting `get` observe Left(ex); callback receives None
  override def onException(ex: Throwable) {
    future.completeWithException(this, ex)
    markAsFinished
    request.callback(None)
  }

  override def onConnectionFailed(ex: Throwable) { onException(ex) }
}
| AboutUs/s3q | src/main/scala/S3Client.scala | Scala | bsd-3-clause | 4,965 |
package com.twitter.scalding.reducer_estimation
import org.apache.hadoop.mapred.JobConf
/** Configuration keys (and defaults) used by Scalding's reducer estimation. */
object ReducerEstimatorConfig {

  /** Output param: what the Reducer Estimator recommended, regardless of if it was used. */
  val estimatedNumReducers = "scalding.reducer.estimator.result"

  /**
   * Output param: same as estimatedNumReducers but with the cap specified by maxEstimatedReducersKey
   * applied. Can be used to determine whether a cap was applied to the estimated number of reducers
   * and potentially to trigger alerting / logging.
   */
  val cappedEstimatedNumReducersKey = "scalding.reducer.estimator.result.capped"

  /** Output param: what the original job config was. */
  val originalNumReducers = "scalding.reducer.estimator.original.mapred.reduce.tasks"

  /**
   * If we estimate more than this number of reducers,
   * we will use this number instead of the estimated value
   */
  val maxEstimatedReducersKey = "scalding.reducer.estimator.max.estimated.reducers"

  /* fairly arbitrary choice here -- you will probably want to configure this in your cluster defaults */
  val defaultMaxEstimatedReducers = 5000

  /** Maximum number of history items to use for reducer estimation. */
  val maxHistoryKey = "scalding.reducer.estimator.max.history"

  // falls back to a single history item when the key is unset
  def getMaxHistory(conf: JobConf): Int = conf.getInt(maxHistoryKey, 1)
}
| tdyas/scalding | scalding-core/src/main/scala/com/twitter/scalding/reducer_estimation/ReducerEstimatorConfig.scala | Scala | apache-2.0 | 1,345 |
package io.ssc.angles.pipeline.explorers
import org.apache.commons.math3.linear.{OpenMapRealVector, RealVector}
import org.slf4j.LoggerFactory
/**
* Created by niklas on 07.05.15.
*/
object EvaluateClustering extends App {

  val logger = LoggerFactory.getLogger(EvaluateClustering.getClass)

  // args: <pairsFile> <clusterFile>
  val pairsFile = args(0)
  val clusterFile = args(1)

  val clusters = loadClusters(pairsFile, clusterFile)

  // intra-cluster distance: average over all pairwise member distances
  val intraClusterDistance = (x: Iterable[RealVector]) => {
    val members = x.toList
    average((0 until members.size).flatMap { i =>
      (i + 1 until members.size).map { j => members(i) getDistance members(j) }
    })
  }

  // inter-cluster distance: distance between the two cluster centroids
  val interClusterDistance =
    (x: Iterable[RealVector], y: Iterable[RealVector]) =>
      centroid(x) getDistance centroid(y)

  val dunn = dunnIndex(clusters, intraClusterDistance, interClusterDistance)
  logger.info("Dunn index: {} (higher is better)", dunn)

  val daviesBouldin = daviesBouldinIndex(clusters)
  logger.info("Davies-Bouldin index: {} (lower is better)", daviesBouldin)

  /**
   * Dunn index: (minimum inter-cluster distance) / (maximum intra-cluster distance).
   */
  def dunnIndex(clusters: ClusterSet[RealVector],
                intraDistanceMeasure: (Iterable[RealVector] => Double),
                interDistanceMeasure: (Iterable[RealVector], Iterable[RealVector]) => Double): Double = {
    logger.info("Calculating inter-cluster distance")
    val interDistance = {
      (0 until clusters.getNumClusters).flatMap { i =>
        ((i + 1) until clusters.getNumClusters).map { j =>
          interDistanceMeasure(clusters.getCluster(i), clusters.getCluster(j))
        }
      }
    }.min
    logger.info("Calculating intra-cluster distance")
    val intraDistance = {
      (0 until clusters.getNumClusters).map { i =>
        intraDistanceMeasure(clusters.getCluster(i))
      }
    }.max
    interDistance / intraDistance
  }

  /**
   * Davies-Bouldin index: mean over clusters i of max_{j != i} (s_i + s_j) / d(c_i, c_j),
   * where s_k is the average member distance to centroid c_k.
   */
  def daviesBouldinIndex(clusterSet: ClusterSet[RealVector]) = {
    case class ClusterInfo(centroid: RealVector, avgDist: Double)
    val clusters = clusterSet.getClusters
    val centroids = clusters.map(c => centroid(c))
    val avgDistToCentroid = (x: (RealVector, Iterable[RealVector])) => average(x._2.map(node => node getDistance x._1))
    val distances = centroids.zip(clusters).map(avgDistToCentroid)
    val infos = centroids.zip(distances).map { case (c, d) => ClusterInfo(c, d) }
    // FIX: the ratio is (s_i + s_j) / d(c_i, c_j); the original summed p._1.avgDist twice.
    val perCluster = infos.map(c => Set(c)).map(c => c cross infos).map(pairs =>
      pairs.filter(p => p._1 != p._2)
        .map(p => (p._1.avgDist + p._2.avgDist) / (p._1.centroid getDistance p._2.centroid))
        .max)
    perCluster.sum / perCluster.size
  }

  /**
   * Reads the clustering from CSV and resolves each explorer id to its vector;
   * explorers without a vector are skipped (previously threaded through as nulls).
   */
  def loadClusters(pairsFile: String, clusterFile: String): ClusterSet[RealVector] = {
    val explorerSpace = loadExplorerSpace(pairsFile)
    val clusters = ClusterReadWriter.readClusterFile(clusterFile)
    logger.info("Got {} clusters from CSV", clusters.getNumClusters)
    val set = new ClusterSet[RealVector]
    for (i <- 0 until clusters.getNumClusters) {
      val vectors = clusters.getCluster(i).flatMap(e => explorerSpace.get(e))
      if (vectors.nonEmpty) {
        set.newCluster()
        vectors.foreach(v => set.addExplorerToCurrentCluster(v))
      }
    }
    set
  }

  /** Builds the explorer → vector space from the CSV of explorer/URI pairs. */
  def loadExplorerSpace(pairsFile: String) = {
    val workingList: List[ExplorerUriPair] = CSVReader.readExplorerPairsFromCSV(pairsFile)
    logger.info("Got {} pairs from CSV", workingList.size)
    val explorerSpace = new GraphGenerator().buildExplorerSpace(workingList, BuildExplorerGraph.uriToSecondLevelDomain)
    logger.info("Got {} explorers", explorerSpace.size)
    explorerSpace
  }

  /** Component-wise mean of the cluster's vectors. */
  def centroid(cluster: Iterable[RealVector]): RealVector = {
    val dimensions = cluster.iterator.next().getDimension
    val zero: RealVector = new OpenMapRealVector(dimensions)
    val sum = cluster.foldLeft(zero)((acc, v) => acc add v)
    sum.mapDivide(cluster.size)
  }

  implicit class Crossable[X](xs: Traversable[X]) {
    /** Cartesian product of two traversables. */
    def cross[Y](ys: Traversable[Y]) = xs.flatMap { x => ys.map { y => (x, y) } }
  }

  /** Arithmetic mean; 0.0 (with a warning) for an empty collection. */
  def average[T](ts: Traversable[T])(implicit num: Numeric[T]) = {
    if (ts.nonEmpty) {
      num.toDouble(ts.sum) / ts.size
    } else {
      logger.warn("Tried to calculate average on empty sequence.")
      0.0
    }
  }
}
| jhendess/angles | src/main/scala/io/ssc/angles/pipeline/explorers/EvaluateClustering.scala | Scala | gpl-3.0 | 4,518 |
package com.ox.bigdata.util.sftp
import java.io._
import com.ox.bigdata.util.ftp.FtpManager
import com.ox.bigdata.util.log.LogSupport
import com.jcraft.jsch.ChannelSftp
class SFTPManager(server: String, port: String, user: String, password: String) extends
FtpManager(server: String, port: String, user: String, password: String) with LogSupport {
/** Factory for the wrapper that owns the underlying SSH session/channel. */
protected def getChannel(): SFTPChannel = {
  new SFTPChannel()
}
/**
 * Opens a ChannelSftp on `channel` using this manager's host/user/password/port,
 * with a 60-second connect timeout.
 */
protected def getChannelSFTP(channel: SFTPChannel): ChannelSftp = {
  val sftpDetails: java.util.Map[String, String] = new java.util.HashMap[String, String]()
  sftpDetails.put(SFTPConstants.SFTP_REQ_HOST, server)
  sftpDetails.put(SFTPConstants.SFTP_REQ_USERNAME, user)
  sftpDetails.put(SFTPConstants.SFTP_REQ_PASSWORD, password)
  sftpDetails.put(SFTPConstants.SFTP_REQ_PORT, port)
  channel.getChannel(sftpDetails, 60000)
}
/**
 * Opens an SFTP channel, runs `op` with it, and always quits/closes the channel.
 * Exceptions from `op` are logged and swallowed (best-effort semantics preserved).
 */
protected def usingSFTP(op: ChannelSftp => Unit): Unit = {
  val channel: SFTPChannel = getChannel()
  val channelSftp: ChannelSftp = getChannelSFTP(channel)
  try {
    op(channelSftp)
  } catch {
    case e: Exception =>
      // FIX: printStackTrace() returns Unit, so the original concatenation logged "...()"
      // while the trace went to stderr; log the message, then print the trace separately.
      LOG.error("SFTP actions failed ! :" + e.getMessage)
      e.printStackTrace()
  } finally {
    channelSftp.quit()
    channel.closeChannel()
  }
}
// test failed
/**
 * Lists names of plain files (non-directories) directly under `parentPath`.
 * An `ls` line starting with 'd' denotes a directory; the file name is the
 * last whitespace-separated token of the listing line.
 */
override def listFiles(parentPath: String): Array[String] = {
  var names: Array[String] = Array.empty
  usingSFTP { sftp =>
    names = sftp.ls(parentPath).toArray
      .map(_.toString)
      .filterNot(_.startsWith("d"))
      .map(_.split(" ").last)
  }
  names
}
/**
 * Lists names of subdirectories directly under `parentPath`,
 * excluding the "." and ".." entries.
 */
override def listDirectories(parentPath: String): Array[String] = {
  var names: Array[String] = Array.empty
  usingSFTP { sftp =>
    val dirNames = sftp.ls(parentPath).toArray
      .filter(_.toString.startsWith("d"))
      .map(_.toString.split(" ").last)
    names = dirNames.filterNot(n => n == "." || n == "..")
  }
  names
}
//upload file ok
/** Uploads local file `local` to `remote` (creating remote dirs); true on success. */
override def upload(local: String, remote: String): Boolean = {
  var succeeded = false
  usingSFTP { sftp =>
    MakeRemoteDirectory(sftp, remote)
    sftp.put(local, remote, ChannelSftp.OVERWRITE)
    succeeded = true
  }
  succeeded
}
/** Uploads the stream's contents to `remote`, creating remote dirs first. */
override def upload(localStream: InputStream, remote: String): Unit =
  usingSFTP { sftp =>
    MakeRemoteDirectory(sftp, remote)
    sftp.put(localStream, remote, ChannelSftp.OVERWRITE)
  }
//download file ok
/**
 * Downloads remote file `src` into local directory `dst`, creating parent dirs
 * as needed. FIX: the FileOutputStream is now closed in a finally block, so a
 * failed transfer no longer leaks the stream/file handle.
 */
override def download(src: String, dst: String, timeout: Int = FtpManager.FTP_DATA_TIMEOUT_DEFAULT): Unit = {
  usingSFTP {
    channelSFTP =>
      if (fileIsExists(channelSFTP, src)) {
        // split on '/' when present, otherwise on '\' (Windows-style paths)
        var index = src.lastIndexOf('/')
        if (index == -1) index = src.lastIndexOf('\\')
        val fileName = src.substring(index + 1, src.length)
        val localFile = new File(dst + "/" + fileName)
        if (!localFile.getParentFile.exists)
          localFile.getParentFile.mkdirs
        val outputStream = new FileOutputStream(localFile)
        try {
          channelSFTP.get(src, outputStream)
        } finally {
          outputStream.close()
        }
      }
  }
}
/**
 * Downloads each existing remote file in `src` into local directory `dst`.
 * FIX: the FileOutputStream is closed in a finally block, so a failed transfer
 * no longer leaks the stream/file handle.
 */
override def downloadFiles(src: List[String], dst: String, timeout: Int = FtpManager.FTP_DATA_TIMEOUT_DEFAULT): Unit = {
  usingSFTP {
    channelSFTP =>
      for (elem <- src) {
        if (fileIsExists(channelSFTP, elem)) {
          // split on '/' when present, otherwise on '\' (Windows-style paths)
          var index = elem.lastIndexOf('/')
          if (index == -1) index = elem.lastIndexOf('\\')
          val fileName = elem.substring(index + 1, elem.length)
          val localFile = new File(dst + "/" + fileName)
          if (!localFile.getParentFile.exists)
            localFile.getParentFile.mkdirs
          val os = new FileOutputStream(localFile)
          try {
            channelSFTP.get(elem, os)
          } finally {
            os.close()
          }
        }
      }
  }
}
/**
 * Recursively deletes `remote`; returns 0 on success, -1 on failure.
 * A single leading "." is stripped so relative-looking paths become absolute.
 */
override def deleteDirectory(remote: String): Int = {
  var status = -1
  usingSFTP { sftp =>
    val path = if (remote.startsWith(".")) remote.drop(1) else remote
    status = deleteDirectory(sftp, path)
  }
  status
}
//delete directory test ok
// Recursively deletes `remoteWithoutPoint` on the server: removes the plain
// files in the directory, recurses into each sub-directory, then removes the
// (now empty) directory itself. Returns 0 on success, -1 on any failure
// (all exceptions are swallowed).
//
// NOTE(review): entries are parsed by splitting the raw `ls` line on single
// spaces and taking the last token — this breaks for names containing
// spaces; confirm the server's listing format.
// NOTE(review): the empty-directory branch removes "/$remoteWithoutPoint"
// (absolute) while the non-empty branch removes the relative path — this
// inconsistency looks accidental; confirm against the channel's working dir.
private def deleteDirectory(ChannelSFTP: ChannelSftp, remoteWithoutPoint: String): Int = {
  try {
    // ls lines starting with "d" are directories; the rest are plain files.
    val result = ChannelSFTP.ls(remoteWithoutPoint).toArray
    val (dirlist, filelist) = result.partition(x => x.toString.startsWith("d"))
    val folderstemp = dirlist.toList.map(x => x.toString.split(" ").last)
    // Drop the "." / ".." pseudo-entries so recursion terminates.
    val folders = folderstemp.filterNot(x => x == "." || x == "..")
    val filenames = filelist.toList.map(x => x.toString.split(" ").last)
    if (filenames.nonEmpty)
      filenames.foreach(f => ChannelSFTP.rm(s"/$remoteWithoutPoint/$f"))
    if (folders.isEmpty)
      ChannelSFTP.rmdir(s"/$remoteWithoutPoint")
    else {
      folders.foreach(f => {
        val filePath = s"$remoteWithoutPoint/$f"
        deleteDirectory(ChannelSFTP, filePath)
      })
      ChannelSFTP.rmdir(remoteWithoutPoint)
    }
    0
  }
  catch {
    case _: Exception =>
      // LOG.debug("delete Directory exception!")
      -1
  }
}
/** Removes a single (empty) remote folder.
  *
  * @return true when `rmdir` completed without the session aborting
  */
def deletefolder(folder: String): Boolean = {
  var removed = false
  usingSFTP { channel =>
    channel.rmdir(folder)
    removed = true
  }
  removed
}
/** Deletes the remote file `pathname` when it exists; a missing file is a
  * no-op.
  */
override def delete(pathname: String): Unit =
  usingSFTP { channel =>
    if (fileIsExists(channel, pathname)) channel.rm(pathname)
  }
/** Recursively downloads every file under `srcDir` whose name ends in `ext`,
  * mirroring the directory layout under `baseDstDir`.
  */
override def downloadByExt(srcDir: String, baseDstDir: String, ext: String): Unit =
  usingSFTP { channel =>
    downloadByExt(channel, srcDir, baseDstDir, ext)
  }
/** Recursive worker for [[downloadByExt]]: downloads matching files in
  * `srcDir`, then descends into every sub-directory. Any failure silently
  * abandons the current subtree (best-effort semantics).
  */
private def downloadByExt(channel: ChannelSftp, srcDir: String, baseDstDir: String, ext: String): Unit =
  try {
    val entries = channel.ls(srcDir).toArray
    // "d"-prefixed ls lines are directories; the rest are plain files.
    val (dirEntries, fileEntries) = entries.partition(entry => entry.toString.startsWith("d"))
    val subDirs = dirEntries.toList
      .map(entry => entry.toString.split(" ").last)
      .filterNot(name => name == "." || name == "..")
    val fileNames = fileEntries.toList.map(entry => entry.toString.split(" ").last)
    fileNames.filter(_.endsWith(ext)).foreach { name =>
      channel.get(s"$srcDir/$name", s"$baseDstDir/$name")
    }
    subDirs.foreach(dir => downloadByExt(channel, s"$srcDir/$dir", s"$baseDstDir/$dir", ext))
  } catch {
    case _: Exception =>
    // Deliberately swallowed: the original contract is best-effort.
  }
/**
 * Ensures the first directory level of `remote` exists on the server.
 *
 * The path is normalised by stripping leading "." characters and a single
 * leading "/"; when the path has more than two components and the
 * second-level directory is missing under the first, it is created.
 *
 * @return the verified remote path, or "" when any SFTP call fails
 *
 * NOTE(review): only one directory level is ever created — deeper paths are
 * not built recursively; confirm whether that is intentional.
 * (The scaladoc previously here described downloadByExt; it was misplaced.)
 */
private def MakeRemoteDirectory(ChannelSFTP: ChannelSftp, remote: String): String = {
  try {
    // Strip leading '.' characters (recursively), then one leading '/'.
    def remotepathVerified(path: String): String = path.take(1) match {
      case "." => remotepathVerified(path.drop(1))
      case "/" => path.drop(1)
      case _ => path
    }
    var checkedRemotePath = remotepathVerified(remote)
    val directories = checkedRemotePath.split('/')
    val folder = directories.head.toString
    // Directory entries are the "d"-prefixed ls lines.
    val result = ChannelSFTP.ls(s"/$folder").toArray
    val direction = result.filter(x => x.toString.startsWith("d"))
    if (directories.length > 2)
      if (!direction.toList.map(x => x.toString.split(" ").last).contains(directories(1).toString)) {
        ChannelSFTP.cd(folder)
        ChannelSFTP.mkdir(directories(1))
        checkedRemotePath = directories(1)
      }
    checkedRemotePath
  }
  catch {
    case _: Exception =>
      // LOG.debug("MakeRemoteDirectory exception!")
      ""
  }
}
/** Checks whether the remote plain file `file` exists.
  *
  * The parent directory is listed and non-directory entries are compared by
  * bare name. Any exception (including a path without a separator, which
  * makes the substring call fail) yields false.
  */
private def fileIsExists(channelSFTP: ChannelSftp, file: String): Boolean =
  try {
    val slash = file.lastIndexOf('/')
    val sep = if (slash == -1) file.lastIndexOf('\\') else slash
    val parentPath = file.substring(0, sep)
    val filename = file.substring(sep + 1, file.length)
    val listing = channelSFTP.ls(parentPath).toArray
    // Non-"d" ls lines are plain files; compare on the last token of the line.
    val plainFiles = listing.filterNot(entry => entry.toString.startsWith("d"))
    val names = plainFiles.toList.map(entry => entry.toString.split(" ").last)
    names.contains(filename)
  } catch {
    case _: Exception =>
      false
  }
}
object SFTPManager {
  /** Builds a manager from connection settings; note the constructor expects
    * (ip, port, user, password) order. */
  def apply(ftpInfo: SFtpServerInfo): SFTPManager =
    new SFTPManager(ftpInfo.ip, ftpInfo.port, ftpInfo.user, ftpInfo.password)
}
/** Connection settings for an SFTP server.
  *
  * @param user     login user name
  * @param password login password
  * @param ip       server host name or IP address
  * @param port     server port (kept as a String by this API)
  */
case class SFtpServerInfo(user: String,
                          password: String,
                          ip: String,
                          port: String)
| black-ox/simple | src/main/scala/com/ox/bigdata/util/sftp/SFTPManager.scala | Scala | apache-2.0 | 8,980 |
package spire
package benchmark
/*
import scala.util.Random
import Random._
import spire.implicits._
object PowBenchmarks extends MyRunner(classOf[PowBenchmarks])
class PowBenchmarks extends MyBenchmark {
var longs: Array[Long] = null
var ints: Array[Int] = null
override def setUp(): Unit = {
ints = init(200000)(nextInt)
longs = init(200000)(nextLong)
}
def timeLongPowForInt(reps:Int) = run(reps) {
var t = 0
ints.foreach { n =>
t += spire.math.pow(n.toLong, 2.toLong).toInt
}
t
}
def timeDoublePowForInt(reps:Int) = run(reps) {
var t = 0
ints.foreach { n =>
t += spire.math.pow(n.toDouble, 2.0).toInt
}
t
}
def timeBigIntPowForInt(reps:Int) = run(reps) {
var t = 0
ints.foreach { n =>
t += (BigInt(n) pow 2).toInt
}
t
}
def timeLongPowForLong(reps:Int) = run(reps) {
var t = 0L
longs.foreach { n =>
t += spire.math.pow(n, 2L)
}
t
}
def timeDoublePowForLong(reps:Int) = run(reps) {
var t = 0L
longs.foreach { n =>
t += spire.math.pow(n.toDouble, 2.0).toLong
}
t
}
def timeBigIntPowForLong(reps:Int) = run(reps) {
var t = 0L
longs.foreach { n =>
t += (BigInt(n) pow 2).toLong
}
t
}
def timeDoublePowForDouble(reps:Int) = run(reps) {
var t = 0.0
longs.foreach { n =>
t += spire.math.pow(n, 2.0)
}
t
}
}
*/ | non/spire | benchmark/src/main/scala/spire/benchmark/PowBenchmark.scala | Scala | mit | 1,417 |
/*
* Copyright 2021 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.ppml
import com.intel.analytics.bigdl.dllib.nn.Sequential
import com.intel.analytics.bigdl.dllib.nn.abstractnn.Activity
import com.intel.analytics.bigdl.dllib.optim.LocalPredictor
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.ppml.base.Estimator
import com.intel.analytics.bigdl.ppml.utils.DataFrameUtils
import org.apache.spark.sql.DataFrame
/** Base class for federated-learning models: a BigDL [[Sequential]] network
  * together with the [[Estimator]] that drives training, evaluation and
  * prediction on it.
  */
abstract class FLModel() {
  val model: Sequential[Float]
  val estimator: Estimator

  /** Trains the model.
    *
    * @param trainData     DataFrame of training data
    * @param epoch         number of training epochs
    * @param batchSize     training batch size
    * @param featureColumn names of the feature columns
    * @param labelColumn   names of the label columns
    * @param valData       DataFrame of validation data (may be null)
    * @param hasLabel      whether the dataset carries labels; parties in VFL
    *                      setups may hold unlabeled data
    */
  def fit(trainData: DataFrame,
          epoch: Int = 1,
          batchSize: Int = 4,
          featureColumn: Array[String] = null,
          labelColumn: Array[String] = null,
          valData: DataFrame = null,
          hasLabel: Boolean = true) = {
    val trainBatches = DataFrameUtils.dataFrameToMiniBatch(
      trainData, featureColumn, labelColumn, hasLabel = hasLabel, batchSize = batchSize)
    val valBatches = DataFrameUtils.dataFrameToMiniBatch(
      valData, featureColumn, labelColumn, hasLabel = hasLabel, batchSize = batchSize)
    estimator.train(epoch, trainBatches.toLocal(), valBatches.toLocal())
  }

  /** Evaluates the model on `data`; when `data` is null the estimator's
    * stored evaluation results are printed instead of running a new pass.
    *
    * @param data          DataFrame of evaluation data (null prints cached results)
    * @param batchSize     evaluation batch size
    * @param featureColumn names of the feature columns
    * @param labelColumn   names of the label columns
    * @param hasLabel      whether the dataset carries labels
    */
  def evaluate(data: DataFrame = null,
               batchSize: Int = 4,
               featureColumn: Array[String] = null,
               labelColumn: Array[String] = null,
               hasLabel: Boolean = true) = {
    if (data == null) {
      estimator.getEvaluateResults().foreach { result =>
        println(result._1 + ":" + result._2.mkString(","))
      }
    } else {
      val evalBatches = DataFrameUtils.dataFrameToMiniBatch(
        data, featureColumn, labelColumn, hasLabel = hasLabel, batchSize = batchSize)
      estimator.evaluate(evalBatches.toLocal())
    }
  }

  /** Runs prediction over `data` and returns the estimator's predictions.
    *
    * @param data          DataFrame of prediction data
    * @param batchSize     prediction batch size
    * @param featureColumn names of the feature columns
    */
  def predict(data: DataFrame,
              batchSize: Int = 4,
              featureColumn: Array[String] = null): Array[Activity] = {
    val predictBatches = DataFrameUtils.dataFrameToMiniBatch(
      data, featureColumn, hasLabel = false, batchSize = batchSize)
    estimator.predict(predictBatches.toLocal())
  }
}
| intel-analytics/BigDL | scala/ppml/src/main/scala/com/intel/analytics/bigdl/ppml/FLModel.scala | Scala | apache-2.0 | 3,756 |
package io.buoyant.router
import com.twitter.finagle.{Mux => FinagleMux, _}
import com.twitter.finagle.buoyant._
import com.twitter.finagle.client.StackClient
import com.twitter.finagle.mux.{Request, Response}
import com.twitter.finagle.param.ProtocolLibrary
import com.twitter.finagle.server.StackServer
import com.twitter.util._
import io.buoyant.router.RoutingFactory.{RequestIdentification, IdentifiedRequest}
import java.net.SocketAddress
/** Mux protocol support for the Buoyant router: wires Finagle Mux into the
  * generic StackRouter path/bound stacks and exposes a matching server.
  */
object Mux extends Router[Request, Response] with Server[Request, Response] {
  object Router {
    // Stack applied at the logical (path) level.
    val pathStack: Stack[ServiceFactory[Request, Response]] =
      StackRouter.newPathStack[Request, Response]
    // Stack applied once a concrete destination is bound; the default
    // residual module is swapped for the Mux-specific encoder.
    val boundStack: Stack[ServiceFactory[Request, Response]] =
      StackRouter.newBoundStack[Request, Response]
        .replace(MuxEncodeResidual.role, MuxEncodeResidual)
    // Finagle Mux client with the router's client-stack transformation applied.
    val client: StackClient[Request, Response] =
      FinagleMux.client
        .transformed(StackRouter.Client.mkStack(_))
    val defaultParams: Stack.Params =
      StackRouter.defaultParams +
        ProtocolLibrary("mux")
    /** Identifies a request by its Mux destination path, prefixed with
      * `prefix`, against the base and local Dtabs.
      */
    class Identifier(
      prefix: Path = Path.empty,
      dtab: () => Dtab = () => Dtab.base
    ) extends RoutingFactory.Identifier[Request] {
      def apply(req: Request): Future[RequestIdentification[Request]] = {
        val dst = Dst.Path(prefix ++ req.destination, dtab(), Dtab.local)
        Future.value(new IdentifiedRequest(dst, req))
      }
    }
  }
  /** Stack-based Mux router; see [[Router.Identifier]] for how requests are
    * mapped to destinations.
    */
  case class Router(
    pathStack: Stack[ServiceFactory[Request, Response]] = Router.pathStack,
    boundStack: Stack[ServiceFactory[Request, Response]] = Router.boundStack,
    client: StackClient[Request, Response] = Router.client,
    params: Stack.Params = Router.defaultParams
  ) extends StdStackRouter[Request, Response, Router] {
    protected def copy1(
      pathStack: Stack[ServiceFactory[Request, Response]] = this.pathStack,
      boundStack: Stack[ServiceFactory[Request, Response]] = this.boundStack,
      client: StackClient[Request, Response] = this.client,
      params: Stack.Params = this.params
    ): Router = copy(pathStack, boundStack, client, params)
    // Builds the identifier from the configured destination prefix and base Dtab.
    protected def newIdentifier(): RoutingFactory.Identifier[Request] = {
      val RoutingFactory.DstPrefix(pfx) = params[RoutingFactory.DstPrefix]
      val RoutingFactory.BaseDtab(baseDtab) = params[RoutingFactory.BaseDtab]
      new Router.Identifier(pfx, baseDtab)
    }
  }
  val router = Router()
  def factory(): ServiceFactory[Request, Response] =
    router.factory()
  object Server {
    // The stock Finagle Mux server stack, tagged with the protocol name.
    val stack: Stack[ServiceFactory[Request, Response]] =
      FinagleMux.server.stack
    val defaultParams: Stack.Params =
      StackServer.defaultParams +
        ProtocolLibrary("mux")
  }
  val server = FinagleMux.Server(Server.stack, Server.defaultParams)
  def serve(
    addr: SocketAddress,
    factory: ServiceFactory[Request, Response]
  ): ListeningServer = server.serve(addr, factory)
}
| denverwilliams/linkerd | router/mux/src/main/scala/io/buoyant/router/Mux.scala | Scala | apache-2.0 | 2,914 |
package src.main.scala.geodecoding
import scala.util.{Failure, Success, Try}
import scala.util.control.NonFatal
import net.liftweb.json._
import src.main.scala.logging.Logging._
import src.main.scala.types.PostalCode
import src.main.scala.cache.KeyValueCache
/**
* object: GeoDecodingProviderNominatim
*
* Implements a GeoDecoding using Nominatim OpenStreetMap API
* ( http://wiki.openstreetmap.org/wiki/Nominatim )
*
* TODO: Local caching to avoid querying the remote Nominatim OpenStreetMap
*/
/** GeoDecoding backed by the Nominatim OpenStreetMap reverse-geocoding API
  * (http://wiki.openstreetmap.org/wiki/Nominatim). Results are cached per
  * (latitude, longitude) pair.
  */
object GeoDecodingProviderNominatim extends GeoDecodingProvider {

  // Reverse-geocoding endpoint; answer requested as JSON with English labels.
  override protected [this] val urlGeoDecodeFmt =
    "http://nominatim.openstreetmap.org/reverse?lat=%f&lon=%f&format=json&accept-language=en&addressdetails=1"

  override protected [this] val cacheGeoDecode =
    new KeyValueCache[(Double, Double), PostalCode]("GeoDecoderNominatim")

  /** Extracts the postal code and country code from a Nominatim JSON answer.
    *
    * The relevant part of an answer (Oct 2015) looks like:
    * {"address":{"postcode":"10001","country_code":"us", ...}, ...}
    */
  override protected [this] def parsePostalCodeInAnswer(answerJson: String): Try[PostalCode] =
    try {
      implicit val formats = net.liftweb.json.DefaultFormats
      val root = parse(answerJson)
      val addressNode = root \\ "address"
      val postcode = (addressNode \\ "postcode").extract[String]
      val country = (addressNode \\ "country_code").extract[String]
      Success(PostalCode(country, postcode))
    } catch {
      case NonFatal(e) =>
        logMsg(ERROR, "Error occurred while parsing GeoDecoded JSON: %s".
          format(e.getMessage))
        Failure(e)
    }
}
| je-nunez/urban_planning_on_gtfs_traffic_congestion | src/main/scala/geodecoding/GeoDecodingProviderNominatim.scala | Scala | gpl-2.0 | 2,446 |
package coursier.util
import coursier.core._
import coursier.graph.{Conflict, DependencyTree, ReverseModuleTree}
import dataclass.data
/** Textual rendering helpers for resolutions: flat dependency listings,
  * (reverse) dependency trees and version-conflict reports.
  */
object Print {
  object Colors {
    private val `with`: Colors = Colors(Console.RED, Console.YELLOW, Console.RESET)
    private val `without`: Colors = Colors("", "", "")
    /** ANSI color set when `colors` is true, empty strings otherwise. */
    def get(colors: Boolean): Colors = if (colors) `with` else `without`
  }
  @data class Colors private (red: String, yellow: String, reset: String)
  /** Renders one dependency as org:name:version:config, without exclusions. */
  def dependency(dep: Dependency): String =
    dependency(dep, printExclusions = false)
  /** Renders one dependency, optionally listing its exclusions one per line. */
  def dependency(dep: Dependency, printExclusions: Boolean): String = {
    def exclusionsStr = dep
      .exclusions
      .toVector
      .sorted
      .map {
        case (org, name) =>
          s"\\n exclude($org, $name)"
      }
      .mkString
    s"${dep.module}:${dep.version}:${dep.configuration.value}" +
      (if (printExclusions) exclusionsStr else "")
  }
  def dependenciesUnknownConfigs(
    deps: Seq[Dependency],
    projects: Map[(Module, String), Project]
  ): String =
    dependenciesUnknownConfigs(deps, projects, printExclusions = false)
  /** Renders a list of dependencies, one per line.
    *
    * @param useFinalVersions replace each version with the resolved project's
    *                         version when the project is known
    * @param reorder          merge duplicates (joining their configurations)
    *                         and sort by module then version
    */
  def dependenciesUnknownConfigs(
    deps: Seq[Dependency],
    projects: Map[(Module, String), Project],
    printExclusions: Boolean,
    useFinalVersions: Boolean = true,
    reorder: Boolean = false
  ): String = {
    val deps0 =
      if (useFinalVersions)
        deps.map { dep =>
          dep.withVersion(
            projects
              .get(dep.moduleVersion)
              .fold(dep.version)(_.version)
          )
        }
      else
        deps
    val deps1 =
      if (reorder)
        deps0
          // Group ignoring configuration/attributes, then re-join the
          // configurations of each group into a single dependency.
          .groupBy(_.withConfiguration(Configuration.empty).withAttributes(Attributes.empty))
          .toVector
          .map { case (k, l) =>
            val conf = Configuration.join(l.toVector.map(_.configuration).sorted.distinct: _*)
            k.withConfiguration(conf)
          }
          .sortBy { dep =>
            (dep.module.organization, dep.module.name, dep.module.toString, dep.version)
          }
      else
        deps0
    val l = deps1.map(dependency(_, printExclusions))
    val l0 = if (reorder) l.distinct else l
    l0.mkString(System.lineSeparator())
  }
  /** Heuristic compatibility check between a wanted version (or constraint)
    * and the selected one: plain versions compare on their first two dotted
    * segments, intervals on containment.
    */
  def compatibleVersions(compatibleWith: String, selected: String): Boolean = {
    // too loose for now
    // e.g. RCs and milestones should not be considered compatible with subsequent non-RC or
    // milestone versions - possibly not with each other either
    val c = Parse.versionConstraint(compatibleWith)
    if (c.interval == VersionInterval.zero)
      compatibleWith.split('.').take(2).toSeq == selected.split('.').take(2).toSeq
    else
      c.interval.contains(Version(selected))
  }
  /** Renders the resolution as an (optionally reversed) dependency tree.
    *
    * @param roots  tree roots; null means the resolution's own roots
    *               (min dependencies in reverse mode)
    * @param colors emit ANSI colors for version mismatches/exclusions
    */
  def dependencyTree(
    resolution: Resolution,
    roots: Seq[Dependency] = null,
    printExclusions: Boolean = false,
    reverse: Boolean = false,
    colors: Boolean = true
  ): String = {
    val colors0 = Colors.get(colors)
    if (reverse) {
      val roots0 = Option(roots).getOrElse(resolution.minDependencies.toSeq)
      val t = ReverseModuleTree.fromDependencyTree(
        roots0.map(_.module).distinct,
        DependencyTree(resolution, withExclusions = printExclusions)
      )
      val tree0 = Tree(
        t.toVector.sortBy(t =>
          (t.module.organization.value, t.module.name.value, t.module.nameWithAttributes)
        )
      )(_.dependees)
      tree0.render { node =>
        // Yellow marks a compatible-looking version bump, red a suspicious one.
        if (node.excludedDependsOn)
          s"${colors0.yellow}(excluded by)${colors0.reset} ${node.module}:${node.reconciledVersion}"
        else if (node.dependsOnVersion == node.dependsOnReconciledVersion)
          s"${node.module}:${node.reconciledVersion}"
        else {
          val assumeCompatibleVersions =
            compatibleVersions(node.dependsOnVersion, node.dependsOnReconciledVersion)
          s"${node.module}:${node.reconciledVersion} " +
            (if (assumeCompatibleVersions) colors0.yellow else colors0.red) +
            s"${node.dependsOnModule}:${node.dependsOnVersion} -> ${node.dependsOnReconciledVersion}" +
            colors0.reset
        }
      }
    }
    else {
      val roots0 = Option(roots).getOrElse(resolution.rootDependencies)
      val t = DependencyTree(resolution, roots0, withExclusions = printExclusions)
      Tree(t.toVector)(_.children)
        .render { t =>
          render(
            t.dependency.module,
            t.dependency.version,
            t.excluded,
            resolution.reconciledVersions.get(t.dependency.module),
            colors0
          )
        }
    }
  }
  // Renders a single tree node, coloring exclusions and version bumps.
  private def render(
    module: Module,
    version: String,
    excluded: Boolean,
    reconciledVersionOpt: Option[String],
    colors: Colors
  ): String =
    if (excluded)
      reconciledVersionOpt match {
        case None =>
          s"${colors.yellow}(excluded)${colors.reset} $module:$version"
        case Some(version0) =>
          val versionMsg =
            if (version0 == version)
              "this version"
            else
              s"version $version0"
          s"$module:$version " +
            s"${colors.red}(excluded, $versionMsg present anyway)${colors.reset}"
      }
    else {
      val versionStr =
        if (reconciledVersionOpt.forall(_ == version))
          version
        else {
          val reconciledVersion = reconciledVersionOpt.getOrElse(version)
          val assumeCompatibleVersions =
            compatibleVersions(version, reconciledVersionOpt.getOrElse(version))
          (if (assumeCompatibleVersions) colors.yellow else colors.red) +
            s"$version -> $reconciledVersion" +
            (if (assumeCompatibleVersions) "" else " (possible incompatibility)") +
            colors.reset
        }
      s"$module:$versionStr"
    }
  // Pads the first column so the second column lines up across all rows.
  private def aligned(l: Seq[(String, String)]): Seq[String] =
    if (l.isEmpty)
      Nil
    else {
      val m = l.iterator.map(_._1.length).max
      l.map {
        case (a, b) =>
          a + " " * (m - a.length + 1) + b
      }
    }
  /** Renders version conflicts grouped by module, one report per module,
    * listing each dependee and the version it wanted.
    */
  def conflicts(conflicts: Seq[Conflict]): Seq[String] = {
    // for deterministic order in the output
    val indices = conflicts
      .map(_.module)
      .zipWithIndex
      .reverse
      .toMap
    conflicts
      .groupBy(_.module)
      .toSeq
      .sortBy {
        case (mod, _) =>
          indices(mod)
      }
      .map {
        case (mod, l) =>
          // All conflicts of one module must agree on the selected version.
          assert(l.map(_.version).distinct.size == 1)
          val messages = l.map { c =>
            val extra =
              if (c.wasExcluded)
                " (and excluded it)"
              else
                ""
            (
              s"${c.dependeeModule}:${c.dependeeVersion}",
              s"wanted version ${c.wantedVersion}" + extra
            )
          }
          s"$mod:${l.head.version} was selected, but" + System.lineSeparator() +
            aligned(messages).map(" " + _ + System.lineSeparator()).mkString
      }
  }
}
| alexarchambault/coursier | modules/core/shared/src/main/scala/coursier/util/Print.scala | Scala | apache-2.0 | 6,966 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package testHelpers
/** Message keys shared by form-validation test helpers; each val holds a
  * message-bundle key, not the rendered text.
  */
trait CommonErrorMessages {
  val errorRequired = "error.required"
  val errorReal = "error.real"
  val errorMissingAmount = "pla.base.errors.errorQuestion"
  val errorNegative = "pla.base.errors.errorNegative"
  val errorDecimal = "pla.base.errors.errorDecimalPlaces"
  val errorMaximum = "pla.base.errors.errorMaximum"
  val errorMissingDay = "pla.base.errors.dayEmpty"
  val errorMissingMonth = "pla.base.errors.monthEmpty"
  val errorMissingYear = "pla.base.errors.yearEmpty"
  val errorDate = "pla.base.errors.invalidDate"
  val errorQuestion = "pla.base.errors.mandatoryError"
  // Withdrawal-date input keys.
  val errorEmptyDay = "pla.withdraw.date-input.form.day-empty"
  val errorEmptyMonth = "pla.withdraw.date-input.form.month-empty"
  val errorEmptyYear = "pla.withdraw.date-input.form.year-empty"
  val errorLowDay = "pla.withdraw.date-input.form.day-too-low"
  val errorLowMonth = "pla.withdraw.date-input.form.month-too-low"
  val errorHighDay = "pla.withdraw.date-input.form.day-too-high"
  val errorHighMonth = "pla.withdraw.date-input.form.month-too-high"
  val errorFutureDate = "pla.withdraw.date-input.form.date-in-future"
}
/** PSO-details-specific message keys on top of [[CommonErrorMessages]]. */
trait PSODetailsMessages extends CommonErrorMessages {
  val errorAmendPsoDetailsMissingAmount = "pla.psoDetails.errorQuestion"
  val errorDateRange = "pla.IP16PsoDetails.errorDateOutOfRange"
}
package tir
import exceptions.ICE
import scala.collection.mutable.Map
import toplev.GenericPrintable
import toplev.GenericType
import toplev.GenericTypeSet
import toplev.GenericUnifier
import tpass.TPass
/* Note that since the typechecking has complete, we don not expect
* any type sets to remain.
*/
/** Root of the typed-IR type hierarchy.
  *
  * Typechecking has already completed when this IR is built, so no type sets
  * are expected to remain; several GenericType operations are therefore left
  * unimplemented (they throw NotImplementedError via `???`).
  */
sealed trait TType extends TTree with GenericType[TType] {
  // These are left unimplemented as they are not
  // required by this phase of the compiler.
  //
  // They should be implemented in future to make this
  // pass more versatile.
  def contains(otherType: TType): Boolean = ???
  def specializesTo(otherType: TType): Boolean = ???
  def substituteFor(subFor: TType, subIn: TType): TType = ???
  def unify(typ: TType): GenericUnifier[TType] = ???
  // Types carry no per-node environment state, so node cloning is type cloning.
  def nodeClone(env: TTypeEnv): TType = typeClone
}
/** Function type `argType -> resType`. */
case class TFunctionType(var argType: TType, var resType: TType)
    extends TType {
  def getTypeVars() =
    argType.getTypeVars union resType.getTypeVars
  def substituteFor(map: Map[TType, TType]): TType =
    new TFunctionType(argType.substituteFor(map), resType.substituteFor(map))
  // Fix: corrected misspelled diagnostic ("atmoic" -> "atomic").
  def atomicClone = throw new ICE("Error: TFunctionType is not atomic")
  def prettyPrint =
    "(%s -> %s)".format(argType.prettyPrint, resType.prettyPrint)
}
/** Tuple type over at least two component types (enforced by the assert). */
case class TTupleType(var subTypes: List[TType])
    extends TType with TFlattenable[TType] {
  assert(subTypes.length > 1)
  def getTypeVars() = {
    val emptySet: GenericTypeSet[TType] = new TTypeSet()
    subTypes.map(_.getTypeVars).foldLeft(emptySet) {
      case (set, nextSet) => set union nextSet
    }
  }
  def substituteFor(map: Map[TType, TType]): TType =
    new TTupleType(subTypes.map(_.substituteFor(map)))
  // Fix: corrected misspelled diagnostic ("atmoic" -> "atomic").
  def atomicClone = throw new ICE("Error: TTupleType is not atomic")
  def prettyPrint = "(" + subTypes.map(_.prettyPrint).mkString(", ") + ")"
  // NOTE(review): the length == 1 branch appears unreachable given the
  // constructor assert above — confirm whether 1-tuples can arise elsewhere.
  def flatten = if (subTypes.length == 1)
    subTypes(0) match {
      case flattenable: TFlattenable[TType] @unchecked => flattenable.flatten
      case other => other
    }
  else
    this
}
/** An equality type variable (SML `''a`-style). */
case class TEqualityTypeVar(var name: String) extends TType {
  def getTypeVars() = {
    val set = new TTypeSet()
    set.insert(this)
    set
  }
  def substituteFor(map: Map[TType, TType]): TType =
    if (map.contains(this)) {
      map(this).atomicClone
    } else {
      // Create a new variable node to avoid confusion.
      new TEqualityTypeVar(name)
    }
  def atomicClone = new TEqualityTypeVar(name)
  def prettyPrint = "''" + name
}
/** An unconstrained type variable (SML `'a`-style). */
case class TUnconstrainedTypeVar(var name: String) extends TType {
  def getTypeVars() = {
    val set = new TTypeSet()
    set.insert(this)
    set
  }
  def substituteFor(map: Map[TType, TType]): TType =
    if (map.contains(this)) {
      map(this).atomicClone
    } else {
      // Create a new variable node to avoid confusion.
      new TUnconstrainedTypeVar(name)
    }
  def atomicClone = new TUnconstrainedTypeVar(name)
  def prettyPrint = "'" + name
}
/** Homogeneous list type over `subType`. */
case class TListType(var subType: TType) extends TType {
  def getTypeVars() =
    subType.getTypeVars
  def substituteFor(map: Map[TType, TType]): TType =
    new TListType(subType.substituteFor(map))
  // Fix: diagnostic previously named TUnconstrainedTypeVar (copy-paste) and
  // misspelled "atomic".
  def atomicClone = throw new ICE("Error: TListType is not atomic")
  def prettyPrint = subType.prettyPrint + " list"
}
/** A named (declared) datatype. */
case class TDataType(var name: String) extends TType {
  def getTypeVars() = new TTypeSet()
  def substituteFor(map: Map[TType, TType]): TType =
    this
  def atomicClone =
    throw new ICE("Error: TDataType is not atomic")
  def prettyPrint =
    "%s".format(name)
}
/** A use of a datatype, referenced by identifier. */
case class TDataTypeInstance(var name: TIdent) extends TType {
  def getTypeVars() = new TTypeSet()
  def substituteFor(map: Map[TType, TType]): TType =
    this
  def atomicClone = new TDataTypeInstance(name)
  def prettyPrint = "datatype %s".format(name)
}
/** The primitive `int` type. */
case class TIntType() extends TType {
  def getTypeVars() = new TTypeSet()
  def substituteFor(map: Map[TType, TType]): TType =
    this
  // Fix: corrected misspelled diagnostic ("atmoic" -> "atomic").
  def atomicClone = throw new ICE("Error: TIntType is not atomic")
  def prettyPrint = "int"
}
/** The primitive `string` type. */
case class TStringType() extends TType {
  def getTypeVars() = new TTypeSet()
  def substituteFor(map: Map[TType, TType]): TType =
    this
  // Fix: diagnostic previously named TIntType (copy-paste) and misspelled
  // "atomic".
  def atomicClone = throw new ICE("Error: TStringType is not atomic")
  def prettyPrint = "string"
}
/** The exception type (`exn`). */
case class TExceptionType() extends TType {
  def getTypeVars() = new TTypeSet()
  def substituteFor(map: Map[TType, TType]): TType =
    this
  def atomicClone = throw new ICE("Error: TExceptionType is not atomic")
  def prettyPrint = "exn"
}
/** The primitive `real` type. */
case class TRealType() extends TType {
  def getTypeVars() = new TTypeSet()
  def substituteFor(map: Map[TType, TType]): TType =
    this
  // Fix: corrected misspelled diagnostic ("atmoic" -> "atomic").
  def atomicClone = throw new ICE("Error: TRealType is not atomic")
  def prettyPrint = "real"
}
/** The primitive `bool` type. */
case class TBoolType() extends TType {
  def getTypeVars() = new TTypeSet()
  def substituteFor(map: Map[TType, TType]): TType =
    this
  // Fix: corrected misspelled diagnostic ("atmoic" -> "atomic").
  def atomicClone = throw new ICE("Error: TBoolType is not atomic")
  def prettyPrint = "bool"
}
/** The primitive `char` type. */
case class TCharType() extends TType {
  def getTypeVars() = new TTypeSet()
  def substituteFor(map: Map[TType, TType]): TType =
    this
  // Fix: corrected misspelled diagnostic ("atmoic" -> "atomic").
  def atomicClone = throw new ICE("Error: TCharType is not atomic")
  def prettyPrint = "char"
}
/** The `unit` type. */
case class TUnitType() extends TType {
  def getTypeVars() = new TTypeSet()
  def substituteFor(map: Map[TType, TType]): TType =
    this
  // Fix: corrected misspelled diagnostic ("atmoic" -> "atomic").
  def atomicClone = throw new ICE("Error: TUnitType is not atomic")
  def prettyPrint = "unit"
}
| j-c-w/mlc | src/main/scala/tir/TType.scala | Scala | gpl-3.0 | 5,513 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.examples.funspec.ignore
import org.scalatest.FunSpec
/** ScalaTest documentation example showing `ignore`: the first test is
  * registered but skipped; the second runs normally.
  */
class SetSpec extends FunSpec {
  describe("A Set") {
    describe("when empty") {
      // `ignore` keeps the test in the report (as ignored) without running it.
      ignore("should have size 0") {
        assert(Set.empty.size === 0)
      }
      it("should produce NoSuchElementException when head is invoked") {
        // An empty set has no head element, so `head` must throw.
        assertThrows[NoSuchElementException] {
          Set.empty.head
        }
      }
    }
  }
}
| dotty-staging/scalatest | examples/src/test/scala/org/scalatest/examples/funspec/ignore/SetSpec.scala | Scala | apache-2.0 | 1,028 |
package org.camunda.worker.akka
import scala.collection.JavaConversions._
import akka.actor._
import org.camunda.worker.akka.worker._
import scala.io.StdIn._
import org.camunda.worker.akka.PollActor.Poll
import scala.concurrent._
import ExecutionContext.Implicits.global
import scala.concurrent.duration._
import org.camunda.worker.akka.worker.SimpleWorker
import org.camunda.worker.akka.worker.UnreliableWorker
/**
* @author Philipp Ossler
*/
// example app as template
/** Example application wiring three workers to a Camunda external-task
  * poller, running until ENTER is pressed.
  *
  * Fix: replaced `extends App` (DelayedInit-based, with initialization-order
  * pitfalls) with an explicit `main`, and dropped the unused `input` val.
  */
object Main {
  def main(args: Array[String]): Unit = {
    println("starting...........")
    println("press ENTER to exit")
    println("===================")
    println("")
    // create actor system
    val system = ActorSystem("MyActorSystem")
    // create workers: one deliberately flaky, two simple ones
    val worker = system.actorOf(UnreliableWorker.props(delay = 200, reliability = 0.75), name = "worker-1")
    val worker2 = system.actorOf(SimpleWorker.props(delay = 100), name = "worker-2")
    val worker3 = system.actorOf(SimpleWorker.props(delay = 100), name = "worker-3")
    // start polling the engine's REST API, one Poll per topic
    val pollActor = system.actorOf(PollActor.props(hostAddress = "http://localhost:8080/engine-rest", maxTasks = 5, waitTime = 100, lockTime = 600), name = "poller")
    pollActor ! Poll(topicName = "reserveOrderItems", worker)
    pollActor ! Poll(topicName = "payment", worker2, variableNames = List("var"))
    pollActor ! Poll(topicName = "shipment", worker3)
    // block until the user presses ENTER, then shut everything down
    readLine()
    println("")
    println("===================")
    println("shutting down......")
    system.shutdown
    system.awaitTermination()
    println("done")
    System.exit(0)
  }
}
package org.jba
import com.github.mustachejava.DefaultMustacheFactory
import com.twitter.mustache.ScalaObjectHandler
import java.io.{StringWriter, InputStreamReader}
import org.apache.commons.lang.StringEscapeUtils
import scala.io.Source
import play.api._
import play.api.templates._
import play.api.Configuration._
import play.api.Play.current
/** Play plugin exposing Mustache template rendering.
  *
  * The [[JavaMustache]] renderer is held in a lazy val and forced in
  * `onStart`, so factory setup happens once at application start.
  */
class MustachePlugin(app: Application) extends Plugin {
  lazy val instance = {
    val i = new JavaMustache
    i
  }
  override def onStart(){
    Logger("mustache").info("start on mode: " + app.mode)
    // Forcing the lazy val initialises the renderer eagerly.
    instance
  }
  /** Rendering API consumed by the [[Mustache]] front-end object. */
  def api: MustacheAPI = instance
  // Enabled unless configuration sets `mustacheplugin = disabled`.
  override lazy val enabled = {
    !app.configuration.getString("mustacheplugin").filter(_ == "disabled").isDefined
  }
}
/** Minimal rendering contract: render the named template against `data`. */
trait MustacheAPI {
  def render(template: String, data: Any): Html
}
/** MustacheAPI backed by mustache.java, loading templates from
  * `public/mustache/<name>.html` on the Play classpath.
  *
  * In prod mode a single shared factory (`mf`) is reused; in dev mode a
  * fresh factory is built per template load so edits are picked up.
  */
class JavaMustache extends MustacheAPI{
  private lazy val fs = java.io.File.separator
  private val rootPath = fs + "public" + fs + "mustache" + fs
  // Shared factory for prod mode.
  val mf = createMustacheFactory
  private def createMustacheFactory = {
    val factory = new DefaultMustacheFactory {
      // Overridden so resources are loaded through Play's classloader.
      override def getReader(resourceName: String): java.io.Reader = {
        Logger("mustache").debug("read in factory: " + rootPath + resourceName + ".html")
        val input = Play.current.resourceAsStream(rootPath + resourceName + ".html").getOrElse(throw new Exception("mustache: could not find template: " + resourceName))
        new InputStreamReader(input)
      }
    }
    factory.setObjectHandler(new ScalaObjectHandler)
    factory
  }
  // Compiles one template by name.
  // NOTE(review): the InputStreamReader is never closed explicitly here —
  // presumably the factory closes it after compiling; confirm for the
  // mustache.java version in use.
  private def readTemplate(template: String) = {
    Logger("mustache").debug("load template: " + rootPath + template)
    val factory = if(Play.isProd) mf else createMustacheFactory
    val input = Play.current.resourceAsStream(rootPath + template + ".html").getOrElse(throw new Exception("mustache: could not find template: " + template))
    val mustache = factory.compile(new InputStreamReader(input), template)
    mustache
  }
  /** Renders `template` with `data`, returning the result as Html. */
  def render(template: String, data: Any): Html = {
    Logger("mustache").debug("Mustache render template " + template)
    val mustache = {
      if(Play.isProd) {
        // NOTE(review): the null check implies mf.compile(name) may return
        // null on a cache miss — confirm against the mustache.java version.
        val maybeTemplate = mf.compile(template)
        if(maybeTemplate == null) {
          readTemplate(template)
        } else maybeTemplate
      } else {
        readTemplate(template)
      }
    }
    val writer = new StringWriter()
    mustache.execute(writer, data).flush()
    writer.close()
    Html(writer.toString())
  }
  // Pre-compiles every template listed (one name per line) in
  // mustache/mustache.tmpl; a missing listing file is only logged.
  private[jba] def loadAllTemplate: Unit = {
    Logger("mustache").info("Load all mustache template")
    Play.current.resource("mustache/mustache.tmpl").map { url =>
      for(fileName <- Source.fromFile(url.getFile()).getLines)
        readTemplate(fileName)
    }.getOrElse {
      Logger("mustache").error("Impossible to read file mustache/mustache.tmpl")
    }
  }
}
/** Front-end: resolves the running app's MustachePlugin and delegates
  * rendering to it. Throws when no app is running or the plugin is not
  * registered in play.plugins.
  */
object Mustache {
  // NOTE(review): the runtime message wording ("a this point") is left
  // unchanged here, since it is observable behavior.
  private def plugin = play.api.Play.maybeApplication.map { app =>
    app.plugin[MustachePlugin].getOrElse(throw new RuntimeException("you should enable MustachePlugin in play.plugins"))
  }.getOrElse(throw new RuntimeException("you should have a running app in scope a this point"))
  def render(template: String, data: Any): Html = plugin.api.render(template,data)
}
| julienba/play2-mustache | project-code/app/org/jba/Mustache.scala | Scala | apache-2.0 | 3,354 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.bijection.json4s
import com.twitter.bijection.{BaseProperties, CheckProperties, Injection}
import org.json4s.JsonAST.{JString, _}
import scala.reflect.ClassTag
import scala.reflect.runtime.universe.TypeTag
/**
* @author
* Mansur Ashraf
* @since 1/10/14
*/
/**
 * Property-based round-trip laws for the json4s injections:
 * case class <-> JSON string, case class <-> JValue, JValue <-> JSON string.
 */
class Json4sInjectionLaws extends CheckProperties with BaseProperties {
  /** Builds the [[Twit]] fixture from a flat tuple of its five fields. */
  def createTwit(i: (String, Int, String, List[Int], String)): Twit = {
    val (name, id, idStr, indices, screenName) = i
    Twit(name, id, idStr, indices, screenName)
  }

  // Arbitrary Twit values, derived from arbitrary tuples of its fields.
  implicit val testCaseClassToJson = arbitraryViaFn(createTwit _)

  // Arbitrary JValue objects shaped like a serialized Twit.
  implicit val testJValueToJson =
    arbitraryViaFn[(String, Int, String, List[Int], String), JValue] {
      case (name, id, idStr, indices, screenName) =>
        JObject(
          JField("name", JString(name)) ::
            JField("id", JInt(id)) ::
            JField("id_String", JString(idStr)) ::
            JField("indices", JArray(indices.map(JInt(_)))) ::
            JField("screen_name", JString(screenName)) ::
            Nil
        )
    }

  // Loose (one-sided) round-trip laws for each injection under test.
  def roundTripCaseClassToJson(implicit
      inj: Injection[Twit, String],
      tt: TypeTag[Twit],
      ct: ClassTag[Twit]
  ) = isLooseInjection[Twit, String]

  def roundTripCaseClassToJValue(implicit
      inj: Injection[Twit, JValue],
      tt: TypeTag[Twit],
      ct: ClassTag[Twit]
  ) = isLooseInjection[Twit, JValue]

  def roundTripJValueToString(implicit inj: Injection[JValue, String]) =
    isLooseInjection[JValue, String]

  property("round trip Case Class to Json") {
    import com.twitter.bijection.json4s.Json4sInjections.caseClass2Json
    roundTripCaseClassToJson
  }

  property("round trip Case Class to JValue") {
    import com.twitter.bijection.json4s.Json4sInjections.caseClass2JValue
    roundTripCaseClassToJValue
  }

  property("round trip JValue to String") {
    import com.twitter.bijection.json4s.Json4sInjections.jvalue2Json
    roundTripJValueToString
  }
}
case class Twit(name: String, id: Int, id_str: String, indices: List[Int], screen_name: String)
| twitter/bijection | bijection-json4s/src/test/scala/com/twitter/bijection/json4s/Json4sInjectionLaws.scala | Scala | apache-2.0 | 2,647 |
package com.sksamuel.elastic4s.requests.security.roles
case class GetRoleRequest(name: String) | stringbean/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/security/roles/GetRoleRequest.scala | Scala | apache-2.0 | 95 |
package com.ornithoptergames.psav
import javafx.scene.image.WritableImage
case class Size(val w: Double, val h: Double)
/** A canvas size together with the animation frames found within it. */
case class FrameInfo(val size: Size, val frames: List[Frame]) {
  // Compact summary for logging: frame contents are omitted, only the count is shown.
  override lazy val toString = s"FrameInfo($size, ${frames.size} frames)"
}
/** One animation frame: its bitmap plus the bounding-box edges on the sheet. */
case class Frame(val image: WritableImage, val name: String,
    val top: Double, val bottom: Double,
    val left: Double, val right: Double) {
  // Derived extents of the bounding box.
  val width = right - left
  val height = bottom - top
  override def toString() = f"Frame(L: $left%.0f, R: $right%.0f, T: $top%.0f, B: $bottom%.0f)"
}
} | JavadocMD/anim-view | src/main/scala/com/ornithoptergames/psav/FrameInfo.scala | Scala | apache-2.0 | 600 |
package org.openmole.gui.plugin.task.statistic.server
/*
* Copyright (C) 24/02/2015 // mathieu.leclaire@openmole.org
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import org.openmole.gui.plugin.task.statistic.ext.StatisticTaskData
import org.openmole.plugin.task.statistic.StatisticTask
import org.openmole.gui.ext.data.Factory
import org.openmole.core.workflow.task.PluginSet
import scala.util.Try
// Factory adapting GUI-side StatisticTaskData into a core workflow object.
// NOTE(review): coreObject is an empty stub -- it always yields Success(())
// and never constructs a StatisticTask; presumably unfinished at this point.
class StatisticTaskFactory(val data: StatisticTaskData) extends Factory {
def coreObject(implicit plugins: PluginSet): Try[Any] = Try {
}
}
| ISCPIF/PSEExperiments | openmole-src/openmole/gui/plugins/org.openmole.gui.plugin.task.statistic.server/src/main/scala/org/openmole/gui/plugin/task/statistic/server/StatisticTaskFactory.scala | Scala | agpl-3.0 | 1,169 |
package de.fosd.typechef.linux
import java.io._
import de.fosd.typechef.featureexpr.FeatureExprFactory.{False, True, createDefinedExternal}
import de.fosd.typechef.featureexpr.{FeatureExpr, FeatureExprFactory, FeatureExprParser}
import scala.io.Source
import scala.util.parsing.combinator._
/**
* The CleanFileList tool takes a file list produced by KConfigMiner and a feature model
*
* It parses the KConfigMiner format, removes all .S files, removes all files
* with unsatisfiable presence conditions, and prints the output in a format
* that is easier to process (directly parsable by TypeChef) to stdout
*
* If also an open feature list is given, all features not on that list are substituted
* by False
*/
object CleanFileList {
  // Parsed command-line options; inputFile is required by the parser.
  private case class Config(openFeatureList: Option[java.io.File] = None,
                            featureModel: Option[java.io.File] = None,
                            inputFile: java.io.File = null)

  def main(args: Array[String]): Unit = {
    val parser = new scopt.OptionParser[Config]("CleanFileList") {
      opt[File]("openFeatureList") valueName ("<dir>") action { (x, c) =>
        c.copy(openFeatureList = Some(x))
      } text ("an open feature list can be provided to filter any features not supported in this architecture")
      opt[File]("featureModel") valueName ("<dimacs file>") action { (x, c) =>
        // BUGFIX: this previously wrote to openFeatureList (copy/paste error),
        // so --featureModel was silently ignored and no model was ever loaded.
        c.copy(featureModel = Some(x))
      } text ("feature model in dimacs format")
      arg[File]("<file>") required() action { (x, c) =>
        c.copy(inputFile = x)
      }
    }
    parser.parse(args, Config()) map { config =>
      _main(config)
    } getOrElse {
    }
  }

  // Features permitted for this architecture; None means "no filtering".
  var openFeatures: Option[Set[String]] = None

  def _main(config: Config) {
    val stderr = new PrintWriter(System.err, true)
    if (!config.featureModel.isDefined)
      stderr.println("Warning: No feature model provided.")
    if (!config.openFeatureList.isDefined)
      stderr.println("Warning: No open-feature list provided.")
    openFeatures = config.openFeatureList map (Source.fromFile(_).getLines().toSet)
    val fmFactory = FeatureExprFactory.dflt.featureModelFactory
    val featureModel = config.featureModel.map(f => fmFactory.createFromDimacsFile(f.getAbsolutePath)).getOrElse(fmFactory.empty)
    val parser = new KConfigMinerParser(openFeatures)
    val FileNameFilter = """.*\\.c""".r
    val lines = io.Source.fromFile(config.inputFile).getLines
    // Keep only .c entries; each input line is "<filename>:<presence condition>".
    for (line <- lines; fields = line.split(':'); fullFilename = fields(0) if (
      fullFilename match {
        case FileNameFilter(_*) => true
        case _ => false
      }
    )) {
      val pcExpr: parser.ParseResult[FeatureExpr] = parser.parseAll(parser.expr, fields(1))
      pcExpr match {
        case parser.Success(cond: FeatureExpr, _) =>
          // Emit only files whose presence condition is satisfiable in the model;
          // ".c" is stripped (dropRight(2)) in the output format.
          if (cond.isSatisfiable(featureModel))
            println(fullFilename.dropRight(2) + ": " + cond)
          else
            stderr.println("Skipping file with unsatisfiable condition %s: %s (%s)".format(fullFilename, fields(1), cond))
        case parser.NoSuccess(msg, _) =>
          stderr.println("Could not parse " + fullFilename + ": " + fields(1))
      }
    }
  }
}
/**
* The ExtractFileList tool takes a preprocessed file with presence conditions (created by CleanFileList)
* and creates .pc files
*/
object GeneratePCFiles {
  // Command-line configuration: linux tree root plus the cleaned file list.
  private case class Config(workingDir: java.io.File = new File("."),
                            inputFile: java.io.File = null)

  def main(args: Array[String]): Unit = {
    val parser = new scopt.OptionParser[Config]("CleanFileList") {
      opt[File]("workingDir") valueName ("<dir>") action { (x, c) =>
        c.copy(workingDir = x)
      } text ("working directory (root of the linux tree)")
      arg[File]("<file>") required() action { (x, c) =>
        c.copy(inputFile = x)
      }
    }
    parser.parse(args, Config()) map { config =>
      _main(config)
    } getOrElse {
    }
  }

  def _main(config: Config) {
    assert(config.workingDir.isDirectory && config.workingDir.exists(), "working directory not found")
    assert(config.inputFile.exists(), "input file not found: " + config.inputFile)
    // Close the input Source when done (previously leaked).
    val source = io.Source.fromFile(config.inputFile)
    try {
      // Each input line is "<filename>:<presence condition>".
      for (line <- source.getLines; fields = line.split(':')) {
        val filename = fields(0)
        val fexpr = new FeatureExprParser().parse(fields(1))
        val pcFile = new File(config.workingDir, filename + ".pc")
        if (!fexpr.isTautology()) {
          // Write the presence condition; close the writer even when printing
          // fails (previously the file handle leaked on exception).
          val pcWriter = new PrintWriter(pcFile)
          try {
            fexpr.print(pcWriter)
          } finally {
            pcWriter.close()
          }
        } else if (pcFile.exists()) {
          // A tautological condition means "always present": remove any stale .pc file.
          pcFile.delete()
        }
      }
    } finally {
      source.close()
    }
  }
}
/**
* processes thorstens file list (passed as parameter) and checks which files have satisfiable
* presence conditions
*/
// Parser for KConfigMiner presence-condition expressions. When an
// open-feature list is supplied, any feature not on that list is replaced
// by False.
class KConfigMinerParser(openFeatures: Option[Set[String]]) extends RegexParsers {
// Maps a kconfig feature name to a FeatureExpr; tristate "m" values use the
// CONFIG_<name>_MODULE form. Features filtered out by openFeatures become False.
def toFeature(name: String, isModule: Boolean): FeatureExpr = {
val cname = "CONFIG_" + (if (isModule) name + "_MODULE" else name)
if (openFeatures.isDefined && !(openFeatures.get contains cname)) False
else
createDefinedExternal(cname)
}
// Disjunction or a single term. Parentheses are part of each binary
// production; see the note at the bottom of the grammar for why.
def expr: Parser[FeatureExpr] =
("(" ~> (expr ~ "||" ~ expr) <~ ")") ^^ {
case (a ~ _ ~ b) => a or b
} |
term
// Negation, conjunction, or an atom.
def term: Parser[FeatureExpr] =
"!" ~> commit(expr) ^^ (_ not) |
("(" ~> (expr ~ "&&" ~ expr) <~ ")") ^^ {
case (a ~ _ ~ b) => a and b
} |
bool
// Atoms: literals, (in)equality tests against "y"/"m", string comparisons
// (unsupported, treated as True with a warning), or a bare feature name.
def bool: Parser[FeatureExpr] =
"[TRUE]" ^^ (_ => True) |
"InvalidExpression()" ^^ (_ => False) |
("(" ~> (ID ~ "!=" ~! featVal) <~ ")") ^^ {
case (id ~ _ ~ isModule) =>
if (!isModule)
toFeature(id, false).not
// X != y should be probably translated to something like
// CONFIG_X && CONFIG_X_2 || !CONFIG_X; to represent X ==
// "m", in the SAT-solver we enable CONFIG_X and CONFIG_X_2
// (by having CONFIG_X_2 imply CONFIG_X), while autoconf.h
// defines just CONFIG_X_MODULE.
else
// X != m should be probably translated to something like CONFIG_X && !CONFIG_X_2.
throw new RuntimeException("Can't handle this case!")
} |
("(" ~> (ID ~ "==" ~ featVal) <~ ")") ^^ {
case (id ~ _ ~ isModule) => toFeature(id, isModule)
} |
("(" ~> (ID ~ "==" ~ stringLit) <~ ")") ^^ {
case (id ~ _ ~ value) => System.err.println("nonboolean %s=%s not supported".format(id, value)); True
} |
// A bare name means "enabled in any form": module or built-in.
ID ^^ (id => toFeature(id, true) or toFeature(id, false))
//Having this case here makes the grammar not LL(1) - and one expression
//triggers exponential backtracking. Since source
//phrases are always parenthesizes, I can include parentheses in each
//production.
//|
//"(" ~> expr <~ ")"
def ID = "[A-Za-z0-9_]*".r
// featVal parses a quoted "y" or "m" and yields true when it is "m" (module).
def featVal = ("\\"" ~> "(y|m)".r <~ "\\"") ^^ (_ == "m")
def stringLit = ("\\"" ~> "[a-z]*".r <~ "\\"")
}
| aJanker/TypeChef-LinuxAnalysis | src/main/scala/de/fosd/typechef/linux/ProcessPCFile.scala | Scala | gpl-3.0 | 7,720 |
/**
* @author Yuuto
*/
package yuuto.enhancedinventories.compat.nei
import yuuto.enhancedinventories.client.gui.GuiContainerAutoAssembler
import yuuto.enhancedinventories.client.gui.GuiContainerWorkbench
import yuuto.enhancedinventories.client.gui.GuiContainerWorktable
import codechicken.nei.api.API
import codechicken.nei.api.IConfigureNEI
import codechicken.nei.recipe.DefaultOverlayHandler;
import yuuto.enhancedinventories.ref.ReferenceEI
// NEI integration entry point: registers the Enhanced Inventories shaped-recipe
// handler and crafting-grid overlays for its workbench / worktable / auto
// assembler GUIs. Discovered and invoked by NEI via IConfigureNEI.
class NEIConfig extends IConfigureNEI{
override def loadConfig() {
// The shaped handler serves both recipe lookup and usage lookup.
API.registerRecipeHandler(new EIShapedHandlerBasic());
API.registerUsageHandler(new EIShapedHandlerBasic());
API.registerGuiOverlay(classOf[GuiContainerWorkbench], "crafting");
API.registerGuiOverlayHandler(classOf[GuiContainerWorkbench], new DefaultOverlayHandler(), "crafting");
API.registerGuiOverlay(classOf[GuiContainerWorktable], "crafting");
API.registerGuiOverlayHandler(classOf[GuiContainerWorktable], new WorktableOverlayHandler(), "crafting");
// The auto assembler's crafting grid is offset within its GUI, hence the -17/11 shift.
API.registerGuiOverlay(classOf[GuiContainerAutoAssembler], "crafting", -17, 11);
API.registerGuiOverlayHandler(classOf[GuiContainerAutoAssembler], new AutoAssemblerOverlayHandler(), "crafting");
}
override def getName():String="EnhancedInventoriesNEIHandler";
override def getVersion():String=ReferenceEI.VERSION;
}
} | AnimeniacYuuto/EnhancedInventories | src/main/scala/yuuto/enhancedinventories/compat/nei/NEIConfig.scala | Scala | gpl-2.0 | 1,329 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters and retrieves a sample of code snippets that meet specific criteria, providing a basic overview of the dataset's content without revealing deeper insights.