code
stringlengths
5
1M
repo_name
stringlengths
5
109
path
stringlengths
6
208
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
5
1M
package io.scalajs.nodejs /** * tty package object * @author lawrence.daniels@gmail.com */ package object tty { /** * Write Stream Events * @param stream the given [[WriteStream stream]] */ implicit class WriteStreamEvents(val stream: WriteStream) extends AnyVal { /** * The 'resize' event is emitted whenever either of the writeStream.columns or writeStream.rows properties have * changed. No arguments are passed to the listener callback when called. * @param listener the given event handler * @since 0.7.7 */ def onResize(listener: () => Any): stream.type = stream.on("resize", listener) } }
scalajs-io/nodejs
app/common/src/main/scala/io/scalajs/nodejs/tty/package.scala
Scala
apache-2.0
663
package cromwell.engine.io.gcs import com.google.api.client.util.DateTime import com.google.api.services.storage.model.Bucket.Owner case class GcsBucketInfo(bucketName: String, location: String, timeCreated: DateTime, owner: Owner)
dgtester/cromwell
src/main/scala/cromwell/engine/io/gcs/GcsBucketInfo.scala
Scala
bsd-3-clause
233
package articles.config import articles.ArticleComponents import articles.models.{Article, ArticleId} import articles.repositories.ArticleRepo import commons.repositories.ActionRunner import testhelpers.TestUtils import scala.concurrent.duration.DurationInt trait ArticleTestComponents { _: ArticleComponents => lazy val articlePopulator: ArticlePopulator = new ArticlePopulator(articleRepo, actionRunner) } class ArticlePopulator(articleRepo: ArticleRepo, implicit private val actionRunner: ActionRunner) { def save(article: Article): Article = { val action = articleRepo.create(article) TestUtils.runAndAwaitResult(action)(actionRunner, new DurationInt(1).seconds) } } object Articles { val hotToTrainYourDragon: Article = Article( ArticleId(-1), "how-to-train-your-dragon", "how-to-train-your-dragon", "Ever wonder how?", "It takes a Jacobian", null, null ) }
Dasiu/play-framework-test-project
test/articles/config/ArticleTestComponents.scala
Scala
mit
942
package blended.testsupport.retry import scala.concurrent.Await import scala.concurrent.ExecutionContext import scala.concurrent.Future import scala.concurrent.duration._ import akka.actor.Scheduler import blended.util.logging.Logger object Retry { /** * Executed and in case of an failure retries an operation `op`. As long as there are retries left, the next retry starts after `delay` times. * * @param delay The time between a failure and the next retry. * @param retries The max count of retries, before giving up. * @param onRetry Action to be run before a retry. * @param op The operation to be executed and, iff failed, retried. * @param ex ExecutionContext to run the inner futures with. * @param s The Scheduler used to schedule the next retry. * * @return The Future containing the result of `op` or the last failure. */ def retry[T]( delay: FiniteDuration, retries: Int, onRetry: (Int, Throwable) => Unit = (n, e) => Logger[Retry.type].debug(e)(s"Retrying after failed execution (${n} retries left)") )( op: => T )(implicit ec: ExecutionContext, s: Scheduler): Future[T] = Future { op } recoverWith { case e: Throwable if retries > 0 => akka.pattern.after(delay, s)({ onRetry(retries - 1, e) retry(delay, retries - 1, onRetry)(op)(ec, s) }) } /** * Executed and in case of an failure retries an operation `op`. As long as there are retries left, the next retry starts after `delay` times. * * @param delay The time between a failure and the next retry. * @param retries The max count of retries, before giving up. * @param onRetry Action to be run before a retry. * @param finalDelay The overall delay forwarded to [[Await]], before failing the whole retry block. * @param op The operation to be executed and, iff failed, retried. * @param ex ExecutionContext to run the inner futures with. * @param s The Scheduler used to schedule the next retry. * * @return The result of `op` or throws an exception. 
*/ def unsafeRetry[T]( delay: FiniteDuration, retries: Int, onRetry: (Int, Throwable) => Unit = (n, e) => Logger[Retry.type].debug(e)(s"Retrying after failed execution (${n} retries left)"), finalDelay: Option[FiniteDuration] = None )( op: => T )(implicit ec: ExecutionContext, s: Scheduler): T = { val res = retry(delay, retries, onRetry)(op)(ec, s) Await.result(res, finalDelay.getOrElse(delay * retries + 2.seconds)) } }
lefou/blended
blended.testsupport/src/main/scala/blended/testsupport/retry/Retry.scala
Scala
apache-2.0
2,513
package com.korpisystems.SimpleANN import Math._ abstract class Activator { def activation(x: NodeSignal): NodeSignal def derivative(x: NodeSignal): NodeSignal } object sigmoid extends Activator { def activation(x: NodeSignal): NodeSignal = { 1 / (1 + pow(E, -x)) } def derivative(x: NodeSignal): NodeSignal = { x * (1 - x) } }
rdtaylor/SimpleANN
src/main/scala/com/korpisystems/SimpleANN/Activator.scala
Scala
mit
353
package de.metacoder.edwardthreadlocal.analysis.datamodel import de.metacoder.edwardthreadlocal.analysis.AnalysisSetup sealed trait ValueInstanceID { def refersToNull:Boolean } object ValueInstanceID { def of(value:AnyRef)(implicit setup:AnalysisSetup):ValueInstanceID = setup idOfValue value private[analysis] sealed case class ByClassAndSystemIndentityHashCode[A](clazz:Class[A], systemIdentityHashCode:Int) extends ValueInstanceID { def refersToNull = clazz == null override def toString = if (refersToNull) "null" else s"${clazz.getName}@$systemIdentityHashCode" } private[analysis] object ByClassAndSystemIndentityHashCode { def of(v:AnyRef) = v match { case null ⇒ ByClassAndSystemIndentityHashCode(null, 0) case notNull ⇒ ByClassAndSystemIndentityHashCode(v.getClass, System.identityHashCode(v)) } } }
metacoder/edward-tl
agent-impl/src/main/scala/de/metacoder/edwardthreadlocal/analysis/datamodel/ValueInstanceID.scala
Scala
apache-2.0
856
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql import scala.collection.JavaConverters._ import org.apache.spark.annotation.{Experimental, InterfaceStability} import org.apache.spark.api.java.function._ import org.apache.spark.sql.catalyst.encoders.{encoderFor, ExpressionEncoder} import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, CreateStruct} import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.catalyst.streaming.InternalOutputModes import org.apache.spark.sql.execution.QueryExecution import org.apache.spark.sql.expressions.ReduceAggregator import org.apache.spark.sql.streaming.{GroupState, GroupStateTimeout, OutputMode} /** * :: Experimental :: * A [[Dataset]] has been logically grouped by a user specified grouping key. Users should not * construct a [[KeyValueGroupedDataset]] directly, but should instead call `groupByKey` on * an existing [[Dataset]]. 
* * @since 2.0.0 */ @Experimental @InterfaceStability.Evolving class KeyValueGroupedDataset[K, V] private[sql]( kEncoder: Encoder[K], vEncoder: Encoder[V], @transient val queryExecution: QueryExecution, private val dataAttributes: Seq[Attribute], private val groupingAttributes: Seq[Attribute]) extends Serializable { // Similar to [[Dataset]], we turn the passed in encoder to `ExpressionEncoder` explicitly. private implicit val kExprEnc = encoderFor(kEncoder) private implicit val vExprEnc = encoderFor(vEncoder) private def logicalPlan = queryExecution.analyzed private def sparkSession = queryExecution.sparkSession /** * Returns a new [[KeyValueGroupedDataset]] where the type of the key has been mapped to the * specified type. The mapping of key columns to the type follows the same rules as `as` on * [[Dataset]]. * * @since 1.6.0 */ def keyAs[L : Encoder]: KeyValueGroupedDataset[L, V] = new KeyValueGroupedDataset( encoderFor[L], vExprEnc, queryExecution, dataAttributes, groupingAttributes) /** * Returns a new [[KeyValueGroupedDataset]] where the given function `func` has been applied * to the data. The grouping key is unchanged by this. * * {{{ * // Create values grouped by key from a Dataset[(K, V)] * ds.groupByKey(_._1).mapValues(_._2) // Scala * }}} * * @since 2.1.0 */ def mapValues[W : Encoder](func: V => W): KeyValueGroupedDataset[K, W] = { val withNewData = AppendColumns(func, dataAttributes, logicalPlan) val projected = Project(withNewData.newColumns ++ groupingAttributes, withNewData) val executed = sparkSession.sessionState.executePlan(projected) new KeyValueGroupedDataset( encoderFor[K], encoderFor[W], executed, withNewData.newColumns, groupingAttributes) } /** * Returns a new [[KeyValueGroupedDataset]] where the given function `func` has been applied * to the data. The grouping key is unchanged by this. 
* * {{{ * // Create Integer values grouped by String key from a Dataset<Tuple2<String, Integer>> * Dataset<Tuple2<String, Integer>> ds = ...; * KeyValueGroupedDataset<String, Integer> grouped = * ds.groupByKey(t -> t._1, Encoders.STRING()).mapValues(t -> t._2, Encoders.INT()); * }}} * * @since 2.1.0 */ def mapValues[W](func: MapFunction[V, W], encoder: Encoder[W]): KeyValueGroupedDataset[K, W] = { implicit val uEnc = encoder mapValues { (v: V) => func.call(v) } } /** * Returns a [[Dataset]] that contains each unique key. This is equivalent to doing mapping * over the Dataset to extract the keys and then running a distinct operation on those. * * @since 1.6.0 */ def keys: Dataset[K] = { Dataset[K]( sparkSession, Distinct( Project(groupingAttributes, logicalPlan))) } /** * (Scala-specific) * Applies the given function to each group of data. For each unique group, the function will * be passed the group key and an iterator that contains all of the elements in the group. The * function can return an iterator containing elements of an arbitrary type which will be returned * as a new [[Dataset]]. * * This function does not support partial aggregation, and as a result requires shuffling all * the data in the [[Dataset]]. If an application intends to perform an aggregation over each * key, it is best to use the reduce function or an * `org.apache.spark.sql.expressions#Aggregator`. * * Internally, the implementation will spill to disk if any given group is too large to fit into * memory. However, users must take care to avoid materializing the whole iterator for a group * (for example, by calling `toList`) unless they are sure that this is possible given the memory * constraints of their cluster. * * @since 1.6.0 */ def flatMapGroups[U : Encoder](f: (K, Iterator[V]) => TraversableOnce[U]): Dataset[U] = { Dataset[U]( sparkSession, MapGroups( f, groupingAttributes, dataAttributes, logicalPlan)) } /** * (Java-specific) * Applies the given function to each group of data. 
For each unique group, the function will * be passed the group key and an iterator that contains all of the elements in the group. The * function can return an iterator containing elements of an arbitrary type which will be returned * as a new [[Dataset]]. * * This function does not support partial aggregation, and as a result requires shuffling all * the data in the [[Dataset]]. If an application intends to perform an aggregation over each * key, it is best to use the reduce function or an * `org.apache.spark.sql.expressions#Aggregator`. * * Internally, the implementation will spill to disk if any given group is too large to fit into * memory. However, users must take care to avoid materializing the whole iterator for a group * (for example, by calling `toList`) unless they are sure that this is possible given the memory * constraints of their cluster. * * @since 1.6.0 */ def flatMapGroups[U](f: FlatMapGroupsFunction[K, V, U], encoder: Encoder[U]): Dataset[U] = { flatMapGroups((key, data) => f.call(key, data.asJava).asScala)(encoder) } /** * (Scala-specific) * Applies the given function to each group of data. For each unique group, the function will * be passed the group key and an iterator that contains all of the elements in the group. The * function can return an element of arbitrary type which will be returned as a new [[Dataset]]. * * This function does not support partial aggregation, and as a result requires shuffling all * the data in the [[Dataset]]. If an application intends to perform an aggregation over each * key, it is best to use the reduce function or an * `org.apache.spark.sql.expressions#Aggregator`. * * Internally, the implementation will spill to disk if any given group is too large to fit into * memory. However, users must take care to avoid materializing the whole iterator for a group * (for example, by calling `toList`) unless they are sure that this is possible given the memory * constraints of their cluster. 
* * @since 1.6.0 */ def mapGroups[U : Encoder](f: (K, Iterator[V]) => U): Dataset[U] = { val func = (key: K, it: Iterator[V]) => Iterator(f(key, it)) flatMapGroups(func) } /** * (Java-specific) * Applies the given function to each group of data. For each unique group, the function will * be passed the group key and an iterator that contains all of the elements in the group. The * function can return an element of arbitrary type which will be returned as a new [[Dataset]]. * * This function does not support partial aggregation, and as a result requires shuffling all * the data in the [[Dataset]]. If an application intends to perform an aggregation over each * key, it is best to use the reduce function or an * `org.apache.spark.sql.expressions#Aggregator`. * * Internally, the implementation will spill to disk if any given group is too large to fit into * memory. However, users must take care to avoid materializing the whole iterator for a group * (for example, by calling `toList`) unless they are sure that this is possible given the memory * constraints of their cluster. * * @since 1.6.0 */ def mapGroups[U](f: MapGroupsFunction[K, V, U], encoder: Encoder[U]): Dataset[U] = { mapGroups((key, data) => f.call(key, data.asJava))(encoder) } /** * ::Experimental:: * (Scala-specific) * Applies the given function to each group of data, while maintaining a user-defined per-group * state. The result Dataset will represent the objects returned by the function. * For a static batch Dataset, the function will be invoked once per group. For a streaming * Dataset, the function will be invoked for each group repeatedly in every trigger, and * updates to each group's state will be saved across invocations. * See [[org.apache.spark.sql.streaming.GroupState]] for more details. * * @tparam S The type of the user-defined state. Must be encodable to Spark SQL types. * @tparam U The type of the output objects. Must be encodable to Spark SQL types. 
* @param func Function to be called on every group. * * See [[Encoder]] for more details on what types are encodable to Spark SQL. * @since 2.2.0 */ @Experimental @InterfaceStability.Evolving def mapGroupsWithState[S: Encoder, U: Encoder]( func: (K, Iterator[V], GroupState[S]) => U): Dataset[U] = { val flatMapFunc = (key: K, it: Iterator[V], s: GroupState[S]) => Iterator(func(key, it, s)) Dataset[U]( sparkSession, FlatMapGroupsWithState[K, V, S, U]( flatMapFunc.asInstanceOf[(Any, Iterator[Any], LogicalGroupState[Any]) => Iterator[Any]], groupingAttributes, dataAttributes, OutputMode.Update, isMapGroupsWithState = true, GroupStateTimeout.NoTimeout, child = logicalPlan)) } /** * ::Experimental:: * (Scala-specific) * Applies the given function to each group of data, while maintaining a user-defined per-group * state. The result Dataset will represent the objects returned by the function. * For a static batch Dataset, the function will be invoked once per group. For a streaming * Dataset, the function will be invoked for each group repeatedly in every trigger, and * updates to each group's state will be saved across invocations. * See [[org.apache.spark.sql.streaming.GroupState]] for more details. * * @tparam S The type of the user-defined state. Must be encodable to Spark SQL types. * @tparam U The type of the output objects. Must be encodable to Spark SQL types. * @param func Function to be called on every group. * @param timeoutConf Timeout configuration for groups that do not receive data for a while. * * See [[Encoder]] for more details on what types are encodable to Spark SQL. 
* @since 2.2.0 */ @Experimental @InterfaceStability.Evolving def mapGroupsWithState[S: Encoder, U: Encoder]( timeoutConf: GroupStateTimeout)( func: (K, Iterator[V], GroupState[S]) => U): Dataset[U] = { val flatMapFunc = (key: K, it: Iterator[V], s: GroupState[S]) => Iterator(func(key, it, s)) Dataset[U]( sparkSession, FlatMapGroupsWithState[K, V, S, U]( flatMapFunc.asInstanceOf[(Any, Iterator[Any], LogicalGroupState[Any]) => Iterator[Any]], groupingAttributes, dataAttributes, OutputMode.Update, isMapGroupsWithState = true, timeoutConf, child = logicalPlan)) } /** * ::Experimental:: * (Java-specific) * Applies the given function to each group of data, while maintaining a user-defined per-group * state. The result Dataset will represent the objects returned by the function. * For a static batch Dataset, the function will be invoked once per group. For a streaming * Dataset, the function will be invoked for each group repeatedly in every trigger, and * updates to each group's state will be saved across invocations. * See `GroupState` for more details. * * @tparam S The type of the user-defined state. Must be encodable to Spark SQL types. * @tparam U The type of the output objects. Must be encodable to Spark SQL types. * @param func Function to be called on every group. * @param stateEncoder Encoder for the state type. * @param outputEncoder Encoder for the output type. * * See [[Encoder]] for more details on what types are encodable to Spark SQL. * @since 2.2.0 */ @Experimental @InterfaceStability.Evolving def mapGroupsWithState[S, U]( func: MapGroupsWithStateFunction[K, V, S, U], stateEncoder: Encoder[S], outputEncoder: Encoder[U]): Dataset[U] = { mapGroupsWithState[S, U]( (key: K, it: Iterator[V], s: GroupState[S]) => func.call(key, it.asJava, s) )(stateEncoder, outputEncoder) } /** * ::Experimental:: * (Java-specific) * Applies the given function to each group of data, while maintaining a user-defined per-group * state. 
The result Dataset will represent the objects returned by the function. * For a static batch Dataset, the function will be invoked once per group. For a streaming * Dataset, the function will be invoked for each group repeatedly in every trigger, and * updates to each group's state will be saved across invocations. * See `GroupState` for more details. * * @tparam S The type of the user-defined state. Must be encodable to Spark SQL types. * @tparam U The type of the output objects. Must be encodable to Spark SQL types. * @param func Function to be called on every group. * @param stateEncoder Encoder for the state type. * @param outputEncoder Encoder for the output type. * @param timeoutConf Timeout configuration for groups that do not receive data for a while. * * See [[Encoder]] for more details on what types are encodable to Spark SQL. * @since 2.2.0 */ @Experimental @InterfaceStability.Evolving def mapGroupsWithState[S, U]( func: MapGroupsWithStateFunction[K, V, S, U], stateEncoder: Encoder[S], outputEncoder: Encoder[U], timeoutConf: GroupStateTimeout): Dataset[U] = { mapGroupsWithState[S, U](timeoutConf)( (key: K, it: Iterator[V], s: GroupState[S]) => func.call(key, it.asJava, s) )(stateEncoder, outputEncoder) } /** * ::Experimental:: * (Scala-specific) * Applies the given function to each group of data, while maintaining a user-defined per-group * state. The result Dataset will represent the objects returned by the function. * For a static batch Dataset, the function will be invoked once per group. For a streaming * Dataset, the function will be invoked for each group repeatedly in every trigger, and * updates to each group's state will be saved across invocations. * See `GroupState` for more details. * * @tparam S The type of the user-defined state. Must be encodable to Spark SQL types. * @tparam U The type of the output objects. Must be encodable to Spark SQL types. * @param func Function to be called on every group. 
* @param outputMode The output mode of the function. * @param timeoutConf Timeout configuration for groups that do not receive data for a while. * * See [[Encoder]] for more details on what types are encodable to Spark SQL. * @since 2.2.0 */ @Experimental @InterfaceStability.Evolving def flatMapGroupsWithState[S: Encoder, U: Encoder]( outputMode: OutputMode, timeoutConf: GroupStateTimeout)( func: (K, Iterator[V], GroupState[S]) => Iterator[U]): Dataset[U] = { if (outputMode != OutputMode.Append && outputMode != OutputMode.Update) { throw new IllegalArgumentException("The output mode of function should be append or update") } Dataset[U]( sparkSession, FlatMapGroupsWithState[K, V, S, U]( func.asInstanceOf[(Any, Iterator[Any], LogicalGroupState[Any]) => Iterator[Any]], groupingAttributes, dataAttributes, outputMode, isMapGroupsWithState = false, timeoutConf, child = logicalPlan)) } /** * ::Experimental:: * (Java-specific) * Applies the given function to each group of data, while maintaining a user-defined per-group * state. The result Dataset will represent the objects returned by the function. * For a static batch Dataset, the function will be invoked once per group. For a streaming * Dataset, the function will be invoked for each group repeatedly in every trigger, and * updates to each group's state will be saved across invocations. * See `GroupState` for more details. * * @tparam S The type of the user-defined state. Must be encodable to Spark SQL types. * @tparam U The type of the output objects. Must be encodable to Spark SQL types. * @param func Function to be called on every group. * @param outputMode The output mode of the function. * @param stateEncoder Encoder for the state type. * @param outputEncoder Encoder for the output type. * @param timeoutConf Timeout configuration for groups that do not receive data for a while. * * See [[Encoder]] for more details on what types are encodable to Spark SQL. 
* @since 2.2.0 */ @Experimental @InterfaceStability.Evolving def flatMapGroupsWithState[S, U]( func: FlatMapGroupsWithStateFunction[K, V, S, U], outputMode: OutputMode, stateEncoder: Encoder[S], outputEncoder: Encoder[U], timeoutConf: GroupStateTimeout): Dataset[U] = { val f = (key: K, it: Iterator[V], s: GroupState[S]) => func.call(key, it.asJava, s).asScala flatMapGroupsWithState[S, U](outputMode, timeoutConf)(f)(stateEncoder, outputEncoder) } /** * (Scala-specific) * Reduces the elements of each group of data using the specified binary function. * The given function must be commutative and associative or the result may be non-deterministic. * * @since 1.6.0 */ def reduceGroups(f: (V, V) => V): Dataset[(K, V)] = { val vEncoder = encoderFor[V] val aggregator: TypedColumn[V, V] = new ReduceAggregator[V](f)(vEncoder).toColumn agg(aggregator) } /** * (Java-specific) * Reduces the elements of each group of data using the specified binary function. * The given function must be commutative and associative or the result may be non-deterministic. * * @since 1.6.0 */ def reduceGroups(f: ReduceFunction[V]): Dataset[(K, V)] = { reduceGroups(f.call _) } /** * Internal helper function for building typed aggregations that return tuples. For simplicity * and code reuse, we do this without the help of the type system and then use helper functions * that cast appropriately for the user facing interface. 
*/ protected def aggUntyped(columns: TypedColumn[_, _]*): Dataset[_] = { val encoders = columns.map(_.encoder) val namedColumns = columns.map(_.withInputType(vExprEnc, dataAttributes).named) val keyColumn = if (kExprEnc.flat) { assert(groupingAttributes.length == 1) groupingAttributes.head } else { Alias(CreateStruct(groupingAttributes), "key")() } val aggregate = Aggregate(groupingAttributes, keyColumn +: namedColumns, logicalPlan) val execution = new QueryExecution(sparkSession, aggregate) new Dataset( sparkSession, execution, ExpressionEncoder.tuple(kExprEnc +: encoders)) } /** * Computes the given aggregation, returning a [[Dataset]] of tuples for each unique key * and the result of computing this aggregation over all elements in the group. * * @since 1.6.0 */ def agg[U1](col1: TypedColumn[V, U1]): Dataset[(K, U1)] = aggUntyped(col1).asInstanceOf[Dataset[(K, U1)]] /** * Computes the given aggregations, returning a [[Dataset]] of tuples for each unique key * and the result of computing these aggregations over all elements in the group. * * @since 1.6.0 */ def agg[U1, U2](col1: TypedColumn[V, U1], col2: TypedColumn[V, U2]): Dataset[(K, U1, U2)] = aggUntyped(col1, col2).asInstanceOf[Dataset[(K, U1, U2)]] /** * Computes the given aggregations, returning a [[Dataset]] of tuples for each unique key * and the result of computing these aggregations over all elements in the group. * * @since 1.6.0 */ def agg[U1, U2, U3]( col1: TypedColumn[V, U1], col2: TypedColumn[V, U2], col3: TypedColumn[V, U3]): Dataset[(K, U1, U2, U3)] = aggUntyped(col1, col2, col3).asInstanceOf[Dataset[(K, U1, U2, U3)]] /** * Computes the given aggregations, returning a [[Dataset]] of tuples for each unique key * and the result of computing these aggregations over all elements in the group. 
* * @since 1.6.0 */ def agg[U1, U2, U3, U4]( col1: TypedColumn[V, U1], col2: TypedColumn[V, U2], col3: TypedColumn[V, U3], col4: TypedColumn[V, U4]): Dataset[(K, U1, U2, U3, U4)] = aggUntyped(col1, col2, col3, col4).asInstanceOf[Dataset[(K, U1, U2, U3, U4)]] /** * Returns a [[Dataset]] that contains a tuple with each key and the number of items present * for that key. * * @since 1.6.0 */ def count(): Dataset[(K, Long)] = agg(functions.count("*").as(ExpressionEncoder[Long]())) /** * (Scala-specific) * Applies the given function to each cogrouped data. For each unique group, the function will * be passed the grouping key and 2 iterators containing all elements in the group from * [[Dataset]] `this` and `other`. The function can return an iterator containing elements of an * arbitrary type which will be returned as a new [[Dataset]]. * * @since 1.6.0 */ def cogroup[U, R : Encoder]( other: KeyValueGroupedDataset[K, U])( f: (K, Iterator[V], Iterator[U]) => TraversableOnce[R]): Dataset[R] = { implicit val uEncoder = other.vExprEnc Dataset[R]( sparkSession, CoGroup( f, this.groupingAttributes, other.groupingAttributes, this.dataAttributes, other.dataAttributes, this.logicalPlan, other.logicalPlan)) } /** * (Java-specific) * Applies the given function to each cogrouped data. For each unique group, the function will * be passed the grouping key and 2 iterators containing all elements in the group from * [[Dataset]] `this` and `other`. The function can return an iterator containing elements of an * arbitrary type which will be returned as a new [[Dataset]]. * * @since 1.6.0 */ def cogroup[U, R]( other: KeyValueGroupedDataset[K, U], f: CoGroupFunction[K, V, U, R], encoder: Encoder[R]): Dataset[R] = { cogroup(other)((key, left, right) => f.call(key, left.asJava, right.asJava).asScala)(encoder) } }
mike0sv/spark
sql/core/src/main/scala/org/apache/spark/sql/KeyValueGroupedDataset.scala
Scala
apache-2.0
23,837
package collins.callbacks /** * Represents the results of a regular expression. * * This exists to capture the results of applying a regular expression against a source string. For * example assume the source string is: 'run_command --tag=<tag> --withStatus=<status>' * * The results of applying a regular expression could be stashed here where you might have * originalValue as '<tag>' and methodName as 'tag'. * * @param originalValue a string found in the original source string (e.g. <tag>) * @param methodName the match from the original source string (e.g. tag) * @param newValue the result of applying methodName on a class instance */ case class MethodReplacement( originalValue: String, methodName: String, newValue: String = "") extends MethodHelper { override val chattyFailures = true /** * Apply methodName to v, return a MethodReplacement with an updated newValue. * * @param v Any value (non-primitive) * @return a MethodReplacement with an updated newValue on success, or an empty newValue on * failure */ def runMethod(v: CallbackDatum): MethodReplacement = { getMethod(methodName, v).flatMap { method => invokeZeroArityMethod(method, v) .map(_.toString) .map(s => this.copy(newValue = s)) }.getOrElse(this) } }
discordianfish/collins
app/collins/callbacks/MethodReplacement.scala
Scala
apache-2.0
1,305
package retry

import odelay.{ Delay, Timer }
import scala.concurrent.{ Future, ExecutionContext }
import scala.concurrent.duration.{ Duration, FiniteDuration }
import scala.language.implicitConversions
import scala.util.control.NonFatal
import java.util.concurrent.TimeUnit

// This case class and its implicit conversions allow us to accept both
// `() => Future[T]` and `Future[T]`-by-name as Policy.apply arguments.
// Note that these two types are the same after erasure.
case class PromiseWrapper[T](
  promise: () => Future[T]
)

object PromiseWrapper {
  // Lifts a future-producing thunk into the wrapper so overload resolution works.
  implicit def fromFuture[T](promise: () => Future[T]): PromiseWrapper[T] =
    PromiseWrapper(promise)

  // Unwraps back to the underlying thunk.
  implicit def toFuture[T](pw: PromiseWrapper[T]): () => Future[T] =
    pw.promise
}

object Directly {

  /** Retry immediately after failure forever */
  def forever: Policy =
    new Policy {
      def apply[T]
        (promise: PromiseWrapper[T])
        (implicit success: Success[T],
         executor: ExecutionContext): Future[T] =
        // on failure, the fallback simply re-invokes the same promise again
        retry(promise, promise)
    }

  /** Retry immediately after failure for a max number of times */
  def apply(max: Int = 3): Policy =
    new CountingPolicy {
      def apply[T]
        (promise: PromiseWrapper[T])
        (implicit success: Success[T],
         executor: ExecutionContext): Future[T] = {
        // `run` re-enters countdown with one fewer attempt remaining
        def run(max: Int): Future[T] = countdown(max, promise, run)
        run(max)
      }
    }
}

object Pause {

  /** Retry with a pause between attempts forever */
  def forever(delay: FiniteDuration = Defaults.delay)
    (implicit timer: Timer): Policy =
    new Policy { self =>
      def apply[T]
        (promise: PromiseWrapper[T])
        (implicit success: Success[T],
         executor: ExecutionContext): Future[T] =
        // schedule the next attempt after `delay`; flatMap(identity) collapses
        // the Future[Future[T]] produced by the timer
        retry(promise, { () =>
          Delay(delay)(self(promise)).future.flatMap(identity)
        })
    }

  /** Retry with a pause between attempts for a max number of times */
  def apply(max: Int = 4, delay: FiniteDuration = Defaults.delay)
    (implicit timer: Timer): Policy =
    new CountingPolicy {
      def apply[T]
        (promise: PromiseWrapper[T])
        (implicit success: Success[T],
         executor: ExecutionContext): Future[T] = {
        // each remaining attempt is delayed by the fixed `delay`
        def run(max: Int): Future[T] = countdown(
          max, promise,
          c => Delay(delay)(run(c)).future.flatMap(identity))
        run(max)
      }
    }
}

object Backoff {

  /** Retry with exponential backoff forever */
  def forever(delay: FiniteDuration = Defaults.delay, base: Int = 2)
    (implicit timer: Timer): Policy =
    new Policy {
      def apply[T]
        (promise: PromiseWrapper[T])
        (implicit success: Success[T],
         executor: ExecutionContext): Future[T] = {
        // each recursion multiplies the delay by `base` (same time unit)
        def run(delay: FiniteDuration): Future[T] = retry(promise, { () =>
          Delay(delay) {
            run(Duration(delay.length * base, delay.unit))
          }.future.flatMap(identity)
        })
        run(delay)
      }
    }

  /** Retry with exponential backoff for a max number of times */
  def apply(
    max: Int = 8,
    delay: FiniteDuration = Defaults.delay,
    base: Int = 2)
    (implicit timer: Timer): Policy =
    new CountingPolicy {
      def apply[T]
        (promise: PromiseWrapper[T])
        (implicit success: Success[T],
         executor: ExecutionContext): Future[T] = {
        // tracks both the remaining attempts and the growing delay
        def run(max: Int, delay: FiniteDuration): Future[T] = countdown(
          max, promise,
          count => Delay(delay) {
            run(count, Duration(delay.length * base, delay.unit))
          }.future.flatMap(identity))
        run(max, delay)
      }
    }
}

/** A retry policy in which the a failure determines the way a future should be retried.
 *  The partial function provided may define the domain of both the success OR exceptional
 *  failure of a future fails explicitly.
 *
 *  {{{
 *  val policy = retry.When {
 *    case RetryAfter(retryAt) => retry.Pause(delay = retryAt)
 *  }
 *  val future = policy(issueRequest)
 *  }}}
 *
 *  If the result is not defined for the depends block, the future will not
 *  be retried.
 */
object When {
  type Depends = PartialFunction[Any, Policy]

  def apply(depends: Depends): Policy =
    new Policy {
      def apply[T](promise: PromiseWrapper[T])
        (implicit success: Success[T],
         executor: ExecutionContext): Future[T] = {
        val fut = promise()
        fut.flatMap { res =>
          // successful or unmatched results pass through unchanged
          if (success.predicate(res) || !depends.isDefinedAt(res)) fut
          else depends(res)(promise)
        }.recoverWith {
          // only non-fatal errors are eligible for a retry policy lookup
          case NonFatal(e) =>
            if (depends.isDefinedAt(e)) depends(e)(promise)
            else fut
        }
      }
    }
}

/** Retry policy that incorporates a count */
trait CountingPolicy extends Policy {

  /**
   * Runs the promise once; when it fails and attempts remain, invokes
   * `orElse` with the decremented count, otherwise yields the last result.
   */
  protected def countdown[T](
    max: Int,
    promise: () => Future[T],
    orElse: Int => Future[T])
    (implicit success: Success[T],
     executor: ExecutionContext): Future[T] = {
    // consider this successful if our predicate says so _or_
    // we've reached the end of our countdown
    val countedSuccess = success.or(max < 1)
    retry(promise, () => orElse(max - 1), { f: Future[T] =>
      // on exception: give up with the failed future once attempts are spent
      if (max < 1) f else orElse(max - 1)
    })(countedSuccess, executor)
  }
}

/** A Policy defines an interface for applying a future with retry semantics
 *  specific to implementations
 */
trait Policy {

  def apply[T](pw: PromiseWrapper[T])
    (implicit success: Success[T],
     executor: ExecutionContext): Future[T]

  // by-name overload: wraps the expression into a re-invokable thunk
  def apply[T](promise: => Future[T])
    (implicit success: Success[T],
     executor: ExecutionContext): Future[T] =
    apply { () => promise }

  /**
   * Single retry step: returns the original future when `success` accepts its
   * value, otherwise falls back to `orElse`; non-fatal exceptions go through
   * `recovery` (identity by default, i.e. the failure is surfaced).
   */
  protected def retry[T](
    promise: () => Future[T],
    orElse: () => Future[T],
    recovery: Future[T] => Future[T] = identity(_: Future[T]))
    (implicit success: Success[T],
     executor: ExecutionContext): Future[T] = {
    val fut = promise()
    fut.flatMap { res =>
      if (success.predicate(res)) fut
      else orElse()
    }.recoverWith {
      case NonFatal(_) => recovery(fut)
    }
  }
}
ExNexu/retry
src/main/scala/Policy.scala
Scala
mit
6,101
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package kafka.server

import kafka.network.SocketServer
import kafka.log.LogManager
import kafka.utils._
import java.util.concurrent._
import atomic.AtomicBoolean
import org.I0Itec.zkclient.ZkClient
import kafka.controller.{ControllerStats, KafkaController}

/**
 * Represents the lifecycle of a single Kafka broker. Handles all functionality required
 * to start up and shutdown a single Kafka node.
 */
class KafkaServer(val config: KafkaConfig, time: Time = SystemTime) extends Logging {
  this.logIdent = "[Kafka Server " + config.brokerId + "], "
  // guards against concurrent/duplicate shutdown; reset on every startup()
  private var isShuttingDown = new AtomicBoolean(false)
  // released at the end of shutdown(); awaitShutdown() blocks on it
  private var shutdownLatch = new CountDownLatch(1)
  var socketServer: SocketServer = null
  var requestHandlerPool: KafkaRequestHandlerPool = null
  var logManager: LogManager = null
  var kafkaZookeeper: KafkaZooKeeper = null
  var replicaManager: ReplicaManager = null
  private var apis: KafkaApis = null
  var kafkaController: KafkaController = null
  val kafkaScheduler = new KafkaScheduler(4)
  var zkClient: ZkClient = null

  /**
   * Start up API for bringing up a single instance of the Kafka server.
   * Instantiates the LogManager, the SocketServer and the request handlers - KafkaRequestHandlers
   */
  def startup() {
    info("starting")
    // fresh state so the server can be restarted after a previous shutdown
    isShuttingDown = new AtomicBoolean(false)
    shutdownLatch = new CountDownLatch(1)

    /* start scheduler */
    kafkaScheduler.startup

    /* start log manager */
    logManager = new LogManager(config, kafkaScheduler, time)
    logManager.startup()

    socketServer = new SocketServer(config.brokerId,
                                    config.port,
                                    config.numNetworkThreads,
                                    config.numQueuedRequests,
                                    config.maxSocketRequestSize)
    socketServer.startup

    /* start client */
    kafkaZookeeper = new KafkaZooKeeper(config)
    // starting relevant replicas and leader election for partitions assigned to this broker
    kafkaZookeeper.startup

    info("Connecting to ZK: " + config.zkConnect)

    replicaManager = new ReplicaManager(config, time, kafkaZookeeper.getZookeeperClient, kafkaScheduler, logManager)
    kafkaController = new KafkaController(config, kafkaZookeeper.getZookeeperClient)
    apis = new KafkaApis(socketServer.requestChannel, replicaManager, kafkaZookeeper.getZookeeperClient, config.brokerId)
    requestHandlerPool = new KafkaRequestHandlerPool(config.brokerId, socketServer.requestChannel, apis, config.numIoThreads)
    Mx4jLoader.maybeLoad

    // start the replica manager
    replicaManager.startup()
    // start the controller
    kafkaController.startup()
    // register metrics beans
    registerStats()
    info("started")
  }

  /**
   * Forces some dynamic jmx beans to be registered on server startup.
   */
  private def registerStats() {
    BrokerTopicStats.getBrokerAllTopicStats()
    ControllerStats.offlinePartitionRate
    ControllerStats.uncleanLeaderElectionRate
  }

  /**
   * Shutdown API for shutting down a single instance of the Kafka server.
   * Shuts down the LogManager, the SocketServer and the log cleaner scheduler thread
   */
  def shutdown() {
    info("shutting down")
    // CAS ensures only one caller performs the shutdown sequence
    val canShutdown = isShuttingDown.compareAndSet(false, true);
    if (canShutdown) {
      // each step is wrapped in Utils.swallow so one failing component
      // does not prevent the remaining components from shutting down
      if(requestHandlerPool != null)
        Utils.swallow(requestHandlerPool.shutdown())
      Utils.swallow(kafkaScheduler.shutdown())
      if(apis != null)
        Utils.swallow(apis.close())
      if(kafkaZookeeper != null)
        Utils.swallow(kafkaZookeeper.shutdown())
      if(replicaManager != null)
        Utils.swallow(replicaManager.shutdown())
      if(socketServer != null)
        Utils.swallow(socketServer.shutdown())
      if(logManager != null)
        Utils.swallow(logManager.shutdown())
      if(kafkaController != null)
        Utils.swallow(kafkaController.shutdown())

      // unblock any threads waiting in awaitShutdown()
      shutdownLatch.countDown()
      info("shut down completed")
    }
  }

  /**
   * After calling shutdown(), use this API to wait until the shutdown is complete
   */
  def awaitShutdown(): Unit = shutdownLatch.await()

  def getLogManager(): LogManager = logManager
}
dchenbecker/kafka-sbt
core/src/main/scala/kafka/server/KafkaServer.scala
Scala
apache-2.0
5,058
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This file is part of Rudder.
*
* Rudder is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU General Public License version 3, the copyright holders add
* the following Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU General
* Public License version 3, when you create a Related Module, this
* Related Module is not considered as a part of the work and may be
* distributed under the license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* Rudder is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Rudder. If not, see <http://www.gnu.org/licenses/>.
*
*************************************************************************************
*/

package com.normation.cfclerk.services

import org.eclipse.jgit.api.Git
import org.eclipse.jgit.lib.Repository
import org.eclipse.jgit.lib.ObjectId

/**
 * A service that gives access to the Git
 * porcelain API of the repository.
 */
trait GitRepositoryProvider {
  /**
   * Obtain access to JGit porcelain API.
   */
  def git : Git

  /** The low-level JGit repository backing the porcelain API. */
  def db : Repository
}

/**
 * A service that allows to know what is the
 * commit used as "the current one", and what
 * is the last available in the repository.
 *
 * A method also allowed to change the commit
 * to use as reference.
 *
 * All object Ids in that service are RevTree ids,
 * and can be used for example in:
 * <pre>
 * val tw = new TreeWalk(repository)
 * tw.reset(objectId)
 * </pre>
 */
trait GitRevisionProvider {
  /**
   * Return the last RevTree objectId that
   * is accessible in the repository.
   */
  def getAvailableRevTreeId : ObjectId

  /**
   * Return the commit currently used as the
   * "current version".
   */
  def currentRevTreeId : ObjectId

  /**
   * Update the reference to current commit
   * to the provided one.
   */
  def setCurrentRevTreeId(id:ObjectId) : Unit
}
armeniaca/rudder
rudder-core/src/main/scala/com/normation/cfclerk/services/GitUtils.scala
Scala
gpl-3.0
2,846
package mypipe.util

import org.slf4j.LoggerFactory

object Lists {

  protected val log = LoggerFactory.getLogger(getClass)

  /**
   * Applies `listOp` to every element of `list` in order, short-circuiting on
   * the first element for which processing ultimately fails.
   *
   * When `listOp` returns false or throws, `onError` is consulted; its result
   * decides whether processing continues (true) or stops (false). An exception
   * thrown by `listOp` is logged and handled by `onError` as well.
   *
   * @param list    elements to process
   * @param listOp  per-element operation; true means success
   * @param onError handler invoked with the full list and the failing element;
   *                returning false aborts processing (fail-fast)
   * @return true if every element was processed successfully (or each failure
   *         was tolerated by `onError`), false otherwise
   */
  def processList[T](
    list: List[T],
    listOp: (T) => Boolean,
    onError: (List[T], T) => Boolean
  ): Boolean =
    list.forall { elem =>
      val succeeded =
        try listOp(elem)
        catch {
          case e: Exception =>
            log.error("Unhandled exception while processing list", e)
            onError(list, elem)
        }

      if (succeeded) true
      else onError(list, elem) // fail-fast if the error handler returns false
    }
}
mardambey/mypipe
mypipe-api/src/main/scala/mypipe/util/Lists.scala
Scala
apache-2.0
600
package scuff.web

import scuff.Document
import scuff.js._
import scuff.concurrent.{ ResourcePool, Threads, UnboundedResourcePool }

import java.io.Writer
import java.net.URL
import java.util.concurrent.ScheduledFuture
import java.util.concurrent.ScheduledExecutorService

import javax.script._

import scala.concurrent.duration._
import scala.util.control.NonFatal

object CoffeeScriptServlet {

  import CoffeeScriptCompiler._

  // Pre-canned compiler configurations for the three supported dialects.
  def LegacyConfig(engineCtor: () => ScriptEngine) = new Config(
    version = Version.Legacy,
    options = Map(bare -> false),
    newEngine = engineCtor,
    useDirective = Use.Strict)

  def CS2Config(engineCtor: () => ScriptEngine) = new Config(
    version = Version.CS2,
    options = Map(bare -> false),
    newEngine = engineCtor,
    useDirective = Use.Strict,
    compiler = Version.CS2.compiler _)

  def IcedConfig(engineCtor: () => ScriptEngine) = new Config(
    version = Version.Iced,
    options = Map(bare -> false, runtime -> "window"),
    newEngine = engineCtor,
    useDirective = Use.Strict,
    compiler = Version.Iced.compiler _)
}

/**
 * Perform on-the-fly conversion of CoffeeScript to JavaScript.
 *
 * Use with [[scuff.web.Ice]] for Iced CoffeeScript.
 */
abstract class CoffeeScriptServlet extends FileServlet {

  private lazy val ScriptEngineMgr = new ScriptEngineManager

  /** Number of compilers to initialize at startup. */
  protected def initCompilers = 0
  protected def engineName = "javascript"
  protected def newJavascriptEngine() = ScriptEngineMgr.getEngineByName(engineName)
  protected def newCoffeeCompiler() = new CoffeeScriptCompiler(CoffeeScriptServlet.CS2Config(newJavascriptEngine _))

  // Unbounded pool of compilers; evicted instances are reported via onCompilerTimeout.
  private[this] val compilerPool = {
    implicit val lifecycle = ResourcePool.onEviction(onCompilerTimeout) {
      case NonFatal(_) => false
    }
    new UnboundedResourcePool(createCompiler, minResources = 0, description = engineName)
  }

  // Creates one compiler and logs how long engine initialization took.
  private def createCompiler = {
    val started = System.currentTimeMillis()
    val comp = newCoffeeCompiler()
    val dur = System.currentTimeMillis() - started
    log(s"Initialized $comp in $dur ms.")
    comp
  }

  private def onCompilerTimeout(comp: CoffeeScriptCompiler): Unit = {
    log(s"$comp instance removed from pool. ${compilerPool.availableCount} available.")
  }

  /** Compiler pool eviction scheduler. */
  protected def evictionScheduler: Option[ScheduledExecutorService]

  // Handle used to cancel the eviction task in destroy().
  @volatile private[this] var eviction: Option[ScheduledFuture[_]] = None

  override def init(): Unit = {
    super.init()
    val initCompilers = this.initCompilers
    if (initCompilers > 0) {
      // Warm up the pool off the servlet thread; failures are logged, not fatal.
      Threads.onBlockingThread(s"$engineName-initializer") {
        for (_ <- 1 to initCompilers) {
          compilerPool push createCompiler
        }
      }.failed.foreach(th => log("Failure during compiler initialization", th))(Threads.PiggyBack)
    }
    this.eviction = evictionScheduler.map(compilerPool.startEviction(120.minutes, _))
  }

  override def destroy(): Unit = {
    eviction.foreach(_.cancel(true))
    compilerPool.drain()
    super.destroy()
  }

  // Borrows a pooled compiler for the duration of one compilation.
  protected def coffeeCompilation(coffeeScript: String, filename: String): String =
    compilerPool.use(_.compile(coffeeScript, filename))

  // NOTE(review): `script` is an InputStream but coffeeCompilation takes a String;
  // presumably an implicit conversion from scuff.js._ bridges this — confirm.
  private def compile(path: String, url: URL): String = {
    val started = System.currentTimeMillis()
    val script = url.openStream()
    try coffeeCompilation(script, path) finally {
      script.close()
      val dur = System.currentTimeMillis() - started
      log(s"Compiled $path in $dur ms.")
    }
  }

  /** Wraps the compiled JavaScript as a servable [[scuff.Document]]. */
  protected def toDocument(path: String, url: URL): Document = new Document {
    def dump(out: Writer): Unit = out write compile(path, url)
    def mimeType: String = "application/javascript"
    def encoding: String = "UTF-8"
  }
}

/** Mixin that switches the servlet to the Iced CoffeeScript dialect. */
trait Ice {
  self: CoffeeScriptServlet =>
  final override def newCoffeeCompiler() = new CoffeeScriptCompiler(CoffeeScriptServlet.IcedConfig(newJavascriptEngine _))
}
nilskp/scuff
src/main/scala/scuff/web/CoffeeScriptServlet.scala
Scala
mit
3,929
package doodle

import org.scalacheck.{Arbitrary, Gen}

/** ScalaCheck generators and Arbitrary instances for doodle core types. */
object arbitrary {
  import doodle.core._
  import doodle.core.Color.{RGBA,HSLA}

  // NOTE(review): range (-36.0, 36.0) — unit of Angle(..) not visible here; confirm.
  val genAngle: Gen[Angle] =
    Gen.choose(-36.0, 36.0).map { angle => Angle(angle) }

  val genPoint: Gen[Point] =
    for {
      x <- Gen.choose(-1000.0, 1000.0)
      y <- Gen.choose(-1000.0, 1000.0)
    } yield Point.cartesian(x, y)

  // Values in [0.0, 1.0], clipped into the Normalized range.
  val genNormalized: Gen[Normalized] =
    Gen.choose(0.0, 1.0) map Normalized.clip

  // Values in [0, 255], clipped into the UnsignedByte range.
  val genUnsignedByte: Gen[UnsignedByte] =
    Gen.choose(0, 255) map (UnsignedByte.clip _)

  val genHSLA: Gen[HSLA] =
    for {
      h <- genAngle
      s <- genNormalized
      l <- genNormalized
      a <- genNormalized
    } yield HSLA(h.normalize, s, l, a)

  val genRGBA: Gen[RGBA] =
    for {
      r <- genUnsignedByte
      g <- genUnsignedByte
      b <- genUnsignedByte
      a <- genNormalized
    } yield RGBA(r, g, b, a)

  // Simple transformation descriptors used as generated test inputs.
  final case class Translate(x: Double, y: Double)
  final case class Scale(x: Double, y: Double)
  final case class Rotate(angle: Angle)

  /** The dimensions of a screen: positive integers greater than 0 and less than or equal to 4000. */
  final case class Screen(width: Int, height: Int)

  val genScreen: Gen[Screen] =
    for {
      w <- Gen.choose(1, 4000)
      h <- Gen.choose(1, 4000)
    } yield Screen(w, h)

  implicit val arbitraryPoint: Arbitrary[Point] = Arbitrary( genPoint )
  implicit val arbitraryAngle: Arbitrary[Angle] = Arbitrary( genAngle )
  implicit val arbitraryScale: Arbitrary[Scale] = Arbitrary( genPoint.map { pt => Scale(pt.x, pt.y) } )
  implicit val arbitraryRotate: Arbitrary[Rotate] = Arbitrary( genAngle.map { d => Rotate(d) } )
  implicit val arbitraryTranslate: Arbitrary[Translate] = Arbitrary( genPoint.map { pt => Translate(pt.x, pt.y) } )
  implicit val arbitraryScreen: Arbitrary[Screen] = Arbitrary( genScreen )
  implicit val arbitraryHSLA: Arbitrary[HSLA] = Arbitrary( genHSLA )
  implicit val arbitraryRGBA: Arbitrary[RGBA] = Arbitrary( genRGBA )
}
underscoreio/doodle
core/shared/src/test/scala/doodle/arbitrary.scala
Scala
apache-2.0
2,053
/*
 * Copyright (C) 2005, The Beangle Software.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
package org.beangle.commons.conversion.converter

import org.beangle.commons.conversion.Converter
import org.beangle.commons.conversion.impl.ConverterFactory

import java.{ util => ju }
import scala.collection.{ immutable, mutable }

/**
 * Converter factory turning `java.util.Map` values into Scala maps.
 * Mutable and generic targets share a wrapping converter; the immutable
 * target gets a converter that copies into an immutable map.
 */
object MapConverterFactory extends ConverterFactory[ju.Map[_, _], scala.collection.Map[_, _]] {

  register(classOf[mutable.Map[_, _]], new MapConverter(false))
  register(classOf[collection.Map[_, _]], new MapConverter(false))
  register(classOf[immutable.Map[_, _]], new MapConverter(true))

  import scala.jdk.javaapi.CollectionConverters.asScala

  /**
   * Wraps a Java map as a Scala map via asScala.
   *
   * @param immutable when true the wrapped view is copied with toMap,
   *                  producing a detached immutable map
   */
  class MapConverter(immutable: Boolean) extends Converter[ju.Map[_, _], collection.Map[_, _]] {

    override def apply(it: ju.Map[_, _]): collection.Map[_, _] = {
      // Type patterns never match null, so the null case below is reachable
      // and yields null rather than throwing.
      val result: collection.Map[_, _] = it match {
        case cm: ju.concurrent.ConcurrentMap[_, _] => asScala(cm)
        case p: ju.Properties => asScala(p)
        case m: ju.Map[_, _] => asScala(m)
        case null => null.asInstanceOf[collection.Map[_, _]]
      }
      if (immutable) if (null == result) null else result.toMap
      else result
    }
  }
}
beangle/commons
core/src/main/scala/org/beangle/commons/conversion/converter/MapConverterFactory.scala
Scala
lgpl-3.0
1,872
package im.actor.server.bot

import akka.actor._
import akka.cluster.singleton.{ ClusterSingletonManager, ClusterSingletonManagerSettings }
import akka.http.scaladsl.util.FastFuture
import akka.pattern.pipe
import akka.stream.scaladsl.{ Sink, Source }
import akka.stream.{ ActorMaterializer, OverflowStrategy }
import akka.util.Timeout
import im.actor.botkit.BotBase
import im.actor.config.ActorConfig
import im.actor.server.dialog.DialogExtension

import scala.concurrent.Future
import scala.util.Failure

private object InternalBot {
  // Sent to self once the bot user exists and an auth session is available.
  final case class Initialized(authId: Long, authSid: Int)

  // Runs the bot as a cluster-wide singleton; PoisonPill terminates it on handover.
  def start(props: Props)(implicit system: ActorSystem) = system.actorOf(ClusterSingletonManager.props(
    props,
    PoisonPill,
    ClusterSingletonManagerSettings(system)
  ))
}

/**
 * Base class for server-internal bots.
 *
 * On construction it ensures the bot user exists, obtains an auth session and
 * then wires a request stream into the bot server blueprint; only after the
 * Initialized message arrives does the actor switch to its working behavior.
 */
abstract class InternalBot(userId: Int, nickname: String, name: String, isAdmin: Boolean) extends BotBase {

  import InternalBot._
  import context.dispatcher

  private implicit val mat = ActorMaterializer()(context.system)

  override protected implicit val timeout = Timeout(ActorConfig.defaultTimeout)

  protected val botExt = BotExtension(context.system)
  protected val dialogExt = DialogExtension(context.system)

  // kick off user-existence check / creation and session lookup
  init()

  def receive = {
    case Initialized(authId, authSid) ⇒
      log.warning("Initialized bot {} {} {}", userId, nickname, name)

      // Buffered (100, fail on overflow) request source piped through the bot
      // server flow; stream termination kills this actor via Sink.actorRef.
      val rqSource = Source.actorRef(100, OverflowStrategy.fail)
        .via(botExt.botServerBlueprint.flow(userId, authId, authSid))
        .to(Sink.actorRef(self, Kill))
        .run()

      setRqSource(rqSource)

      context become workingBehavior
    case Status.Failure(e) ⇒
      log.error(e, "Failed to initialize bot")
  }

  // Ensures the bot user exists (creating it when missing), then resolves an
  // auth session and pipes Initialized back to self.
  private def init() = {
    log.warning("Initiating bot {} {} {}", userId, nickname, name)
    val existence = botExt.exists(userId) flatMap { exists ⇒
      if (exists) {
        log.warning("Bot already exists")
        FastFuture.successful(())
      } else {
        log.warning("Creating user {}", userId)
        botExt.create(userId, nickname, name, isAdmin) map (_ ⇒ ()) andThen {
          case Failure(e) ⇒ log.error(e, "Failed to create bot user")
        }
      }
    }

    (for {
      _ ← existence
      session ← botExt.getAuthSession(userId)
    } yield Initialized(session.authId, session.id)) pipeTo self
  }

  override protected def onStreamFailure(cause: Throwable): Unit = {
    // rethrow so supervision restarts the bot with a fresh stream
    log.error(cause, "Bot stream failure")
    throw cause
  }
}
ufosky-server/actor-platform
actor-server/actor-bots/src/main/scala/im/actor/server/bot/InternalBot.scala
Scala
agpl-3.0
2,486
package basinet

import scala.annotation.tailrec

object LineWire {
  // Codes 10..13 are LF, VT, FF, CR — all treated as line terminators.
  def isNewline(c: Character) = {
    val code = c.toInt
    if(code >= 10 && code <= 13) true else false
  }
}

package line {
  /** Accumulates characters into one line; poppable once a terminator is seen. */
  final class Source {
    private[this] var _builder = new StringBuilder
    private[this] var _complete = false
    // tracks a trailing CR so a following LF is swallowed (CRLF counts once)
    private[this] var _endsWithCarriageReturn = false

    def pushable = !_complete

    def push(value: Character) {
      if(LineWire.isNewline(value)) {
        // LF immediately after CR completes nothing new — just clear the flag
        if(_endsWithCarriageReturn && value == '\\n') _endsWithCarriageReturn = false
        else {
          _complete = true
          if(value.toInt == 13) _endsWithCarriageReturn = true
        }
      }
      else { _builder.append(value); _endsWithCarriageReturn = false }
    }

    // Bulk append of already-screened (terminator-free) characters.
    def push(buffer: java.nio.CharBuffer) { _builder.append(buffer) }

    def poppable = _complete

    // Returns the finished line and resets for the next one.
    def pop = {
      val result = _builder.toString
      _builder = new StringBuilder
      _complete = false

      result
    }

    /** Consumes characters from `source` up to (and including) a terminator. */
    def write(source: nio.BufferSource[java.nio.CharBuffer, Character]) {
      if(!pushable) return

      if(LineWire.isNewline(source.get(0))) push(source.pop)
      else {
        val buffer = source.buffer
        val start = source.buffer.position

        // scan forward to the next terminator (or end of readable data)
        var position = start
        while(position < buffer.limit && !LineWire.isNewline(buffer.get(position)))
          position += 1

        // push the terminator-free prefix in one bulk append
        val limitedBuffer = buffer.duplicate
        limitedBuffer.limit(position)
        push(limitedBuffer)

        source.drop(position - start)
      }
    }
  }

  /** Holds one outgoing line (plus `newline`) and drains it into char buffers. */
  class Sink(newline: String) {
    private[this] var _string: String = null
    private[this] var _position = 0

    def pushable = (_string == null)

    def push(value: String) { _string = value.concat(newline); _position = 0 }

    def poppable = (_string != null)

    // Copies as much of the pending line as fits; clears it when fully written.
    def pop(buffer: java.nio.CharBuffer) = {
      val writeLength = scala.math.min(buffer.limit - buffer.position,
        _string.length - _position)

      buffer.append(_string, _position, _position + writeLength)
      _position += writeLength

      if(_position == _string.length) _string = null
      writeLength
    }
  }
}

/** Wire converting a character stream into whole lines. */
class LineWriter extends Wire[nio.BufferSource[java.nio.CharBuffer, Character],
                             any.BufferSink[String]] {
  type Source = nio.BufferSource[java.nio.CharBuffer, Character]
  type Sink = any.BufferSink[String]

  private[this] val buffer = new line.Source

  @tailrec
  private[this] def _write(from: Source, to: Sink): basinet.Result = {
    if(!to.pushable) return Result.OVERFLOW
    if(!from.poppable) return Result.UNDERFLOW

    buffer.write(from)
    if(buffer.poppable) to.push(buffer.pop)

    _write(from, to)
  }

  override def _convert(from: Source, to: Sink) = _write(from, to)
}

/** Wire converting whole lines into a character stream, appending `newline`. */
class LineReader(newline: String)
    extends Wire[any.BufferSource[String],
                 nio.BufferSink[java.nio.CharBuffer, Character]] {
  type Source = any.BufferSource[String]
  type Sink = nio.BufferSink[java.nio.CharBuffer, Character]

  private[this] val buffer = new line.Sink(newline)

  @tailrec
  private[this] def _write(from: Source, to: Sink): Result = {
    if(!to.pushable) return Result.OVERFLOW
    if(!from.poppable && !buffer.poppable) return Result.UNDERFLOW

    if(buffer.pushable) buffer.push(from.pop)

    // write through a duplicate, then advance the sink by the amount written
    val writen = buffer.pop(to.buffer.duplicate)
    to.drop(writen)

    _write(from, to)
  }

  override def _convert(from: Source, to: Sink) = _write(from, to)
}
sopindm/basinet
scala/LineWire.scala
Scala
epl-1.0
3,442
package wow.realm.protocol.payloads

import wow.realm.protocol._
import scodec.Codec
import scodec.codecs._

/**
 * Server time sync request
 *
 * @param number sequence number; must be non-negative so it fits the
 *               unsigned 32-bit wire representation
 */
case class ServerTimeSyncRequest(number: Long) extends Payload with ServerSide {
  require(number >= 0)
}

object ServerTimeSyncRequest {
  // Binds this payload to its server opcode.
  implicit val opCodeProvider: OpCodeProvider[ServerTimeSyncRequest] = OpCodes.STimeSyncRequest

  // Wire format: a single little-endian unsigned 32-bit integer.
  implicit val codec: Codec[ServerTimeSyncRequest] = ("number" | uint32L).as[ServerTimeSyncRequest]
}
SKNZ/SpinaciCore
wow/core/src/main/scala/wow/realm/protocol/payloads/ServerTimeSyncRequest.scala
Scala
mit
484
package chandu0101.scalajs.react.components.util

import japgolly.scalajs.react.TopNode
import org.scalajs.dom.Event

import scala.scalajs.js
import scala.scalajs.js.Dynamic.{global => g}

/** Browser feature detection for CSS transition/animation end event names. */
object CssEvents {

  // Probes a scratch <div> for the first supported style property and returns
  // the mapped event name. NOTE(review): relies on small-Map insertion order
  // for probe priority, and `.head` throws if no property is supported —
  // confirm that is acceptable on target browsers.
  def testSupportedProps(props: Map[String, String]) = {
    val el = g.document.createElement("div")
    props.view.filter {
      case (key, value) => !js.isUndefined(el.style.selectDynamic(key))
    }.head._2
  }

  def transitionEndEventName = testSupportedProps(Map(
    "transition" -> "transitionend",
    "OTransition" -> "otransitionend",
    "MozTransition" -> "transitionend",
    "WebkitTransition" -> "webkitTransitionEnd"
  ))

  def animationEndEventName = testSupportedProps(Map(
    "animation" -> "animationend",
    "-o-animation" -> "oAnimationEnd",
    "-moz-animation" -> "animationend",
    "-webkit-animation" -> "webkitAnimationEnd"
  ))

  // One-shot listener for the browser-specific transition-end event.
  def onTransitionEnd(element: TopNode, callback: Function1[Event, _]) = {
    Events.once(element, transitionEndEventName, callback)
  }

  // One-shot listener for the browser-specific animation-end event.
  def onAnimationEnd(element: TopNode, callback: Function1[Event, _]) = {
    Events.once(element, animationEndEventName, callback)
  }
}
mproch/scalajs-react-components
core/src/main/scala/chandu0101/scalajs/react/components/util/CssEvents.scala
Scala
apache-2.0
1,143
package latis.dm

import latis.data.Data
import latis.metadata.Metadata
import java.nio.ByteBuffer
import latis.util.DataUtils

/**
 * A single variable (Scalar) that represents an arbitrary binary 'blob'.
 */
trait Binary extends Scalar

object Binary {

  /**
   * Construct a Binary variable from a ByteBuffer.
   * Make sure it has been flipped in case the client was lazy.
   * Update the length metadata assuming that the 8 byte
   * termination marker (DataUtils.nullMark) is present.
   */
  def apply(buffer: ByteBuffer): Binary = {
    //If the position is not 0, assume that the buffer hasn't had its limit set.
    //Set the limit and rewind.
    if (buffer.position() > 0) buffer.flip

    //define length in metadata
    //Note: assume bytes already have termination marker
    //TODO: check before assuming, but further slow down
    val length = buffer.capacity - DataUtils.nullMark.length
    //length is the maximum number of valid bytes (not counting the termination marker)
    val md = Metadata(Map("length" -> length.toString))
    new AbstractScalar(md, Data(buffer)) with Binary
  }

  /**
   * Construct a Binary variable from an array of bytes.
   */
  def apply(bytes: Array[Byte]): Binary = Binary(ByteBuffer.wrap(bytes))

  /**
   * Construct a Binary variable from a String.
   */
  def apply(string: String): Binary = Binary(string.getBytes)

  /**
   * Construct a Binary variable with no data.
   * Used as a template during Dataset construction.
   */
  def apply(md: Metadata): Binary = new AbstractScalar(md) with Binary

  /**
   * Construct a Binary variable with Metadata and Data.
   * This will trust whatever length is defined in the Metadata
   * so the user has the ability to break this, but also the flexibility
   * on whether to include the byte array termination marker (DataUtils.nullMark).
   */
  def apply(md: Metadata, data: Data): Binary = {
    new AbstractScalar(md, data) with Binary
  }

  /**
   * Construct a Binary variable from a ByteBuffer with Metadata.
   * Make sure it has been flipped in case the client was lazy.
   * This will trust whatever length is defined in the Metadata
   * so the user has the ability to break this, but also the flexibility
   * on whether to include the byte array termination marker (DataUtils.nullMark).
   */
  def apply(md: Metadata, buffer: ByteBuffer): Binary = {
    //If the position is not 0, assume that the buffer hasn't had its limit set.
    //Set the limit and rewind.
    if (buffer.position() > 0) buffer.flip
    new AbstractScalar(md, Data(buffer)) with Binary
  }

  /**
   * Expose the data represented by this Variable as an array of bytes.
   * Extractor for pattern matching: always succeeds with a defensive copy.
   */
  def unapply(b: Binary): Option[Array[Byte]] = {
    val bb = b.getData.getByteBuffer
    bb.rewind //make sure we are starting at the beginning
    val bytes = bb.array //copy to protect immutability, potential performance hit
    Some(bytes.clone)
  }
}
dlindhol/LaTiS
src/main/scala/latis/dm/Binary.scala
Scala
epl-1.0
2,953
package com.scrumwall.dao.item

import com.scrumwall.domain.Item
import com.scrumwall.domain.Column
import com.scrumwall.dao.BaseDao
import com.scrumwall.identifiers.ItemRemoveMode
import java.util.List

/** Data-access operations for scrum wall items. */
trait ItemDao extends BaseDao {

  /** Loads a single item by its id. */
  def get(itemId: Int) : Item

  /**
   * Based on whether the item has id or not, either saves or updates it.
   */
  def save(item: Item) : Item

  /** All items belonging to the given column. */
  def getItems(columnId: Int) : List[Item]

  /** All items belonging to the given sprint. */
  def getForSprint(sprintId: Int) : List[Item]

  /** Deletes the item with the given id. */
  def remove(id: Int)

  /**
   * @see ItemService.moveFromColumn()
   */
  def moveFromColumn(column: Column, direction: ItemRemoveMode)

  /** Removes all items from the given column. */
  def removeFromColumn(columnId: Int)
}
johnnyyen/scrumwall
src/main/java/com/scrumwall/dao/item/ItemDao.scala
Scala
gpl-3.0
646
package my.delite.framework.codegen.cuda

import my.delite.framework.codegen.MyTarget

/** Code-generation target identifying the CUDA backend by name. */
trait MyTargetCuda extends MyTarget {
  import IR._

  // Target name used to select this backend.
  val name = "Cuda"
}
alsam/simple-delite-project
src/cudagen/simple/mydelite/MyTargetCuda.scala
Scala
mit
162
import scala.util.parsing.combinator._ class ExprParser extends RegexParsers { val number = "[0-9]+".r def expr: Parser[Int] = term ~ rep( ("+" | "-") ~ term ^^ { case "+" ~ t => t case "-" ~ t => -t }) ^^ { case t ~ r => t + r.sum } def term: Parser[Int] = factor into { first => rep("*" ~> factor) ^^ { first * _.product } } def factor: Parser[Int] = log(number)("number") ^^ { _.toInt } | "(" ~> expr <~ ")" } object Main extends App { val parser = new ExprParser val result = parser.parseAll(parser.expr, "3-4*5") if (result.successful) println(result.get) }
yeahnoob/scala-impatient-2e-code
src/ch20/sec07/ExprParser.scala
Scala
gpl-3.0
609
package de.frosner.ddq.reporters import java.io.PrintStream import de.frosner.ddq.core.CheckResult /** * A class which produces a markdown report of [[CheckResult]]. * * @param stream The [[java.io.PrintStream]] to put the output. The stream will not be closed internally and can * be reused. **/ case class MarkdownReporter(stream: PrintStream) extends HumanReadableReporter { /** * Output markdown report of a given checkResult to the stream passed to the constructor * @param checkResult The [[CheckResult]] to be reported */ override def report(checkResult: CheckResult, header: String, prologue: String): Unit = { stream.println(s"**$header**\n") stream.println(s"$prologue\n") if (checkResult.constraintResults.nonEmpty) { checkResult.constraintResults.foreach { case (_, constraintResult) => stream.println(s"- *${constraintResult.status.stringValue.toUpperCase}*: " + constraintResult.message) } } else { stream.println("Nothing to check!") } stream.println("") } }
FRosner/drunken-data-quality
src/main/scala/de/frosner/ddq/reporters/MarkdownReporter.scala
Scala
apache-2.0
1,073
package com.twitter.finagle.filter import com.twitter.concurrent.AsyncSemaphore import com.twitter.finagle._ import com.twitter.finagle.stats.{NullStatsReceiver, StatsReceiver} import com.twitter.util.{Future, Return, Throw} object RequestSemaphoreFilter { val role = Stack.Role("RequestConcurrencyLimit") case class Param(sem: Option[AsyncSemaphore]) { def mk(): (Param, Stack.Param[Param]) = (this, Param.param) } object Param { implicit val param = Stack.Param(Param(None)) } } /** * A [[com.twitter.finagle.Filter]] that restricts request concurrency according * to the given [[com.twitter.concurrent.AsyncSemaphore]]. Requests that are * unable to acquire a permit are failed immediately with a [[com.twitter.finagle.Failure]] * that signals a restartable or idempotent process. */ class RequestSemaphoreFilter[Req, Rep](sem: AsyncSemaphore, stats: StatsReceiver) extends SimpleFilter[Req, Rep] { def this(sem: AsyncSemaphore) = this(sem, NullStatsReceiver) private[this] val requestConcurrency = { val max = sem.numInitialPermits stats.addGauge("request_concurrency") { max - sem.numPermitsAvailable } } private[this] val requestQueueSize = stats.addGauge("request_queue_size") { sem.numWaiters } def apply(req: Req, service: Service[Req, Rep]): Future[Rep] = sem.acquire().transform { case Return(permit) => service(req).ensure { permit.release() } case Throw(noPermit) => Future.exception(Failure.rejected(noPermit)) } }
luciferous/finagle
finagle-core/src/main/scala/com/twitter/finagle/filter/RequestSemaphoreFilter.scala
Scala
apache-2.0
1,518
package org.jetbrains.plugins.scala package lang package psi package api package expr import com.intellij.psi.PsiElement import org.jetbrains.plugins.scala.lang.psi.api.statements.params._ /** * @author Alexander Podkhalyuzin, ilyas */ trait ScFunctionExpr extends ScExpression with ScControlFlowOwner { def parameters: Seq[ScParameter] def params: ScParameters def result: Option[ScExpression] def hasParentheses: Boolean def leftParen: Option[PsiElement] def rightParen: Option[PsiElement] def isContext: Boolean override protected def acceptScala(visitor: ScalaElementVisitor): Unit = { visitor.visitFunctionExpression(this) } } object ScFunctionExpr { def unapply(it: ScFunctionExpr): Some[(Seq[ScParameter], Option[ScExpression])] = Some(it.parameters, it.result) }
JetBrains/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/api/expr/ScFunctionExpr.scala
Scala
apache-2.0
813
package org.workcraft.dom.visual.connections import java.awt.Color import java.awt.Graphics2D import java.awt.Shape import java.awt.geom.Point2D import java.awt.geom.Rectangle2D import org.workcraft.graphics.ColorisableGraphicalContent import org.workcraft.graphics.Touchable import org.workcraft.scala.Expressions._ import org.workcraft.graphics.DrawRequest import org.workcraft.graphics.Coloriser import org.workcraft.graphics.TouchableC import org.workcraft.graphics.BoundingBox import org.workcraft.graphics.PartialCurveInfo import org.workcraft.graphics.VisualCurveProperties import java.awt.geom.Path2D import java.awt.geom.AffineTransform import java.awt.BasicStroke import org.workcraft.graphics.Geometry.buildConnectionCurveInfo import org.workcraft.graphics.BoundedColorisableGraphicalContent import org.workcraft.graphics.GraphicalContent import org.workcraft.graphics.Java2DDecoration._ import org.workcraft.graphics.Colorisation object VisualConnectionGui { val HitThreshold = 0.2 def drawArrowHead(g: Graphics2D, color: Color, headPosition: Point2D, orientation: Double, length: Double, width: Double) = { val arrowShape = new Path2D.Double() arrowShape.moveTo(-length, -width / 2) arrowShape.lineTo(-length, width / 2) arrowShape.lineTo(0, 0) arrowShape.closePath() val arrowTransform = new AffineTransform() arrowTransform.translate(headPosition.getX(), headPosition.getY()) arrowTransform.rotate(orientation) val transformedArrowShape = arrowTransform.createTransformedShape(arrowShape) g.setColor(color) g.setStroke(new BasicStroke(width.toFloat)) g.fill(transformedArrowShape) } def arrowHead(color: Color, headPosition: Point2D, orientation: Double, length: Double, width: Double) = { val arrowShape = new Path2D.Double() arrowShape.moveTo(-length, -width / 2) arrowShape.lineTo(-length, width / 2) arrowShape.lineTo(0, 0) arrowShape.closePath() val arrowTransform = new AffineTransform() arrowTransform.translate(headPosition.getX(), headPosition.getY()) 
arrowTransform.rotate(orientation) val transformedArrowShape = arrowTransform.createTransformedShape(arrowShape) BoundedColorisableGraphicalContent( ColorisableGraphicalContent(colorisation => GraphicalContent(g => { g.setColor(Coloriser.colorise(color, colorisation.foreground)); g.setStroke(new BasicStroke(width.toFloat)); g.fill(transformedArrowShape) })), BoundingBox(transformedArrowShape.getBounds2D())) } case class ExprConnectionGui( shape: Expression[Touchable], graphicalContent: Expression[ColorisableGraphicalContent], parametricCurve: Expression[org.workcraft.graphics.ParametricCurve]) def makeConnectionTouchable(curve: org.workcraft.graphics.ParametricCurve, partial: PartialCurveInfo): TouchableC = new TouchableC(new Touchable { override def hitTest(point: Point2D.Double) = { val nearestT = curve.nearestPointT(point) nearestT < partial.tEnd && nearestT > partial.tStart && (curve.pointOnCurve(nearestT).distance(point) < HitThreshold) } override def boundingBox = BoundingBox(curve.boundingBox) }, curve.pointOnCurve(0.5)) def makeGraphicalContent(cInfo: PartialCurveInfo, connProps: VisualCurveProperties, connectionShape: Shape) = { new ColorisableGraphicalContent { override def draw(r: DrawRequest) { val g = r.graphics val color = Coloriser.colorise(connProps.color, r.colorisation.foreground) g.setColor(color) g.setStroke(connProps.stroke) g.draw(connectionShape) connProps.arrow.foreach(arrow => drawArrowHead(g, color, cInfo.arrowHeadPosition, cInfo.arrowOrientation, arrow.length, arrow.width)) } } } def getConnectionGui(properties: VisualCurveProperties, context: VisualConnectionContext, data: StaticVisualConnectionData) = { val curve = data match { case Polyline(cps) => PolylineGui.makeCurve(properties, context, cps) case Bezier(cp1, cp2) => BezierGui.makeCurve(context, cp1, cp2) } val curveInfo = buildConnectionCurveInfo(properties.arrow, context.c1.touchable, context.c2.touchable, curve, 0) val visiblePath = curve.shape(curveInfo.tStart, curveInfo.tEnd) 
val gc = makeGraphicalContent(curveInfo, properties, visiblePath) val touchable = makeConnectionTouchable(curve, curveInfo) new ConnectionGui(touchable, gc, curve) } }
tuura/workcraft-2.2
ScalaGraphEditorUtil/src/main/scala/org/workcraft/dom/visual/connections/VisualConnectionGui.scala
Scala
gpl-3.0
4,458
package org.jetbrains.plugins.scala.lang.parameterInfo.functionParameterInfo class FunctionParameterInfoConstructorsTest extends FunctionParameterInfoTestBase { override def getTestDataPath: String = s"${super.getTestDataPath}constructors/" def testAnnotations(): Unit = doTest() def testMemberModifiers(): Unit = doTest() def testCaseClass(): Unit = doTest() def testGenericScalaConstructor(): Unit = doTest() def testJavaConstructor(): Unit = doTest() def testNamingCaseClass(): Unit = doTest() def testScalaConstructor(): Unit = doTest() def testScalaConstructorA(): Unit = doTest() def testScalaConstructorB(): Unit = doTest() def testScalaConstructorC(): Unit = doTest() def testScalaConstructorD(): Unit = doTest() def testScalaConstructorE(): Unit = doTest() def testSelfInvocation(): Unit = doTest() def testThisScalaConstructor(): Unit = doTest() def testAliasedConstructor(): Unit = doTest() }
JetBrains/intellij-scala
scala/scala-impl/test/org/jetbrains/plugins/scala/lang/parameterInfo/functionParameterInfo/FunctionParameterInfoConstructorsTest.scala
Scala
apache-2.0
956
object ScalaHelloWorld extends App{ println("Hello World from Scala!!!") }
javavsscala/compare
day-1/src/main/scala/ScalaHelloWorld.scala
Scala
apache-2.0
78
package com.arcusys.valamis.lesson.service import java.io.File import com.arcusys.valamis.lesson.model.Lesson /** * Created by mminin on 16.02.16. */ trait CustomPackageUploader { def isValidPackage(fileName: String, packageFile: File): Boolean def upload(title: String, description: String, packageFile: File, courseId: Long, userId: Long, fileName: String): Lesson }
arcusys/Valamis
valamis-lesson/src/main/scala/com/arcusys/valamis/lesson/service/CustomPackageUploader.scala
Scala
gpl-3.0
447
package io.buoyant.linkerd package protocol import com.fasterxml.jackson.annotation.JsonIgnore import com.fasterxml.jackson.core.{JsonParser, TreeNode} import com.fasterxml.jackson.databind.annotation.JsonDeserialize import com.fasterxml.jackson.databind.{DeserializationContext, JsonDeserializer, JsonNode} import com.twitter.conversions.storage._ import com.twitter.finagle.buoyant.h2.{LinkerdHeaders, Request, Response} import com.twitter.finagle.buoyant.h2.param._ import com.twitter.finagle.client.StackClient import com.twitter.finagle.{Path, Stack, param} import com.twitter.util.Monitor import io.buoyant.config.PolymorphicConfig import io.buoyant.linkerd.protocol.h2.ResponseClassifiers import io.buoyant.router.{ClassifiedRetries, H2, RoutingFactory} import io.netty.handler.ssl.ApplicationProtocolNames import scala.collection.JavaConverters._ import scala.util.control.NonFatal class H2Initializer extends ProtocolInitializer.Simple { val name = "h2" val configClass = classOf[H2Config] override val experimentalRequired = true protected type Req = Request protected type Rsp = Response protected val defaultRouter = { // retries can't share header mutations val pathStack = H2.router.pathStack .insertAfter(ClassifiedRetries.role, h2.DupRequest.module) .prepend(LinkerdHeaders.Dst.PathFilter.module) // I think we can safely ignore the DelayedRelease module (as // applied by finagle-http), since we don't ever run in // FactoryToService mode? 
// // .replace(StackClient.Role.prepFactory, DelayedRelease.module) val boundStack = H2.router.boundStack .prepend(LinkerdHeaders.Dst.BoundFilter.module) val clientStack = H2.router.clientStack .insertAfter(StackClient.Role.prepConn, LinkerdHeaders.Ctx.clientModule) // .replace(HttpTraceInitializer.role, HttpTraceInitializer.clientModule) // .insertAfter(Retries.Role, http.StatusCodeStatsFilter.module) H2.router .withPathStack(pathStack) .withBoundStack(boundStack) .withClientStack(clientStack) } private[this] val monitor = Monitor.mk { case NonFatal(_) => true } protected val defaultServer = H2.server.withStack(H2.server.stack .prepend(LinkerdHeaders.Ctx.serverModule) .prepend(h2.ErrorReseter.module)) .configured(param.Monitor(monitor)) override def clearServerContext(stk: ServerStack): ServerStack = { // Does NOT use the ClearContext module that forcibly clears the // context. Instead, we just strip out headers on inbound requests. stk.replace(LinkerdHeaders.Ctx.serverModule.role, LinkerdHeaders.Ctx.clearServerModule) } override def defaultServerPort: Int = 4142 } object H2Initializer extends H2Initializer class H2Config extends RouterConfig { var client: Option[H2ClientConfig] = None var servers: Seq[H2ServerConfig] = Nil @JsonDeserialize(using = classOf[H2IdentifierConfigDeserializer]) var identifier: Option[Seq[H2IdentifierConfig]] = None @JsonIgnore override def baseResponseClassifier = ResponseClassifiers.NonRetryableServerFailures .orElse(super.baseResponseClassifier) // TODO: gRPC (trailers-aware) @JsonIgnore override def responseClassifier = ResponseClassifiers.NonRetryableStream(super.responseClassifier) @JsonIgnore override val protocol: ProtocolInitializer = H2Initializer @JsonIgnore override def routerParams: Stack.Params = super.routerParams + identifierParam private[this] def identifierParam: H2.Identifier = identifier match { case None => h2.HeaderTokenIdentifier.param case Some(configs) => H2.Identifier { params => val identifiers = 
configs.map(_.newIdentifier(params)) RoutingFactory.Identifier.compose(identifiers) } } } trait H2EndpointConfig { var initialStreamWindowBytes: Option[Int] = None var headerTableBytes: Option[Int] = None var maxFrameBytes: Option[Int] = None var maxHeaderListBytes: Option[Int] = None var windowUpdateRatio: Option[Double] = None def withEndpointParams(params: Stack.Params): Stack.Params = params .maybeWith(windowUpdateRatio.map(r => FlowControl.WindowUpdateRatio(r.toFloat))) .maybeWith(headerTableBytes.map(s => Settings.HeaderTableSize(Some(s.bytes)))) .maybeWith(initialStreamWindowBytes.map(s => Settings.InitialStreamWindowSize(Some(s.bytes)))) .maybeWith(maxFrameBytes.map(s => Settings.MaxFrameSize(Some(s.bytes)))) .maybeWith(maxHeaderListBytes.map(s => Settings.MaxHeaderListSize(Some(s.bytes)))) } class H2ClientConfig extends ClientConfig with H2EndpointConfig { @JsonIgnore override def clientParams = withEndpointParams(super.clientParams) } class H2ServerConfig extends ServerConfig with H2EndpointConfig { var maxConcurrentStreamsPerConnection: Option[Int] = None @JsonIgnore override val alpnProtocols: Option[Seq[String]] = Some(Seq(ApplicationProtocolNames.HTTP_2)) override def withEndpointParams(params: Stack.Params): Stack.Params = super.withEndpointParams(params) .maybeWith(maxConcurrentStreamsPerConnection.map(c => Settings.MaxConcurrentStreams(Some(c.toLong)))) @JsonIgnore override def serverParams = withEndpointParams(super.serverParams) } abstract class H2IdentifierConfig extends PolymorphicConfig { @JsonIgnore def newIdentifier(params: Stack.Params): RoutingFactory.Identifier[Request] } class H2IdentifierConfigDeserializer extends JsonDeserializer[Option[Seq[H2IdentifierConfig]]] { override def deserialize( p: JsonParser, _c: DeserializationContext ): Option[Seq[H2IdentifierConfig]] = { val codec = p.getCodec val klass = classOf[H2IdentifierConfig] codec.readTree[TreeNode](p) match { case n: JsonNode if n.isArray => 
Some(n.asScala.toList.map(codec.treeToValue(_, klass))) case node => Some(Seq(codec.treeToValue(node, klass))) } } override def getNullValue(_c: DeserializationContext): Option[Seq[H2IdentifierConfig]] = None }
hhtpcd/linkerd
linkerd/protocol/h2/src/main/scala/io/buoyant/linkerd/protocol/H2Config.scala
Scala
apache-2.0
5,996
package org.ucombinator.jaam.main import org.rogach.scallop._ import java.io._ import scala.collection.JavaConverters._ import org.ucombinator.jaam.util.Log class Main(args : Seq[String]) extends ScallopConf(args = args) with JaamConf { shortSubcommandsHelp(true) // TODO: version: jvm version? Jaam-file header hash? banner("Usage: jaam-tools [subcommand] [options]") // TODO: short summary of each subcommand (w/ no options) in --help addSubcommand(Visualizer) addSubcommand(Interpreter) addSubcommand(Agent) addSubcommand(Cat) addSubcommand(App) addSubcommand(Coverage) addSubcommand(Coverage2) addSubcommand(Decompile) addSubcommand(FindSubcommand$) addSubcommand(Info) addSubcommand(ListItems) addSubcommand(Loop3) addSubcommand(LoopConditions) addSubcommand(LoopIdentifier) addSubcommand(RegExDriver) addSubcommand(MissingReturns) addSubcommand(Print) addSubcommand(Validate) addSubcommand(DecompileToFile) addSubcommand(Taint3) addSubcommand(SystemProperties) verify() } object Main extends App { def conf: Main = new Main(this.args) conf.subcommand.asInstanceOf[Option[Subcommand]] match { case None => println("ERROR: No subcommand specified") case Some(m : Subcommand) => if (m.waitForUser()) { print("Press enter to start.") scala.io.StdIn.readLine() } Log.setLogging(m.logLevel()) Log.color = m.color() m.run() } } abstract class Subcommand(name: String) extends org.rogach.scallop.Subcommand(name) with JaamConf { // TODO: describe() def run(): Unit val waitForUser: ScallopOption[Boolean] = toggle( // TODO: note: `descrYes` is useful for debugging and profiling descrYes = "wait for user to press enter before starting (default: off)", noshort = true, default = Some(false)) val color: ScallopOption[Boolean] = toggle(default = Some(true)) val logLevel: ScallopOption[Log.Level] = enum( descr = "the level of logging verbosity", default = Some("warn"), argType = "log level", elems = scala.collection.immutable.ListMap(Log.levels.map(l => l.name -> l) :_*)) } /**************** * 
Subcommands * ****************/ object Visualizer extends Subcommand("visualizer") { val input = inputOpt(required = false) def run() { import javafx.application.Application Application.launch(classOf[org.ucombinator.jaam.visualizer.main.Main], input():_*) } } object Interpreter extends Subcommand("interpreter") { // TODO: banner("") // TODO: how do we turn off sorting of options by name? import org.ucombinator.jaam.interpreter.AbstractState import scala.collection.immutable val classpath = opt[String](required = true, short = 'P', descr = "the TODO class directory") val rtJar = opt[String](required = true, short = 'J', descr = "the rt.jar file") val mainClass = opt[String](required = true, short = 'c', descr = "the main class") val method = opt[String](required = true, short = 'm', descr = "the main method", default = Some("main")) val libClasses = opt[String](short = 'L', descr = "app's library classes") val _outfile = opt[String](name = "outfile", short = 'o', descr = "the output file for the serialized data") def outfile() = _outfile.getOrElse(mainClass() + ".jaam") // TODO: extend scallop to do this for us val globalSnowflakeAddrLast = toggle( descrYes = "process states that read from the `GlobalSnowflakeAddr` last (default: on)", noshort = true, default = Some(true)) val stateOrdering = enum[Boolean => Ordering[AbstractState]]( default = Some("max"), argType = "ordering", elems = immutable.ListMap( "none" -> StateOrdering.none, "max" -> StateOrdering.max // TODO: state orderings: min insertion reverseInsertion )) val maxSteps = opt[Int](descr = "maximum number of interpretation steps") val stringTop = toggle(default = Some(true)) val snowflakeLibrary = toggle(default = Some(true)) val exceptions = toggle(default = Some(true)) val initialStore = opt[java.io.File]() val analyzer = enum[Analyzer]( descr = "which analyzer to use", default = Some("cha"), argType = "analyzer", elems = immutable.ListMap( "aam" -> AAM, "cha" -> CHA)) sealed trait Analyzer {} final 
case object AAM extends Analyzer {} final case object CHA extends Analyzer {} object StateOrdering { def none(b: Boolean) = new Ordering[AbstractState] { override def compare(x: AbstractState, y: AbstractState): Int = 0 } def max(b: Boolean) = new Ordering[AbstractState] { override def compare(x: AbstractState, y: AbstractState): Int = // TODO: use `b` if (x.id < y.id) { -1 } else if (x.id == y.id) { 0 } else { +1 } } } def run() { analyzer() match { // TODO: as separate commands case AAM => org.ucombinator.jaam.interpreter.Main.aam() case CHA => org.ucombinator.jaam.interpreter.Main.cha() } } } object Agent extends Subcommand("agent") { val arguments = trailArg[List[String]](default = Some(List())) def run() { // Agent launches a new process because we want to load only core JVM classes var java = System.getProperty("java.home") + File.separator + "bin" + File.separator + "java" if (System.getProperty("os.name").toLowerCase().contains("win")) { java += ".exe" } val agentClass = classOf[org.ucombinator.jaam.agent.Main] val jar = agentClass.getProtectionDomain.getCodeSource.getLocation.toURI.getPath val cmd = List( java, "-javaagent:" + jar, "-classpath", jar, agentClass.getCanonicalName) ++ arguments() System.exit(new ProcessBuilder(cmd.asJava).inheritIO().start().waitFor()) } } object Cat extends Subcommand("cat") { banner("Combine multile JAAM files into a single, cohesive file.") footer("") val input = inputOpt() val output = outputOpt() def run() { org.ucombinator.jaam.tools.cat.Cat.concatenateFiles(input(), output()) } } object App extends Subcommand("app") { banner("TODO") footer("") val input = opt[List[String]](descr = "class files, or directories (origin is auto-detected)", default = Some(List())) val app = opt[List[String]](short = 'a', descr = "application jars, class files, or directories", default = Some(List())) val lib = opt[List[String]](short = 'l', descr = "library jars, class files, or directories", default = Some(List())) val jvm = 
opt[List[String]](short = 'r', descr = "Java runtime jars, class files, or directories", default = Some(List())) val appPackage = opt[List[String]](short = 'p', descr = "package prefixes contain application code (the empty list allows all)", default = Some(List())) val defaultJvm = toggle(default = Some(true)) val detectMain = toggle(default = Some(true)) val mainClass = opt[String](short = 'c', descr = "the main class") val mainMethod = opt[String](short = 'm', descr = "the main method") val output = outputOpt() // TODO: val java-8-rt (in resource?) def run() { org.ucombinator.jaam.tools.app.Main.main(input(), app(), lib(), jvm(), appPackage(), defaultJvm(), detectMain(), mainClass.toOption, mainMethod.toOption, output()) } } object Coverage extends Subcommand("coverage") { banner("Analyze a JAAM file against target JAR files to find JAAM coverage.") footer("") val jaamFile = trailArg[java.io.File](descr = "The JAAM file to analyze") val jars = trailArg[String](descr = "Colon-separated list of JAR files to directly compare coverage against") val additionalJars = opt[String](descr = "Colon-separated list of JAR files to complete class loading for inspection JAR files") def run() { org.ucombinator.jaam.tools.coverage.Coverage.findCoverage(jaamFile().toString, jars().split(":"), JaamConf.extractSeqFromOptString(additionalJars)) } } object Coverage2 extends Subcommand("coverage2") { banner("Analyze a JAAM file against target JAR files to find JAAM coverage.") footer("") val rtJar = trailArg[String](descr = "The RT.jar file to use for analysis") val jaamFile = trailArg[java.io.File](descr = "The JAAM file to analyze") val mainClass = trailArg[String](descr = "The name of the main class in the JAAM file") val jars = trailArg[String](descr = "Colon separated list of JAR files to directly compare coverage against") val additionalJars = opt[String](descr = "Colon-separated list of JAR files to complete class loading for inspection JAR files") def run() { 
org.ucombinator.jaam.tools.coverage2.Coverage2.main(rtJar(), jaamFile().toString, mainClass(), jars().split(":"), JaamConf.extractSeqFromOptString(additionalJars)) } } object Decompile extends Subcommand("decompile") { banner("TODO") footer("") // val append = toggle( // descrYes = "wait for user to press enter before starting (default: off)", // noshort = true, prefix = "no-", default = Some(false)) val jvm = toggle( descrYes = "wait for user to press enter before starting (default: off)", noshort = true, default = Some(false)) val lib = toggle( descrYes = "wait for user to press enter before starting (default: off)", noshort = true, default = Some(false)) val app = toggle( descrYes = "wait for user to press enter before starting (default: true)", noshort = true, default = Some(true)) val ignoreOverflow = toggle( noshort = true, default = Some(false)) val exclude = opt[List[String]](descr = "Class names to omit", default = Some(List())) val input = inputOpt() val output = outputOpt() def run() { org.ucombinator.jaam.tools.decompile.Main.main(input(), output(), exclude(), jvm(), lib(), app(), ignoreOverflow()) } } object FindSubcommand$ extends Subcommand("find-main") { banner("Attempt to find the Main class from which to run the JAR file") footer("") val showerrs = opt[Boolean](name = "show-errors", short = 's', descr = "Show errors for unloadable classes") val force = opt[Boolean](name = "force-possibilities", short = 'f', descr = "Show all possibilities found manually, even if a main class is found in the manifest") val verifymanual = opt[Boolean](name = "validate", short = 'v', descr = "Check potential Main classes for a valid `main` method") val anyClass = opt[Boolean](descr = "Check all classes not just those named Main") val jars = trailArg[String](descr = "Colon-separated list of JAR files to directly search for `main` methods") def run() { org.ucombinator.jaam.tools.findmain.FindMain.main(jars().split(":"), showerrs(), force(), verifymanual(), anyClass()) 
} } object Info extends Subcommand("info") { banner("Get simple information about a JAAM interpretation.") footer("") val file = trailArg[java.io.File](descr = "a .jaam file to be analyzed") def run() { org.ucombinator.jaam.tools.info.Info.analyzeForInfo(file().toString) } } object ListItems extends Subcommand("list") { banner("List all classes and methods in the JAR file") footer("") val noclasses = opt[Boolean](descr = "Do not print all classes") val nomethods = opt[Boolean](descr = "Do not print all methods") val jarFile = trailArg[java.io.File](descr = "The .jar file to analyze") def run() { org.ucombinator.jaam.tools.listitems.ListItems.main(jarFile().toString, org.ucombinator.jaam.tools.listitems.ListPrintOption(!noclasses(), !nomethods())) } } object Loop3 extends Subcommand("loop3") { //val classpath = opt[List[String]](descr = "TODO") val input = inputOpt() val output = outputOpt() val prune = toggle( descrYes = "Remove methods without outgoing edges from graph", descrNo = "Do not remove methods without outgoing edges from graph", default = Some(true)) val shrink = toggle(descrYes = "Skip methods without loops", descrNo = "Include methods without loops", default = Some(true)) val prettyPrint = toggle(descrYes = "Pretty print found loops", default = Some(false)) def run() { //Main.main(classpath.getOrElse(List())) org.ucombinator.jaam.tools.loop3.Main.main(input(), output(), prune(), shrink(), prettyPrint()) } } object LoopConditions extends Subcommand("loop-conditions") { val input = inputOpt() val `class` = opt[String]() def run() { org.ucombinator.jaam.tools.loopConditions.Main.main(input(), `class`.toOption) } } object LoopIdentifier extends Subcommand("loopident") { val input = inputOpt() val printBodies = toggle( descrYes = "Print out the bodies of methods which contain loops", descrNo = "Do not print the bodies of methods [default]", default = Some(false) ) val printStatements = toggle( descrYes = "Print out the statements belonging to each loop", 
descrNo = "Do not print out the statements for each loop [default]", default = Some(false) ) val skipExceptions = toggle( descrYes = "Skip false loops generated by certain types of exception handling [default]", descrNo = "Include analysis for false loops generated by certain types of exception handling", default = Some(true) ) def run(): Unit = { org.ucombinator.jaam.tools.loopidentifier.Main.main(input(), printBodies(), printStatements(), skipExceptions()) } } object RegExDriver extends Subcommand("driver") { val input = inputOpt() val className = opt[String](descr = "") val methodName = opt[String](descr = "") val showStmts = opt[Boolean](descr = "") val showUnidentified = opt[Boolean](descr = "") def run(): Unit = { org.ucombinator.jaam.tools.regex_driver.Main.main(input(), className.toOption, methodName.toOption, showStmts(), showUnidentified()) } } object MissingReturns extends Subcommand("missing-returns") { banner("Find calls with no matching return") footer("") val jaamFile = trailArg[java.io.File](descr = "The JAAM file to analyze") def run() { org.ucombinator.jaam.tools.missingreturns.MissingReturns.missingReturns(jaamFile().toString) } } object Print extends Subcommand("print") { banner("Print a JAAM file in human-readable format") footer("") val state = opt[Int](argName = "state id", descr = "a specific state ID to print") val file = trailArg[java.io.File](descr = "a .jaam file to be printed") def run() { state.toOption match { case None => org.ucombinator.jaam.tools.printer.Print.printFile(file().toString) case Some(st) => org.ucombinator.jaam.tools.printer.Print.printNodeFromFile(file().toString, st) } } } object Validate extends Subcommand("validate") { banner("Amend an aborted JAAM serialization to allow reading.") footer("") val fixEof = opt[Boolean](descr = "whether to amend a JAAM file that ends abruptly") val addMissingStates = opt[Boolean](descr = "find hanging edges and add MissingState states so they go somewhere") val removeMissingStates = 
opt[Boolean](descr = "remove any MissingState states found in the serialization; overrides --addMissingStates") val targetFile = opt[String](descr = "the .jaam file to output a corrected version, if desired") val file = trailArg[java.io.File](descr = "a .jaam file to be truncated") def run() { org.ucombinator.jaam.tools.validate.Validate.validateFile( jaamFile = file().toString, targetFile = targetFile.toOption, shouldAppendMissingEOF = fixEof(), shouldAddMissingStates = addMissingStates(), shouldRemoveMissingStates = removeMissingStates()) } } object DecompileToFile extends Subcommand("decompile-to-file") { banner("TODO") footer("") val input = opt[List[String]](default = Some(List())) val output = opt[String](required = true) def run() { org.ucombinator.jaam.tools.decompileToFile.DecompileToFile.main( input = input(), output = output()) } } object Taint3 extends Subcommand("taint3") { banner("TODO") val input = inputOpt() val output = outputOpt() def run() { org.ucombinator.jaam.tools.taint3.Taint3.main( input = input(), output = output()) } } object SystemProperties extends Subcommand("system-properties") { def run() { for ((k, v) <- java.lang.System.getProperties.asScala.toList.sorted) { println(f"$k: $v") } } }
Ucombinator/jaam
src/main/scala/org/ucombinator/jaam/main/Main.scala
Scala
bsd-2-clause
16,259
package uk.gov.gds.ier.mustache import uk.gov.gds.ier.guice.{WithRemoteAssets, WithConfig} trait ErrorPageMustache extends InheritedGovukMustache { self: WithRemoteAssets with WithConfig => object ErrorPage { case class ServerError() extends ArticleMustachio ("error/serverError") case class NotFound(url: String) extends ArticleMustachio ("error/notFound") case class Timeout( timeout: Int, override val startUrl: String ) extends ArticleMustachio ("error/timeout") } }
michaeldfallen/ier-frontend
app/uk/gov/gds/ier/mustache/ErrorPageMustache.scala
Scala
mit
519
package ml.combust.mleap.core.util

import ml.combust.mleap.tensor.{DenseTensor, SparseTensor}
import org.scalatest.FunSpec

/**
 * Verifies that mleap tensors (sparse and dense) convert to Spark vectors
 * with the expected element values, including when sparse indices are
 * supplied out of order.
 */
class VectorConvertersSpec extends FunSpec {

  describe("mleapTensorToSparkVector works when") {

    it("using a Sparse Tensor") {
      val expected = Seq(1.0, 0, 3.0)
      val nonZeroIndices = Seq(Seq(0), Seq(2))
      val nonZeroValues = nonZeroIndices.map(i => expected(i.head)).toArray
      val tensor = SparseTensor(nonZeroIndices, nonZeroValues, Seq(expected.length))
      assert(VectorConverters.mleapTensorToSparkVector(tensor).toArray sameElements expected)
    }

    it("using a Sparse Tensor with non-increasing indices") {
      val expected = Seq(1.0, 0, 3.0)
      // Indices deliberately out of order: the converter must still place values correctly.
      val nonZeroIndices = Seq(Seq(2), Seq(0))
      val nonZeroValues = nonZeroIndices.map(i => expected(i.head)).toArray
      val tensor = SparseTensor(nonZeroIndices, nonZeroValues, Seq(expected.length))
      assert(VectorConverters.mleapTensorToSparkVector(tensor).toArray sameElements expected)
    }

    it("using a Dense Tensor") {
      val expected = Array(1.0, 2.0, 3.0)
      val tensor = DenseTensor(expected, Seq(expected.length))
      assert(VectorConverters.mleapTensorToSparkVector(tensor).toArray sameElements expected)
    }
  }
}
combust/mleap
mleap-core/src/test/scala/ml/combust/mleap/core/util/VectorConvertersSpec.scala
Scala
apache-2.0
1,160
package org.jetbrains.plugins.scala.lang.psi.api.statements

import com.intellij.psi.PsiComment
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElement
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScDocCommentOwner
import org.jetbrains.plugins.scala.lang.scaladoc.psi.api.ScDocComment

/**
 * Mixin for PSI elements that may carry comments, both plain
 * (line/block) and ScalaDoc.
 *
 * @author Nikolay.Tropin
 */
trait ScCommentOwner { self: ScalaPsiElement =>

  /** First plain (non-ScalaDoc) comment among this element's children, if any. */
  def simpleComment: Option[PsiComment] = {
    self.children.collectFirst {
      case c: PsiComment if !c.isInstanceOf[ScDocComment] => c
    }
  }

  /** ScalaDoc comment of this element, when it is an [[ScDocCommentOwner]]. */
  private def scDocComment: Option[ScDocComment] = self match {
    case dco: ScDocCommentOwner => dco.docComment
    case _ => None
  }

  /** All comments of this element: the ScalaDoc comment (if any) followed by the plain one. */
  def allComments: Seq[PsiComment] = scDocComment.toSeq ++ simpleComment

  /**
   * Whether this element carries any comment at all.
   * Short-circuits on the ScalaDoc check instead of materializing `allComments`.
   */
  def hasComments: Boolean = scDocComment.isDefined || simpleComment.isDefined
}
triggerNZ/intellij-scala
src/org/jetbrains/plugins/scala/lang/psi/api/statements/ScCommentOwner.scala
Scala
apache-2.0
802
package models.services

import java.util.UUID
import javax.inject.Inject

import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.impl.providers.CommonSocialProfile
import models.{DefaultRole, User}
import models.daos.UserDAO
import play.api.libs.concurrent.Execution.Implicits._

import scala.concurrent.Future

/**
 * Handles actions to users.
 *
 * @param userDAO The user DAO implementation.
 */
class UserServiceImpl @Inject() (userDAO: UserDAO) extends UserService {

  /**
   * Retrieves a user that matches the specified login info.
   *
   * @param loginInfo The login info to retrieve a user.
   * @return The retrieved user or None if no user could be retrieved for the given login info.
   */
  def retrieve(loginInfo: LoginInfo): Future[Option[User]] = userDAO.find(loginInfo)

  /**
   * Saves a user.
   *
   * @param user The user to save.
   * @return The saved user.
   */
  def save(user: User) = userDAO.save(user)

  /**
   * Saves the social profile for a user.
   *
   * Updates the existing user when one matches the profile's login info,
   * otherwise creates a brand-new user from the profile.
   *
   * @param profile The social profile to save.
   * @return The user for whom the profile was saved.
   */
  def save(profile: CommonSocialProfile) = {
    userDAO.find(profile.loginInfo).flatMap { maybeUser =>
      val toPersist = maybeUser match {
        case Some(existing) =>
          // Known user: refresh only the profile-derived fields.
          existing.copy(
            firstName = profile.firstName,
            lastName = profile.lastName,
            fullName = profile.fullName,
            email = profile.email,
            avatarURL = profile.avatarURL
          )
        case None =>
          // Unknown user: mint a fresh identity with the default role.
          User(
            userID = UUID.randomUUID(),
            role = DefaultRole(),
            loginInfo = profile.loginInfo,
            firstName = profile.firstName,
            lastName = profile.lastName,
            fullName = profile.fullName,
            email = profile.email,
            avatarURL = profile.avatarURL
          )
      }
      userDAO.save(toPersist)
    }
  }
}
OpenCompare/OpenCompare
org.opencompare/play-app/app/models/services/UserServiceImpl.scala
Scala
apache-2.0
2,014
import com.hypertino.binders.json.JsonBinders
import org.scalatest.{FlatSpec, Matchers}

case class TestBoolean(booleanVal: Boolean)
case class TestBooleanN(booleanValN1: Option[Boolean], booleanValN2: Option[Boolean])
case class TestBooleanArray(booleanArray: Seq[Boolean])
case class TestBooleanArrayN(booleanArrayN: Seq[Option[Boolean]])

/**
 * Round-trip JSON (de)serialization tests for Boolean fields: plain values,
 * optional values, and sequences of both. `None` is expected to render as
 * JSON `null`.
 */
class TestBooleanJsonSerializer extends FlatSpec with Matchers {
  import JsonBinders._

  "Json " should " serialize class with Boolean" in {
    val sample = TestBoolean(booleanVal = true)
    sample.toJson shouldBe """{"booleanVal":true}"""
  }

  "Json " should " deserialize class with Boolean" in {
    val decoded = """{"booleanVal":true}""".parseJson[TestBoolean]
    decoded shouldBe TestBoolean(booleanVal = true)
  }

  "Json " should " serialize class with array of Boolean" in {
    val sample = TestBooleanArray(List(true, false, true))
    sample.toJson shouldBe """{"booleanArray":[true,false,true]}"""
  }

  "Json " should " deserialize class with array of Boolean" in {
    val decoded = """{"booleanArray":[true,false,true]}""".parseJson[TestBooleanArray]
    decoded shouldBe TestBooleanArray(List(true, false, true))
  }

  "Json " should " serialize class with array of Option[Boolean]" in {
    val sample = TestBooleanArrayN(List(Some(true), None, Some(false)))
    sample.toJson shouldBe """{"booleanArrayN":[true,null,false]}"""
  }

  "Json " should " deserialize class with array of Option[Boolean]" in {
    val decoded = """{"booleanArrayN":[true,null,false]}""".parseJson[TestBooleanArrayN]
    decoded shouldBe TestBooleanArrayN(List(Some(true), None, Some(false)))
  }

  "Json " should " serialize class with Nullable Boolean" in {
    // Both options present.
    TestBooleanN(Some(true), Some(false)).toJson shouldBe """{"booleanValN1":true,"booleanValN2":false}"""
    // A missing option serializes as null.
    TestBooleanN(Some(true), None).toJson shouldBe """{"booleanValN1":true,"booleanValN2":null}"""
  }
}
hypertino/json-binders
jsonBinders/shared/src/test/scala/TestBooleanJsonSerializer.scala
Scala
bsd-3-clause
2,045
/*
 * Copyright 2017 PayPal
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * Copyright (C) 2009-2016 Lightbend Inc. <http://www.lightbend.com>
 */
package org.squbs.streams.circuitbreaker.impl

import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicLong}
import java.util.concurrent.TimeUnit

import akka.actor.{ActorSystem, Scheduler}
import akka.util.Unsafe
import com.codahale.metrics.MetricRegistry
import com.typesafe.config.Config
import org.squbs.streams.circuitbreaker._
import org.squbs.metrics.MetricsExtension

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import org.squbs.util.DurationConverters._

object AtomicCircuitBreakerState {

  /**
   * Create a new CircuitBreaker.
   *
   * Callbacks run in caller's thread when using withSyncCircuitBreaker, and in same ExecutionContext as the passed
   * in Future when using withCircuitBreaker. To use another ExecutionContext for the callbacks you can specify the
   * executor in the constructor.
   *
   * @param maxFailures Maximum number of failures before opening the circuit
   * @param callTimeout [[scala.concurrent.duration.FiniteDuration]] of time after which to consider a call a failure
   * @param resetTimeout [[scala.concurrent.duration.FiniteDuration]] of time after which to attempt to close the circuit
   * @param maxResetTimeout the upper bound of resetTimeout
   * @param exponentialBackoffFactor The exponential amount that the wait time will be increased
   */
  def apply(name: String,
            maxFailures: Int,
            callTimeout: FiniteDuration,
            resetTimeout: FiniteDuration,
            maxResetTimeout: FiniteDuration = 36500.days, // effectively "no upper bound" (100 years)
            exponentialBackoffFactor: Double = 1.0)
           (implicit executor: ExecutionContext, scheduler: Scheduler): CircuitBreakerState =
    new AtomicCircuitBreakerState(name, scheduler, maxFailures, callTimeout, resetTimeout, maxResetTimeout,
      exponentialBackoffFactor)

  /**
   * Create a new [[AtomicCircuitBreakerState]] from configuration.
   *
   * Missing settings fall back to the `squbs.circuit-breaker` section of the actor system config.
   *
   * @param name The unique name of this circuit breaker instance.
   * @param config Configuration to look for the settings
   * @param system ActorSystem
   */
  def apply(name: String, config: Config)(implicit system: ActorSystem): CircuitBreakerState = {
    val configWithDefaults = config.withFallback(system.settings.config.getConfig("squbs.circuit-breaker"))
    apply(name,
      configWithDefaults.getInt("max-failures"),
      Duration(configWithDefaults.getString("call-timeout")).asInstanceOf[FiniteDuration],
      Duration(configWithDefaults.getString("reset-timeout")).asInstanceOf[FiniteDuration],
      Duration(configWithDefaults.getString("max-reset-timeout")).asInstanceOf[FiniteDuration],
      configWithDefaults.getDouble("exponential-backoff-factor")
    )(system.dispatcher, system.scheduler)
      .withMetricRegistry(MetricsExtension(system).metrics)
  }

  /**
   * Java API
   *
   * Create a new [[AtomicCircuitBreakerState]].
   *
   * @param maxFailures Maximum number of failures before opening the circuit
   * @param callTimeout [[scala.concurrent.duration.FiniteDuration]] of time after which to consider a call a failure
   * @param resetTimeout [[scala.concurrent.duration.FiniteDuration]] of time after which to attempt to close the circuit
   */
  def create(name: String,
             maxFailures: Int,
             callTimeout: java.time.Duration,
             resetTimeout: java.time.Duration,
             executor: ExecutionContext,
             scheduler: Scheduler): CircuitBreakerState =
    apply(name, maxFailures, toScala(callTimeout), toScala(resetTimeout))(executor, scheduler)

  /**
   * Java API
   *
   * Create a new [[AtomicCircuitBreakerState]] with exponential backoff strategy.
   *
   * The `resetTimeout` will be increased exponentially for each failed attempt to close the circuit.
   *
   * @param maxFailures Maximum number of failures before opening the circuit
   * @param callTimeout [[scala.concurrent.duration.FiniteDuration]] of time after which to consider a call a failure
   * @param resetTimeout [[scala.concurrent.duration.FiniteDuration]] of time after which to attempt to close the circuit
   * @param maxResetTimeout the upper bound of resetTimeout
   * @param exponentialBackoffFactor The exponential amount that the wait time will be increased
   */
  def create(name: String,
             maxFailures: Int,
             callTimeout: java.time.Duration,
             resetTimeout: java.time.Duration,
             maxResetTimeout: java.time.Duration,
             exponentialBackoffFactor: Double,
             executor: ExecutionContext,
             scheduler: Scheduler): CircuitBreakerState =
    apply(name, maxFailures, toScala(callTimeout), toScala(resetTimeout), toScala(maxResetTimeout),
      exponentialBackoffFactor)(executor, scheduler)

  /**
   * Java API
   *
   * Create a new Circuit Breaker from configuration.
   *
   * @param name The unique name of this circuit breaker instance.
   * @param config Configuration to look for the settings
   * @param system ActorSystem
   */
  def create(name: String, config: Config, system: ActorSystem): CircuitBreakerState = apply(name, config)(system)
}

/**
 * A [[CircuitBreakerState]] implementation based on `Atomic` variables.
 *
 * State transitions follow the standard circuit-breaker pattern (modeled on Akka's
 * `akka.pattern.CircuitBreaker`), using lock-free CAS via `akka.util.Unsafe`.
 *
 * @param name used to differentiate circuit breakers in metrics
 * @param scheduler used to schedule reset attempt
 * @param maxFailures maximum number of failures before opening the circuit
 * @param callTimeout [[scala.concurrent.duration.FiniteDuration]] of time after which to consider a call a failure
 * @param resetTimeout [[scala.concurrent.duration.FiniteDuration]] of time after which to attempt to close the circuit
 * @param maxResetTimeout the upper bound of resetTimeout
 * @param exponentialBackoffFactor The exponential amount that the wait time will be increased
 * @param executor [[scala.concurrent.ExecutionContext]] used for execution of the scheduler
 */
class AtomicCircuitBreakerState(val name: String,
                                scheduler: Scheduler,
                                val maxFailures: Int,
                                val callTimeout: FiniteDuration,
                                val resetTimeout: FiniteDuration,
                                val maxResetTimeout: FiniteDuration,
                                val exponentialBackoffFactor: Double,
                                val metricRegistry: MetricRegistry = new MetricRegistry())
                               (implicit executor: ExecutionContext)
  extends AbstractAtomicCircuitBreakerLogic with CircuitBreakerState {

  require(exponentialBackoffFactor >= 1.0, "factor must be >= 1.0")

  /**
   * @inheritdoc
   */
  def withMetricRegistry(metricRegistry: MetricRegistry): AtomicCircuitBreakerState = {
    new AtomicCircuitBreakerState(
      name, scheduler, maxFailures, callTimeout, resetTimeout, maxResetTimeout, exponentialBackoffFactor,
      metricRegistry)(executor)
  }

  /**
   * Holds reference to current state of CircuitBreaker - *access only via helper methods*
   */
  @volatile
  private[this] var _currentStateDoNotCallMeDirectly: AtomicState = AtomicClosed

  /**
   * Holds reference to current resetTimeout of CircuitBreaker - *access only via helper methods*
   */
  @volatile
  private[this] var _currentResetTimeoutDoNotCallMeDirectly: FiniteDuration = resetTimeout

  /**
   * Helper method for access to underlying state via Unsafe
   *
   * @param oldState Previous state on transition
   * @param newState Next state on transition
   * @return Whether the previous state matched correctly
   */
  @inline
  private[this] def swapAtomicState(oldState: AtomicState, newState: AtomicState): Boolean =
    Unsafe.instance.compareAndSwapObject(this, AbstractAtomicCircuitBreakerLogic.stateOffset, oldState, newState)

  /**
   * Helper method for accessing underlying state via Unsafe
   *
   * @return Reference to current state
   */
  @inline
  private[this] def currentAtomicState: AtomicState =
    Unsafe.instance.getObjectVolatile(this, AbstractAtomicCircuitBreakerLogic.stateOffset).asInstanceOf[AtomicState]

  /**
   * Helper method for updating the underlying resetTimeout via Unsafe
   */
  @inline
  private[this] def swapResetTimeout(oldResetTimeout: FiniteDuration, newResetTimeout: FiniteDuration): Boolean =
    Unsafe.instance.compareAndSwapObject(this, AbstractAtomicCircuitBreakerLogic.resetTimeoutOffset, oldResetTimeout,
      newResetTimeout)

  /**
   * Helper method for accessing to the underlying resetTimeout via Unsafe
   */
  @inline
  private[this] def currentResetTimeout: FiniteDuration =
    Unsafe.instance.getObjectVolatile(this, AbstractAtomicCircuitBreakerLogic.resetTimeoutOffset)
      .asInstanceOf[FiniteDuration]

  /**
   * Mark a successful call through CircuitBreaker.
   */
  def succeeds(): Unit = currentAtomicState.succeeds()

  /**
   * Mark a failed call through CircuitBreaker.
   */
  def fails(): Unit = currentAtomicState.fails()

  /**
   * Check if circuit should be short circuited.
   */
  def isShortCircuited: Boolean = currentAtomicState.isShortCircuited

  /**
   * Get the current state of the Circuit Breaker.
   */
  def currentState = mapFromAtomicStateToState(currentAtomicState)

  // Maps the public State values to their internal atomic counterparts (and back below).
  private val mapToAtomicState = Map(
    Closed -> AtomicClosed,
    Open -> AtomicOpen,
    HalfOpen -> AtomicHalfOpen
  )

  private val mapFromAtomicStateToState: Map[AtomicState, State] = mapToAtomicState.map(_.swap)

  /**
   * Implements consistent transition between states. Throws IllegalStateException if an invalid transition is
   * attempted.
   *
   * @param fromState State being transitioned from
   * @param toState State being transitioned to
   */
  override def transitionImpl(fromState: State, toState: State): Boolean = {
    val fromAtomicState = mapToAtomicState(fromState)
    val toAtomicState = mapToAtomicState(toState)
    val isTransitioned = swapAtomicState(fromAtomicState, toAtomicState)
    if (isTransitioned) toAtomicState.enter()
    // else some other thread already swapped state
    isTransitioned
  }

  /**
   * Internal state abstraction
   */
  private sealed trait AtomicState {

    /**
     * Check if circuit should be short circuited.
     *
     * @return
     */
    def isShortCircuited: Boolean

    /**
     * Invoked when call succeeds
     *
     */
    def succeeds(): Unit

    /**
     * Invoked when call fails
     *
     */
    def fails(): Unit

    /**
     * Invoked on the transitioned-to state during transition.
     */
    def enter(): Unit
  }

  /**
   * Concrete implementation of Closed state. The extended AtomicInteger is the failure counter.
   */
  private object AtomicClosed extends AtomicInteger with AtomicState {

    /**
     * Implementation of isShortCircuited, which simply returns false
     *
     * @return false
     */
    override def isShortCircuited: Boolean = false

    /**
     * On successful call, the failure count is reset to 0
     *
     * @return
     */
    override def succeeds(): Unit = set(0)

    /**
     * On failed call, the failure count is incremented. The count is checked against the configured maxFailures, and
     * the breaker is tripped if we have reached maxFailures.
     *
     * @return
     */
    override def fails(): Unit = if (incrementAndGet() == maxFailures) tripBreaker(Closed)

    /**
     * On entry of this state, failure count and resetTimeout is reset.
     *
     * @return
     */
    override def enter(): Unit = {
      set(0)
      swapResetTimeout(currentResetTimeout, resetTimeout)
    }

    /**
     * Override for more descriptive toString
     *
     * @return
     */
    override def toString: String = "AtomicClosed with failure count = " + get()
  }

  /**
   * Concrete implementation of half-open state. The extended AtomicBoolean guards the single trial call.
   */
  private object AtomicHalfOpen extends AtomicBoolean(true) with AtomicState {

    /**
     * Allows a single call through, during which all other callers fail-fast. If the call fails, the breaker reopens.
     * If the call succeeds the breaker closes.
     *
     * @return true if already returned false once.
     */
    override def isShortCircuited: Boolean = !compareAndSet(true, false)

    /**
     * Reset breaker on successful call.
     *
     * @return
     */
    override def succeeds(): Unit = resetBreaker()

    /**
     * Reopen breaker on failed call.
     *
     * @return
     */
    override def fails(): Unit = tripBreaker(HalfOpen)

    /**
     * On entry, guard should be reset for that first call to get in
     *
     * @return
     */
    override def enter(): Unit = set(true)

    /**
     * Override for more descriptive toString
     *
     * @return
     */
    override def toString: String = "Half-AtomicOpen currently testing call for success = " + get()
  }

  /**
   * Concrete implementation of Open state. The extended AtomicLong records the entry timestamp (nanos).
   */
  private object AtomicOpen extends AtomicLong with AtomicState {

    /**
     * Fail-fast on any invocation.
     *
     * @return true
     */
    override def isShortCircuited: Boolean = true

    /**
     * No-op for open, calls are never executed so cannot succeed or fail
     *
     * @return
     */
    override def succeeds(): Unit = ()

    /**
     * No-op for open, calls are never executed so cannot succeed or fail
     *
     * @return
     */
    override def fails(): Unit = ()

    /**
     * On entering this state, schedule an attempted reset via [[akka.actor.Scheduler]] and store the entry time.
     * Also grows the reset timeout by the exponential backoff factor, capped at maxResetTimeout.
     *
     * @return
     */
    override def enter(): Unit = {
      set(System.nanoTime())
      scheduler.scheduleOnce(currentResetTimeout) {
        attemptReset()
      }
      // `match` applies to the whole product; a non-finite result leaves the timeout unchanged.
      val nextResetTimeout = currentResetTimeout * exponentialBackoffFactor match {
        case f: FiniteDuration ⇒ f
        case _ ⇒ currentResetTimeout
      }

      if (nextResetTimeout < maxResetTimeout)
        swapResetTimeout(currentResetTimeout, nextResetTimeout)
    }

    /**
     * Override for more descriptive toString
     *
     * @return
     */
    override def toString: String = "AtomicOpen"
  }
}
anilgursel/squbs
squbs-ext/src/main/scala/org/squbs/streams/circuitbreaker/impl/AtomicCircuitBreakerState.scala
Scala
apache-2.0
14,850
/*
 * Copyright 2014 Frugal Mechanic (http://frugalmechanic.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package fm.lazyseq

import java.io.Closeable

/**
 * A buffered iterator that also owns a closeable resource; callers must
 * invoke [[close]] when done iterating.
 */
trait LazySeqIterator[+A] extends scala.collection.BufferedIterator[A] with Closeable {
  /** Releases any underlying resource backing this iterator. */
  def close(): Unit
  /** Whether another element is available. */
  def hasNext: Boolean
  /** The next element, without advancing the iterator. */
  def head: A
  /** `Some(head)` when an element is available, `None` otherwise. */
  final override def headOption: Option[A] = if (hasNext) Some(head) else None
  /** Returns the next element and advances the iterator. */
  def next: A
}
frugalmechanic/fm-lazyseq
src/main/scala-2.12/fm/lazyseq/LazySeqIterator.scala
Scala
apache-2.0
912
/*
 * sbt
 * Copyright 2011 - 2018, Lightbend, Inc.
 * Copyright 2008 - 2010, Mark Harrah
 * Licensed under Apache License 2.0 (see LICENSE)
 */

package sbt

import scala.concurrent.{ Promise => XPromise }

/**
 * Bridges sbt's task `Result` with a standard Scala `Promise`:
 * a `Value` completes the promise successfully, an `Inc` fails it.
 */
final class PromiseWrap[A] {
  private[sbt] val underlying: XPromise[A] = XPromise()

  /** Completes the promise from a task result, success or failure. */
  def complete(result: Result[A]): Unit =
    result match {
      case Value(v)   => success(v)
      case Inc(cause) => failure(cause)
    }

  /** Fulfills the promise with `value`. */
  def success(value: A): Unit = underlying.success(value)

  /** Fails the promise with `cause`. */
  def failure(cause: Throwable): Unit = underlying.failure(cause)

  /** Whether the underlying promise has been completed. */
  def isCompleted: Boolean = underlying.isCompleted
}
xuwei-k/xsbt
main-settings/src/main/scala/sbt/PromiseWrap.scala
Scala
apache-2.0
644
package org.jetbrains.sbt.project.data

import java.io.File
import java.net.URI

import com.intellij.openapi.externalSystem.model.project.AbstractExternalEntityData
import com.intellij.openapi.externalSystem.model.{Key, ProjectKeys}
import org.jetbrains.plugins.scala.project.Version
import org.jetbrains.sbt.project.SbtProjectSystem
import org.jetbrains.sbt.project.data.SbtEntityData._
import org.jetbrains.sbt.project.structure.Play2Keys.AllKeys.ParsedValue
import org.jetbrains.sbt.resolvers.SbtResolver

/** Base class for external-system data nodes produced by the sbt importer. */
abstract class SbtEntityData extends AbstractExternalEntityData(SbtProjectSystem.Id) with Product {

  // need to manually specify equals/hashCode here because it is not generated for case classes inheriting from
  // AbstractExternalEntityData
  override def equals(obj: scala.Any): Boolean = obj match {
    case data: SbtEntityData =>
      //noinspection CorrespondsUnsorted
      this.canEqual(data) && runtime.ScalaRunTime._equals(this, data)
    case _ => false
  }

  override def hashCode(): Int = runtime.ScalaRunTime._hashCode(this)
}

object SbtEntityData {
  // Creates the external-system Key under which a data class is stored; the
  // weight controls processing order relative to module data.
  def datakey[T](clazz: Class[T],
                 weight: Int = ProjectKeys.MODULE.getProcessingWeight + 1
                ): Key[T] = new Key(clazz.getName, weight)
}

/**
 * Data describing a "build" module: The IDEA-side representation of the sbt meta-project
 * @author Pavel Fatin
 */
@SerialVersionUID(1)
case class SbtBuildModuleData(imports: Seq[String], resolvers: Set[SbtResolver]) extends SbtEntityData

object SbtBuildModuleData {
  val Key: Key[SbtBuildModuleData] = datakey(classOf[SbtBuildModuleData])
}

/** Data describing a project which is part of an sbt build. */
@SerialVersionUID(1)
case class SbtModuleData(id: String, buildURI: URI) extends SbtEntityData

object SbtModuleData {
  val Key: Key[SbtModuleData] = datakey(classOf[SbtModuleData])
}

// Project-level sbt settings (base packages, JDK, javac options, sbt version, path).
@SerialVersionUID(1)
case class SbtProjectData(basePackages: Seq[String],
                          jdk: Option[Sdk],
                          javacOptions: Seq[String],
                          sbtVersion: String,
                          projectPath: String
                         ) extends SbtEntityData

object SbtProjectData {
  val Key: Key[SbtProjectData] = datakey(classOf[SbtProjectData])
}

// Marker for data classes that represent a named sbt key (setting/task/command).
sealed trait SbtNamedKey {
  val name: String
}

// An sbt setting key with its current value.
@SerialVersionUID(1)
case class SbtSettingData(name: String, description: String, rank: Int, value: String)
  extends SbtEntityData with SbtNamedKey

object SbtSettingData {
  val Key: Key[SbtSettingData] = datakey(classOf[SbtSettingData])
}

// An sbt task key (no value: tasks are evaluated on demand).
@SerialVersionUID(1)
case class SbtTaskData(name: String, description: String, rank: Int)
  extends SbtEntityData with SbtNamedKey

object SbtTaskData {
  val Key: Key[SbtTaskData] = datakey(classOf[SbtTaskData])
}

// An sbt command with its help entries (name -> description pairs).
@SerialVersionUID(1)
case class SbtCommandData(name: String, help: Seq[(String,String)])
  extends SbtEntityData with SbtNamedKey

object SbtCommandData {
  val Key: Key[SbtCommandData] = datakey(classOf[SbtCommandData])
}

// Scala compiler setup for a module (organization, version, classpath, options).
@SerialVersionUID(1)
case class ModuleExtData(scalaOrganization: String,
                         scalaVersion: Option[Version],
                         scalacClasspath: Seq[File],
                         scalacOptions: Seq[String],
                         jdk: Option[Sdk],
                         javacOptions: Seq[String]
                        ) extends SbtEntityData

object ModuleExtData {
  val Key: Key[ModuleExtData] =
    datakey(classOf[ModuleExtData], ProjectKeys.LIBRARY_DEPENDENCY.getProcessingWeight + 1)
}

// Parsed Play2-specific sbt keys, grouped per project.
@SerialVersionUID(1)
case class Play2ProjectData(projectKeys: Map[String, Map[String, ParsedValue[_]]]) extends SbtEntityData

object Play2ProjectData {
  val Key: Key[Play2ProjectData] =
    datakey(classOf[Play2ProjectData], ProjectKeys.PROJECT.getProcessingWeight + 1)
}

// Android facet configuration imported from the sbt-android plugin.
@SerialVersionUID(1)
case class AndroidFacetData(version: String, manifest: File, apk: File,
                            res: File, assets: File, gen: File, libs: File,
                            isLibrary: Boolean, proguardConfig: Seq[String])
  extends SbtEntityData

object AndroidFacetData {
  val Key: Key[AndroidFacetData] =
    datakey(classOf[AndroidFacetData], ProjectKeys.LIBRARY_DEPENDENCY.getProcessingWeight + 1)
}
loskutov/intellij-scala
src/org/jetbrains/sbt/project/data/dataObjects.scala
Scala
apache-2.0
4,201
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.ui.jobs

import java.util.Locale
import javax.servlet.http.HttpServletRequest

import scala.collection.mutable.{Buffer, ListBuffer}
import scala.xml.{Node, NodeSeq, Unparsed, Utility}

import org.apache.commons.lang3.StringEscapeUtils

import org.apache.spark.JobExecutionStatus
import org.apache.spark.scheduler._
import org.apache.spark.status.AppStatusStore
import org.apache.spark.status.api.v1
import org.apache.spark.ui._

/** Page showing statistics and stage list for a given job */
private[ui] class JobPage(parent: JobsTab, store: AppStatusStore) extends WebUIPage("job") {

  // Static legend SVG for the stages group of the timeline; newlines are stripped
  // so the markup can be embedded in a single-line Javascript string.
  private val STAGES_LEGEND =
    <div class="legend-area"><svg width="150px" height="85px">
      <rect class="completed-stage-legend"
        x="5px" y="5px" width="20px" height="15px" rx="2px" ry="2px"></rect>
      <text x="35px" y="17px">Completed</text>
      <rect class="failed-stage-legend"
        x="5px" y="30px" width="20px" height="15px" rx="2px" ry="2px"></rect>
      <text x="35px" y="42px">Failed</text>
      <rect class="active-stage-legend"
        x="5px" y="55px" width="20px" height="15px" rx="2px" ry="2px"></rect>
      <text x="35px" y="67px">Active</text>
    </svg></div>.toString.filter(_ != '\n')

  // Static legend SVG for the executors group of the timeline.
  private val EXECUTORS_LEGEND =
    <div class="legend-area"><svg width="150px" height="55px">
      <rect class="executor-added-legend"
        x="5px" y="5px" width="20px" height="15px" rx="2px" ry="2px"></rect>
      <text x="35px" y="17px">Added</text>
      <rect class="executor-removed-legend"
        x="5px" y="30px" width="20px" height="15px" rx="2px" ry="2px"></rect>
      <text x="35px" y="42px">Removed</text>
    </svg></div>.toString.filter(_ != '\n')

  /**
   * Builds one vis-timeline event object (as a Javascript literal) per stage.
   * Only call with stages that have a submission time (`submissionTime.get`).
   */
  private def makeStageEvent(stageInfos: Seq[v1.StageData]): Seq[String] = {
    stageInfos.map { stage =>
      val stageId = stage.stageId
      val attemptId = stage.attemptId
      val name = stage.name
      val status = stage.status.toString
      val submissionTime = stage.submissionTime.get.getTime()
      val completionTime = stage.completionTime.map(_.getTime())
        .getOrElse(System.currentTimeMillis())

      // The timeline library treats contents as HTML, so we have to escape them. We need to add
      // extra layers of escaping in order to embed this in a Javascript string literal.
      val escapedName = Utility.escape(name)
      val jsEscapedName = StringEscapeUtils.escapeEcmaScript(escapedName)
      s"""
         |{
         |  'className': 'stage job-timeline-object ${status}',
         |  'group': 'stages',
         |  'start': new Date(${submissionTime}),
         |  'end': new Date(${completionTime}),
         |  'content': '<div class="job-timeline-content" data-toggle="tooltip"' +
         |   'data-placement="top" data-html="true"' +
         |   'data-title="${jsEscapedName} (Stage ${stageId}.${attemptId})<br>' +
         |   'Status: ${status.toUpperCase(Locale.ROOT)}<br>' +
         |   'Submitted: ${UIUtils.formatDate(submissionTime)}' +
         |   '${
                 if (status != "running") {
                   s"""<br>Completed: ${UIUtils.formatDate(completionTime)}"""
                 } else {
                   ""
                 }
              }">' +
         |    '${jsEscapedName} (Stage ${stageId}.${attemptId})</div>',
         |}
       """.stripMargin
    }
  }

  /**
   * Builds one "added" event per executor, plus a "removed" event for each
   * executor that has a removal time, as Javascript literals.
   */
  def makeExecutorEvent(executors: Seq[v1.ExecutorSummary]): Seq[String] = {
    val events = ListBuffer[String]()
    executors.foreach { e =>
      val addedEvent =
        s"""
           |{
           |  'className': 'executor added',
           |  'group': 'executors',
           |  'start': new Date(${e.addTime.getTime()}),
           |  'content': '<div class="executor-event-content"' +
           |    'data-toggle="tooltip" data-placement="bottom"' +
           |    'data-title="Executor ${e.id}<br>' +
           |    'Added at ${UIUtils.formatDate(e.addTime)}"' +
           |    'data-html="true">Executor ${e.id} added</div>'
           |}
         """.stripMargin
      events += addedEvent

      e.removeTime.foreach { removeTime =>
        val removedEvent =
          s"""
             |{
             |  'className': 'executor removed',
             |  'group': 'executors',
             |  'start': new Date(${removeTime.getTime()}),
             |  'content': '<div class="executor-event-content"' +
             |    'data-toggle="tooltip" data-placement="bottom"' +
             |    'data-title="Executor ${e.id}<br>' +
             |    'Removed at ${UIUtils.formatDate(removeTime)}' +
             |    '${
                    e.removeReason.map { reason =>
                      s"""<br>Reason: ${reason.replace("\n", " ")}"""
                    }.getOrElse("")
                 }"' +
             |    'data-html="true">Executor ${e.id} removed</div>'
             |}
           """.stripMargin
        events += removedEvent
      }
    }
    events.toSeq
  }

  /**
   * Renders the collapsible event-timeline section, combining stage and
   * executor events into the vis-timeline groups drawn by `drawJobTimeline`.
   */
  private def makeTimeline(
      stages: Seq[v1.StageData],
      executors: Seq[v1.ExecutorSummary],
      appStartTime: Long): Seq[Node] = {

    val stageEventJsonAsStrSeq = makeStageEvent(stages)
    val executorsJsonAsStrSeq = makeExecutorEvent(executors)

    val groupJsonArrayAsStr =
      s"""
          |[
          |  {
          |    'id': 'executors',
          |    'content': '<div>Executors</div>${EXECUTORS_LEGEND}',
          |  },
          |  {
          |    'id': 'stages',
          |    'content': '<div>Stages</div>${STAGES_LEGEND}',
          |  }
          |]
        """.stripMargin

    val eventArrayAsStr =
      (stageEventJsonAsStrSeq ++ executorsJsonAsStrSeq).mkString("[", ",", "]")

    <span class="expand-job-timeline">
      <span class="expand-job-timeline-arrow arrow-closed"></span>
      <a data-toggle="tooltip" title={ToolTips.STAGE_TIMELINE} data-placement="right">
        Event Timeline
      </a>
    </span> ++
    <div id="job-timeline" class="collapsed">
      <div class="control-panel">
        <div id="job-timeline-zoom-lock">
          <input type="checkbox"></input>
          <span>Enable zooming</span>
        </div>
      </div>
    </div> ++
    <script type="text/javascript">
      {Unparsed(s"drawJobTimeline(${groupJsonArrayAsStr}, ${eventArrayAsStr}, " +
      s"${appStartTime}, ${UIUtils.getTimeZoneOffset()});")}
    </script>
  }

  /**
   * Renders the job detail page: summary, timeline, DAG visualization, and one
   * stage table per stage status (active / pending / completed / skipped / failed).
   */
  def render(request: HttpServletRequest): Seq[Node] = {
    // stripXSS is called first to remove suspicious characters used in XSS attacks
    val parameterId = UIUtils.stripXSS(request.getParameter("id"))
    require(parameterId != null && parameterId.nonEmpty, "Missing id parameter")

    val jobId = parameterId.toInt
    val jobData = store.asOption(store.job(jobId)).getOrElse {
      val content =
        <div id="no-info">
          <p>No information to display for job {jobId}</p>
        </div>
      return UIUtils.headerSparkPage(
        s"Details for Job $jobId", content, parent)
    }
    val isComplete = jobData.status != JobExecutionStatus.RUNNING
    val stages = jobData.stageIds.map { stageId =>
      // This could be empty if the listener hasn't received information about the
      // stage or if the stage information has been garbage collected
      store.stageData(stageId).lastOption.getOrElse {
        new v1.StageData(
          v1.StageStatus.PENDING,
          stageId,
          0, 0, 0, 0, 0, 0, 0,
          0L, 0L, None, None, None, None,
          0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
          "Unknown",
          None,
          "Unknown",
          null,
          Nil,
          Nil,
          None,
          None,
          Map())
      }
    }

    val activeStages = Buffer[v1.StageData]()
    val completedStages = Buffer[v1.StageData]()
    // If the job is completed, then any pending stages are displayed as "skipped":
    val pendingOrSkippedStages = Buffer[v1.StageData]()
    val failedStages = Buffer[v1.StageData]()
    for (stage <- stages) {
      if (stage.submissionTime.isEmpty) {
        pendingOrSkippedStages += stage
      } else if (stage.completionTime.isDefined) {
        if (stage.status == v1.StageStatus.FAILED) {
          failedStages += stage
        } else {
          completedStages += stage
        }
      } else {
        activeStages += stage
      }
    }

    val basePath = "jobs/job"

    // FIX: a completed job displays these stages under the "Skipped Stages"
    // heading, so the table id must be "skipped" then (was inverted before).
    val pendingOrSkippedTableId = if (isComplete) {
      "skipped"
    } else {
      "pending"
    }

    val activeStagesTable =
      new StageTableBase(store, request, activeStages, "active", "activeStage", parent.basePath,
        basePath, parent.isFairScheduler, killEnabled = parent.killEnabled, isFailedStage = false)
    val pendingOrSkippedStagesTable =
      new StageTableBase(store, request, pendingOrSkippedStages, pendingOrSkippedTableId,
        "pendingStage", parent.basePath, basePath, parent.isFairScheduler,
        killEnabled = false, isFailedStage = false)
    val completedStagesTable =
      new StageTableBase(store, request, completedStages, "completed", "completedStage",
        parent.basePath, basePath, parent.isFairScheduler,
        killEnabled = false, isFailedStage = false)
    val failedStagesTable =
      new StageTableBase(store, request, failedStages, "failed", "failedStage", parent.basePath,
        basePath, parent.isFairScheduler, killEnabled = false, isFailedStage = true)

    val shouldShowActiveStages = activeStages.nonEmpty
    val shouldShowPendingStages = !isComplete && pendingOrSkippedStages.nonEmpty
    val shouldShowCompletedStages = completedStages.nonEmpty
    val shouldShowSkippedStages = isComplete && pendingOrSkippedStages.nonEmpty
    val shouldShowFailedStages = failedStages.nonEmpty

    val summary: NodeSeq =
      <div>
        <ul class="unstyled">
          <li>
            <strong>Status:</strong>
            {jobData.status}
          </li>
          {
            if (jobData.jobGroup.isDefined) {
              <li>
                <strong>Job Group:</strong>
                {jobData.jobGroup.get}
              </li>
            }
          }
          {
            if (shouldShowActiveStages) {
              <li>
                <a href="#active"><strong>Active Stages:</strong></a>
                {activeStages.size}
              </li>
            }
          }
          {
            if (shouldShowPendingStages) {
              <li>
                <a href="#pending">
                  <strong>Pending Stages:</strong>
                </a>{pendingOrSkippedStages.size}
              </li>
            }
          }
          {
            if (shouldShowCompletedStages) {
              <li>
                <a href="#completed"><strong>Completed Stages:</strong></a>
                {completedStages.size}
              </li>
            }
          }
          {
            if (shouldShowSkippedStages) {
              <li>
                <a href="#skipped"><strong>Skipped Stages:</strong></a>
                {pendingOrSkippedStages.size}
              </li>
            }
          }
          {
            if (shouldShowFailedStages) {
              <li>
                <a href="#failed"><strong>Failed Stages:</strong></a>
                {failedStages.size}
              </li>
            }
          }
        </ul>
      </div>

    var content = summary
    val appStartTime = store.applicationInfo().attempts.head.startTime.getTime()

    content ++= makeTimeline(activeStages ++ completedStages ++ failedStages,
      store.executorList(false), appStartTime)

    content ++= UIUtils.showDagVizForJob(
      jobId, store.operationGraphForJob(jobId))

    if (shouldShowActiveStages) {
      content ++= <h4 id="active">Active Stages ({activeStages.size})</h4> ++
        activeStagesTable.toNodeSeq
    }
    if (shouldShowPendingStages) {
      content ++= <h4 id="pending">Pending Stages ({pendingOrSkippedStages.size})</h4> ++
        pendingOrSkippedStagesTable.toNodeSeq
    }
    if (shouldShowCompletedStages) {
      content ++= <h4 id="completed">Completed Stages ({completedStages.size})</h4> ++
        completedStagesTable.toNodeSeq
    }
    if (shouldShowSkippedStages) {
      content ++= <h4 id="skipped">Skipped Stages ({pendingOrSkippedStages.size})</h4> ++
        pendingOrSkippedStagesTable.toNodeSeq
    }
    if (shouldShowFailedStages) {
      content ++= <h4 id="failed">Failed Stages ({failedStages.size})</h4> ++
        failedStagesTable.toNodeSeq
    }
    UIUtils.headerSparkPage(s"Details for Job $jobId", content, parent, showVisualization = true)
  }
}
ron8hu/spark
core/src/main/scala/org/apache/spark/ui/jobs/JobPage.scala
Scala
apache-2.0
13,359
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.planner.utils import org.apache.flink.api.common.typeinfo.TypeInformation import org.apache.flink.api.java.tuple.Tuple3 import org.apache.flink.api.java.typeutils.RowTypeInfo import org.apache.flink.api.scala.typeutils.Types import org.apache.flink.table.api.ValidationException import org.apache.flink.table.functions.python.{PythonEnv, PythonFunction} import org.apache.flink.table.functions.{FunctionContext, ScalarFunction, TableFunction} import org.apache.flink.types.Row import org.junit.Assert import java.lang.Boolean import scala.annotation.varargs case class SimpleUser(name: String, age: Int) @SerialVersionUID(1L) class TableFunc0 extends TableFunction[SimpleUser] { // make sure input element's format is "<string>#<int>" def eval(user: String): Unit = { if (user.contains("#")) { val splits = user.split("#") collect(SimpleUser(splits(0), splits(1).toInt)) } } } @SerialVersionUID(1L) class TableFunc1 extends TableFunction[String] { def eval(str: String): Unit = { if (str.contains("#")){ str.split("#").foreach(collect) } } def eval(str: String, prefix: String): Unit = { if (str.contains("#")) { str.split("#").foreach(s => collect(prefix + s)) } } } 
@SerialVersionUID(1L) class TableFunc2 extends TableFunction[Row] { def eval(str: String): Unit = { if (str.contains("#")) { str.split("#").foreach({ s => val row = new Row(2) row.setField(0, s) row.setField(1, s.length) collect(row) }) } } override def getResultType(): TypeInformation[Row] = new RowTypeInfo(Types.STRING, Types.INT) } @SerialVersionUID(1L) class TableFunc3(data: String, conf: Map[String, String]) extends TableFunction[SimpleUser] { def this(data: String) { this(data, null) } def eval(user: String): Unit = { if (user.contains("#")) { val splits = user.split("#") if (null != data) { if (null != conf && conf.size > 0) { val it = conf.keys.iterator while (it.hasNext) { val key = it.next() val value = conf.get(key).get collect( SimpleUser( data.concat("_key=") .concat(key) .concat("_value=") .concat(value) .concat("_") .concat(splits(0)), splits(1).toInt)) } } else { collect(SimpleUser(data.concat(splits(0)), splits(1).toInt)) } } else { collect(SimpleUser(splits(0), splits(1).toInt)) } } } } @SerialVersionUID(1L) class MockPythonTableFunction extends TableFunction[Row] with PythonFunction { def eval(x: Int, y: Int) = ??? 
override def getResultType: TypeInformation[Row] = new RowTypeInfo(Types.INT, Types.INT) override def getSerializedPythonFunction: Array[Byte] = Array[Byte](0) override def getPythonEnv: PythonEnv = null } //TODO support dynamic type //class UDTFWithDynamicType extends TableFunction[Row] { // // def eval(str: String, column: Int): Unit = { // if (str.contains("#")) { // str.split("#").foreach({ s => // val row = new Row(column) // row.setField(0, s) // var i = 0 // for (i <- 1 until column) { // row.setField(i, s.length) // } // collect(row) // }) // } // } // // override def getResultType( // arguments: Array[AnyRef], // typeInfos: Array[Class[_]]): TypeInformation[_] = { // assert(typeInfos(1).isPrimitive) // assert(typeInfos(1).equals(325.getClass)) // val column = arguments(1).asInstanceOf[Int] // val basicTypeInfos = Array.fill[TypeInformation[_]](column)(Types.INT) // basicTypeInfos(0) = Types.STRING // new RowTypeInfo(basicTypeInfos: _*) // } //} // //class UDTFWithDynamicType0 extends TableFunction[Row] { // // def eval(str: String, cols: String): Unit = { // val columns = cols.split(",") // // if (str.contains("#")) { // str.split("#").foreach({ s => // val row = new Row(columns.length) // row.setField(0, s) // for (i <- 1 until columns.length) { // if (columns(i).equals("string")) { // row.setField(i, s.length.toString) // } else if (columns(i).equals("int")) { // row.setField(i, s.length) // } // } // collect(row) // }) // } // } // // override def getResultType( // arguments: Array[AnyRef], // typeInfos: Array[Class[_]]): TypeInformation[_] = { // assert(typeInfos(1).equals(Class.forName("java.lang.String"))) // val columnStr = arguments(1).asInstanceOf[String] // val columns = columnStr.split(",") // // val basicTypeInfos = for (c <- columns) yield c match { // case "string" => Types.STRING // case "int" => Types.INT // } // new RowTypeInfo(basicTypeInfos: _*) // } //} // //class UDTFWithDynamicType1 extends TableFunction[Row] { // // def eval(col: 
String): Unit = { // val row = new Row(1) // col match { // case "string" => row.setField(0, "string") // case "int" => row.setField(0, 4) // case "double" => row.setField(0, 3.25) // case "boolean" => row.setField(0, true) // case "timestamp" => row.setField(0, new Timestamp(325L)) // } // collect(row) // } // // override def getResultType( // arguments: Array[AnyRef], // typeInfos: Array[Class[_]]): DataType = { // assert(typeInfos(0).equals(Class.forName("java.lang.String"))) // val columnStr = arguments(0).asInstanceOf[String] // columnStr match { // case "string" => new RowTypeInfo(Types.STRING) // case "int" => new RowTypeInfo(Types.INT) // case "double" => new RowTypeInfo(Types.DOUBLE) // case "boolean" => new RowTypeInfo(Types.BOOLEAN) // case "timestamp" => new RowTypeInfo(Types.TIMESTAMP) // case _ => new RowTypeInfo(Types.INT) // } // } //} // //class UDTFWithDynamicTypeAndRexNodes extends TableFunction[Row] { // // def eval(str: String, i: Int, si: Int, bi: Int, flt: Double, real: Double, d: Double, // b: Boolean, ts: Timestamp): // Unit = { // val row = new Row(9) // row.setField(0, str) // row.setField(1, i) // row.setField(2, si) // row.setField(3, bi) // row.setField(4, flt) // row.setField(5, real) // row.setField(6, d) // row.setField(7, b) // row.setField(8, ts) // collect(row) // } // // override def getResultType( // arguments: Array[AnyRef], // typeInfos: Array[Class[_]]): DataType = { // // Test for the transformRexNodes() // // No assertion here, argument 0 is not literal // val str = arguments(0).asInstanceOf[String] // if (null != str) { // throw new RuntimeException("The first column should be null") // } // // assert(typeInfos(1).isPrimitive) // assert(typeInfos(1).equals(325.getClass)) // val i = arguments(1).asInstanceOf[Int] // if (i <= 0) { // throw new RuntimeException("The arguments should be greater than zero") // } // // assert(typeInfos(2).isPrimitive) // assert(typeInfos(2).equals(325.getClass)) // val si = 
arguments(2).asInstanceOf[Int] // if (si <= 0) { // throw new RuntimeException("The arguments should be greater than zero") // } // // assert(typeInfos(3).isPrimitive) // assert(typeInfos(3).equals(325.getClass)) // val bi = arguments(3).asInstanceOf[Int] // if (bi <= 0) { // throw new RuntimeException("The arguments should be greater than zero") // } // // assert(typeInfos(4).isPrimitive) // assert(typeInfos(4).equals(3.25.getClass)) // val float = arguments(4).asInstanceOf[Double] // if (float <= 0) { // throw new RuntimeException("The arguments should be greater than zero") // } // // assert(typeInfos(5).isPrimitive) // assert(typeInfos(5).equals(3.25.getClass)) // val real = arguments(5).asInstanceOf[Double] // if (real <= 0) { // throw new RuntimeException("The arguments should be greater than zero") // } // // assert(typeInfos(6).isPrimitive) // assert(typeInfos(6).equals(3.25.getClass)) // val d = arguments(6).asInstanceOf[Double] // if (d <= 0) { // throw new RuntimeException("The arguments should be greater than zero") // } // // assert(typeInfos(7).equals(Class.forName("java.lang.Boolean"))) // val b = arguments(7).asInstanceOf[Boolean] // if (!b) { // throw new RuntimeException("The arguments should be true") // } // // assert(typeInfos(8).equals(Class.forName("java.sql.Timestamp"))) // val ts = arguments(8).asInstanceOf[Timestamp] // if (ts.getTime <= 0) { // throw new RuntimeException("The arguments should be greater than zero") // } // // new RowTypeInfo( // Types.STRING, // Types.INT, // Types.INT, // Types.INT, // Types.DOUBLE, // Types.DOUBLE, // Types.DOUBLE, // Types.BOOLEAN, // Types.TIMESTAMP // ) // } //} // //class UDTFWithDynamicTypeAndVariableArgs extends TableFunction[Row] { // // def eval(value: Int): Unit = { // val v = new Integer(value) // collect(Row.of(v, v)) // collect(Row.of(v, v)) // } // // @varargs // def eval(str: String, cols: String, fields: AnyRef*): Unit = { // val columns = cols.split(",") // // if (str.contains("#")) { // 
str.split("#").foreach({ s => // val row = new Row(columns.length) // row.setField(0, s) // for (i <- 1 until columns.length) { // if (columns(i).equals("string")) { // row.setField(i, s.length.toString) // } else if (columns(i).equals("int")) { // row.setField(i, s.length) // } // } // collect(row) // }) // } // } // // override def getResultType( // arguments: Array[AnyRef], // typeInfos: Array[Class[_]]): DataType = { // if (typeInfos.length == 1) { // new RowTypeInfo(Types.INT, Types.INT) // } else { // assert(typeInfos(1).equals(Class.forName("java.lang.String"))) // val columnStr = arguments(1).asInstanceOf[String] // val columns = columnStr.split(",") // // val basicTypeInfos = for (c <- columns) yield c match { // case "string" => Types.STRING // case "int" => Types.INT // } // new RowTypeInfo(basicTypeInfos: _*) // } // } //} @SerialVersionUID(1L) class TableFunc4 extends TableFunction[Row] { def eval(b: Byte, s: Short, f: Float): Unit = { collect(Row.of("Byte=" + b, "Short=" + s, "Float=" + f)) } override def getResultType: TypeInformation[Row] = { new RowTypeInfo(Types.STRING, Types.STRING, Types.STRING) } } @SerialVersionUID(1L) class TableFunc6 extends TableFunction[Row] { def eval(row: Row): Unit = { collect(row) } override def getParameterTypes(signature: Array[Class[_]]): Array[TypeInformation[_]] = Array(new RowTypeInfo(Types.INT, Types.INT, Types.INT)) override def getResultType: TypeInformation[Row] = { new RowTypeInfo(Types.INT, Types.INT, Types.INT) } } @SerialVersionUID(1L) class TableFunc7 extends TableFunction[Row] { def eval(row: Row): Unit = { } def eval(row: java.util.List[Row]): Unit = { } } @SerialVersionUID(1L) class RF extends ScalarFunction { def eval(x: Int): java.util.List[Row] = { java.util.Collections.emptyList() } } @SerialVersionUID(1L) class VarArgsFunc0 extends TableFunction[String] { @varargs def eval(str: String*): Unit = { str.foreach(collect) } } @SerialVersionUID(1L) class HierarchyTableFunction extends 
SplittableTableFunction[Boolean, Integer] { def eval(user: String) { if (user.contains("#")) { val splits = user.split("#") val age = splits(1).toInt collect(new Tuple3[String, Boolean, Integer](splits(0), age >= 20, age)) } } } abstract class SplittableTableFunction[A, B] extends TableFunction[Tuple3[String, A, B]] {} @SerialVersionUID(1L) class PojoTableFunc extends TableFunction[PojoUser] { def eval(user: String) { if (user.contains("#")) { val splits = user.split("#") collect(new PojoUser(splits(0), splits(1).toInt)) } } } class PojoUser() { var name: String = _ var age: Int = 0 def this(name: String, age: Int) { this() this.name = name this.age = age } } // ---------------------------------------------------------------------------------------------- // Invalid Table Functions // ---------------------------------------------------------------------------------------------- // this is used to check whether scala object is forbidden @SerialVersionUID(1L) object ObjectTableFunction extends TableFunction[Integer] { def eval(a: Int, b: Int): Unit = { collect(a) collect(b) } } @SerialVersionUID(1L) class RichTableFunc0 extends TableFunction[String] { var openCalled = false var closeCalled = false override def open(context: FunctionContext): Unit = { super.open(context) if (closeCalled) { Assert.fail("Close called before open.") } openCalled = true } def eval(str: String): Unit = { if (!openCalled) { Assert.fail("Open was not called before eval.") } if (closeCalled) { Assert.fail("Close called before eval.") } if (!str.contains("#")) { collect(str) } } override def close(): Unit = { super.close() if (!openCalled) { Assert.fail("Open was not called before close.") } closeCalled = true } } @SerialVersionUID(1L) class RichTableFunc1 extends TableFunction[String] { var separator: Option[String] = None override def open(context: FunctionContext): Unit = { separator = Some(context.getJobParameter("word_separator", "")) } def eval(str: String): Unit = { if 
(str.contains(separator.getOrElse(throw new ValidationException(s"no separator")))) { str.split(separator.get).foreach(collect) } } override def close(): Unit = { separator = None } }
tillrohrmann/flink
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/utils/UserDefinedTableFunctions.scala
Scala
apache-2.0
14,832
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ignition.core.testsupport.spark import org.apache.spark.{SparkConf, SparkContext} import org.scalatest.{BeforeAndAfterAll, Suite} /** Shares a local `SparkContext` between all tests in a suite and closes it at the end */ trait SharedSparkContext extends BeforeAndAfterAll { self: Suite => @transient private var _sc: SparkContext = _ def sc: SparkContext = _sc var conf = new SparkConf(false) override def beforeAll() { //Logger.getRootLogger().removeAllAppenders(); //Logger.getRootLogger().addAppender(new NullAppender()); _sc = new SparkContext("local", "test", conf) _sc.setLogLevel("OFF") super.beforeAll() } override def afterAll() { LocalSparkContext.stop(_sc) _sc = null super.afterAll() } }
chaordic/ignition-core
src/main/scala/ignition/core/testsupport/spark/SharedSparkContext.scala
Scala
mit
1,563
package lila.pref import play.api.data._ import play.api.data.Forms._ import lila.user.User private[pref] final class DataForm { val pref = Form(mapping( "autoQueen" -> number.verifying(Pref.AutoQueen.choices.toMap contains _), "autoThreefold" -> number.verifying(Pref.AutoThreefold.choices.toMap contains _), "takeback" -> number.verifying(Pref.Takeback.choices.toMap contains _), "clockTenths" -> number.verifying(Pref.ClockTenths.choices.toMap contains _), "clockBar" -> number.verifying(Set(0, 1) contains _), "clockSound" -> number.verifying(Set(0, 1) contains _), "follow" -> number.verifying(Set(0, 1) contains _), "highlight" -> number.verifying(Set(0, 1) contains _), "destination" -> number.verifying(Set(0, 1) contains _), "coords" -> number.verifying(Pref.Coords.choices.toMap contains _), "replay" -> number.verifying(Pref.Replay.choices.toMap contains _), "blindfold" -> number.verifying(Pref.Blindfold.choices.toMap contains _), "challenge" -> number.verifying(Pref.Challenge.choices.toMap contains _), "premove" -> number.verifying(Set(0, 1) contains _), "animation" -> number.verifying(Set(0, 1, 2, 3) contains _), "submitMove" -> number.verifying(Set(0, 1, 2) contains _), "coachShare" -> number.verifying(Set(0, 1, 2) contains _), "captured" -> number.verifying(Set(0, 1) contains _) )(PrefData.apply)(PrefData.unapply)) case class PrefData( autoQueen: Int, autoThreefold: Int, takeback: Int, clockTenths: Int, clockBar: Int, clockSound: Int, follow: Int, highlight: Int, destination: Int, coords: Int, replay: Int, blindfold: Int, challenge: Int, premove: Int, animation: Int, submitMove: Int, coachShare: Int, captured: Int) { def apply(pref: Pref) = pref.copy( autoQueen = autoQueen, autoThreefold = autoThreefold, takeback = takeback, clockTenths = clockTenths, clockBar = clockBar == 1, clockSound = clockSound == 1, follow = follow == 1, highlight = highlight == 1, destination = destination == 1, coords = coords, replay = replay, blindfold = blindfold, challenge = 
challenge, premove = premove == 1, animation = animation, submitMove = submitMove, coachShare = coachShare, captured = captured == 1) } object PrefData { def apply(pref: Pref): PrefData = PrefData( autoQueen = pref.autoQueen, autoThreefold = pref.autoThreefold, takeback = pref.takeback, clockTenths = pref.clockTenths, clockBar = pref.clockBar.fold(1, 0), clockSound = pref.clockSound.fold(1, 0), follow = pref.follow.fold(1, 0), highlight = pref.highlight.fold(1, 0), destination = pref.destination.fold(1, 0), coords = pref.coords, replay = pref.replay, blindfold = pref.blindfold, challenge = pref.challenge, premove = pref.premove.fold(1, 0), animation = pref.animation, submitMove = pref.submitMove, coachShare = pref.coachShare, captured = pref.captured.fold(1, 0)) } def prefOf(p: Pref): Form[PrefData] = pref fill PrefData(p) val miniPref = Form(mapping( "autoQueen" -> number.verifying(Pref.AutoQueen.choices.toMap contains _), "blindfold" -> number.verifying(Pref.Blindfold.choices.toMap contains _), "clockTenths" -> number.verifying(Pref.ClockTenths.choices.toMap contains _), "submitMove" -> number.verifying(Set(0, 1, 2) contains _) )(MiniPrefData.apply)(MiniPrefData.unapply)) case class MiniPrefData( autoQueen: Int, blindfold: Int, clockTenths: Int, submitMove: Int) { def apply(pref: Pref) = pref.copy( autoQueen = autoQueen, blindfold = blindfold, clockTenths = clockTenths, submitMove = submitMove) } object MiniPrefData { def apply(pref: Pref): MiniPrefData = MiniPrefData( autoQueen = pref.autoQueen, blindfold = pref.blindfold, clockTenths = pref.clockTenths, submitMove = pref.submitMove) } def miniPrefOf(p: Pref): Form[MiniPrefData] = miniPref fill MiniPrefData(p) val theme = Form(single( "theme" -> nonEmptyText.verifying(Theme contains _) )) val pieceSet = Form(single( "set" -> nonEmptyText.verifying(PieceSet contains _) )) val theme3d = Form(single( "theme" -> nonEmptyText.verifying(Theme3d contains _) )) val pieceSet3d = Form(single( "set" -> 
nonEmptyText.verifying(PieceSet3d contains _) )) val soundSet = Form(single( "set" -> nonEmptyText.verifying(SoundSet contains _) )) val bg = Form(single( "bg" -> text.verifying(List("light", "dark", "transp") contains _) )) val bgImg = Form(single( "bgImg" -> nonEmptyText )) val is3d = Form(single( "is3d" -> text.verifying(List("true", "false") contains _) )) }
pavelo65/lila
modules/pref/src/main/DataForm.scala
Scala
mit
4,948
/* * Copyright 2015 Philip L. McMahon * * Philip L. McMahon licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package rascql.postgresql.stream import scala.collection.immutable import akka.stream.FlowShape import akka.stream.scaladsl._ import rascql.postgresql.protocol._ /** * Initiates the client/server communication via a [[StartupMessage]] and then * handles any subsequent [[AuthenticationRequest]]s. * * {{{ * . . . . . . . . . . . . . . . . . . . . . . . . . * . . * . +---------------+ . * . | | . * . | initial | . * . | | . * . +------[o]------+ . * . | . * . v . * . +---------------+ +------[i]------+ . * . | | | | . * BackendMessage --> [i] authn [o] --> [i] concat [o] --> FrontendMessage * . | | | | . * . +---------------+ +---------------+ . * . . * . . . . . . . . . . . . . . . . . . . . . . . . . * * @author Philip L. McMahon */ object Startup { type Parameters = immutable.Map[String, String] def apply(username: String, password: String, parameters: Parameters): Flow[BackendMessage, FrontendMessage, Unit] = Flow[BackendMessage].transform(() => new AuthenticationStage(username, password)). prepend(Source.single[FrontendMessage](StartupMessage(username, parameters))). named("Startup") }
rascql/rascql
src/main/scala/rascql/postgresql/stream/Startup.scala
Scala
apache-2.0
2,490
package commons.utils import slick.dbio.DBIO object DbioUtils { def optionToDbio[A](someVal: Option[A], failureReason: Throwable = new NoSuchElementException): DBIO[A] = { someVal match { case Some(v) => DBIO.successful(v) case None => DBIO.failed(failureReason) } } }
Dasiu/play-framework-test-project
app/commons/utils/DbioUtils.scala
Scala
mit
295
package codegen import protocbridge.codegen._ import com.google.protobuf.compiler.PluginProtos.CodeGeneratorResponse.File object LocalGen extends CodeGenApp { def process(request: CodeGenRequest): CodeGenResponse = { CodeGenResponse.succeed( Seq( File.newBuilder .setName("LocalGen.scala") .setContent("object LocalGen { val version = 1 } ") .build ) ) } }
thesamet/sbt-protoc
src/sbt-test/settings/caching/codegen/src/main/scala/codegen/LocalGen.scala
Scala
apache-2.0
421
package br.unb.cic.poo.gol.base import scala.io.StdIn.{readInt, readLine} import com.badlogic.gdx.ApplicationListener; import com.badlogic.gdx.InputProcessor; import com.badlogic.gdx.Gdx; import com.badlogic.gdx.graphics.Color; import com.badlogic.gdx.graphics.GL20; import com.badlogic.gdx.graphics.glutils.ShapeRenderer; import com.badlogic.gdx.graphics.glutils.ShapeRenderer.ShapeType; import com.badlogic.gdx.graphics.g2d.SpriteBatch; import com.badlogic.gdx.backends.lwjgl.LwjglApplication; import com.badlogic.gdx.backends.lwjgl.LwjglApplicationConfiguration; /** * Representa o componente View do GoL * * @author Breno Xavier (baseado na implementacao Java de rbonifacio@unb.br */ class Coordinates(var x: Int, var y: Int) /* * Tela padrão */ class GolScreen(var width: Int, var height: Int, cellSize: Coordinates) extends ApplicationListener { private var batch: SpriteBatch = null private var shapeRenderer: ShapeRenderer = null override def create() { batch = new SpriteBatch shapeRenderer = new ShapeRenderer } override def dispose() { batch.dispose } override def render() { Gdx.gl.glClearColor(0,0,0, 1) Gdx.gl.glClear(GL20.GL_COLOR_BUFFER_BIT) batch.begin() for(i <- (0 until height)) { for(j <- (0 until width)) { shapeRenderer.begin(ShapeType.Filled); if(GameEngine.isCellAlive(i, j)){ shapeRenderer.setColor(0.11f, 0.32f, 0.47f, 1); shapeRenderer.rect((j)*cellSize.x, (height-1-i)*cellSize.y, cellSize.x, cellSize.y); } shapeRenderer.end(); } } batch.end() } override def resize(width: Int, height: Int) { } override def pause() { } override def resume() { } } /* * Processador de entradas */ class MyInputProcessor extends InputProcessor { var auto: Boolean = false def keyDown (keycode: Int): Boolean = false def keyUp (keycode: Int): Boolean = { if(keycode == 22) GameController.nextGeneration if(keycode == 21) GameController.backGeneration return false } def keyTyped (character: Char): Boolean = { println(character) if(character == 'a' && !auto) { GameController.automatico 
auto = true } if(character == 's' && auto) { GameController.updateTask.cancel() auto = false } if(character == 'q') GameController.halt return false } def touchDown (x: Int, y: Int, pointer: Int, button: Int): Boolean = false def touchUp (x: Int, y: Int, pointer: Int, button: Int): Boolean = false def mouseMoved (x: Int, y: Int): Boolean = false def scrolled (amount: Int): Boolean = false def touchDragged (x: Int, y: Int, pointer: Int): Boolean = { GameEngine.makeCellAlive( GameEngine.posicao((y/GameView.cellSize.y).toInt, GameEngine.height), GameEngine.posicao((x/GameView.cellSize.x).toInt, GameEngine.width) ) return false; } } object GameView { private final val LINE = "+-----+" private final val DEAD_CELL = "| |" private final val ALIVE_CELL = "| o |" private final val INVALID_OPTION = 0 private final val MAKE_CELL_ALIVE = 1 private final val NEXT_GENERATION = 2 private final val HALT = 3 private final val BACK_GENERATION = 4 private final val AUTOMATICO = 5 var cellSize = new Coordinates(20,20) def obterLargura = GameEngine.width def obterAltura = GameEngine.height var inputProcessor: MyInputProcessor = null /** * Atualiza o componente view (representado pela classe GameBoard), * possivelmente como uma resposta a uma atualiza��o do jogo. 
*/ def createBoard { val config: LwjglApplicationConfiguration = new LwjglApplicationConfiguration config.title = "Oiiiiiiii" config.width = GameEngine.width*cellSize.x config.height = GameEngine.height*cellSize.y config.useGL30 = true; new LwjglApplication(new GolScreen(GameEngine.width,GameEngine.height,cellSize), config) inputProcessor = new MyInputProcessor(); Gdx.input.setInputProcessor(inputProcessor); /* printFirstRow printLine for(i <- (0 until GameEngine.height)) { for(j <- (0 until GameEngine.width)) { print(if (GameEngine.isCellAlive(i, j)) ALIVE_CELL else DEAD_CELL); } println(" " + i) printLine }*/ } def update { //printOptions } /* private def printOptions { var option = 0 println("\\n\\n") do{ println("Select one of the options: \\n \\n"); println("[1] Make a cell alive"); println("[2] Next generation"); println("[3] Halt"); println("[4] Back generation"); println("[5] Automatico"); print("\\n \\n Option: "); option = parseOption(readLine) }while(option == 0) option match { case MAKE_CELL_ALIVE => makeCellAlive case NEXT_GENERATION => nextGeneration case HALT => halt case BACK_GENERATION => backGeneration case AUTOMATICO => automatico } }*/ /*private def makeCellAlive { var i = 0 var j = 0 do { print("\\n Inform the row number (0 - " + (GameEngine.height - 1) + "): ") i = parseRowandColumn(readLine).getOrElse(-1) print("\\n Inform the column number (0 - " + (GameEngine.width - 1) + "): ") j = parseRowandColumn(readLine).getOrElse(-1) } while(!validPosition(i,j)) GameController.makeCellAlive(i, j) }*/ /*private def nextGeneration = GameController.nextGeneration private def backGeneration = GameController.backGeneration private def automatico = GameController.automatico private def halt = GameController.halt*/ private def validPosition(i: Int, j: Int): Boolean = { i >= 0 && i < GameEngine.height && j >= 0 && j < GameEngine.width } /*def parseOption(option: String): Int = option match { case "1" => MAKE_CELL_ALIVE case "2" => NEXT_GENERATION case "3" => 
HALT case "4" => BACK_GENERATION case "5" => AUTOMATICO case _ => INVALID_OPTION }*/ /* Imprime uma linha usada como separador das linhas do tabuleiro */ /*private def printLine() { for(j <- (0 until GameEngine.width)) { print(LINE) } println() }*/ /* * Imprime os identificadores das colunas na primeira linha do tabuleiro */ /*private def printFirstRow { println("\\n \\n"); for(j <- (0 until GameEngine.width)) { print(" " + j + " ") } println() }*/ def parseRowandColumn(x: String): Option[Int] = { try { Some(x.toInt) } catch { case e: Exception => None } } def showRules(rules: List[Regra]) { var i = 1 println("Selecione o modo de jogo:") rules.foreach( a => {println("["+i+"] " + a.nome); i+=1}) val valor = parseRowandColumn(readLine).getOrElse(0) if(valor > 0 && valor <= rules.length) GameController.setRule(rules(valor-1)) else {println("Opção inválida"); showRules(rules)} } }
labm1997/Gol
base/src/main/scala/br/unb/cic/poo/gol/base/GameView.scala
Scala
gpl-3.0
6,952
package org.refptr.iscala package object msg { type Metadata = Map[String, String] val Metadata = Map }
nkhuyu/IScala
src/main/scala/Types.scala
Scala
mit
113
package permute import scala.Predef.intWrapper import scala.Predef.augmentString import scala.{Int, Boolean, Unit} import java.lang.String object PermuteBenchmark extends communitybench.Benchmark { val inputOutput: (String, String) = ("6", "720") def run(input: String): Int = { val size = input.toInt val permIter = (0 until size).toList.permutations var count = 0 while (permIter.hasNext) { permIter.next() count += 1 } count } private def factorial(i: Int): Int = { var n = i var fact = 1 while (n > 0) { fact *= n n -= 1 } fact } }
sjrd/scalajs-benchmarks
permute/src/main/scala/permute/PermuteBenchmark.scala
Scala
bsd-3-clause
626
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.controller import kafka.admin.AdminUtils import kafka.api.LeaderAndIsr import kafka.log.LogConfig import kafka.utils.Logging import kafka.common.{LeaderElectionNotNeededException, TopicAndPartition, StateChangeFailedException, NoReplicaOnlineException} import kafka.server.{ConfigType, KafkaConfig} trait PartitionLeaderSelector { /** * @param topicAndPartition The topic and partition whose leader needs to be elected * @param currentLeaderAndIsr The current leader and isr of input partition read from zookeeper * @throws NoReplicaOnlineException If no replica in the assigned replicas list is alive * @return The leader and isr request, with the newly selected leader and isr, and the set of replicas to receive * the LeaderAndIsrRequest. */ def selectLeader(topicAndPartition: TopicAndPartition, currentLeaderAndIsr: LeaderAndIsr): (LeaderAndIsr, Seq[Int]) } /** * Select the new leader, new isr and receiving replicas (for the LeaderAndIsrRequest): * 1. If at least one broker from the isr is alive, it picks a broker from the live isr as the new leader and the live * isr as the new isr. * 2. Else, if unclean leader election for the topic is disabled, it throws a NoReplicaOnlineException. * 3. 
Else, it picks some alive broker from the assigned replica list as the new leader and the new isr. * 4. If no broker in the assigned replica list is alive, it throws a NoReplicaOnlineException * Replicas to receive LeaderAndIsr request = live assigned replicas * Once the leader is successfully registered in zookeeper, it updates the allLeaders cache */ class OfflinePartitionLeaderSelector(controllerContext: ControllerContext, config: KafkaConfig) extends PartitionLeaderSelector with Logging { this.logIdent = "[OfflinePartitionLeaderSelector]: " def selectLeader(topicAndPartition: TopicAndPartition, currentLeaderAndIsr: LeaderAndIsr): (LeaderAndIsr, Seq[Int]) = { controllerContext.partitionReplicaAssignment.get(topicAndPartition) match { case Some(assignedReplicas) => val liveAssignedReplicas = assignedReplicas.filter(r => controllerContext.liveBrokerIds.contains(r)) val liveBrokersInIsr = currentLeaderAndIsr.isr.filter(r => controllerContext.liveBrokerIds.contains(r)) val currentLeaderEpoch = currentLeaderAndIsr.leaderEpoch val currentLeaderIsrZkPathVersion = currentLeaderAndIsr.zkVersion val newLeaderAndIsr = if (liveBrokersInIsr.isEmpty) { // Prior to electing an unclean (i.e. non-ISR) leader, ensure that doing so is not disallowed by the configuration // for unclean leader election. if (!LogConfig.fromProps(config.originals, AdminUtils.fetchEntityConfig(controllerContext.zkUtils, ConfigType.Topic, topicAndPartition.topic)).uncleanLeaderElectionEnable) { throw new NoReplicaOnlineException(("No broker in ISR for partition " + "%s is alive. Live brokers are: [%s],".format(topicAndPartition, controllerContext.liveBrokerIds)) + " ISR brokers are: [%s]".format(currentLeaderAndIsr.isr.mkString(","))) } debug("No broker in ISR is alive for %s. 
Pick the leader from the alive assigned replicas: %s" .format(topicAndPartition, liveAssignedReplicas.mkString(","))) if (liveAssignedReplicas.isEmpty) { throw new NoReplicaOnlineException(("No replica for partition " + "%s is alive. Live brokers are: [%s],".format(topicAndPartition, controllerContext.liveBrokerIds)) + " Assigned replicas are: [%s]".format(assignedReplicas)) } else { ControllerStats.uncleanLeaderElectionRate.mark() val newLeader = liveAssignedReplicas.head warn("No broker in ISR is alive for %s. Elect leader %d from live brokers %s. There's potential data loss." .format(topicAndPartition, newLeader, liveAssignedReplicas.mkString(","))) new LeaderAndIsr(newLeader, currentLeaderEpoch + 1, List(newLeader), currentLeaderIsrZkPathVersion + 1) } } else { val liveReplicasInIsr = liveAssignedReplicas.filter(r => liveBrokersInIsr.contains(r)) val newLeader = liveReplicasInIsr.head debug("Some broker in ISR is alive for %s. Select %d from ISR %s to be the leader." .format(topicAndPartition, newLeader, liveBrokersInIsr.mkString(","))) new LeaderAndIsr(newLeader, currentLeaderEpoch + 1, liveBrokersInIsr.toList, currentLeaderIsrZkPathVersion + 1) } info("Selected new leader and ISR %s for offline partition %s".format(newLeaderAndIsr.toString(), topicAndPartition)) (newLeaderAndIsr, liveAssignedReplicas) case None => throw new NoReplicaOnlineException("Partition %s doesn't have replicas assigned to it".format(topicAndPartition)) } } } /** * New leader = a live in-sync reassigned replica * New isr = current isr * Replicas to receive LeaderAndIsr request = reassigned replicas */ class ReassignedPartitionLeaderSelector(controllerContext: ControllerContext) extends PartitionLeaderSelector with Logging { this.logIdent = "[ReassignedPartitionLeaderSelector]: " /** * The reassigned replicas are already in the ISR when selectLeader is called. 
*/ def selectLeader(topicAndPartition: TopicAndPartition, currentLeaderAndIsr: LeaderAndIsr): (LeaderAndIsr, Seq[Int]) = { val reassignedInSyncReplicas = controllerContext.partitionsBeingReassigned(topicAndPartition).newReplicas val currentLeaderEpoch = currentLeaderAndIsr.leaderEpoch val currentLeaderIsrZkPathVersion = currentLeaderAndIsr.zkVersion val aliveReassignedInSyncReplicas = reassignedInSyncReplicas.filter(r => controllerContext.liveBrokerIds.contains(r) && currentLeaderAndIsr.isr.contains(r)) val newLeaderOpt = aliveReassignedInSyncReplicas.headOption newLeaderOpt match { case Some(newLeader) => (new LeaderAndIsr(newLeader, currentLeaderEpoch + 1, currentLeaderAndIsr.isr, currentLeaderIsrZkPathVersion + 1), reassignedInSyncReplicas) case None => reassignedInSyncReplicas.size match { case 0 => throw new NoReplicaOnlineException("List of reassigned replicas for partition " + " %s is empty. Current leader and ISR: [%s]".format(topicAndPartition, currentLeaderAndIsr)) case _ => throw new NoReplicaOnlineException("None of the reassigned replicas for partition " + "%s are in-sync with the leader. 
Current leader and ISR: [%s]".format(topicAndPartition, currentLeaderAndIsr)) } } } } /** * New leader = preferred (first assigned) replica (if in isr and alive); * New isr = current isr; * Replicas to receive LeaderAndIsr request = assigned replicas */ class PreferredReplicaPartitionLeaderSelector(controllerContext: ControllerContext) extends PartitionLeaderSelector with Logging { this.logIdent = "[PreferredReplicaPartitionLeaderSelector]: " def selectLeader(topicAndPartition: TopicAndPartition, currentLeaderAndIsr: LeaderAndIsr): (LeaderAndIsr, Seq[Int]) = { val assignedReplicas = controllerContext.partitionReplicaAssignment(topicAndPartition) val preferredReplica = assignedReplicas.head // check if preferred replica is the current leader val currentLeader = controllerContext.partitionLeadershipInfo(topicAndPartition).leaderAndIsr.leader if (currentLeader == preferredReplica) { throw new LeaderElectionNotNeededException("Preferred replica %d is already the current leader for partition %s" .format(preferredReplica, topicAndPartition)) } else { info("Current leader %d for partition %s is not the preferred replica.".format(currentLeader, topicAndPartition) + " Trigerring preferred replica leader election") // check if preferred replica is not the current leader and is alive and in the isr if (controllerContext.liveBrokerIds.contains(preferredReplica) && currentLeaderAndIsr.isr.contains(preferredReplica)) { (new LeaderAndIsr(preferredReplica, currentLeaderAndIsr.leaderEpoch + 1, currentLeaderAndIsr.isr, currentLeaderAndIsr.zkVersion + 1), assignedReplicas) } else { throw new StateChangeFailedException("Preferred replica %d for partition ".format(preferredReplica) + "%s is either not alive or not in the isr. 
Current leader and ISR: [%s]".format(topicAndPartition, currentLeaderAndIsr)) } } } } /** * New leader = replica in isr that's not being shutdown; * New isr = current isr - shutdown replica; * Replicas to receive LeaderAndIsr request = live assigned replicas */ class ControlledShutdownLeaderSelector(controllerContext: ControllerContext) extends PartitionLeaderSelector with Logging { this.logIdent = "[ControlledShutdownLeaderSelector]: " def selectLeader(topicAndPartition: TopicAndPartition, currentLeaderAndIsr: LeaderAndIsr): (LeaderAndIsr, Seq[Int]) = { val currentLeaderEpoch = currentLeaderAndIsr.leaderEpoch val currentLeaderIsrZkPathVersion = currentLeaderAndIsr.zkVersion val currentLeader = currentLeaderAndIsr.leader val assignedReplicas = controllerContext.partitionReplicaAssignment(topicAndPartition) val liveOrShuttingDownBrokerIds = controllerContext.liveOrShuttingDownBrokerIds val liveAssignedReplicas = assignedReplicas.filter(r => liveOrShuttingDownBrokerIds.contains(r)) val newIsr = currentLeaderAndIsr.isr.filter(brokerId => !controllerContext.shuttingDownBrokerIds.contains(brokerId)) liveAssignedReplicas.find(newIsr.contains) match { case Some(newLeader) => debug("Partition %s : current leader = %d, new leader = %d".format(topicAndPartition, currentLeader, newLeader)) (LeaderAndIsr(newLeader, currentLeaderEpoch + 1, newIsr, currentLeaderIsrZkPathVersion + 1), liveAssignedReplicas) case None => throw new StateChangeFailedException(("No other replicas in ISR %s for %s besides" + " shutting down brokers %s").format(currentLeaderAndIsr.isr.mkString(","), topicAndPartition, controllerContext.shuttingDownBrokerIds.mkString(","))) } } } /** * Essentially does nothing. Returns the current leader and ISR, and the current * set of replicas assigned to a given topic/partition. 
*/ class NoOpLeaderSelector(controllerContext: ControllerContext) extends PartitionLeaderSelector with Logging { this.logIdent = "[NoOpLeaderSelector]: " def selectLeader(topicAndPartition: TopicAndPartition, currentLeaderAndIsr: LeaderAndIsr): (LeaderAndIsr, Seq[Int]) = { warn("I should never have been asked to perform leader election, returning the current LeaderAndIsr and replica assignment.") (currentLeaderAndIsr, controllerContext.partitionReplicaAssignment(topicAndPartition)) } }
flange/drift-dev
kafka/00-kafka_2.11-0.10.1.0/libs/tmp/kafka/controller/PartitionLeaderSelector.scala
Scala
apache-2.0
11,876
package fpinscala.errorhandling import scala.{Option => _, Some => _, Either => _, _} // hide std library `Option`, `Some` and `Either`, since we are writing our own in this chapter sealed trait Option[+A] { def map[B](f: A => B): Option[B] = this match { case Some(a) => Some(f(a)) case none => None } def flatMap[B](f: A => Option[B]): Option[B] = map(f).getOrElse(None) def getOrElse[B >: A](default: => B): B = this match { case Some(a) => a case None => default } def orElse[B >: A](ob: => Option[B]): Option[B] = map(x => Some(x)).getOrElse(ob) def filter(f: A => Boolean): Option[A] = flatMap(x => if (f(x)) Some(x) else None) } case class Some[+A](get: A) extends Option[A] case object None extends Option[Nothing] object Option { def apply[A](a: => A): Option[A] = try Some(a) catch { case e: Exception => None } def failingFn(i: Int): Int = { val y: Int = throw new Exception("fail!") // `val y: Int = ...` declares `y` as having type `Int`, and sets it equal to the right hand side of the `=`. try { val x = 42 + 5 x + y } catch { case e: Exception => 43 } // A `catch` block is just a pattern matching block like the ones we've seen. `case e: Exception` is a pattern that matches any `Exception`, and it binds this value to the identifier `e`. The match returns the value 43. 
} def failingFn2(i: Int): Int = { try { val x = 42 + 5 x + ((throw new Exception("fail!")): Int) // A thrown Exception can be given any type; here we're annotating it with the type `Int` } catch { case e: Exception => 43 } } def mean(xs: Seq[Double]): Option[Double] = if (xs.isEmpty) None else Some(xs.sum / xs.length) import java.util.regex._ def pattern(s: String): Option[Pattern] = try { Some(Pattern.compile(s)) } catch { case e: PatternSyntaxException => None } def mkMatcher(pat: String): Option[String => Boolean] = pattern(pat) map (p => (s: String) => p.matcher(s).matches) // The details of this API don't matter too much, but `p.matcher(s).matches` will check if the string `s` matches the pattern `p`. def mkMatcher_1(pat: String): Option[String => Boolean] = for { p <- pattern(pat) } yield ((s: String) => p.matcher(s).matches) def doesMatch(pat: String, s: String): Option[Boolean] = for { p <- mkMatcher_1(pat) } yield p(s) def bothMatch(pat: String, pat2: String, s: String): Option[Boolean] = for { f <- mkMatcher(pat) g <- mkMatcher(pat2) } yield f(s) && g(s) def bothMatch_1(pat: String, pat2: String, s: String): Option[Boolean] = mkMatcher(pat) flatMap (f => mkMatcher(pat2) map (g => f(s) && g(s))) def variance(xs: Seq[Double]): Option[Double] = for {meanOfxSquared <- mean(xs.map(x => x*x)) meanOfX <- mean(xs)} yield meanOfxSquared - (meanOfX * meanOfX) def map2[A,B,C](a: Option[A], b: Option[B])(f: (A, B) => C): Option[C] = a.flatMap { a_ => b.map { b_ => f(a_,b_)}} def bothMatch_2(pat1: String, pat2: String, s: String): Option[Boolean] = sys.error("todo") def sequence[A](a: List[Option[A]]): Option[List[A]] = a.foldRight(Some(List.empty[A]): Option[List[A]]) { (opt, acc) => opt.flatMap( x => acc.map(x :: _)) } def traverse[A, B](a: List[A])(f: A => Option[B]): Option[List[B]] = a.foldRight(Some(List.empty[B]) : Option[List[B]]) { (item, acc) => acc.flatMap( ls => f(item).map(x => x +: ls)) } }
HolyHaddock/fpinscala
exercises/src/main/scala/fpinscala/errorhandling/Option.scala
Scala
mit
3,585
package org.jetbrains.plugins.scala package lang package psi package stubs import api.base.ScFieldId import com.intellij.psi.stubs.{NamedStub} /** * User: Alexander Podkhalyuzin * Date: 19.07.2009 */ trait ScFieldIdStub extends NamedStub[ScFieldId] { }
consulo/consulo-scala
src/org/jetbrains/plugins/scala/lang/psi/stubs/ScFieldIdStub.scala
Scala
apache-2.0
259
package es.weso.shex import es.weso.shex.ShapeSyntax._ import es.weso.rdfgraph.nodes._ import es.weso.rdfgraph.statements._ import es.weso.rdfgraph._ import org.scalatest._ import org.scalatest.prop.PropertyChecks import org.scalatest.prop.Checkers import es.weso.shex.Typing._ import es.weso.shex.Context._ import es.weso.rdf._ class ShapeValidatorRule extends FunSpec with ShapeValidatorBacktracking with Matchers with Checkers { describe("Shape Validator Rule") { /* it("Should validate optional with empty") { val strShape = "prefix : <http://example.org/>\\n" + "prefix xsd: <http://www.w3.org/2001/XMLSchema#>\\n" + ":Item { :a xsd:integer ? } " val strRDF = "prefix : <http://example.org/>\\n" val schema = Schema.fromString(strShape).get._1 info("Schema: " + schema) val rdf = RDFTriples.parse(strRDF).get val matcher = Matcher(schema,rdf,false,false) val result = matcher.matchIRI_AllLabels(IRI("http://example.org/a")) info("Result:\\n" + result.toList.toString) result.isValid should be(true) } it("Should not validate one or more with empty") { val strShape = "prefix : <http://example.org/>\\n" + "prefix xsd: <http://www.w3.org/2001/XMLSchema#>\\n" + ":Item { :a xsd:integer + } " val strRDF = "prefix : <http://example.org/>\\n" val schema = Schema.fromString(strShape).get._1 info("Schema: " + schema) val rdf = RDFTriples.parse(strRDF).get val matcher = Matcher(schema,rdf,false,false) val result = matcher.matchAllIRIs_AllLabels() info("Result:\\n" + result.toList.toString) result.isValid should be(false) } */ it("Should not validate range 2 4 with 4") { val strShape = "prefix : <http://example.org/>\\n" + "prefix xsd: <http://www.w3.org/2001/XMLSchema#>\\n" + ":Item [ :a xsd:integer {2,4} ] " val strRDF = "prefix : <http://example.org/>\\n" + ":item :a 1, 2, 3, 4 ." 
val schema = Schema.fromString(strShape).get._1 info("Schema: " + schema) val rdf = RDFTriples.parse(strRDF).get val matcher = Matcher(schema, rdf, false, false) val result = matcher.matchAllIRIs_AllLabels() info("Result:\\n" + result.toList.toString) result.isValid should be(true) } it("Should validate range 2 4 with 2") { val strShape = "prefix : <http://example.org/>\\n" + "prefix xsd: <http://www.w3.org/2001/XMLSchema#>\\n" + ":Item [ :a xsd:integer {2,4} ] " val strRDF = "prefix : <http://example.org/>\\n" + ":item :a 1, 2 ." val schema = Schema.fromString(strShape).get._1 info("Schema: " + schema) val rdf = RDFTriples.parse(strRDF).get val matcher = Matcher(schema, rdf, false, false) val result = matcher.matchAllIRIs_AllLabels() info("Result:\\n" + result.toList.toString) result.isValid should be(true) } } }
jorgeyp/ShExcala
src/test/scala/es/weso/shex/ShapeValidatorRule.scala
Scala
mit
2,982
package chandu0101.scalajs.react.components package materialui import chandu0101.macros.tojs.JSMacro import japgolly.scalajs.react._ import scala.scalajs.js import scala.scalajs.js.`|` /** * This file is generated - submit issues instead of PR against it */ case class MuiGridList( key: js.UndefOr[String] = js.undefined, ref: js.UndefOr[String] = js.undefined, /* Number of px for one cell height.*/ cellHeight: js.UndefOr[Int] = js.undefined, /* Number of columns.*/ cols: js.UndefOr[Int] = js.undefined, /* Number of px for the padding/spacing between items.*/ padding: js.UndefOr[Int] = js.undefined, /* Override the inline-styles of the root element.*/ style: js.UndefOr[CssProperties] = js.undefined){ /** * @param children Grid Tiles that will be in Grid List. */ def apply(children: ReactNode*) = { val props = JSMacro[MuiGridList](this) val f = React.asInstanceOf[js.Dynamic].createFactory(Mui.GridList) if (children.isEmpty) f(props).asInstanceOf[ReactComponentU_] else if (children.size == 1) f(props, children.head).asInstanceOf[ReactComponentU_] else f(props, children.toJsArray).asInstanceOf[ReactComponentU_] } }
elacin/scalajs-react-components
core/src/main/scala/chandu0101/scalajs/react/components/materialui/MuiGridList.scala
Scala
apache-2.0
1,279
/** * Copyright (C) 2014 TU Berlin (peel@dima.tu-berlin.de) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.peelframework.hadoop.beans.system import java.util.regex.Pattern import com.samskivert.mustache.Mustache import org.peelframework.core.beans.system.Lifespan.Lifespan import org.peelframework.core.beans.system.{LogCollection, SetUpTimeoutException, System} import org.peelframework.core.config.{Model, SystemConfig} import org.peelframework.core.util.shell import scala.collection.JavaConverters._ import scala.util.matching.Regex /** Wrapper class for Yarn. * * Implements Yarn as a Peel `System` and provides setup and teardown methods. * * @param version Version of the system (e.g. "7.1") * @param configKey The system configuration resides under `system.\\${configKey}` * @param lifespan `Lifespan` of the system * @param dependencies Set of dependencies that this system needs * @param mc The moustache compiler to compile the templates that are used to generate property files for the system */ class Yarn( version : String, configKey : String, lifespan : Lifespan, dependencies : Set[System] = Set(), mc : Mustache.Compiler) extends System("yarn", version, configKey, lifespan, dependencies, mc) with LogCollection { // --------------------------------------------------- // LogCollection. // --------------------------------------------------- /** The patterns of the log files to watch. 
*/ override protected def logFilePatterns(): Seq[Regex] = { // TODO: rework based on http://hortonworks.com/blog/simplifying-user-logs-management-and-access-in-yarn/ val user = Pattern.quote(config.getString(s"system.$configKey.user")) config.getStringList(s"system.$configKey.config.slaves").asScala.map(Pattern.quote).map(slave => s"yarn-$user-resourcemanager-$slave\\\\.log".r) } // --------------------------------------------------- // System. // --------------------------------------------------- override def configuration() = SystemConfig(config, { val conf = config.getString(s"system.$configKey.path.config") List( SystemConfig.Entry[Model.Hosts](s"system.$configKey.config.slaves", s"$conf/slaves", templatePath("conf/hosts"), mc), SystemConfig.Entry[Model.Env](s"system.$configKey.config.env", s"$conf/hadoop-env.sh", templatePath("conf/hadoop-env.sh"), mc), SystemConfig.Entry[Model.Site](s"system.$configKey.config.core", s"$conf/core-site.xml", templatePath("conf/site.xml"), mc), SystemConfig.Entry[Model.Site](s"system.$configKey.config.yarn", s"$conf/yarn-site.xml", templatePath("conf/site.xml"), mc), SystemConfig.Entry[Model.Site](s"system.$configKey.config.mapred", s"$conf/mapred-site.xml", templatePath("conf/site.xml"), mc) ) }) override def start(): Unit = { val user = config.getString(s"system.$configKey.user") val logDir = config.getString(s"system.$configKey.path.log") var failedStartUpAttempts = 0 while(!isUp) { try { val total = config.getStringList(s"system.$configKey.config.slaves").size() // yarn does not reset the resourcemanagers log at startup val init = Integer.parseInt((shell !! s"""cat $logDir/yarn-$user-resourcemanager-*.log | grep 'registered with capability:' | wc -l""").trim()) shell ! 
s"${config.getString(s"system.$configKey.path.home")}/sbin/start-yarn.sh" logger.info(s"Waiting for nodes to connect") var curr = init var cntr = config.getInt(s"system.$configKey.startup.polling.counter") while (curr - init < total) { logger.info(s"Connected ${curr - init} from $total nodes") // wait a bit Thread.sleep(config.getInt(s"system.$configKey.startup.polling.interval")) // get new values curr = Integer.parseInt((shell !! s"""cat $logDir/yarn-$user-resourcemanager-*.log | grep 'registered with capability:' | wc -l""").trim()) // timeout if counter goes below zero cntr = cntr - 1 if (cntr < 0) throw new SetUpTimeoutException(s"Cannot start system '$toString'; node connection timeout at system ") } logger.info(s"Connected ${curr - init} from $total nodes") isUp = true } catch { case e: SetUpTimeoutException => failedStartUpAttempts = failedStartUpAttempts + 1 if (failedStartUpAttempts < config.getInt(s"system.$configKey.startup.max.attempts")) { stop() logger.info(s"Could not bring system '$toString' up in time, trying again...") } else { throw e } } } } override def stop(): Unit = { shell ! s"${config.getString(s"system.$configKey.path.home")}/sbin/stop-yarn.sh" isUp = false } def isRunning = { (shell ! s""" ps -p `cat ${config.getString(s"system.$configKey.config.env.YARN_PID_DIR")}/yarn-*-resourcemanager.pid` """) == 0 || (shell ! s""" ps -p `cat ${config.getString(s"system.$configKey.config.env.YARN_PID_DIR")}/yarn-*-nodemanager.pid` """) == 0 } }
peelframework/peel
peel-extensions/src/main/scala/org/peelframework/hadoop/beans/system/Yarn.scala
Scala
apache-2.0
5,659
package org.jetbrains.plugins.scala package codeInspection.collections import java.awt.{Component, GridLayout} import java.util import javax.swing._ import javax.swing.border import javax.swing.event.{ChangeEvent, ChangeListener} import com.intellij.codeInspection.{ProblemHighlightType, ProblemsHolder} import com.intellij.openapi.ui.{InputValidator, Messages} import com.intellij.openapi.wm.IdeFocusManager import com.intellij.psi.PsiElement import com.intellij.ui._ import org.jetbrains.plugins.scala.codeInspection.collections.OperationOnCollectionInspectionBase._ import org.jetbrains.plugins.scala.codeInspection.{AbstractInspection, InspectionBundle} import org.jetbrains.plugins.scala.lang.psi.api.expr._ import org.jetbrains.plugins.scala.settings.{ScalaApplicationSettings, ScalaProjectSettingsUtil} import org.jetbrains.plugins.scala.util.JListCompatibility /** * Nikolay.Tropin * 5/17/13 */ object OperationOnCollectionInspectionBase { val inspectionId = InspectionBundle.message("operation.on.collection.id") val inspectionName = InspectionBundle.message("operation.on.collection.name") val likeOptionClassesDefault = Array("scala.Option", "scala.Some", "scala.None") val likeCollectionClassesDefault = Array("scala.collection._", "scala.Array", "scala.Option", "scala.Some", "scala.None") private val likeOptionKey = "operation.on.collection.like.option" private val likeCollectionKey = "operation.on.collection.like.collection" private val inputMessages = Map( likeCollectionKey -> InspectionBundle.message("operation.on.collection.like.collection.input.message"), likeOptionKey -> InspectionBundle.message("operation.on.collection.like.option.input.message") ) private val inputTitles = Map( likeCollectionKey -> InspectionBundle.message("operation.on.collection.like.collection.input.title"), likeOptionKey -> InspectionBundle.message("operation.on.collection.like.option.input.title") ) private val panelTitles = Map( likeCollectionKey -> 
InspectionBundle.message("operation.on.collection.like.collection.panel.title"), likeOptionKey -> InspectionBundle.message("operation.on.collection.like.option.panel.title") ) } abstract class OperationOnCollectionInspectionBase extends AbstractInspection(inspectionId, inspectionName){ private val settings = ScalaApplicationSettings.getInstance() def actionFor(holder: ProblemsHolder): PartialFunction[PsiElement, Any] = { case expr: ScExpression => for (s <- simplifications(expr)) { holder.registerProblem(s.exprToReplace.getElement, s.hint, highlightType, s.rangeInParent, new OperationOnCollectionQuickFix(expr, s)) } } def highlightType: ProblemHighlightType = ProblemHighlightType.GENERIC_ERROR_OR_WARNING private def simplifications(expr: ScExpression): Array[Simplification] = { def simplificationTypes = for { (st, idx) <- possibleSimplificationTypes.zipWithIndex if getSimplificationTypesEnabled(idx) } yield st simplificationTypes.flatMap(st => st.getSimplifications(expr) ++ st.getSimplification(expr)) } def getLikeCollectionClasses: Array[String] = settings.getLikeCollectionClasses def getLikeOptionClasses: Array[String] = settings.getLikeOptionClasses def setLikeCollectionClasses(values: Array[String]): Unit = settings.setLikeCollectionClasses(values) def setLikeOptionClasses(values: Array[String]): Unit = settings.setLikeOptionClasses(values) def possibleSimplificationTypes: Array[SimplificationType] def getSimplificationTypesEnabled: Array[java.lang.Boolean] def setSimplificationTypesEnabled(values: Array[java.lang.Boolean]) private val patternLists = Map( likeCollectionKey -> getLikeCollectionClasses _, likeOptionKey -> getLikeOptionClasses _ ) private val setPatternLists = { Map( likeCollectionKey -> setLikeCollectionClasses _, likeOptionKey -> setLikeOptionClasses _ ) } override def createOptionsPanel: JComponent = { def checkBoxesPanel(): JComponent = { val innerPanel = new JPanel() innerPanel.setLayout(new BoxLayout(innerPanel, BoxLayout.Y_AXIS)) for (i <- 
possibleSimplificationTypes.indices) { val enabled: Array[java.lang.Boolean] = getSimplificationTypesEnabled val checkBox = new JCheckBox(possibleSimplificationTypes(i).description, enabled(i)) checkBox.getModel.addChangeListener(new ChangeListener { def stateChanged(e: ChangeEvent) { enabled(i) = checkBox.isSelected setSimplificationTypesEnabled(enabled) } }) innerPanel.add(checkBox) } val extPanel = new JPanel() extPanel.setLayout(new BoxLayout(extPanel, BoxLayout.X_AXIS)) extPanel.add(innerPanel) extPanel.add(Box.createHorizontalGlue()) extPanel } def createPatternListPanel(parent: JComponent, patternListKey: String): JComponent = { val patternList: Array[String] = patternLists(patternListKey)() val listModel = JListCompatibility.createDefaultListModel() patternList.foreach(JListCompatibility.add(listModel, listModel.size, _)) val patternJBList = JListCompatibility.createJBListFromModel(listModel) def resetValues() { val newArray = listModel.toArray collect {case s: String => s} setPatternLists(patternListKey)(newArray) } val panel = ToolbarDecorator.createDecorator(patternJBList).setAddAction(new AnActionButtonRunnable { def addPattern(pattern: String) { if (pattern == null) return val index: Int = - util.Arrays.binarySearch (listModel.toArray, pattern) - 1 if (index < 0) return JListCompatibility.add(listModel, index, pattern) resetValues() patternJBList.setSelectedValue (pattern, true) ScrollingUtil.ensureIndexIsVisible(patternJBList, index, 0) IdeFocusManager.getGlobalInstance.requestFocus(patternJBList, false) } def run(button: AnActionButton) { val validator: InputValidator = ScalaProjectSettingsUtil.getPatternValidator val inputMessage = inputMessages(patternListKey) val inputTitle = inputTitles(patternListKey) val newPattern: String = Messages.showInputDialog(parent, inputMessage, inputTitle, Messages.getWarningIcon, "", validator) addPattern(newPattern) } }).setRemoveAction(new AnActionButtonRunnable { def run(t: AnActionButton) { 
patternJBList.getSelectedIndices.foreach(listModel.removeElementAt) resetValues() } }).disableUpDownActions.createPanel val title = panelTitles(patternListKey) val border = BorderFactory.createTitledBorder(title) panel.setBorder(border) panel } def patternsPanel(): JComponent = { val panel = new JPanel(new GridLayout(1,2)) val likeCollectionPanel = createPatternListPanel(panel, likeCollectionKey) val likeOptionPanel = createPatternListPanel(panel, likeOptionKey) panel.add(likeCollectionPanel) panel.add(likeOptionPanel) panel } val panel = new JPanel() panel.setLayout(new BoxLayout(panel, BoxLayout.Y_AXIS)) if (possibleSimplificationTypes.length > 1) { val chbPanel = checkBoxesPanel() chbPanel.setAlignmentX(Component.LEFT_ALIGNMENT) panel.add(checkBoxesPanel()) } panel.add(Box.createVerticalGlue()) panel.add(patternsPanel()) panel } }
ilinum/intellij-scala
src/org/jetbrains/plugins/scala/codeInspection/collections/OperationOnCollectionInspectionBase.scala
Scala
apache-2.0
7,508
package org.bitcoins.core.script.stack

import org.bitcoins.core.script.{ ExecutedScriptProgram, ScriptProgram }
import org.bitcoins.core.script.constant._
import org.bitcoins.core.util.{ BitcoinSUtil, ScriptProgramTestUtil, TestUtil }
import org.scalatest.{ FlatSpec, MustMatchers }
import org.bitcoins.core.script.result._

/**
 * Created by chris on 1/6/16.
 *
 * Unit tests for [[StackInterpreter]]: one spec per stack-manipulation
 * opcode (OP_DUP, OP_DEPTH, OP_TOALTSTACK, OP_ROLL, ...). Each opcode is
 * exercised on its happy path and on its failure mode, where the
 * interpreter is expected to mark the program with
 * [[ScriptErrorInvalidStackOperation]] when the stack holds too few
 * elements.
 */
class StackInterpreterTest extends FlatSpec with MustMatchers {
  // Two arbitrary constants reused as default stack fixtures.
  val element1 = ScriptConstant("1234")
  val element2 = ScriptConstant("abcd")
  val stack = List(element1, element2)
  val SI = StackInterpreter

  "StackInterpreter" must "duplicate elements on top of the stack" in {
    val script = List(OP_DUP)
    val program = ScriptProgram(TestUtil.testProgram, stack, script)
    val newProgram = SI.opDup(program)

    newProgram.stack.head must be(element1)
    newProgram.stack(1) must be(element1)
    newProgram.stack(2) must be(element2)
  }

  it must "throw an exception when calling opDup without an OP_DUP on top of the script stack" in {
    // opDup requires OP_DUP at the head of the script; anything else is a
    // programming error, signalled with IllegalArgumentException.
    intercept[IllegalArgumentException] {
      val script = List()
      val program = ScriptProgram(TestUtil.testProgram, stack, script)
      SI.opDup(program)
    }
  }

  it must "mark the script invalid when calling opDup without an element on top of the stack" in {
    val stack = List()
    val script = List(OP_DUP)
    val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack, script)
    ScriptProgramTestUtil.toExecutedScriptProgram(SI.opDup(program)).error must be(Some(ScriptErrorInvalidStackOperation))
  }

  it must "evaluate the OP_DEPTH operator correctly" in {
    val script = List(OP_DEPTH)
    val program = ScriptProgram(TestUtil.testProgram, stack, script)
    val newProgram = SI.opDepth(program)

    // OP_DEPTH pushes the current stack size as a script number.
    newProgram.stack.head.hex must be(BitcoinSUtil.encodeHex(stack.size.toByte))
  }

  it must "evaluate OP_DEPTH operator correctly when there are zero items on the stack" in {
    val stack = List()
    val script = List(OP_DEPTH)
    val program = ScriptProgram(TestUtil.testProgram, stack, script)
    val newProgram = SI.opDepth(program)
    newProgram.stack.head must be(ScriptNumber.zero)
  }

  it must "evaluate an OP_TOALTSTACK operator correctly" in {
    val stack = List(OP_0)
    val script = List(OP_TOALTSTACK)
    val program = ScriptProgram(TestUtil.testProgram, stack, script)
    val newProgram = SI.opToAltStack(program)

    newProgram.stack.isEmpty must be(true)
    newProgram.script.isEmpty must be(true)
    newProgram.altStack must be(List(OP_0))
  }

  it must "evaluate an OP_DROP operator correctly" in {
    val stack = List(OP_0)
    val script = List(OP_DROP)
    val program = ScriptProgram(TestUtil.testProgram, stack, script)
    val newProgram = SI.opDrop(program)
    newProgram.stack.isEmpty must be(true)
    newProgram.script.isEmpty must be(true)
  }

  it must "mark the script as invalid for an OP_DROP if we do not have a stack element" in {
    val stack = List()
    val script = List(OP_DROP)
    val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack, script)
    val newProgram = SI.opDrop(program)
    newProgram.isInstanceOf[ExecutedScriptProgram] must be(true)
    newProgram.asInstanceOf[ExecutedScriptProgram].error must be(Some(ScriptErrorInvalidStackOperation))
  }

  it must "evaluate an OP_IFDUP correctly" in {
    // With a falsy (zero) top element OP_IFDUP leaves the stack unchanged...
    val stack = List(ScriptNumber.zero)
    val script = List(OP_IFDUP)
    val program = ScriptProgram(TestUtil.testProgram, stack, script)
    val newProgram = SI.opIfDup(program)
    newProgram.stack must be(stack)
    newProgram.script.isEmpty must be(true)

    // ...and with a truthy top element it duplicates it.
    val stack1 = List(OP_1)
    val program1 = ScriptProgram(TestUtil.testProgram, stack1, script)
    val newProgram1 = SI.opIfDup(program1)
    newProgram1.stack must be(List(OP_1, OP_1))
    newProgram1.script.isEmpty must be(true)
  }

  it must "evaluate an OP_NIP correctly" in {
    val stack = List(OP_0, OP_1)
    val script = List(OP_NIP)
    val program = ScriptProgram(TestUtil.testProgram, stack, script)
    val newProgram = SI.opNip(program)
    newProgram.stack must be(List(OP_0))
    newProgram.script.isEmpty must be(true)
  }

  it must "mark the script as invalid if there is less than 2 elements on the stack for OP_NIP" in {
    val stack = List(OP_0)
    val script = List(OP_NIP)
    val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack, script)
    val newProgram = ScriptProgramTestUtil.toExecutedScriptProgram(SI.opNip(program))
    newProgram.error must be(Some(ScriptErrorInvalidStackOperation))
  }

  // Description fixed: the body asserts a script error, it does not intercept
  // an exception.
  it must "mark the script as invalid if there are no elements on the stack for OP_NIP" in {
    val stack = List()
    val script = List(OP_NIP)
    val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack, script)
    val newProgram = ScriptProgramTestUtil.toExecutedScriptProgram(SI.opNip(program))
    newProgram.error must be(Some(ScriptErrorInvalidStackOperation))
  }

  it must "evaluate an OP_OVER correctly" in {
    val stack = List(OP_0, OP_1)
    val script = List(OP_OVER)
    val program = ScriptProgram(TestUtil.testProgram, stack, script)
    val newProgram = SI.opOver(program)
    newProgram.stack must be(List(OP_1, OP_0, OP_1))
    newProgram.script.isEmpty must be(true)
  }

  it must "mark the script as invalid if there is less than 2 elements on the stack for OP_OVER" in {
    val stack = List(OP_0)
    val script = List(OP_OVER)
    val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack, script)
    val newProgram = ScriptProgramTestUtil.toExecutedScriptProgram(SI.opOver(program))
    newProgram.error must be(Some(ScriptErrorInvalidStackOperation))
  }

  it must "mark the script as invalid if there is no elements on the stack for OP_OVER" in {
    val stack = List()
    val script = List(OP_OVER)
    val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack, script)
    val newProgram = ScriptProgramTestUtil.toExecutedScriptProgram(SI.opOver(program))
    newProgram.error must be(Some(ScriptErrorInvalidStackOperation))
  }

  it must "evaluate an OP_PICK correctly" in {
    // Stack top (0) selects the element 0 positions below it; OP_PICK copies
    // that element to the top.
    val stack = List(ScriptNumber.zero, ScriptConstant("14"), ScriptConstant("15"), ScriptConstant("16"))
    val script = List(OP_PICK)
    val program = ScriptProgram(TestUtil.testProgram, stack, script)
    val newProgram = SI.opPick(program)
    newProgram.stack must be(List(ScriptConstant("14"), ScriptConstant("14"), ScriptConstant("15"), ScriptConstant("16")))
    newProgram.script.isEmpty must be(true)
  }

  it must "mark the script as invalid for OP_PICK if we do not have enough items on the stack for the first number" in {
    val stack = List(ScriptNumber.one)
    val script = List(OP_PICK)
    val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack, script)
    val newProgram = SI.opPick(program)
    newProgram.isInstanceOf[ExecutedScriptProgram] must be(true)
    newProgram.asInstanceOf[ExecutedScriptProgram].error must be(Some(ScriptErrorInvalidStackOperation))
  }

  it must "evaluate an OP_ROLL correctly" in {
    // Like OP_PICK, but the selected element is moved rather than copied.
    val stack = List(ScriptNumber.zero, ScriptConstant("14"), ScriptConstant("15"), ScriptConstant("16"))
    val script = List(OP_ROLL)
    val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack, script)
    val newProgram = SI.opRoll(program)
    newProgram.stack must be(List(ScriptConstant("14"), ScriptConstant("15"), ScriptConstant("16")))
    newProgram.script.isEmpty must be(true)
  }

  it must "mark an OP_ROLL as invalid if we do not have the enough stack elements indicated by the stack top" in {
    val stack = List(ScriptNumber.one)
    val script = List(OP_ROLL)
    val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack, script)
    val newProgram = SI.opRoll(program)
    newProgram.isInstanceOf[ExecutedScriptProgram] must be(true)
    newProgram.asInstanceOf[ExecutedScriptProgram].error must be(Some(ScriptErrorInvalidStackOperation))
  }

  it must "mark an OP_ROLL as invalid if the script number is not minimally encoded" in {
    // "0100" is a non-minimal encoding of 1 and must be rejected.
    val stack = List(ScriptNumber("0100"))
    val script = List(OP_ROLL)
    val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack, script)
    val newProgram = SI.opRoll(program)
    newProgram.isInstanceOf[ExecutedScriptProgram] must be(true)
    newProgram.asInstanceOf[ExecutedScriptProgram].error must be(Some(ScriptErrorInvalidStackOperation))
  }

  it must "evaluate an OP_ROT correctly" in {
    val stack = List(ScriptConstant("14"), ScriptConstant("15"), ScriptConstant("16"))
    val script = List(OP_ROT)
    val program = ScriptProgram(TestUtil.testProgram, stack, script)
    val newProgram = SI.opRot(program)
    newProgram.stack must be(List(ScriptConstant("16"), ScriptConstant("14"), ScriptConstant("15")))
    newProgram.script.isEmpty must be(true)
  }

  it must "mark the script as invalid if there is less than 3 elements on the stack for OP_ROT" in {
    val stack = List(ScriptConstant("14"), ScriptConstant("15"))
    val script = List(OP_ROT)
    val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack, script)
    val newProgram = ScriptProgramTestUtil.toExecutedScriptProgram(SI.opRot(program))
    newProgram.error must be(Some(ScriptErrorInvalidStackOperation))
  }

  it must "evaluate an OP_2ROT correctly" in {
    val stack = List(ScriptConstant("14"), ScriptConstant("15"), ScriptConstant("16"),
      ScriptConstant("17"), ScriptConstant("18"), ScriptConstant("19"))
    val script = List(OP_2ROT)
    val program = ScriptProgram(TestUtil.testProgram, stack, script)
    val newProgram = SI.op2Rot(program)
    newProgram.stack must be(List(ScriptConstant("18"), ScriptConstant("19"), ScriptConstant("14"),
      ScriptConstant("15"), ScriptConstant("16"), ScriptConstant("17")))
    newProgram.script.isEmpty must be(true)
  }

  it must "mark a script as invalid if there is less than 6 elements on the stack for OP_2ROT" in {
    val stack = List(ScriptConstant("14"), ScriptConstant("15"), ScriptConstant("16"),
      ScriptConstant("17"), ScriptConstant("18"))
    val script = List(OP_2ROT)
    val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack, script)
    val newProgram = ScriptProgramTestUtil.toExecutedScriptProgram(SI.op2Rot(program))
    newProgram.error must be(Some(ScriptErrorInvalidStackOperation))
  }

  it must "evaluate an OP_2DROP correctly" in {
    val stack = List(ScriptConstant("14"), ScriptConstant("15"), ScriptConstant("16"),
      ScriptConstant("17"), ScriptConstant("18"), ScriptConstant("19"))
    val script = List(OP_2DROP)
    val program = ScriptProgram(TestUtil.testProgram, stack, script)
    val newProgram = SI.op2Drop(program)
    newProgram.stack must be(List(ScriptConstant("16"), ScriptConstant("17"),
      ScriptConstant("18"), ScriptConstant("19")))
  }

  it must "mark a script invalid if an OP_2DROP script does not have two stack items" in {
    val stack = List(ScriptConstant("14"))
    val script = List(OP_2DROP)
    val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack, script)
    val newProgram = SI.op2Drop(program)
    newProgram.isInstanceOf[ExecutedScriptProgram] must be(true)
    newProgram.asInstanceOf[ExecutedScriptProgram].error must be(Some(ScriptErrorInvalidStackOperation))
  }

  it must "evaluate an OP_SWAP correctly" in {
    val stack = List(ScriptConstant("14"), ScriptConstant("15"), ScriptConstant("16"),
      ScriptConstant("17"), ScriptConstant("18"), ScriptConstant("19"))
    val script = List(OP_SWAP)
    val program = ScriptProgram(TestUtil.testProgram, stack, script)
    val newProgram = SI.opSwap(program)
    newProgram.stack must be(List(ScriptConstant("15"), ScriptConstant("14"), ScriptConstant("16"),
      ScriptConstant("17"), ScriptConstant("18"), ScriptConstant("19")))
    newProgram.script.isEmpty must be(true)
  }

  it must "mark a script invalid if an OP_SWAP script does not have two stack items" in {
    val stack = List(ScriptConstant("14"))
    val script = List(OP_SWAP)
    val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack, script)
    val newProgram = SI.opSwap(program)
    newProgram.isInstanceOf[ExecutedScriptProgram] must be(true)
    newProgram.asInstanceOf[ExecutedScriptProgram].error must be(Some(ScriptErrorInvalidStackOperation))
  }

  it must "evaluate an OP_TUCK correctly" in {
    val stack = List(ScriptConstant("14"), ScriptConstant("15"), ScriptConstant("16"),
      ScriptConstant("17"), ScriptConstant("18"), ScriptConstant("19"))
    val script = List(OP_TUCK)
    val program = ScriptProgram(TestUtil.testProgram, stack, script)
    val newProgram = SI.opTuck(program)
    newProgram.stack must be(List(ScriptConstant("14"), ScriptConstant("15"), ScriptConstant("14"),
      ScriptConstant("16"), ScriptConstant("17"), ScriptConstant("18"), ScriptConstant("19")))
    newProgram.script.isEmpty must be(true)
  }

  it must "mark a script as invalid if there is less than 2 elements on the stack for OP_TUCK" in {
    val stack = List(OP_0)
    val script = List(OP_TUCK)
    val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack, script)
    val newProgram = ScriptProgramTestUtil.toExecutedScriptProgram(SI.opTuck(program))
    newProgram.error must be(Some(ScriptErrorInvalidStackOperation))
  }

  it must "evaluate an OP_2DUP correctly" in {
    val stack = List(ScriptConstant("14"), ScriptConstant("15"), ScriptConstant("16"),
      ScriptConstant("17"), ScriptConstant("18"), ScriptConstant("19"))
    val script = List(OP_2DUP)
    val program = ScriptProgram(TestUtil.testProgram, stack, script)
    val newProgram = SI.op2Dup(program)
    newProgram.stack must be(List(ScriptConstant("14"), ScriptConstant("15"), ScriptConstant("14"),
      ScriptConstant("15"), ScriptConstant("16"), ScriptConstant("17"), ScriptConstant("18"), ScriptConstant("19")))
    newProgram.script.isEmpty must be(true)
  }

  it must "mark a script as invalid if there is less than 2 elements on the stack for OP_2DUP" in {
    val stack = List(OP_0)
    val script = List(OP_2DUP)
    val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack, script)
    val newProgram = ScriptProgramTestUtil.toExecutedScriptProgram(SI.op2Dup(program))
    newProgram.error must be(Some(ScriptErrorInvalidStackOperation))
  }

  it must "evaluate an OP_3DUP correctly" in {
    val stack = List(ScriptConstant("14"), ScriptConstant("15"), ScriptConstant("16"),
      ScriptConstant("17"), ScriptConstant("18"), ScriptConstant("19"))
    val script = List(OP_3DUP)
    val program = ScriptProgram(TestUtil.testProgram, stack, script)
    val newProgram = SI.op3Dup(program)
    newProgram.stack must be(List(ScriptConstant("14"), ScriptConstant("15"), ScriptConstant("16"),
      ScriptConstant("14"), ScriptConstant("15"), ScriptConstant("16"),
      ScriptConstant("17"), ScriptConstant("18"), ScriptConstant("19")))
    newProgram.script.isEmpty must be(true)
  }

  it must "mark the script as invalid if OP_3DUP does not have three stack elements" in {
    val stack = List(ScriptConstant("14"), ScriptConstant("15"))
    val script = List(OP_3DUP)
    val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack, script)
    val newProgram = SI.op3Dup(program)
    newProgram.isInstanceOf[ExecutedScriptProgram] must be(true)
    newProgram.asInstanceOf[ExecutedScriptProgram].error must be(Some(ScriptErrorInvalidStackOperation))
  }

  it must "evaluate an OP_2OVER correctly" in {
    val stack = List(ScriptConstant("14"), ScriptConstant("15"), ScriptConstant("16"),
      ScriptConstant("17"), ScriptConstant("18"), ScriptConstant("19"))
    val script = List(OP_2OVER)
    val program = ScriptProgram(TestUtil.testProgram, stack, script)
    val newProgram = SI.op2Over(program)
    newProgram.stack must be(List(ScriptConstant("16"), ScriptConstant("17"), ScriptConstant("14"),
      ScriptConstant("15"), ScriptConstant("16"), ScriptConstant("17"), ScriptConstant("18"), ScriptConstant("19")))
    newProgram.script.isEmpty must be(true)
  }

  it must "mark a script as invalid if there is less than 4 elements on the stack for OP_2OVER" in {
    val stack = List(ScriptConstant("14"), ScriptConstant("15"), ScriptConstant("16"))
    val script = List(OP_2OVER)
    val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack, script)
    val newProgram = ScriptProgramTestUtil.toExecutedScriptProgram(SI.op2Over(program))
    newProgram.error must be(Some(ScriptErrorInvalidStackOperation))
  }

  it must "evaluate an OP_2SWAP correctly" in {
    val stack = List(ScriptConstant("14"), ScriptConstant("15"), ScriptConstant("16"),
      ScriptConstant("17"), ScriptConstant("18"), ScriptConstant("19"))
    val script = List(OP_2SWAP)
    val program = ScriptProgram(TestUtil.testProgram, stack, script)
    val newProgram = SI.op2Swap(program)
    newProgram.stack must be(List(ScriptConstant("16"), ScriptConstant("17"), ScriptConstant("14"),
      ScriptConstant("15"), ScriptConstant("18"), ScriptConstant("19")))
    newProgram.script.isEmpty must be(true)
  }

  it must "mark a script as invalid if there is less than 4 elements on the stack for OP_2SWAP" in {
    val stack = List(ScriptConstant("14"), ScriptConstant("15"), ScriptConstant("16"))
    val script = List(OP_2SWAP)
    val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack, script)
    val newProgram = ScriptProgramTestUtil.toExecutedScriptProgram(SI.op2Swap(program))
    newProgram.error must be(Some(ScriptErrorInvalidStackOperation))
  }

  it must "move an element from the alt stack to the main stack" in {
    val stack = List()
    val script = List(OP_FROMALTSTACK)
    val altStack = List(OP_0)
    val program = ScriptProgram(TestUtil.testProgramExecutionInProgress, stack, script)
    val programWithAltStack = ScriptProgram(program, altStack, ScriptProgram.AltStack)
    val executedProgram = SI.opFromAltStack(programWithAltStack)
    executedProgram.stack must be(altStack)
  }
}
Christewart/bitcoin-s-core
src/test/scala/org/bitcoins/core/script/stack/StackInterpreterTest.scala
Scala
mit
18,249
package org.dmpp.adf.command

import scopt.OptionParser
import org.dmpp.adf.app._
import java.io.{File, FileOutputStream}

/**
 * Parsed command line options for the adfcmd tool.
 *
 * @param command the sub command to execute ("dir" or "create")
 * @param verbose whether verbose output was requested
 * @param files   positional file arguments of the sub command
 */
case class Config(command: String = "dir", verbose: Boolean = false, files: Seq[File] = Seq())

/**
 * Entry point of the adfcmd tool.
 *
 * Supported sub commands:
 *  - "create": write an empty double density ADF image to the given file
 *  - "dir":    list the root directory of an existing ADF image
 */
object Main {

  /**
   * Executes the sub command selected in `config`.
   *
   * Guards against a missing file argument (e.g. when the tool is invoked
   * without any sub command, so the default Config is used) instead of
   * failing with an IndexOutOfBoundsException on `files(0)`.
   */
  def executeCommand(config: Config) {
    config.command match {
      case "create" =>
        if (config.files.isEmpty) println("create: missing output file")
        else {
          val volume = UserVolumeFactory.createEmptyDoubleDensityDisk()
          val fos = new FileOutputStream(config.files(0))
          // Always close the stream, even if writing fails.
          try volume.writeToOutputStream(fos)
          finally fos.close()
        }
      case "dir" =>
        if (config.files.isEmpty) println("dir: missing input file")
        else {
          val volume = UserVolumeFactory.readFromFile(config.files(0))
          val dir = volume.rootDirectory
          if (dir.list.isEmpty) {
            printf("Directory '%s' is empty\n", dir.name)
          } else {
            // Directories first, then plain files, each group sorted by name.
            // foreach (not map) since printing is a side effect.
            val (dirs, files) = dir.list.partition(_.isDirectory)
            dirs.sortWith(_.name < _.name).foreach(f => printf("(DIR) %s\n", f))
            files.sortWith(_.name < _.name).foreach(println)
          }
        }
      case other =>
        // Defensive default - the parser only produces the commands above.
        printf("ignoring unknown command: '%s'\n", other)
    }
  }

  /** Parses the command line and dispatches to [[executeCommand]]. */
  def main(args: Array[String]) {
    val parser = new scopt.OptionParser[Config]("adfcmd") {
      head("adfcmd", "1.0")
      opt[Unit]("verbose") abbr("v") action { (_, c) =>
        c.copy(verbose = true) } text("verbose mode")
      help("help") abbr("h") text("Prints this usage text")
      cmd("dir") action { (_, c) =>
        c.copy(command = "dir") } text("list directory contents") children(
        arg[File]("<file>") minOccurs(1) maxOccurs(1) action { (x, c) =>
          c.copy(files = c.files :+ x) } text("ADF input file")
      )
      cmd("create") action { (_, c) =>
        c.copy(command = "create") } text("create blank ADF") children(
        arg[File]("<file>") minOccurs(1) maxOccurs(1) action { (x, c) =>
          c.copy(files = c.files :+ x) } text("output file")
      )
    }
    parser.parse(args, Config()) match {
      case Some(config) => executeCommand(config)
      case None => () // scopt has already reported the parse error
    }
  }
}
weiju/adf-tools
app/src/main/scala/org/dmpp/adf/command/Command.scala
Scala
bsd-3-clause
2,048
package de.tu_berlin.impro3.core

import net.sourceforge.argparse4j.inf.{Namespace, Subparser}

object Algorithm {

  /**
   * Command-line binding for a concrete [[Algorithm]] implementation.
   *
   * A `Command` carries the metadata needed to register an algorithm with the
   * argparse4j CLI (its key, description and argument setup) and can create
   * the algorithm reflectively from the parsed arguments. The captured
   * `Manifest` for `A` provides the runtime class used for reflection.
   *
   * @tparam A the algorithm class this command instantiates
   */
  abstract class Command[A <: Algorithm](implicit val m: scala.reflect.Manifest[A]) {

    /**
     * Algorithm key, i.e. the sub-command name used to select this
     * algorithm on the command line.
     */
    def name: String

    /**
     * Algorithm name / human-readable description shown in the CLI help.
     */
    def description: String

    /**
     * Algorithm subparser configuration.
     *
     * @param parser The subparser for this algorithm.
     */
    def setup(parser: Subparser): Unit

    /**
     * Create an instance of the algorithm.
     *
     * Looks up a public single-argument constructor `A(Namespace)` via
     * reflection; `getConstructor` throws `NoSuchMethodException` if the
     * algorithm class does not declare one.
     *
     * @param ns The parsed arguments to be passed to the algorithm constructor.
     * @return a new algorithm instance configured from `ns`
     */
    def instantiate(ns: Namespace): Algorithm = {
      val constructor = m.runtimeClass.getConstructor(classOf[Namespace])
      constructor.newInstance(ns).asInstanceOf[Algorithm]
    }
  }
}

/** Base type for all algorithms runnable through a [[Algorithm.Command]]. */
abstract class Algorithm {

  /** Executes the algorithm. */
  def run(): Unit
}
joroKr21/spatio-temporal-dynamics
impro3-ws14-core/src/main/scala/de/tu_berlin/impro3/core/Algorithm.scala
Scala
apache-2.0
899
package com.nekopiano.scala.processing.sandbox.sample.glow

import com.nekopiano.scala.processing.{ScalaPAppCompanion, TwoDimensionalPApp}

/**
 * Created on 07/08/2016.
 *
 * "Wave Clock" generative-art sketch: every frame draws one line through a
 * Perlin-noise-wobbled centre, with noise-driven length, angle and stroke
 * grey level, so the accumulated lines form a clock-like glow.
 */
class WaveClockApp extends TwoDimensionalPApp {

  // Jan 2009
  // http://www.abandonedart.org
  // http://www.zenbullets.com
  //
  // This work is licensed under a Creative Commons 3.0 License.
  // (Attribution - NonCommerical - ShareAlike)
  // http://creativecommons.org/licenses/by-nc-sa/3.0/
  //
  // This basically means, you are free to use it as long as you:
  // 1. give http://www.zenbullets.com a credit
  // 2. don't use it for commercial gain
  // 3. share anything you create with it in the same way I have
  //
  // These conditions can be waived if you want to do something groovy with it
  // though, so feel free to email me via http://www.zenbullets.com

  //================================= global vars
  // NOTE(review): `_num` is never read anywhere in this sketch.
  var _num = 10;
  // Perlin-noise walk positions for angle, radius and centre wobble.
  var _angnoise = 0f
  var _radiusnoise = 0f
  var _xnoise = 0f
  var _ynoise = 0f
  // NOTE(review): `_angle` is initialised with a radian value (-PI/2) but is
  // wrapped into [0, 360) and fed through radians() below, i.e. it is treated
  // as degrees everywhere else. Kept as-is - it is part of the original art.
  var _angle = -PI/2f
  var _radius = 100f
  // Stroke grey level, bounced between 0 and 254 via _strokeChange.
  var _strokeCol = 254f

  //================================= init
  override def settings: Unit = {
    size(500, 300);
  }

  override def setup() {
    //size(500, 300);  // superseded by settings() above
    smooth();
    frameRate(30);
    clearBackground();
    // Start each noise walk at a random offset so every run differs.
    _angnoise = random(10);
    _radiusnoise = random(10);
    _xnoise = random(10);
    _ynoise = random(10);
  }

  /** Clears the canvas to white (also triggered by a mouse press). */
  def clearBackground() {
    background(255)
  }

  //================================= frame loop
  override def draw() {
    // Advance the noise walks: radius in (1, 551], angle drifts by +/-3 deg.
    _radiusnoise += 0.005f
    _radius = (noise(_radiusnoise) * 550) +1;
    _angnoise += 0.005f
    _angle += (noise(_angnoise) * 6) - 3;
    if (_angle > 360) {
      _angle -= 360;
    }
    if (_angle < 0) {
      _angle += 360;
    }

    // wobble centre (+/-50 px around the canvas centre)
    _xnoise += 0.01f
    _ynoise += 0.01f
    val centreX = width/2 + (noise(_xnoise) * 100) - 50;
    val centreY = height/2 + (noise(_ynoise) * 100) - 50;

    import com.nekopiano.scala.processing.Angles._
    // One endpoint at (_angle, _radius) from the centre...
    val rad = radians(_angle);
    val x1 = centreX + (_radius * cos(rad));
    val y1 = centreY + (_radius * sin(rad));

    // opposite endpoint, 180 degrees around
    val opprad = rad + PI;
    val x2 = centreX + (_radius * cos(opprad));
    val y2 = centreY + (_radius * sin(opprad));

    noFill();
    // Bounce the grey level off both ends of [0, 254].
    _strokeCol += _strokeChange;
    if (_strokeCol > 254) {
      _strokeChange *= -1;
    }
    if (_strokeCol < 0) {
      _strokeChange *= -1;
    }
    stroke(_strokeCol, 60);
    strokeWeight(1);
    line(x1, y1, x2, y2);
  }

  // Direction of the grey-level sweep; initialised when the instance is
  // constructed, before draw() is ever called.
  var _strokeChange = -1

  //================================= interaction
  override def mousePressed() {
    clearBackground();
  }
}

object WaveClockApp extends ScalaPAppCompanion {}
lamusique/ScalaProcessing
samples/src/test/scala/com/nekopiano/scala/processing/sandbox/sample/glow/WaveClock.scala
Scala
apache-2.0
2,659
import collection.immutable.{RedBlackTree => RB}
import org.scalacheck._
import Prop._
import Gen._

/*
Properties of a Red & Black Tree:

A node is either red or black.
The root is black. (This rule is used in some definitions and not others. Since the
root can always be changed from red to black but not necessarily vice-versa this
rule has little effect on analysis.)
All leaves are black.
Both children of every red node are black.
Every simple path from a given node to any of its descendant leaves contains the same number of black nodes.
*/

package scala.collection.immutable.redblacktree {

  /**
   * Base class for the property checks below: generates valid red-black
   * trees, and lets each concrete test define one modification
   * (insert/delete/range/...) via `ModifyParm`/`genParm`/`modify`.
   */
  abstract class RedBlackTreeTest extends Properties("RedBlackTree") {
    def minimumSize = 0
    def maximumSize = 5

    import RB._

    // The (key, value) pair at in-order position n, if it exists.
    def nodeAt[A](tree: Tree[String, A], n: Int): Option[(String, A)] =
      if (n < iterator(tree).size && n >= 0) Some(iterator(tree).drop(n).next)
      else None

    def treeContains[A](tree: Tree[String, A], key: String) = iterator(tree).map(_._1) contains key

    def height(tree: Tree[_, _]): Int =
      if (tree eq null) 0 else (1 + math.max(height(tree.left), height(tree.right)))

    // Generates a valid red-black tree of the given black depth. Keys encode
    // the path from the root ("L"/"R" per level, terminated by "N"), so they
    // are automatically in search-tree order.
    def mkTree(level: Int, parentIsBlack: Boolean = false, label: String = ""): Gen[Tree[String, Int]] =
      if (level == 0) {
        const(null)
      } else {
        for {
          oddOrEven <- choose(0, 2)
          tryRed = oddOrEven.sample.get % 2 == 0 // work around arbitrary[Boolean] bug
          isRed = parentIsBlack && tryRed
          // A red node does not count towards the black depth.
          nextLevel = if (isRed) level else level - 1
          left <- mkTree(nextLevel, !isRed, label + "L")
          right <- mkTree(nextLevel, !isRed, label + "R")
        } yield {
          if (isRed) RedTree(label + "N", 0, left, right)
          else BlackTree(label + "N", 0, left, right)
        }
      }

    def genTree = for {
      depth <- choose(minimumSize, maximumSize + 1)
      tree <- mkTree(depth)
    } yield tree

    // Parameter describing the modification under test; defined per subclass.
    type ModifyParm
    def genParm(tree: Tree[String, Int]): Gen[ModifyParm]
    def modify(tree: Tree[String, Int], parm: ModifyParm): Tree[String, Int]

    // (original tree, modification parameter, modified tree)
    def genInput: Gen[(Tree[String, Int], ModifyParm, Tree[String, Int])] = for {
      tree <- genTree
      parm <- genParm(tree)
    } yield (tree, parm, modify(tree, parm))
  }

  /**
   * Checks the red-black invariants listed at the top of the file on every
   * tree produced by `genInput`, i.e. after the modification under test.
   */
  trait RedBlackTreeInvariants {
    self: RedBlackTreeTest =>

    import RB._

    def rootIsBlack[A](t: Tree[String, A]) = isBlack(t)

    def areAllLeavesBlack[A](t: Tree[String, A]): Boolean = t match {
      case null => isBlack(t)
      case ne => List(ne.left, ne.right) forall areAllLeavesBlack
    }

    def areRedNodeChildrenBlack[A](t: Tree[String, A]): Boolean = t match {
      case RedTree(_, _, left, right) => List(left, right) forall (t => isBlack(t) && areRedNodeChildrenBlack(t))
      case BlackTree(_, _, left, right) => List(left, right) forall areRedNodeChildrenBlack
      case null => true
    }

    // Black-node count of every root-to-leaf path (the null leaf counts as 1).
    def blackNodesToLeaves[A](t: Tree[String, A]): List[Int] = t match {
      case null => List(1)
      case BlackTree(_, _, left, right) => List(left, right) flatMap blackNodesToLeaves map (_ + 1)
      case RedTree(_, _, left, right) => List(left, right) flatMap blackNodesToLeaves
    }

    def areBlackNodesToLeavesEqual[A](t: Tree[String, A]): Boolean = t match {
      case null => true
      case ne =>
        (
          blackNodesToLeaves(ne).distinct.size == 1
          && areBlackNodesToLeavesEqual(ne.left)
          && areBlackNodesToLeavesEqual(ne.right)
        )
    }

    // In-order traversal must yield strictly increasing keys.
    def orderIsPreserved[A](t: Tree[String, A]): Boolean =
      iterator(t) zip iterator(t).drop(1) forall { case (x, y) => x._1 < y._1 }

    // Height bound of a red-black tree: <= 2*log2(n + 2) - 2.
    def heightIsBounded(t: Tree[_, _]): Boolean =
      height(t) <= (2 * (32 - Integer.numberOfLeadingZeros(count(t) + 2)) - 2)

    def setup(invariant: Tree[String, Int] => Boolean) = forAll(genInput) { case (tree, parm, newTree) =>
      invariant(newTree)
    }

    property("root is black") = setup(rootIsBlack)
    property("all leaves are black") = setup(areAllLeavesBlack)
    property("children of red nodes are black") = setup(areRedNodeChildrenBlack)
    property("black nodes are balanced") = setup(areBlackNodesToLeavesEqual)
    property("ordering of keys is preserved") = setup(orderIsPreserved)
    property("height is bounded") = setup(heightIsBounded)
  }

  /** Insertion of a freshly generated key at in-order position `parm`. */
  object TestInsert extends RedBlackTreeTest with RedBlackTreeInvariants {
    import RB._

    override type ModifyParm = Int
    override def genParm(tree: Tree[String, Int]): Gen[ModifyParm] = choose(0, iterator(tree).size + 1)
    override def modify(tree: Tree[String, Int], parm: ModifyParm): Tree[String, Int] =
      update(tree, generateKey(tree, parm), 0, true)

    // Builds a key that sorts just before the node at `parm` ("MN" < "N"),
    // just after the last node ("RN" > "N"), or "N" for the empty tree.
    def generateKey(tree: Tree[String, Int], parm: ModifyParm): String = nodeAt(tree, parm) match {
      case Some((key, _)) => key.init.mkString + "MN"
      case None => nodeAt(tree, parm - 1) match {
        case Some((key, _)) => key.init.mkString + "RN"
        case None => "N"
      }
    }

    property("update adds elements") = forAll(genInput) { case (tree, parm, newTree) =>
      treeContains(newTree, generateKey(tree, parm))
    }
  }

  /** Update of an existing key's value (tree shape unchanged, no invariants needed). */
  object TestModify extends RedBlackTreeTest {
    import RB._

    def newValue = 1
    override def minimumSize = 1
    override type ModifyParm = Int
    override def genParm(tree: Tree[String, Int]): Gen[ModifyParm] = choose(0, iterator(tree).size)
    override def modify(tree: Tree[String, Int], parm: ModifyParm): Tree[String, Int] = nodeAt(tree, parm) map {
      case (key, _) => update(tree, key, newValue, true)
    } getOrElse tree

    property("update modifies values") = forAll(genInput) { case (tree, parm, newTree) =>
      nodeAt(tree, parm) forall { case (key, _) =>
        iterator(newTree) contains (key, newValue)
      }
    }
  }

  /** Deletion of the key at in-order position `parm`. */
  object TestDelete extends RedBlackTreeTest with RedBlackTreeInvariants {
    import RB._

    override def minimumSize = 1
    override type ModifyParm = Int
    override def genParm(tree: Tree[String, Int]): Gen[ModifyParm] = choose(0, iterator(tree).size)
    override def modify(tree: Tree[String, Int], parm: ModifyParm): Tree[String, Int] = nodeAt(tree, parm) map {
      case (key, _) => delete(tree, key)
    } getOrElse tree

    property("delete removes elements") = forAll(genInput) { case (tree, parm, newTree) =>
      nodeAt(tree, parm) forall { case (key, _) =>
        !treeContains(newTree, key)
      }
    }
  }

  /** Range extraction with optional lower/upper bounds (half-open: from inclusive, to exclusive). */
  object TestRange extends RedBlackTreeTest with RedBlackTreeInvariants {
    import RB._

    override type ModifyParm = (Option[Int], Option[Int])
    override def genParm(tree: Tree[String, Int]): Gen[ModifyParm] = for {
      from <- choose(0, iterator(tree).size)
      to <- choose(0, iterator(tree).size) suchThat (from <=)
      optionalFrom <- oneOf(Some(from), None, Some(from)) // Double Some(n) to get around a bug
      optionalTo <- oneOf(Some(to), None, Some(to)) // Double Some(n) to get around a bug
    } yield (optionalFrom, optionalTo)

    override def modify(tree: Tree[String, Int], parm: ModifyParm): Tree[String, Int] = {
      // Translate in-order positions into the keys at those positions.
      val from = parm._1 flatMap (nodeAt(tree, _) map (_._1))
      val to = parm._2 flatMap (nodeAt(tree, _) map (_._1))
      rangeImpl(tree, from, to)
    }

    property("range boundaries respected") = forAll(genInput) { case (tree, parm, newTree) =>
      val from = parm._1 flatMap (nodeAt(tree, _) map (_._1))
      val to = parm._2 flatMap (nodeAt(tree, _) map (_._1))
      ("lower boundary" |: (from forall ( key => keysIterator(newTree) forall (key <=)))) &&
      ("upper boundary" |: (to forall ( key => keysIterator(newTree) forall (key >))))
    }

    property("range returns all elements") = forAll(genInput) { case (tree, parm, newTree) =>
      val from = parm._1 flatMap (nodeAt(tree, _) map (_._1))
      val to = parm._2 flatMap (nodeAt(tree, _) map (_._1))
      val filteredTree = (keysIterator(tree)
        .filter(key => from forall (key >=))
        .filter(key => to forall (key <))
        .toList)
      filteredTree == keysIterator(newTree).toList
    }
  }

  /** `drop(n)` must match dropping from the in-order iterator. */
  object TestDrop extends RedBlackTreeTest with RedBlackTreeInvariants {
    import RB._

    override type ModifyParm = Int
    override def genParm(tree: Tree[String, Int]): Gen[ModifyParm] = choose(0, iterator(tree).size)
    override def modify(tree: Tree[String, Int], parm: ModifyParm): Tree[String, Int] = drop(tree, parm)

    property("drop") = forAll(genInput) { case (tree, parm, newTree) =>
      iterator(tree).drop(parm).toList == iterator(newTree).toList
    }
  }

  /** `take(n)` must match taking from the in-order iterator. */
  object TestTake extends RedBlackTreeTest with RedBlackTreeInvariants {
    import RB._

    override type ModifyParm = Int
    override def genParm(tree: Tree[String, Int]): Gen[ModifyParm] = choose(0, iterator(tree).size)
    override def modify(tree: Tree[String, Int], parm: ModifyParm): Tree[String, Int] = take(tree, parm)

    property("take") = forAll(genInput) { case (tree, parm, newTree) =>
      iterator(tree).take(parm).toList == iterator(newTree).toList
    }
  }

  /** `slice(from, to)` must match slicing the in-order iterator. */
  object TestSlice extends RedBlackTreeTest with RedBlackTreeInvariants {
    import RB._

    override type ModifyParm = (Int, Int)
    override def genParm(tree: Tree[String, Int]): Gen[ModifyParm] = for {
      from <- choose(0, iterator(tree).size)
      to <- choose(from, iterator(tree).size)
    } yield (from, to)
    override def modify(tree: Tree[String, Int], parm: ModifyParm): Tree[String, Int] =
      slice(tree, parm._1, parm._2)

    property("slice") = forAll(genInput) { case (tree, parm, newTree) =>
      iterator(tree).slice(parm._1, parm._2).toList == iterator(newTree).toList
    }
  }
}

/** Aggregates all red-black tree property suites into one runnable object. */
object Test extends Properties("RedBlackTree") {
  import collection.immutable.redblacktree._
  include(TestInsert)
  include(TestModify)
  include(TestDelete)
  include(TestRange)
  include(TestDrop)
  include(TestTake)
  include(TestSlice)
}
test/files/scalacheck/redblacktree.scala
Scala
bsd-3-clause
9,865
package java.util

import scalajs.js
import scala.annotation.tailrec
import scala.reflect.ClassTag
import scala.collection.immutable

/** Scala.js port of `java.util.Arrays`.
 *
 *  Sorting is implemented as a stable merge sort that falls back to a
 *  binary-insertion sort for small ranges. There are two parallel copies of
 *  the sort machinery: a `@specialized` one for primitive element types and a
 *  monomorphic `AnyRef` one, so that primitive arrays avoid boxing.
 */
object Arrays {
  // Implicit fallback ordering: delegates to the elements' own Comparable.
  // Used by the overloads that take no explicit Comparator.
  @inline
  private final implicit def naturalOrdering[T <: AnyRef]: Ordering[T] = {
    new Ordering[T] {
      def compare(x: T, y: T): Int = x.asInstanceOf[Comparable[T]].compareTo(y)
    }
  }

  // --------------------------------------------------------------------------
  // sort — one whole-array and one (fromIndex, toIndex) overload per type
  // --------------------------------------------------------------------------

  @noinline def sort(a: Array[Int]): Unit =
    sortImpl(a)

  @noinline def sort(a: Array[Int], fromIndex: Int, toIndex: Int): Unit =
    sortRangeImpl[Int](a, fromIndex, toIndex)

  @noinline def sort(a: Array[Long]): Unit =
    sortImpl(a)

  @noinline def sort(a: Array[Long], fromIndex: Int, toIndex: Int): Unit =
    sortRangeImpl[Long](a, fromIndex, toIndex)

  @noinline def sort(a: Array[Short]): Unit =
    sortImpl(a)

  @noinline def sort(a: Array[Short], fromIndex: Int, toIndex: Int): Unit =
    sortRangeImpl[Short](a, fromIndex, toIndex)

  @noinline def sort(a: Array[Char]): Unit =
    sortImpl(a)

  @noinline def sort(a: Array[Char], fromIndex: Int, toIndex: Int): Unit =
    sortRangeImpl[Char](a, fromIndex, toIndex)

  @noinline def sort(a: Array[Byte]): Unit =
    sortImpl(a)

  @noinline def sort(a: Array[Byte], fromIndex: Int, toIndex: Int): Unit =
    sortRangeImpl[Byte](a, fromIndex, toIndex)

  @noinline def sort(a: Array[Float]): Unit =
    sortImpl(a)

  @noinline def sort(a: Array[Float], fromIndex: Int, toIndex: Int): Unit =
    sortRangeImpl[Float](a, fromIndex, toIndex)

  @noinline def sort(a: Array[Double]): Unit =
    sortImpl(a)

  @noinline def sort(a: Array[Double], fromIndex: Int, toIndex: Int): Unit =
    sortRangeImpl[Double](a, fromIndex, toIndex)

  @noinline def sort(a: Array[AnyRef]): Unit =
    sortAnyRefImpl(a)

  @noinline def sort(a: Array[AnyRef], fromIndex: Int, toIndex: Int): Unit =
    sortRangeAnyRefImpl(a, fromIndex, toIndex)

  @noinline def sort[T <: AnyRef](array: Array[T], comparator: Comparator[_ >: T]): Unit = {
    // Wrap the Comparator as an Ordering and reuse the AnyRef sort; the cast
    // is sound because sorting never creates new elements.
    implicit val ord = toOrdering(comparator).asInstanceOf[Ordering[AnyRef]]
    sortAnyRefImpl(array.asInstanceOf[Array[AnyRef]])
  }

  @noinline def sort[T <: AnyRef](array: Array[T], fromIndex: Int, toIndex: Int, comparator: Comparator[_ >: T]): Unit = {
    implicit val ord = toOrdering(comparator).asInstanceOf[Ordering[AnyRef]]
    sortRangeAnyRefImpl(array.asInstanceOf[Array[AnyRef]], fromIndex, toIndex)
  }

  // Validates the range (IllegalArgumentException if fromIndex > toIndex,
  // ArrayIndexOutOfBoundsException if out of the array) and sorts it.
  @inline
  private def sortRangeImpl[@specialized T: ClassTag](
      a: Array[T], fromIndex: Int, toIndex: Int)(implicit ord: Ordering[T]): Unit = {
    checkIndicesForCopyOfRange(a.length, fromIndex, toIndex)
    stableMergeSort[T](a, fromIndex, toIndex)
  }

  @inline
  private def sortRangeAnyRefImpl(a: Array[AnyRef], fromIndex: Int, toIndex: Int)(
      implicit ord: Ordering[AnyRef]): Unit = {
    checkIndicesForCopyOfRange(a.length, fromIndex, toIndex)
    stableMergeSortAnyRef(a, fromIndex, toIndex)
  }

  @inline
  private def sortImpl[@specialized T: ClassTag: Ordering](a: Array[T]): Unit =
    stableMergeSort[T](a, 0, a.length)

  @inline
  private def sortAnyRefImpl(a: Array[AnyRef])(implicit ord: Ordering[AnyRef]): Unit =
    stableMergeSortAnyRef(a, 0, a.length)

  // Ranges at most this long are sorted by insertion sort instead of merging.
  private final val inPlaceSortThreshold = 16

  /** Sort array `a` with merge sort and insertion sort,
   *  using the Ordering on its elements.
   */
  @inline
  private def stableMergeSort[@specialized K: ClassTag](a: Array[K], start: Int, end: Int)(implicit ord: Ordering[K]): Unit = {
    if (end - start > inPlaceSortThreshold)
      stableSplitMerge(a, new Array[K](a.length), start, end)
    else
      insertionSort(a, start, end)
  }

  // Recursive half of the merge sort. `temp` is a scratch array of the same
  // length as `a`; each merge writes into `temp` and is copied back.
  @noinline
  private def stableSplitMerge[@specialized K](a: Array[K], temp: Array[K], start: Int, end: Int)(implicit ord: Ordering[K]): Unit = {
    val length = end - start
    if (length > inPlaceSortThreshold) {
      val middle = start + (length / 2)
      stableSplitMerge(a, temp, start, middle)
      stableSplitMerge(a, temp, middle, end)
      stableMerge(a, temp, start, middle, end)
      System.arraycopy(temp, start, a, start, length)
    } else {
      insertionSort(a, start, end)
    }
  }

  // Merges the sorted halves a[start, middle) and a[middle, end) into temp.
  // `lteq` on the left element keeps equal elements in order => stable.
  @inline
  private def stableMerge[@specialized K](a: Array[K], temp: Array[K], start: Int, middle: Int, end: Int)(implicit ord: Ordering[K]): Unit = {
    var outIndex = start
    var leftInIndex = start
    var rightInIndex = middle
    while (outIndex < end) {
      if (leftInIndex < middle &&
          (rightInIndex >= end || ord.lteq(a(leftInIndex), a(rightInIndex)))) {
        temp(outIndex) = a(leftInIndex)
        leftInIndex += 1
      } else {
        temp(outIndex) = a(rightInIndex)
        rightInIndex += 1
      }
      outIndex += 1
    }
  }

  // Ordering[T] might be slow especially for boxed primitives, so use binary
  // search variant of insertion sort
  // Caller must pass end >= start or math will fail.  Also, start >= 0.
  @noinline
  private final def insertionSort[@specialized T](a: Array[T], start: Int, end: Int)(implicit ord: Ordering[T]): Unit = {
    val n = end - start
    if (n >= 2) {
      // Hand-sort the first two elements, then binary-insert the rest.
      if (ord.compare(a(start), a(start + 1)) > 0) {
        val temp = a(start)
        a(start) = a(start + 1)
        a(start + 1) = temp
      }
      var m = 2
      while (m < n) {
        // Speed up already-sorted case by checking last element first
        val next = a(start + m)
        if (ord.compare(next, a(start + m - 1)) < 0) {
          // Binary search for the insertion point in a[start, start + m).
          var iA = start
          var iB = start + m - 1
          while (iB - iA > 1) {
            val ix = (iA + iB) >>> 1 // Use bit shift to get unsigned div by 2
            if (ord.compare(next, a(ix)) < 0)
              iB = ix
            else
              iA = ix
          }
          val ix = iA + (if (ord.compare(next, a(iA)) < 0) 0 else 1)
          // Shift the tail right by one and drop `next` into place.
          var i = start + m
          while (i > ix) {
            a(i) = a(i - 1)
            i -= 1
          }
          a(ix) = next
        }
        m += 1
      }
    }
  }

  /** Sort array `a` with merge sort and insertion sort,
   *  using the Ordering on its elements.
   */
  @inline
  private def stableMergeSortAnyRef(a: Array[AnyRef], start: Int, end: Int)(
      implicit ord: Ordering[AnyRef]): Unit = {
    if (end - start > inPlaceSortThreshold)
      stableSplitMergeAnyRef(a, new Array(a.length), start, end)
    else
      insertionSortAnyRef(a, start, end)
  }

  // AnyRef twin of stableSplitMerge (kept monomorphic on purpose).
  @noinline
  private def stableSplitMergeAnyRef(a: Array[AnyRef], temp: Array[AnyRef], start: Int, end: Int)(implicit ord: Ordering[AnyRef]): Unit = {
    val length = end - start
    if (length > inPlaceSortThreshold) {
      val middle = start + (length / 2)
      stableSplitMergeAnyRef(a, temp, start, middle)
      stableSplitMergeAnyRef(a, temp, middle, end)
      stableMergeAnyRef(a, temp, start, middle, end)
      System.arraycopy(temp, start, a, start, length)
    } else {
      insertionSortAnyRef(a, start, end)
    }
  }

  // AnyRef twin of stableMerge.
  @inline
  private def stableMergeAnyRef(a: Array[AnyRef], temp: Array[AnyRef], start: Int, middle: Int, end: Int)(implicit ord: Ordering[AnyRef]): Unit = {
    var outIndex = start
    var leftInIndex = start
    var rightInIndex = middle
    while (outIndex < end) {
      if (leftInIndex < middle &&
          (rightInIndex >= end || ord.lteq(a(leftInIndex), a(rightInIndex)))) {
        temp(outIndex) = a(leftInIndex)
        leftInIndex += 1
      } else {
        temp(outIndex) = a(rightInIndex)
        rightInIndex += 1
      }
      outIndex += 1
    }
  }

  // AnyRef twin of insertionSort; same binary-insertion algorithm.
  @noinline
  private final def insertionSortAnyRef(a: Array[AnyRef], start: Int, end: Int)(
      implicit ord: Ordering[AnyRef]): Unit = {
    val n = end - start
    if (n >= 2) {
      if (ord.compare(a(start), a(start + 1)) > 0) {
        val temp = a(start)
        a(start) = a(start + 1)
        a(start + 1) = temp
      }
      var m = 2
      while (m < n) {
        // Speed up already-sorted case by checking last element first
        val next = a(start + m)
        if (ord.compare(next, a(start + m - 1)) < 0) {
          var iA = start
          var iB = start + m - 1
          while (iB - iA > 1) {
            val ix = (iA + iB) >>> 1 // Use bit shift to get unsigned div by 2
            if (ord.compare(next, a(ix)) < 0)
              iB = ix
            else
              iA = ix
          }
          val ix = iA + (if (ord.compare(next, a(iA)) < 0) 0 else 1)
          var i = start + m
          while (i > ix) {
            a(i) = a(i - 1)
            i -= 1
          }
          a(ix) = next
        }
        m += 1
      }
    }
  }

  // --------------------------------------------------------------------------
  // binarySearch — the range overloads validate indices first; on a miss the
  // impl returns -(insertionPoint) - 1, as specified by java.util.Arrays.
  // --------------------------------------------------------------------------

  @noinline def binarySearch(a: Array[Long], key: Long): Int =
    binarySearchImpl[Long](a, 0, a.length, key, _ < _)

  @noinline def binarySearch(a: Array[Long], startIndex: Int, endIndex: Int, key: Long): Int = {
    checkRangeIndices(a.length, startIndex, endIndex)
    binarySearchImpl[Long](a, startIndex, endIndex, key, _ < _)
  }

  @noinline def binarySearch(a: Array[Int], key: Int): Int =
    binarySearchImpl[Int](a, 0, a.length, key, _ < _)

  @noinline def binarySearch(a: Array[Int], startIndex: Int, endIndex: Int, key: Int): Int = {
    checkRangeIndices(a.length, startIndex, endIndex)
    binarySearchImpl[Int](a, startIndex, endIndex, key, _ < _)
  }

  @noinline def binarySearch(a: Array[Short], key: Short): Int =
    binarySearchImpl[Short](a, 0, a.length, key, _ < _)

  @noinline def binarySearch(a: Array[Short], startIndex: Int, endIndex: Int, key: Short): Int = {
    checkRangeIndices(a.length, startIndex, endIndex)
    binarySearchImpl[Short](a, startIndex, endIndex, key, _ < _)
  }

  @noinline def binarySearch(a: Array[Char], key: Char): Int =
    binarySearchImpl[Char](a, 0, a.length, key, _ < _)

  @noinline def binarySearch(a: Array[Char], startIndex: Int, endIndex: Int, key: Char): Int = {
    checkRangeIndices(a.length, startIndex, endIndex)
    binarySearchImpl[Char](a, startIndex, endIndex, key, _ < _)
  }

  @noinline def binarySearch(a: Array[Byte], key: Byte): Int =
    binarySearchImpl[Byte](a, 0, a.length, key, _ < _)

  @noinline def binarySearch(a: Array[Byte], startIndex: Int, endIndex: Int, key: Byte): Int = {
    checkRangeIndices(a.length, startIndex, endIndex)
    binarySearchImpl[Byte](a, startIndex, endIndex, key, _ < _)
  }

  @noinline def binarySearch(a: Array[Double], key: Double): Int =
    binarySearchImpl[Double](a, 0, a.length, key, _ < _)

  @noinline def binarySearch(a: Array[Double], startIndex: Int, endIndex: Int, key: Double): Int = {
    checkRangeIndices(a.length, startIndex, endIndex)
    binarySearchImpl[Double](a, startIndex, endIndex, key, _ < _)
  }

  @noinline def binarySearch(a: Array[Float], key: Float): Int =
    binarySearchImpl[Float](a, 0, a.length, key, _ < _)

  @noinline def binarySearch(a: Array[Float], startIndex: Int, endIndex: Int, key: Float): Int = {
    checkRangeIndices(a.length, startIndex, endIndex)
    binarySearchImpl[Float](a, startIndex, endIndex, key, _ < _)
  }

  @noinline def binarySearch(a: Array[AnyRef], key: AnyRef): Int =
    binarySearchImplRef(a, 0, a.length, key)

  @noinline def binarySearch(a: Array[AnyRef], startIndex: Int, endIndex: Int, key: AnyRef): Int = {
    checkRangeIndices(a.length, startIndex, endIndex)
    binarySearchImplRef(a, startIndex, endIndex, key)
  }

  @noinline def binarySearch[T](a: Array[T], key: T, c: Comparator[_ >: T]): Int =
    binarySearchImpl[T](a, 0, a.length, key, (a, b) => c.compare(a, b) < 0)

  @noinline def binarySearch[T](a: Array[T], startIndex: Int, endIndex: Int, key: T, c: Comparator[_ >: T]): Int = {
    checkRangeIndices(a.length, startIndex, endIndex)
    binarySearchImpl[T](a, startIndex, endIndex, key, (a, b) => c.compare(a, b) < 0)
  }

  // Tail-recursive binary search; `lt` is the strict less-than on keys.
  // When the range collapses, startIndex is exactly the insertion point.
  @inline
  @tailrec
  private def binarySearchImpl[T](a: Array[T], startIndex: Int, endIndex: Int, key: T, lt: (T, T) => Boolean): Int = {
    if (startIndex == endIndex) {
      // Not found
      -startIndex - 1
    } else {
      // Indices are unsigned 31-bit integer, so this does not overflow
      val mid = (startIndex + endIndex) >>> 1
      val elem = a(mid)
      if (lt(key, elem)) {
        binarySearchImpl(a, startIndex, mid, key, lt)
      } else if (key == elem) {
        // Found
        mid
      } else {
        binarySearchImpl(a, mid + 1, endIndex, key, lt)
      }
    }
  }

  // Reference variant: compares through the key's own Comparable.
  @inline
  @tailrec
  def binarySearchImplRef(a: Array[AnyRef], startIndex: Int, endIndex: Int, key: AnyRef): Int = {
    if (startIndex == endIndex) {
      // Not found
      -startIndex - 1
    } else {
      // Indices are unsigned 31-bit integer, so this does not overflow
      val mid = (startIndex + endIndex) >>> 1
      val cmp = key.asInstanceOf[Comparable[AnyRef]].compareTo(a(mid))
      if (cmp < 0) {
        binarySearchImplRef(a, startIndex, mid, key)
      } else if (cmp == 0) {
        // Found
        mid
      } else {
        binarySearchImplRef(a, mid + 1, endIndex, key)
      }
    }
  }

  // --------------------------------------------------------------------------
  // equals — element-wise comparison; two nulls are equal, one null is not.
  // --------------------------------------------------------------------------

  @noinline def equals(a: Array[Long], b: Array[Long]): Boolean =
    equalsImpl(a, b)

  @noinline def equals(a: Array[Int], b: Array[Int]): Boolean =
    equalsImpl(a, b)

  @noinline def equals(a: Array[Short], b: Array[Short]): Boolean =
    equalsImpl(a, b)

  @noinline def equals(a: Array[Char], b: Array[Char]): Boolean =
    equalsImpl(a, b)

  @noinline def equals(a: Array[Byte], b: Array[Byte]): Boolean =
    equalsImpl(a, b)

  @noinline def equals(a: Array[Boolean], b: Array[Boolean]): Boolean =
    equalsImpl(a, b)

  @noinline def equals(a: Array[Double], b: Array[Double]): Boolean =
    equalsImpl(a, b)

  @noinline def equals(a: Array[Float], b: Array[Float]): Boolean =
    equalsImpl(a, b)

  @noinline def equals(a: Array[AnyRef], b: Array[AnyRef]): Boolean =
    equalsImpl(a, b)

  @inline
  private def equalsImpl[T](a: Array[T], b: Array[T]): Boolean = {
    (a eq b) ||
    (a != null && b != null && a.length == b.length &&
        a.indices.forall(i => a(i) == b(i)))
  }

  // --------------------------------------------------------------------------
  // fill — whole-array overloads skip the range check (the range is the
  // array itself by construction).
  // --------------------------------------------------------------------------

  @noinline def fill(a: Array[Long], value: Long): Unit =
    fillImpl(a, 0, a.length, value, checkIndices = false)

  @noinline def fill(a: Array[Long], fromIndex: Int, toIndex: Int, value: Long): Unit =
    fillImpl(a, fromIndex, toIndex, value)

  @noinline def fill(a: Array[Int], value: Int): Unit =
    fillImpl(a, 0, a.length, value, checkIndices = false)

  @noinline def fill(a: Array[Int], fromIndex: Int, toIndex: Int, value: Int): Unit =
    fillImpl(a, fromIndex, toIndex, value)

  @noinline def fill(a: Array[Short], value: Short): Unit =
    fillImpl(a, 0, a.length, value, checkIndices = false)

  @noinline def fill(a: Array[Short], fromIndex: Int, toIndex: Int, value: Short): Unit =
    fillImpl(a, fromIndex, toIndex, value)

  @noinline def fill(a: Array[Char], value: Char): Unit =
    fillImpl(a, 0, a.length, value, checkIndices = false)

  @noinline def fill(a: Array[Char], fromIndex: Int, toIndex: Int, value: Char): Unit =
    fillImpl(a, fromIndex, toIndex, value)

  @noinline def fill(a: Array[Byte], value: Byte): Unit =
    fillImpl(a, 0, a.length, value, checkIndices = false)

  @noinline def fill(a: Array[Byte], fromIndex: Int, toIndex: Int, value: Byte): Unit =
    fillImpl(a, fromIndex, toIndex, value)

  @noinline def fill(a: Array[Boolean], value: Boolean): Unit =
    fillImpl(a, 0, a.length, value, checkIndices = false)

  @noinline def fill(a: Array[Boolean], fromIndex: Int, toIndex: Int, value: Boolean): Unit =
    fillImpl(a, fromIndex, toIndex, value)

  @noinline def fill(a: Array[Double], value: Double): Unit =
    fillImpl(a, 0, a.length, value, checkIndices = false)

  @noinline def fill(a: Array[Double], fromIndex: Int, toIndex: Int, value: Double): Unit =
    fillImpl(a, fromIndex, toIndex, value)

  @noinline def fill(a: Array[Float], value: Float): Unit =
    fillImpl(a, 0, a.length, value, checkIndices = false)

  @noinline def fill(a: Array[Float], fromIndex: Int, toIndex: Int, value: Float): Unit =
    fillImpl(a, fromIndex, toIndex, value)

  @noinline def fill(a: Array[AnyRef], value: AnyRef): Unit =
    fillImpl(a, 0, a.length, value, checkIndices = false)

  @noinline def fill(a: Array[AnyRef], fromIndex: Int, toIndex: Int, value: AnyRef): Unit =
    fillImpl(a, fromIndex, toIndex, value)

  @inline
  private def fillImpl[T](a: Array[T], fromIndex: Int, toIndex: Int, value: T, checkIndices: Boolean = true): Unit = {
    if (checkIndices)
      checkRangeIndices(a.length, fromIndex, toIndex)
    // fromIndex <= toIndex is guaranteed here, so != cannot run past the end.
    var i = fromIndex
    while (i != toIndex) {
      a(i) = value
      i += 1
    }
  }

  // --------------------------------------------------------------------------
  // copyOf — truncating/zero-padding copies, per the java.util.Arrays spec.
  // --------------------------------------------------------------------------

  @noinline def copyOf[T <: AnyRef](original: Array[T], newLength: Int): Array[T] = {
    implicit val tagT = ClassTag[T](original.getClass.getComponentType)
    copyOfImpl(original, newLength)
  }

  @noinline def copyOf[T <: AnyRef, U <: AnyRef](original: Array[U], newLength: Int, newType: Class[_ <: Array[T]]): Array[T] = {
    implicit val tag = ClassTag[T](newType.getComponentType)
    copyOfImpl(original, newLength)
  }

  @noinline def copyOf(original: Array[Byte], newLength: Int): Array[Byte] =
    copyOfImpl(original, newLength)

  @noinline def copyOf(original: Array[Short], newLength: Int): Array[Short] =
    copyOfImpl(original, newLength)

  @noinline def copyOf(original: Array[Int], newLength: Int): Array[Int] =
    copyOfImpl(original, newLength)

  @noinline def copyOf(original: Array[Long], newLength: Int): Array[Long] =
    copyOfImpl(original, newLength)

  @noinline def copyOf(original: Array[Char], newLength: Int): Array[Char] =
    copyOfImpl(original, newLength)

  @noinline def copyOf(original: Array[Float], newLength: Int): Array[Float] =
    copyOfImpl(original, newLength)

  @noinline def copyOf(original: Array[Double], newLength: Int): Array[Double] =
    copyOfImpl(original, newLength)

  @noinline def copyOf(original: Array[Boolean], newLength: Int): Array[Boolean] =
    copyOfImpl(original, newLength)

  @inline
  private def copyOfImpl[U, T: ClassTag](original: Array[U], newLength: Int): Array[T] = {
    checkArrayLength(newLength)
    // Copy min(newLength, original.length) elements; the rest of the new
    // array keeps its zero/null default.
    val copyLength = Math.min(newLength, original.length)
    val ret = new Array[T](newLength)
    System.arraycopy(original, 0, ret, 0, copyLength)
    ret
  }

  // --------------------------------------------------------------------------
  // copyOfRange — `end` may exceed the source length (result is padded).
  // --------------------------------------------------------------------------

  @noinline def copyOfRange[T <: AnyRef](original: Array[T], from: Int, to: Int): Array[T] = {
    copyOfRangeImpl[T](original, from, to)(ClassTag(original.getClass.getComponentType)).asInstanceOf[Array[T]]
  }

  @noinline def copyOfRange[T <: AnyRef, U <: AnyRef](original: Array[U], from: Int, to: Int, newType: Class[_ <: Array[T]]): Array[T] = {
    copyOfRangeImpl[AnyRef](original.asInstanceOf[Array[AnyRef]], from, to)(
        ClassTag(newType.getComponentType)).asInstanceOf[Array[T]]
  }

  @noinline def copyOfRange(original: Array[Byte], start: Int, end: Int): Array[Byte] =
    copyOfRangeImpl[Byte](original, start, end)

  @noinline def copyOfRange(original: Array[Short], start: Int, end: Int): Array[Short] =
    copyOfRangeImpl(original, start, end)

  @noinline def copyOfRange(original: Array[Int], start: Int, end: Int): Array[Int] =
    copyOfRangeImpl(original, start, end)

  @noinline def copyOfRange(original: Array[Long], start: Int, end: Int): Array[Long] =
    copyOfRangeImpl(original, start, end)

  @noinline def copyOfRange(original: Array[Char], start: Int, end: Int): Array[Char] =
    copyOfRangeImpl(original, start, end)

  @noinline def copyOfRange(original: Array[Float], start: Int, end: Int): Array[Float] =
    copyOfRangeImpl(original, start, end)

  @noinline def copyOfRange(original: Array[Double], start: Int, end: Int): Array[Double] =
    copyOfRangeImpl(original, start, end)

  @noinline def copyOfRange(original: Array[Boolean], start: Int, end: Int): Array[Boolean] =
    copyOfRangeImpl(original, start, end)

  @inline
  private def copyOfRangeImpl[T: ClassTag](original: Array[T], start: Int, end: Int): Array[T] = {
    checkIndicesForCopyOfRange(original.length, start, end)
    val retLength = end - start
    val copyLength = Math.min(retLength, original.length - start)
    val ret = new Array[T](retLength)
    System.arraycopy(original, start, ret, 0, copyLength)
    ret
  }

  @inline private def checkArrayLength(len: Int): Unit = {
    if (len < 0)
      throw new NegativeArraySizeException
  }

  // Note: unlike checkRangeIndices, `end` beyond `len` is allowed here —
  // copyOfRange pads the result in that case.
  @inline private def checkIndicesForCopyOfRange(
      len: Int, start: Int, end: Int): Unit = {
    if (start > end)
      throw new IllegalArgumentException(start + " > " + end)
    if (start < 0 || start > len)
      throw new ArrayIndexOutOfBoundsException
  }

  /** Fixed-size List view of the array: reads and writes go straight
   *  through to `a`.
   */
  @noinline def asList[T <: AnyRef](a: Array[T]): List[T] = {
    new AbstractList[T] with RandomAccess {
      def size(): Int = a.length

      def get(index: Int): T = a(index)

      override def set(index: Int, element: T): T = {
        val ret = a(index)
        a(index) = element
        ret
      }
    }
  }

  // --------------------------------------------------------------------------
  // hashCode — the classic 31 * acc + elementHash fold; null array hashes to 0.
  // --------------------------------------------------------------------------

  @noinline def hashCode(a: Array[Long]): Int =
    hashCodeImpl[Long](a)

  @noinline def hashCode(a: Array[Int]): Int =
    hashCodeImpl[Int](a)

  @noinline def hashCode(a: Array[Short]): Int =
    hashCodeImpl[Short](a)

  @noinline def hashCode(a: Array[Char]): Int =
    hashCodeImpl[Char](a)

  @noinline def hashCode(a: Array[Byte]): Int =
    hashCodeImpl[Byte](a)

  @noinline def hashCode(a: Array[Boolean]): Int =
    hashCodeImpl[Boolean](a)

  @noinline def hashCode(a: Array[Float]): Int =
    hashCodeImpl[Float](a)

  @noinline def hashCode(a: Array[Double]): Int =
    hashCodeImpl[Double](a)

  @noinline def hashCode(a: Array[AnyRef]): Int =
    hashCodeImpl[AnyRef](a)

  @inline
  private def hashCodeImpl[T](a: Array[T],
      elementHashCode: T => Int = (x: T) => x.asInstanceOf[AnyRef].hashCode): Int = {
    if (a == null) 0
    else a.foldLeft(1)((acc, x) => 31*acc + (if (x == null) 0 else elementHashCode(x)))
  }

  // Like hashCode, but recurses into nested arrays of any element type.
  @noinline def deepHashCode(a: Array[AnyRef]): Int = {
    @inline
    def getHash(elem: AnyRef): Int = {
      elem match {
        case elem: Array[AnyRef]  => deepHashCode(elem)
        case elem: Array[Long]    => hashCode(elem)
        case elem: Array[Int]     => hashCode(elem)
        case elem: Array[Short]   => hashCode(elem)
        case elem: Array[Char]    => hashCode(elem)
        case elem: Array[Byte]    => hashCode(elem)
        case elem: Array[Boolean] => hashCode(elem)
        case elem: Array[Float]   => hashCode(elem)
        case elem: Array[Double]  => hashCode(elem)
        case _                    => elem.hashCode
      }
    }
    hashCodeImpl(a, getHash)
  }

  // Element-wise deep equality, delegating nested structure to Objects.deepEquals.
  @noinline def deepEquals(a1: Array[AnyRef], a2: Array[AnyRef]): Boolean = {
    if (a1 eq a2) true
    else if (a1 == null || a2 == null || a1.length != a2.length) false
    else a1.indices.forall(i => Objects.deepEquals(a1(i), a2(i)))
  }

  // --------------------------------------------------------------------------
  // toString — "[e1, e2, ...]"; a null array renders as "null".
  // --------------------------------------------------------------------------

  @noinline def toString(a: Array[Long]): String =
    toStringImpl[Long](a)

  @noinline def toString(a: Array[Int]): String =
    toStringImpl[Int](a)

  @noinline def toString(a: Array[Short]): String =
    toStringImpl[Short](a)

  @noinline def toString(a: Array[Char]): String =
    toStringImpl[Char](a)

  @noinline def toString(a: Array[Byte]): String =
    toStringImpl[Byte](a)

  @noinline def toString(a: Array[Boolean]): String =
    toStringImpl[Boolean](a)

  @noinline def toString(a: Array[Float]): String =
    toStringImpl[Float](a)

  @noinline def toString(a: Array[Double]): String =
    toStringImpl[Double](a)

  @noinline def toString(a: Array[AnyRef]): String =
    toStringImpl[AnyRef](a)

  @inline
  private def toStringImpl[T](a: Array[T]): String = {
    if (a == null) "null"
    else a.mkString("[", ", ", "]")
  }

  @noinline def deepToString(a: Array[AnyRef]): String =
    deepToStringImpl(a, immutable.HashSet.empty[AsRef])

  // `branch` is the set of arrays currently being rendered (compared by
  // reference identity via AsRef); revisiting one prints "[...]" to cut cycles.
  private def deepToStringImpl(a: Array[AnyRef], branch: immutable.Set[AsRef]): String = {
    @inline
    def valueToString(e: AnyRef): String = {
      if (e == null) "null"
      else {
        e match {
          case e: Array[AnyRef]  => deepToStringImpl(e, branch + new AsRef(a))
          case e: Array[Long]    => toString(e)
          case e: Array[Int]     => toString(e)
          case e: Array[Short]   => toString(e)
          case e: Array[Byte]    => toString(e)
          case e: Array[Char]    => toString(e)
          case e: Array[Boolean] => toString(e)
          case e: Array[Float]   => toString(e)
          case e: Array[Double]  => toString(e)
          case _                 => String.valueOf(e)
        }
      }
    }
    if (a == null) "null"
    else if (branch.contains(new AsRef(a))) "[...]"
    else a.iterator.map(valueToString).mkString("[", ", ", "]")
  }

  // Validates [start, end) against an array of length `length`; here, unlike
  // checkIndicesForCopyOfRange, `end` must not exceed the length.
  @inline
  private def checkRangeIndices(length: Int, start: Int, end: Int): Unit = {
    if (start > end)
      throw new IllegalArgumentException("fromIndex(" + start + ") > toIndex(" + end + ")")
    if (start < 0)
      throw new ArrayIndexOutOfBoundsException("Array index out of range: " + start)
    if (end > length)
      throw new ArrayIndexOutOfBoundsException("Array index out of range: " + end)
  }

  // Adapts a java.util.Comparator to a scala.math.Ordering.
  @inline
  private def toOrdering[T](cmp: Comparator[T]): Ordering[T] = {
    new Ordering[T] {
      def compare(x: T, y: T): Int = cmp.compare(x, y)
    }
  }

  /** Identity-based wrapper so arrays can live in a HashSet keyed by
   *  reference (arrays' own equals/hashCode would be unusable here).
   */
  private final class AsRef(val inner: AnyRef) {
    override def hashCode(): Int =
      System.identityHashCode(inner)

    override def equals(obj: Any): Boolean = {
      obj match {
        case obj: AsRef => obj.inner eq inner
        case _          => false
      }
    }
  }
}
lrytz/scala-js
javalib/src/main/scala/java/util/Arrays.scala
Scala
bsd-3-clause
25,438
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import akka.pattern.ask
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import cqrs.query.{CQRSViewEventBus, EventDuplicatesFilter}
import org.slf4j.LoggerFactory
import read_model.AccountView
import write_model.AccountQueries._

import scala.concurrent.Await

/** Interactive entry point for the read-model side of the CQRS sample.
 *
 *  Boots an actor system hosting an `AccountView` fed by a `CQRSViewEventBus`
 *  (subscribing from scratch, i.e. replaying the full event history), then
 *  serves ad-hoc queries typed on stdin in an endless loop.
 *
 *  Changes vs. the previous revision:
 *   - `extends App` replaced by an explicit `main` method — the `App` trait is
 *     based on `DelayedInit`, which has surprising initialization-order
 *     semantics and is deprecated for non-trivial entry points;
 *   - the "number of clients" response now carries the same `+++` suffix as
 *     the sibling responses (was inconsistently missing);
 *   - redundant braces around single match-case bodies removed.
 */
object ReadModelMain {

  val logger = LoggerFactory.getLogger(this.getClass)

  def main(args: Array[String]): Unit = {
    val time = System.currentTimeMillis()

    // unique client name so it is possible to create many read model instances on single machine
    val config = ConfigFactory.parseString(s"cqrs.kafka-pub-sub.node-name=local-$time")
      .withFallback(ConfigFactory.load())

    val system = ActorSystem(s"read-model-$time", config)

    val filter = new EventDuplicatesFilter()
    val view = system.actorOf(AccountView.props(filter), "account-view")
    // The event bus pushes deduplicated events into the view; subscribing
    // from scratch rebuilds the read model from the whole event stream.
    system.actorOf(CQRSViewEventBus(view, subscribeFromScratch = true), "cqrs-view-event-bus")

    implicit val timeout = Timeout(15, TimeUnit.SECONDS)
    implicit val ec = system.dispatcher

    // Blocking CLI loop; Await at the very edge of the program is acceptable.
    while (true) {
      logger.info(
        s"""
           |1 - query bank money
           |2 - query number of clients
           |3 - list of all accounts
         """.stripMargin)

      val line = scala.io.StdIn.readLine()

      line match {
        case "1" =>
          val result = Await.result((view ? BankMoney).mapTo[BankMoneyResponse], timeout.duration)
          logger.info(s"+++Bank currently has ${result.bankMoney} money+++")
        case "2" =>
          val result = Await.result((view ? NumberOfClients).mapTo[NumberOfClientsResponse], timeout.duration)
          // message suffix aligned with the other query responses
          logger.info(s"+++Bank currently has ${result.numberOfClients} clients+++")
        case "3" =>
          val result = Await.result((view ? AccountNames).mapTo[AccountNamesResponse], timeout.duration)
          logger.info(s"Accounts: ${result.accounts}")
        case _ =>
          logger.warn("+++Unrecognized query+++")
      }
    }
  }
}
cqrs-endeavour/cqrs-framework-sample
src/main/scala/ReadModelMain.scala
Scala
mpl-2.0
2,195
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.execution.datasources.json

import org.apache.spark.sql.{QueryTest, Row}
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.{StringType, StructType}

/**
 * Test cases for various [[org.apache.spark.sql.catalyst.json.JSONOptions]].
 *
 * Each Jackson parser feature is exercised twice: with the option off, the
 * malformed input is expected to land in the `_corrupt_record` column; with
 * the option on, the input is expected to parse into real columns.
 */
class JsonParsingOptionsSuite extends QueryTest with SharedSparkSession {
  import testImplicits._

  test("allowComments off") {
    val str = """{'name': /* hello */ 'Reynold Xin'}"""
    val df = spark.read.json(Seq(str).toDS())

    // Comments are rejected by default, so the row is corrupt.
    assert(df.schema.head.name == "_corrupt_record")
  }

  test("allowComments on") {
    val str = """{'name': /* hello */ 'Reynold Xin'}"""
    val df = spark.read.option("allowComments", "true").json(Seq(str).toDS())

    assert(df.schema.head.name == "name")
    assert(df.first().getString(0) == "Reynold Xin")
  }

  test("allowSingleQuotes off") {
    val str = """{'name': 'Reynold Xin'}"""
    val df = spark.read.option("allowSingleQuotes", "false").json(Seq(str).toDS())

    assert(df.schema.head.name == "_corrupt_record")
  }

  test("allowSingleQuotes on") {
    // Single quotes are allowed by default — no option needed here.
    val str = """{'name': 'Reynold Xin'}"""
    val df = spark.read.json(Seq(str).toDS())

    assert(df.schema.head.name == "name")
    assert(df.first().getString(0) == "Reynold Xin")
  }

  test("allowUnquotedFieldNames off") {
    val str = """{name: 'Reynold Xin'}"""
    val df = spark.read.json(Seq(str).toDS())

    assert(df.schema.head.name == "_corrupt_record")
  }

  test("allowUnquotedFieldNames on") {
    val str = """{name: 'Reynold Xin'}"""
    val df = spark.read.option("allowUnquotedFieldNames", "true").json(Seq(str).toDS())

    assert(df.schema.head.name == "name")
    assert(df.first().getString(0) == "Reynold Xin")
  }

  test("allowUnquotedControlChars off") {
    // The value embeds a raw control character (U+0001) inside the string.
    val str = "{\\"name\\": \\"a\\u0001b\\"}"
    val df = spark.read.json(Seq(str).toDS())

    assert(df.schema.head.name == "_corrupt_record")
  }

  test("allowUnquotedControlChars on") {
    val str = "{\\"name\\": \\"a\\u0001b\\"}"
    val df = spark.read.option("allowUnquotedControlChars", "true").json(Seq(str).toDS())

    assert(df.schema.head.name == "name")
    assert(df.first().getString(0) == "a\\u0001b")
  }

  test("allowNumericLeadingZeros off") {
    val str = """{"age": 0018}"""
    val df = spark.read.json(Seq(str).toDS())

    assert(df.schema.head.name == "_corrupt_record")
  }

  test("allowNumericLeadingZeros on") {
    val str = """{"age": 0018}"""
    val df = spark.read.option("allowNumericLeadingZeros", "true").json(Seq(str).toDS())

    assert(df.schema.head.name == "age")
    assert(df.first().getLong(0) == 18)
  }

  test("allowNonNumericNumbers off") {
    val str = """{"age": NaN}"""
    val df = spark.read.option("allowNonNumericNumbers", false).json(Seq(str).toDS())

    // With the feature off the whole row is preserved verbatim as corrupt.
    assert(df.schema === new StructType().add("_corrupt_record", StringType))
    checkAnswer(df, Row(str))
  }

  test("allowNonNumericNumbers on") {
    // All accepted non-numeric spellings: NaN, +INF, +Infinity, Infinity,
    // -INF, -Infinity — each inferred as a double column.
    val str = """{"c0":NaN, "c1":+INF, "c2":+Infinity, "c3":Infinity, "c4":-INF, "c5":-Infinity}"""
    val df = spark.read.option("allowNonNumericNumbers", true).json(Seq(str).toDS())

    assert(df.schema ===
      new StructType()
        .add("c0", "double")
        .add("c1", "double")
        .add("c2", "double")
        .add("c3", "double")
        .add("c4", "double")
        .add("c5", "double"))
    checkAnswer(
      df,
      Row(
        Double.NaN,
        Double.PositiveInfinity,
        Double.PositiveInfinity,
        Double.PositiveInfinity,
        Double.NegativeInfinity,
        Double.NegativeInfinity))
  }

  test("allowBackslashEscapingAnyCharacter off") {
    // "\$" is not a legal JSON escape, so parsing fails by default.
    val str = """{"name": "Cazen Lee", "price": "\\$10"}"""
    val df = spark.read.option("allowBackslashEscapingAnyCharacter", "false").json(Seq(str).toDS())

    assert(df.schema.head.name == "_corrupt_record")
  }

  test("allowBackslashEscapingAnyCharacter on") {
    val str = """{"name": "Cazen Lee", "price": "\\$10"}"""
    val df = spark.read.option("allowBackslashEscapingAnyCharacter", "true").json(Seq(str).toDS())

    assert(df.schema.head.name == "name")
    assert(df.schema.last.name == "price")
    assert(df.first().getString(0) == "Cazen Lee")
    assert(df.first().getString(1) == "$10")
  }
}
maropu/spark
sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonParsingOptionsSuite.scala
Scala
apache-2.0
5,035
package com.sfxcode.sapphire.core.el

import javax.el.MethodNotFoundException

/**
 * Helpers for evaluating EL (Expression Language) expressions against an
 * arbitrary object. The target object is temporarily registered in the EL
 * context under a per-object name so that plain property/method paths can be
 * turned into full EL expressions such as `${_self_123.name()}`.
 */
object ObjectExpressionHelper {
  // Placeholder name under which the target object is referenced in generated expressions.
  val TempObjectName = "_self"
  val TempValueName = "_tempValue"
  // Standard EL expression prefix.
  val ExpressionPrefix = "${"
  // Alternative prefix used in FXML files; rewritten to ExpressionPrefix before evaluation.
  val FxmlExpressionPrefix: String = "!{"

  /**
   * Evaluates `expressionString` against `obj` and returns the result, if any.
   *
   * Three input forms are handled:
   *  - a full EL expression (contains `${` after FXML-prefix normalization):
   *    evaluated as-is;
   *  - a method call (contains `(`): wrapped as `${_self.<call>}`;
   *  - a bare property path (e.g. `a.b`): the leading segment is rewritten to
   *    a no-arg method call (`a().b`) and evaluated; if that raises
   *    MethodNotFoundException the unmodified path is retried as a fallback.
   *
   * @param obj              target object the expression is evaluated against
   * @param expressionString expression, method call, or property path
   * @param clazz            expected result class passed to the EL engine
   */
  def getValue(obj: AnyRef, expressionString: String, clazz: Class[AnyRef]): Option[Any] = {
    var result: Option[Any] = None
    // Normalize FXML-style "!{...}" expressions to standard "${...}" form.
    val expression = expressionString.replace(FxmlExpressionPrefix, ExpressionPrefix)
    if (expression.contains(ExpressionPrefix))
      result = getValueOnObject(obj, expression, clazz)
    else if (expression.contains("("))
      result = getValueOnObject(obj, String.format("${%s.%s}", TempObjectName, expression), clazz)
    else {
      var tempExpression = expression
      // Insert "()" before the first "." (e.g. "a.b" -> "a().b"). The loop
      // exits as soon as a "()." sequence exists, so only the leading segment
      // is rewritten, and inputs already containing "()." are left untouched.
      while (tempExpression.indexOf(".") != -1 && tempExpression.indexOf("().") == -1) {
        val index = tempExpression.indexOf(".")
        tempExpression = tempExpression.substring(0, index) + "()" + tempExpression.substring(index)
      }
      try {
        var methodExpression = tempExpression
        // Ensure the final segment is also invoked as a no-arg method.
        if (!methodExpression.endsWith("()"))
          methodExpression = methodExpression + "()"
        result = getValueOnObject(obj, String.format("${%s.%s}", TempObjectName, methodExpression), clazz)
      } catch {
        case _: MethodNotFoundException =>
          // Fall back to the non-invoking path when no matching no-arg method exists.
          if (!tempExpression.endsWith("()"))
            result = getValueOnObject(obj, String.format("${%s.%s}", TempObjectName, tempExpression), clazz)
      }
    }
    result
  }

  /**
   * Registers `obj` under a unique per-object name, evaluates `expression`
   * with TempObjectName replaced by that name, and unregisters afterwards.
   */
  private def getValueOnObject(obj: AnyRef, expression: String, clazz: Class[AnyRef]): Option[Any] = {
    // Hash-based suffix keeps evaluations against different objects from
    // clobbering each other's registration (NOTE(review): hash collisions are
    // possible; registration is not collision-proof).
    val tempObjectString = "%s_%s".format(TempObjectName, Math.abs(obj.hashCode()))
    val newExpression = expression.replace(TempObjectName, tempObjectString)
    Expressions.register(tempObjectString, obj)
    val result = Expressions.getValue(newExpression, clazz)
    Expressions.unregister(tempObjectString)
    result
  }

  /** True if `key` contains either the standard or the FXML expression prefix. */
  def isExpressionKey(key: String): Boolean =
    key.contains(ObjectExpressionHelper.ExpressionPrefix) || key.contains(ObjectExpressionHelper.FxmlExpressionPrefix)
}
sfxcode/sapphire-core
src/main/scala/com/sfxcode/sapphire/core/el/ObjectExpressionHelper.scala
Scala
apache-2.0
2,111
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE
 * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file
 * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 */
package kafka.api

import java.util.Properties
import java.util.concurrent.Future

import kafka.consumer.SimpleConsumer
import kafka.integration.KafkaServerTestHarness
import kafka.server.KafkaConfig
import kafka.utils.{ShutdownableThread, TestUtils}
import kafka.utils.Implicits._
import org.apache.kafka.clients.producer._
import org.apache.kafka.clients.producer.internals.ErrorLoggingCallback
import org.junit.Assert._
import org.junit.{Ignore, Test}

import scala.collection.mutable.ArrayBuffer

// Integration test: idempotent producers keep delivering records (no losses,
// no duplicates) while all brokers in the cluster are bounced one at a time.
class ProducerBounceTest extends KafkaServerTestHarness {
  private val producerBufferSize = 65536
  // Broker-side max message size is deliberately half the producer buffer size.
  private val serverMessageMaxBytes = producerBufferSize/2

  val numServers = 4

  val overridingProps = new Properties()
  overridingProps.put(KafkaConfig.AutoCreateTopicsEnableProp, false.toString)
  overridingProps.put(KafkaConfig.MessageMaxBytesProp, serverMessageMaxBytes.toString)
  // Set a smaller value for the number of partitions for the offset commit topic (__consumer_offset topic)
  // so that the creation of that topic/partition(s) and subsequent leader assignment doesn't take relatively long
  overridingProps.put(KafkaConfig.OffsetsTopicPartitionsProp, 1.toString)
  overridingProps.put(KafkaConfig.ControlledShutdownEnableProp, true.toString)
  overridingProps.put(KafkaConfig.UncleanLeaderElectionEnableProp, false.toString)
  overridingProps.put(KafkaConfig.AutoLeaderRebalanceEnableProp, false.toString)

  // This is the one of the few tests we currently allow to preallocate ports, despite the fact that this can result in transient
  // failures due to ports getting reused. We can't use random ports because of bad behavior that can result from bouncing
  // brokers too quickly when they get new, random ports. If we're not careful, the client can end up in a situation
  // where metadata is not refreshed quickly enough, and by the time it's actually trying to, all the servers have
  // been bounced and have new addresses. None of the bootstrap nodes or current metadata can get them connected to a
  // running server.
  //
  // Since such quick rotation of servers is incredibly unrealistic, we allow this one test to preallocate ports, leaving
  // a small risk of hitting errors due to port conflicts. Hopefully this is infrequent enough to not cause problems.
  override def generateConfigs = {
    FixedPortTestUtils.createBrokerConfigs(numServers, zkConnect,enableControlledShutdown = true)
      .map(KafkaConfig.fromProps(_, overridingProps))
  }

  private val topic1 = "topic-1"

  /**
   * With replication, the producer should be able to find the new leader after it detects a broker failure.
   */
  @Ignore // To be re-enabled once we can make it less flaky (KAFKA-2837)
  @Test
  def testBrokerFailure() {
    // min.insync.replicas=2 so writes require more than a single live replica.
    val numPartitions = 3
    val topicConfig = new Properties()
    topicConfig.put(KafkaConfig.MinInSyncReplicasProp, 2.toString)
    TestUtils.createTopic(zkUtils, topic1, numPartitions, numServers, servers, topicConfig)

    val scheduler = new ProducerScheduler()
    scheduler.start

    // rolling bounce brokers
    for (_ <- 0 until numServers) {
      for (server <- servers) {
        info("Shutting down server : %s".format(server.config.brokerId))
        server.shutdown()
        server.awaitShutdown()
        info("Server %s shut down. Starting it up again.".format(server.config.brokerId))
        server.startup()
        info("Restarted server: %s".format(server.config.brokerId))
      }

      // Make sure the producer do not see any exception in returned metadata due to broker failures
      assertFalse(scheduler.failed)

      // Make sure the leader still exists after bouncing brokers
      (0 until numPartitions).foreach(partition => TestUtils.waitUntilLeaderIsElectedOrChanged(zkUtils, topic1, partition))
    }

    scheduler.shutdown

    // Make sure the producer do not see any exception
    // when draining the left messages on shutdown
    assertFalse(scheduler.failed)

    // double check that the leader info has been propagated after consecutive bounces
    val newLeaders = (0 until numPartitions).map(i => TestUtils.waitUntilMetadataIsPropagated(servers, topic1, i))
    val fetchResponses = newLeaders.zipWithIndex.map { case (leader, partition) =>
      // Consumers must be instantiated after all the restarts since they use random ports each time they start up
      val consumer = new SimpleConsumer("localhost", boundPort(servers(leader)), 30000, 1024 * 1024, "")
      val response = consumer.fetch(new FetchRequestBuilder().addFetch(topic1, partition, 0, Int.MaxValue).build()).messageSet(topic1, partition)
      consumer.close
      response
    }

    // Idempotence check: every message fetched back must be unique, and the
    // total must match exactly what the scheduler reported as sent.
    val messages = fetchResponses.flatMap(r => r.iterator.toList.map(_.message))
    val uniqueMessages = messages.toSet
    val uniqueMessageSize = uniqueMessages.size
    info(s"number of unique messages sent: ${uniqueMessageSize}")
    assertEquals(s"Found ${messages.size - uniqueMessageSize} duplicate messages.", uniqueMessageSize, messages.size)
    assertEquals("Should have fetched " + scheduler.sent + " unique messages", scheduler.sent, messages.size)
  }

  // Background thread that repeatedly sends batches of records with three
  // differently-configured idempotent producers, tracking the number of
  // successfully-sent records and whether any send failed.
  private class ProducerScheduler extends ShutdownableThread("daemon-producer", false) {
    val numRecords = 1000
    var sent = 0
    var failed = false

    val producerConfig = new Properties()
    producerConfig.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true")
    producerConfig.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1")
    val producerConfigWithCompression = new Properties()
    producerConfigWithCompression ++= producerConfig
    producerConfigWithCompression.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "lz4")
    // Three producers with different buffer sizes / linger settings; the last
    // one additionally uses lz4 compression.
    val producers = List(
      TestUtils.createNewProducer(brokerList, bufferSize = producerBufferSize / 4, retries = 10, props = Some(producerConfig)),
      TestUtils.createNewProducer(brokerList, bufferSize = producerBufferSize / 2, retries = 10, lingerMs = 5000, props = Some(producerConfig)),
      TestUtils.createNewProducer(brokerList, bufferSize = producerBufferSize, retries = 10, lingerMs = 10000, props = Some(producerConfigWithCompression))
    )

    override def doWork(): Unit = {
      info("Starting to send messages..")
      var producerId = 0
      val responses = new ArrayBuffer[IndexedSeq[Future[RecordMetadata]]]()
      for (producer <- producers) {
        // Record values encode producer index and sequence number, so every
        // record across all producers is unique.
        val response =
          for (i <- sent+1 to sent+numRecords)
            yield producer.send(new ProducerRecord[Array[Byte],Array[Byte]](topic1, null, null, ((producerId + 1) * i).toString.getBytes),
              new ErrorLoggingCallback(topic1, null, null, true))
        responses.append(response)
        producerId += 1
      }

      try {
        // Block on every future; a single failed send flags the whole run.
        for (response <- responses) {
          val futures = response.toList
          futures.map(_.get)
          sent += numRecords
        }
        info(s"Sent $sent records")
      } catch {
        case e : Exception =>
          error(s"Got exception ${e.getMessage}")
          e.printStackTrace()
          failed = true
      }
    }

    override def shutdown(){
      super.shutdown()
      for (producer <- producers) {
        producer.close()
      }
    }
  }
}
zzwlstarby/mykafka
core/src/test/scala/integration/kafka/api/ProducerBounceTest.scala
Scala
apache-2.0
7,965
package defend.shard

import akka.actor.{ ActorSystem, PoisonPill, Props, Terminated }
import akka.testkit.{ TestKit, TestProbe }
import com.typesafe.config.ConfigFactory
import defend.game.GameEngine.Protocol.RocketFired
import defend.model._
import defend.shard.TowerActor.Protocol.{ ExperienceGained, Ping, Situation }
import defend.ui.StatusKeeper.Protocol.{ RecoveryReport, TowerKeepAlive }
import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike }

import scala.concurrent.{ Await, Future }
import scala.concurrent.duration._
import scala.language.postfixOps

// Behaviour tests for TowerActor: ready/reloading state transitions and
// experience accumulation, observed via the messages the actor sends to a
// status-keeper TestProbe.
class TowerActorTest extends TestKit(ActorSystem("defend", ConfigFactory.load("application-test.conf")))
  with WordSpecLike with Matchers with BeforeAndAfterAll {

  private val tower: DefenceTower = DefenceTower("A", Position(0, 0))
  // Situation with no incoming weapons: the tower has nothing to shoot at.
  private val situation: Situation = Situation(0, tower, Nil, LandScape(200, 100, 50))
  // Situation with one incoming alien bomb: the tower is expected to fire.
  private val situationWithIncoming: Situation =
    Situation(1, tower, List(WeaponInAction(AlienBomb(1, 1), Position(10, 100), MoveVector(0, 0))), LandScape(200, 100, 50))

  "TowerActor" should {
    "start with ready" in {
      val statusKeeper: TestProbe = new TestProbe(system)
      val props: Props = TowerActor.props("name", statusKeeper.ref, 100 millis)
      val underTest = system.actorOf(props)
      underTest ! Ping
      underTest ! situation
      underTest ! Ping
      // The actor reports a successful recovery first, then a keep-alive
      // carrying the DefenceTowerReady state.
      statusKeeper.expectMsgPF(2 second) {
        case rr: RecoveryReport => rr.success shouldBe true
      }
      statusKeeper.expectMsgPF(2 seconds) {
        case t: TowerKeepAlive => t.towerState shouldBe DefenceTowerReady
        case x: Any            => throw new Exception(s"Received wrong message $x")
      }
      //      underTest ! PoisonPill
    }

    "go to reloading after shooting and back to ready" in {
      val statusKeeper: TestProbe = new TestProbe(system)
      val situationSender: TestProbe = new TestProbe(system)
      val props: Props = TowerActor.props("name", statusKeeper.ref, reloadTime = 150 millis)
      val underTest = system.actorOf(props)
      statusKeeper.expectMsgPF(2 second) {
        case rr: RecoveryReport => rr.success shouldBe true
      }
      // First incoming threat: the tower shoots and enters the reloading state.
      underTest.tell(situationWithIncoming, situationSender.ref)
      underTest ! Ping
      statusKeeper.fishForMessage(1 second) {
        case TowerKeepAlive(tower.name, _, DefenceTowerReloading, _)     => true
        case TowerKeepAlive("?", _, DefenceTowerReloading, _)            => true
        case DefenceTowerStatus(_, DefenceTowerReloading, true, _, _, _) => true
      }
      // A second threat while reloading: still reported as reloading.
      underTest.tell(situationWithIncoming, situationSender.ref)
      underTest ! Ping
      statusKeeper.fishForMessage(1 second) {
        case TowerKeepAlive(tower.name, _, DefenceTowerReloading, _) => true
      }
      //rocket fired
      situationSender.expectMsgPF(1 second) {
        case rf: RocketFired => rf.defenceTower shouldBe tower
      }
      //back to ready
      statusKeeper.fishForMessage(1 second) {
        case TowerKeepAlive(tower.name, _, DefenceTowerReady, _)     => true
        case TowerKeepAlive(tower.name, _, DefenceTowerReloading, _) => false
        case DefenceTowerStatus(_, DefenceTowerReady, true, _, _, _) => true
      }
      underTest ! PoisonPill
    }

    "accumulate experience" in {
      val statusKeeper: TestProbe = new TestProbe(system)
      val props: Props = TowerActor.props("name", statusKeeper.ref)
      val underTest = system.actorOf(props)
      statusKeeper.expectMsgPF(2 second) {
        case rr: RecoveryReport     => rr.success shouldBe true
        case st: DefenceTowerStatus => st.isUp shouldBe true
      }
      statusKeeper.expectMsgPF(2 second) {
        case rr: RecoveryReport     => rr.success shouldBe true
        case st: DefenceTowerStatus => st.isUp shouldBe true
      }
      underTest ! situation
      // 10 experience points map (via defend.experienceToLevel) to `level`,
      // which should appear in the next keep-alive message.
      underTest ! ExperienceGained(10)
      val level: Int = defend.experienceToLevel(10)
      underTest ! Ping
      statusKeeper.fishForMessage(1 second) {
        case TowerKeepAlive(tower.name, _, _, `level`)      => true
        case TowerKeepAlive(tower.name, _, _, _)            => false
        case DefenceTowerStatus(_, _, true, _, `level`, _)  => true
      }
      // Experience is cumulative: another 10 points yields the level for 20.
      underTest ! ExperienceGained(10)
      underTest ! Ping
      val level2: Int = defend.experienceToLevel(20)
      statusKeeper.fishForMessage(1 second) {
        case TowerKeepAlive(tower.name, _, _, `level2`)     => true
        case TowerKeepAlive(tower.name, _, _, _)            => false
        case DefenceTowerStatus(_, _, true, _, `level2`, _) => true
      }
    }
  }

  // Shut the actor system down after all tests so the test JVM does not leak threads.
  override protected def afterAll(): Unit = {
    val terminate: Future[Terminated] = system.terminate()
    Await.result(terminate, 5 seconds)
  }
}
otrebski/reactive-missile-defend
src/test/scala/defend/shard/TowerActorTest.scala
Scala
apache-2.0
4,787
/*
 * Copyright 2017 Google Inc. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.ml.samples.criteo

import org.scalatest.{FlatSpec, GivenWhenThen, Matchers}

/**
 * Verifies that [[CleanTSVImporter]] loads the expected number of rows from a
 * clean test fixture and that no null values remain in the imported frame.
 */
class CleanTSVImporterTest extends FlatSpec with SparkSpec with GivenWhenThen with Matchers {

  "criteoImport" should "import clean training data from a TSV file" in {
    val fixturePath = "src/test/resources/test_train.csv"
    val features = CriteoFeatures()
    val importer = new CleanTSVImporter(fixturePath, features.inputSchema, 1)

    val frame = importer.criteoImport
    frame.count should equal(5)

    // Collect to the driver as plain Seqs so the null check below runs
    // locally and avoids serializing the assertion closure.
    val localRows = frame.collect.map(_.toSeq)
    for (row <- localRows) {
      // A row is null-free exactly when dropping nulls preserves its length.
      val withoutNulls = row.filter(_ != null)
      row.length should equal(withoutNulls.length)
    }
  }
}
GoogleCloudDataproc/cloud-dataproc
spark-tensorflow/prepare/src/test/scala/com/google/cloud/ml/samples/criteo/CriteoImporterTest.scala
Scala
apache-2.0
1,470
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.optimizer import scala.collection.mutable import org.apache.spark.sql.catalyst.analysis._ import org.apache.spark.sql.catalyst.catalog.{InMemoryCatalog, SessionCatalog} import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.aggregate._ import org.apache.spark.sql.catalyst.plans._ import org.apache.spark.sql.catalyst.plans.logical.{RepartitionOperation, _} import org.apache.spark.sql.catalyst.rules._ import org.apache.spark.sql.catalyst.trees.AlwaysProcess import org.apache.spark.sql.catalyst.trees.TreePattern._ import org.apache.spark.sql.connector.catalog.CatalogManager import org.apache.spark.sql.errors.QueryCompilationErrors import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types._ import org.apache.spark.sql.util.SchemaUtils._ import org.apache.spark.util.Utils /** * Abstract class all optimizers should inherit of, contains the standard batches (extending * Optimizers can override this. */ abstract class Optimizer(catalogManager: CatalogManager) extends RuleExecutor[LogicalPlan] { // Check for structural integrity of the plan in test mode. 
// Currently we check after the execution of each rule if a plan: // - is still resolved // - only host special expressions in supported operators // - has globally-unique attribute IDs // - optimized plan have same schema with previous plan. override protected def isPlanIntegral( previousPlan: LogicalPlan, currentPlan: LogicalPlan): Boolean = { !Utils.isTesting || (currentPlan.resolved && currentPlan.find(PlanHelper.specialExpressionsInUnsupportedOperator(_).nonEmpty).isEmpty && LogicalPlanIntegrity.checkIfExprIdsAreGloballyUnique(currentPlan) && DataType.equalsIgnoreNullability(previousPlan.schema, currentPlan.schema)) } override protected val excludedOnceBatches: Set[String] = Set( "PartitionPruning", "Extract Python UDFs") protected def fixedPoint = FixedPoint( SQLConf.get.optimizerMaxIterations, maxIterationsSetting = SQLConf.OPTIMIZER_MAX_ITERATIONS.key) /** * Defines the default rule batches in the Optimizer. * * Implementations of this class should override this method, and [[nonExcludableRules]] if * necessary, instead of [[batches]]. The rule batches that eventually run in the Optimizer, * i.e., returned by [[batches]], will be (defaultBatches - (excludedRules - nonExcludableRules)). 
*/ def defaultBatches: Seq[Batch] = { val operatorOptimizationRuleSet = Seq( // Operator push down PushProjectionThroughUnion, ReorderJoin, EliminateOuterJoin, PushDownPredicates, PushDownLeftSemiAntiJoin, PushLeftSemiLeftAntiThroughJoin, LimitPushDown, LimitPushDownThroughWindow, ColumnPruning, GenerateOptimization, // Operator combine CollapseRepartition, CollapseProject, OptimizeWindowFunctions, CollapseWindow, CombineFilters, EliminateLimits, CombineUnions, // Constant folding and strength reduction OptimizeRepartition, TransposeWindow, NullPropagation, NullDownPropagation, ConstantPropagation, FoldablePropagation, OptimizeIn, ConstantFolding, EliminateAggregateFilter, ReorderAssociativeOperator, LikeSimplification, BooleanSimplification, SimplifyConditionals, PushFoldableIntoBranches, RemoveDispensableExpressions, SimplifyBinaryComparison, ReplaceNullWithFalseInPredicate, SimplifyConditionalsInPredicate, PruneFilters, SimplifyCasts, SimplifyCaseConversionExpressions, RewriteCorrelatedScalarSubquery, RewriteLateralSubquery, EliminateSerialization, RemoveRedundantAliases, RemoveRedundantAggregates, UnwrapCastInBinaryComparison, RemoveNoopOperators, OptimizeUpdateFields, SimplifyExtractValueOps, OptimizeCsvJsonExprs, CombineConcats) ++ extendedOperatorOptimizationRules val operatorOptimizationBatch: Seq[Batch] = { Batch("Operator Optimization before Inferring Filters", fixedPoint, operatorOptimizationRuleSet: _*) :: Batch("Infer Filters", Once, InferFiltersFromGenerate, InferFiltersFromConstraints) :: Batch("Operator Optimization after Inferring Filters", fixedPoint, operatorOptimizationRuleSet: _*) :: // Set strategy to Once to avoid pushing filter every time because we do not change the // join condition. 
Batch("Push extra predicate through join", fixedPoint, PushExtraPredicateThroughJoin, PushDownPredicates) :: Nil } val batches = (Batch("Eliminate Distinct", Once, EliminateDistinct) :: // Technically some of the rules in Finish Analysis are not optimizer rules and belong more // in the analyzer, because they are needed for correctness (e.g. ComputeCurrentTime). // However, because we also use the analyzer to canonicalized queries (for view definition), // we do not eliminate subqueries or compute current time in the analyzer. Batch("Finish Analysis", Once, EliminateResolvedHint, EliminateSubqueryAliases, EliminateView, InlineCTE, ReplaceExpressions, RewriteNonCorrelatedExists, PullOutGroupingExpressions, ComputeCurrentTime, ReplaceCurrentLike(catalogManager), SpecialDatetimeValues, RewriteAsOfJoin) :: ////////////////////////////////////////////////////////////////////////////////////////// // Optimizer rules start here ////////////////////////////////////////////////////////////////////////////////////////// // - Do the first call of CombineUnions before starting the major Optimizer rules, // since it can reduce the number of iteration and the other rules could add/move // extra operators between two adjacent Union operators. // - Call CombineUnions again in Batch("Operator Optimizations"), // since the other rules might make two separate Unions operators adjacent. Batch("Union", Once, RemoveNoopOperators, CombineUnions, RemoveNoopUnion) :: Batch("OptimizeLimitZero", Once, OptimizeLimitZero) :: // Run this once earlier. This might simplify the plan and reduce cost of optimizer. // For example, a query such as Filter(LocalRelation) would go through all the heavy // optimizer rules that are triggered when there is a filter // (e.g. InferFiltersFromConstraints). If we run this batch earlier, the query becomes just // LocalRelation and does not trigger many rules. 
Batch("LocalRelation early", fixedPoint, ConvertToLocalRelation, PropagateEmptyRelation, // PropagateEmptyRelation can change the nullability of an attribute from nullable to // non-nullable when an empty relation child of a Union is removed UpdateAttributeNullability) :: Batch("Pullup Correlated Expressions", Once, OptimizeOneRowRelationSubquery, PullupCorrelatedPredicates) :: // Subquery batch applies the optimizer rules recursively. Therefore, it makes no sense // to enforce idempotence on it and we change this batch from Once to FixedPoint(1). Batch("Subquery", FixedPoint(1), OptimizeSubqueries) :: Batch("Replace Operators", fixedPoint, RewriteExceptAll, RewriteIntersectAll, ReplaceIntersectWithSemiJoin, ReplaceExceptWithFilter, ReplaceExceptWithAntiJoin, ReplaceDistinctWithAggregate, ReplaceDeduplicateWithAggregate) :: Batch("Aggregate", fixedPoint, RemoveLiteralFromGroupExpressions, RemoveRepetitionFromGroupExpressions) :: Nil ++ operatorOptimizationBatch) :+ // This batch rewrites plans after the operator optimization and // before any batches that depend on stats. Batch("Pre CBO Rules", Once, preCBORules: _*) :+ // This batch pushes filters and projections into scan nodes. Before this batch, the logical // plan may contain nodes that do not report stats. Anything that uses stats must run after // this batch. Batch("Early Filter and Projection Push-Down", Once, earlyScanPushDownRules: _*) :+ Batch("Update CTE Relation Stats", Once, UpdateCTERelationStats) :+ // Since join costs in AQP can change between multiple runs, there is no reason that we have an // idempotence enforcement on this batch. We thus make it FixedPoint(1) instead of Once. 
Batch("Join Reorder", FixedPoint(1), CostBasedJoinReorder) :+ Batch("Eliminate Sorts", Once, EliminateSorts) :+ Batch("Decimal Optimizations", fixedPoint, DecimalAggregates) :+ // This batch must run after "Decimal Optimizations", as that one may change the // aggregate distinct column Batch("Distinct Aggregate Rewrite", Once, RewriteDistinctAggregates) :+ Batch("Object Expressions Optimization", fixedPoint, EliminateMapObjects, CombineTypedFilters, ObjectSerializerPruning, ReassignLambdaVariableID) :+ Batch("LocalRelation", fixedPoint, ConvertToLocalRelation, PropagateEmptyRelation, // PropagateEmptyRelation can change the nullability of an attribute from nullable to // non-nullable when an empty relation child of a Union is removed UpdateAttributeNullability) :+ Batch("Optimize One Row Plan", fixedPoint, OptimizeOneRowPlan) :+ // The following batch should be executed after batch "Join Reorder" and "LocalRelation". Batch("Check Cartesian Products", Once, CheckCartesianProducts) :+ Batch("RewriteSubquery", Once, RewritePredicateSubquery, ColumnPruning, CollapseProject, RemoveRedundantAliases, RemoveNoopOperators) :+ // This batch must be executed after the `RewriteSubquery` batch, which creates joins. Batch("NormalizeFloatingNumbers", Once, NormalizeFloatingNumbers) :+ Batch("ReplaceUpdateFieldsExpression", Once, ReplaceUpdateFieldsExpression) // remove any batches with no rules. this may happen when subclasses do not add optional rules. batches.filter(_.rules.nonEmpty) } /** * Defines rules that cannot be excluded from the Optimizer even if they are specified in * SQL config "excludedRules". * * Implementations of this class can override this method if necessary. The rule batches * that eventually run in the Optimizer, i.e., returned by [[batches]], will be * (defaultBatches - (excludedRules - nonExcludableRules)). 
*/ def nonExcludableRules: Seq[String] = EliminateDistinct.ruleName :: EliminateResolvedHint.ruleName :: EliminateSubqueryAliases.ruleName :: EliminateView.ruleName :: ReplaceExpressions.ruleName :: ComputeCurrentTime.ruleName :: SpecialDatetimeValues.ruleName :: ReplaceCurrentLike(catalogManager).ruleName :: RewriteDistinctAggregates.ruleName :: ReplaceDeduplicateWithAggregate.ruleName :: ReplaceIntersectWithSemiJoin.ruleName :: ReplaceExceptWithFilter.ruleName :: ReplaceExceptWithAntiJoin.ruleName :: RewriteExceptAll.ruleName :: RewriteIntersectAll.ruleName :: ReplaceDistinctWithAggregate.ruleName :: PullupCorrelatedPredicates.ruleName :: RewriteCorrelatedScalarSubquery.ruleName :: RewritePredicateSubquery.ruleName :: NormalizeFloatingNumbers.ruleName :: ReplaceUpdateFieldsExpression.ruleName :: PullOutGroupingExpressions.ruleName :: RewriteAsOfJoin.ruleName :: RewriteLateralSubquery.ruleName :: Nil /** * Optimize all the subqueries inside expression. */ object OptimizeSubqueries extends Rule[LogicalPlan] { private def removeTopLevelSort(plan: LogicalPlan): LogicalPlan = { if (!plan.containsPattern(SORT)) { return plan } plan match { case Sort(_, _, child) => child case Project(fields, child) => Project(fields, removeTopLevelSort(child)) case other => other } } def apply(plan: LogicalPlan): LogicalPlan = plan.transformAllExpressionsWithPruning( _.containsPattern(PLAN_EXPRESSION), ruleId) { case s: SubqueryExpression => val Subquery(newPlan, _) = Optimizer.this.execute(Subquery.fromExpression(s)) // At this point we have an optimized subquery plan that we are going to attach // to this subquery expression. Here we can safely remove any top level sort // in the plan as tuples produced by a subquery are un-ordered. s.withNewPlan(removeTopLevelSort(newPlan)) } } /** * Update CTE reference stats. 
*/ object UpdateCTERelationStats extends Rule[LogicalPlan] { override def apply(plan: LogicalPlan): LogicalPlan = { if (!plan.isInstanceOf[Subquery] && plan.containsPattern(CTE)) { val statsMap = mutable.HashMap.empty[Long, Statistics] updateCTEStats(plan, statsMap) } else { plan } } private def updateCTEStats( plan: LogicalPlan, statsMap: mutable.HashMap[Long, Statistics]): LogicalPlan = plan match { case WithCTE(child, cteDefs) => val newDefs = cteDefs.map { cteDef => val newDef = updateCTEStats(cteDef, statsMap) statsMap.put(cteDef.id, newDef.stats) newDef.asInstanceOf[CTERelationDef] } WithCTE(updateCTEStats(child, statsMap), newDefs) case c: CTERelationRef => statsMap.get(c.cteId).map(s => c.withNewStats(Some(s))).getOrElse(c) case _ if plan.containsPattern(CTE) => plan .withNewChildren(plan.children.map(child => updateCTEStats(child, statsMap))) .transformExpressionsWithPruning(_.containsAllPatterns(PLAN_EXPRESSION, CTE)) { case e: SubqueryExpression => e.withNewPlan(updateCTEStats(e.plan, statsMap)) } case _ => plan } } /** * Override to provide additional rules for the operator optimization batch. */ def extendedOperatorOptimizationRules: Seq[Rule[LogicalPlan]] = Nil /** * Override to provide additional rules for early projection and filter pushdown to scans. */ def earlyScanPushDownRules: Seq[Rule[LogicalPlan]] = Nil /** * Override to provide additional rules for rewriting plans after operator optimization rules and * before any cost-based optimization rules that depend on stats. */ def preCBORules: Seq[Rule[LogicalPlan]] = Nil /** * Returns (defaultBatches - (excludedRules - nonExcludableRules)), the rule batches that * eventually run in the Optimizer. * * Implementations of this class should override [[defaultBatches]], and [[nonExcludableRules]] * if necessary, instead of this method. 
   */
  final override def batches: Seq[Batch] = {
    val excludedRulesConf = SQLConf.get.optimizerExcludedRules.toSeq.flatMap(Utils.stringToSeq)
    val excludedRules = excludedRulesConf.filter { ruleName =>
      // Rules in [[nonExcludableRules]] are required for correctness and are kept even when the
      // user lists them in "spark.sql.optimizer.excludedRules"; warn so the user knows why.
      val nonExcludable = nonExcludableRules.contains(ruleName)
      if (nonExcludable) {
        logWarning(s"Optimization rule '${ruleName}' was not excluded from the optimizer " +
          s"because this rule is a non-excludable rule.")
      }
      !nonExcludable
    }
    if (excludedRules.isEmpty) {
      defaultBatches
    } else {
      // Rebuild each batch without the excluded rules; drop a batch entirely when every one of
      // its rules has been excluded.
      defaultBatches.flatMap { batch =>
        val filteredRules = batch.rules.filter { rule =>
          val exclude = excludedRules.contains(rule.ruleName)
          if (exclude) {
            logInfo(s"Optimization rule '${rule.ruleName}' is excluded from the optimizer.")
          }
          !exclude
        }
        if (batch.rules == filteredRules) {
          Some(batch)
        } else if (filteredRules.nonEmpty) {
          Some(Batch(batch.name, batch.strategy, filteredRules: _*))
        } else {
          logInfo(s"Optimization batch '${batch.name}' is excluded from the optimizer " +
            s"as all enclosed rules have been excluded.")
          None
        }
      }
    }
  }
}

/**
 * Remove useless DISTINCT for MAX and MIN.
 * This rule should be applied before RewriteDistinctAggregates.
 */
object EliminateDistinct extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan.transformAllExpressionsWithPruning(
    _.containsPattern(AGGREGATE_EXPRESSION)) {
    case ae: AggregateExpression if ae.isDistinct && isDuplicateAgnostic(ae.aggregateFunction) =>
      ae.copy(isDistinct = false)
  }

  // An aggregate function is duplicate-agnostic when seeing the same input value more than once
  // cannot change its result, which makes DISTINCT a no-op on top of it.
  def isDuplicateAgnostic(af: AggregateFunction): Boolean = af match {
    case _: Max => true
    case _: Min => true
    case _: BitAndAgg => true
    case _: BitOrAgg => true
    case _: CollectSet => true
    case _: First => true
    case _: Last => true
    case _ => false
  }
}

/**
 * Remove useless FILTER clause for aggregate expressions.
 * This rule should be applied before RewriteDistinctAggregates.
 */
object EliminateAggregateFilter extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan.transformAllExpressionsWithPruning(
    _.containsAllPatterns(AGGREGATE_EXPRESSION, TRUE_OR_FALSE_LITERAL), ruleId) {
    // FILTER (WHERE true) never rejects a row, so the filter clause can simply be dropped.
    case ae @ AggregateExpression(_, _, _, Some(Literal.TrueLiteral), _) =>
      ae.copy(filter = None)
    // FILTER (WHERE false) rejects every row, so the aggregate sees no input. For a declarative
    // aggregate we can fold it to the value its evaluate expression yields over a freshly
    // initialized (empty) aggregation buffer.
    case AggregateExpression(af: DeclarativeAggregate, _, _, Some(Literal.FalseLiteral), _) =>
      val initialProject = SafeProjection.create(af.initialValues)
      val evalProject = SafeProjection.create(af.evaluateExpression :: Nil, af.aggBufferAttributes)
      val initialBuffer = initialProject(EmptyRow)
      val internalRow = evalProject(initialBuffer)
      Literal.create(internalRow.get(0, af.dataType), af.dataType)
    // Same folding for an imperative aggregate: initialize its buffer and evaluate without
    // ever feeding it a row.
    case AggregateExpression(af: ImperativeAggregate, _, _, Some(Literal.FalseLiteral), _) =>
      val buffer = new SpecificInternalRow(af.aggBufferAttributes.map(_.dataType))
      af.initialize(buffer)
      Literal.create(af.eval(buffer), af.dataType)
  }
}

/**
 * An optimizer used in test code.
 *
 * To ensure extendability, we leave the standard rules in the abstract optimizer rules, while
 * specific rules go to the subclasses
 */
object SimpleTestOptimizer extends SimpleTestOptimizer

class SimpleTestOptimizer extends Optimizer(
  new CatalogManager(
    FakeV2SessionCatalog,
    new SessionCatalog(new InMemoryCatalog, EmptyFunctionRegistry, EmptyTableFunctionRegistry)))

/**
 * Remove redundant aliases from a query plan. A redundant alias is an alias that does not change
 * the name or metadata of a column, and does not deduplicate it.
 */
object RemoveRedundantAliases extends Rule[LogicalPlan] {

  /**
   * Create an attribute mapping from the old to the new attributes. This function will only
   * return the attribute pairs that have changed.
   */
  private def createAttributeMapping(current: LogicalPlan, next: LogicalPlan)
    : Seq[(Attribute, Attribute)] = {
    // Pair up the outputs positionally and keep only the pairs that actually differ.
    current.output.zip(next.output).filterNot {
      case (a1, a2) => a1.semanticEquals(a2)
    }
  }

  /**
   * Remove the top-level alias from an expression when it is redundant.
   */
  private def removeRedundantAlias(e: Expression, excludeList: AttributeSet): Expression = e match {
    // Alias with metadata can not be stripped, or the metadata will be lost.
    // If the alias name is different from attribute name, we can't strip it either, or we
    // may accidentally change the output schema name of the root plan.
    case a @ Alias(attr: Attribute, name)
      if (a.metadata == Metadata.empty || a.metadata == attr.metadata) &&
        name == attr.name &&
        !excludeList.contains(attr) &&
        !excludeList.contains(a) =>
      attr
    case a => a
  }

  /**
   * Remove redundant alias expression from a LogicalPlan and its subtree. A set of excludes is used
   * to prevent the removal of seemingly redundant aliases used to deduplicate the input for a
   * (self) join or to prevent the removal of top-level subquery attributes.
   */
  private def removeRedundantAliases(plan: LogicalPlan, excluded: AttributeSet): LogicalPlan = {
    // Fast path: nothing to strip if the subtree contains no Alias at all.
    if (!plan.containsPattern(ALIAS)) {
      return plan
    }
    plan match {
      // We want to keep the same output attributes for subqueries. This means we cannot remove
      // the aliases that produce these attributes
      case Subquery(child, correlated) =>
        Subquery(removeRedundantAliases(child, excluded ++ child.outputSet), correlated)

      // A join has to be treated differently, because the left and the right side of the join are
      // not allowed to use the same attributes. We use an exclude list to prevent us from creating
      // a situation in which this happens; the rule will only remove an alias if its child
      // attribute is not on the exclude list.
      case Join(left, right, joinType, condition, hint) =>
        // Each side is cleaned with the other side's output added to the exclude set, so that
        // stripping an alias can never make the two sides share an attribute. Note the right
        // side excludes the *new* left output (newLeft), as the left was rewritten first.
        val newLeft = removeRedundantAliases(left, excluded ++ right.outputSet)
        val newRight = removeRedundantAliases(right, excluded ++ newLeft.outputSet)
        val mapping = AttributeMap(
          createAttributeMapping(left, newLeft) ++
          createAttributeMapping(right, newRight))
        val newCondition = condition.map(_.transform {
          case a: Attribute => mapping.getOrElse(a, a)
        })
        Join(newLeft, newRight, joinType, newCondition, hint)

      case _ =>
        // Remove redundant aliases in the subtree(s).
        val currentNextAttrPairs = mutable.Buffer.empty[(Attribute, Attribute)]
        val newNode = plan.mapChildren { child =>
          val newChild = removeRedundantAliases(child, excluded)
          currentNextAttrPairs ++= createAttributeMapping(child, newChild)
          newChild
        }

        // Create the attribute mapping. Note that the currentNextAttrPairs can contain duplicate
        // keys in case of Union (this is caused by the PushProjectionThroughUnion rule); in this
        // case we use the first mapping (which should be provided by the first child).
        val mapping = AttributeMap(currentNextAttrPairs.toSeq)

        // Create an expression cleaning function for nodes that can actually produce redundant
        // aliases, use identity otherwise.
        val clean: Expression => Expression = plan match {
          case _: Project => removeRedundantAlias(_, excluded)
          case _: Aggregate => removeRedundantAlias(_, excluded)
          case _: Window => removeRedundantAlias(_, excluded)
          case _ => identity[Expression]
        }

        // Transform the expressions, remapping any attribute whose producer changed while
        // preserving the name callers see (withName).
        newNode.mapExpressions { expr =>
          clean(expr.transform {
            case a: Attribute => mapping.get(a).map(_.withName(a.name)).getOrElse(a)
          })
        }
    }
  }

  def apply(plan: LogicalPlan): LogicalPlan = removeRedundantAliases(plan, AttributeSet.empty)
}

/**
 * Remove no-op operators from the query plan that do not make any modifications.
*/ object RemoveNoopOperators extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan.transformUpWithPruning( _.containsAnyPattern(PROJECT, WINDOW), ruleId) { // Eliminate no-op Projects case p @ Project(projectList, child) if child.sameOutput(p) => val newChild = child match { case p: Project => p.copy(projectList = restoreOriginalOutputNames(p.projectList, projectList.map(_.name))) case agg: Aggregate => agg.copy(aggregateExpressions = restoreOriginalOutputNames(agg.aggregateExpressions, projectList.map(_.name))) case _ => child } if (newChild.output.zip(projectList).forall { case (a1, a2) => a1.name == a2.name }) { newChild } else { p } // Eliminate no-op Window case w: Window if w.windowExpressions.isEmpty => w.child } } /** * Smplify the children of `Union` or remove no-op `Union` from the query plan that * do not make any modifications to the query. */ object RemoveNoopUnion extends Rule[LogicalPlan] { /** * This only removes the `Project` that has only attributes or aliased attributes * from its child. 
*/ private def removeAliasOnlyProject(plan: LogicalPlan): LogicalPlan = plan match { case p @ Project(projectList, child) => val aliasOnly = projectList.length == child.output.length && projectList.zip(child.output).forall { case (Alias(left: Attribute, _), right) => left.semanticEquals(right) case (left: Attribute, right) => left.semanticEquals(right) case _ => false } if (aliasOnly) { child } else { p } case _ => plan } private def simplifyUnion(u: Union): LogicalPlan = { val uniqueChildren = mutable.ArrayBuffer.empty[LogicalPlan] val uniqueChildrenKey = mutable.HashSet.empty[LogicalPlan] u.children.foreach { c => val key = removeAliasOnlyProject(c).canonicalized if (!uniqueChildrenKey.contains(key)) { uniqueChildren += c uniqueChildrenKey += key } } if (uniqueChildren.size == 1) { u.children.head } else { u.copy(children = uniqueChildren.toSeq) } } def apply(plan: LogicalPlan): LogicalPlan = plan.transformUpWithPruning( _.containsAllPatterns(DISTINCT_LIKE, UNION)) { case d @ Distinct(u: Union) => d.withNewChildren(Seq(simplifyUnion(u))) case d @ Deduplicate(_, u: Union) => d.withNewChildren(Seq(simplifyUnion(u))) } } /** * Pushes down [[LocalLimit]] beneath UNION ALL and joins. */ object LimitPushDown extends Rule[LogicalPlan] { private def stripGlobalLimitIfPresent(plan: LogicalPlan): LogicalPlan = { plan match { case GlobalLimit(_, child) => child case _ => plan } } private def maybePushLocalLimit(limitExp: Expression, plan: LogicalPlan): LogicalPlan = { (limitExp, plan.maxRowsPerPartition) match { case (IntegerLiteral(newLimit), Some(childMaxRows)) if newLimit < childMaxRows => // If the child has a cap on max rows per partition and the cap is larger than // the new limit, put a new LocalLimit there. LocalLimit(limitExp, stripGlobalLimitIfPresent(plan)) case (_, None) => // If the child has no cap, put the new LocalLimit. LocalLimit(limitExp, stripGlobalLimitIfPresent(plan)) case _ => // Otherwise, don't put a new LocalLimit. 
plan } } private def pushLocalLimitThroughJoin(limitExpr: Expression, join: Join): Join = { join.joinType match { case RightOuter => join.copy(right = maybePushLocalLimit(limitExpr, join.right)) case LeftOuter => join.copy(left = maybePushLocalLimit(limitExpr, join.left)) case _: InnerLike if join.condition.isEmpty => join.copy( left = maybePushLocalLimit(limitExpr, join.left), right = maybePushLocalLimit(limitExpr, join.right)) case LeftSemi | LeftAnti if join.condition.isEmpty => join.copy( left = maybePushLocalLimit(limitExpr, join.left), right = maybePushLocalLimit(Literal(1, IntegerType), join.right)) case _ => join } } def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning( _.containsPattern(LIMIT), ruleId) { // Adding extra Limits below UNION ALL for children which are not Limit or do not have Limit // descendants whose maxRow is larger. This heuristic is valid assuming there does not exist any // Limit push-down rule that is unable to infer the value of maxRows. // Note: right now Union means UNION ALL, which does not de-duplicate rows, so it is safe to // pushdown Limit through it. Once we add UNION DISTINCT, however, we will not be able to // pushdown Limit. case LocalLimit(exp, u: Union) => LocalLimit(exp, u.copy(children = u.children.map(maybePushLocalLimit(exp, _)))) // Add extra limits below JOIN: // 1. For LEFT OUTER and RIGHT OUTER JOIN, we push limits to the left and right sides, // respectively. // 2. For INNER and CROSS JOIN, we push limits to both the left and right sides if join // condition is empty. // 3. For LEFT SEMI and LEFT ANTI JOIN, we push limits to the left side if join condition // is empty. // It's not safe to push limits below FULL OUTER JOIN in the general case without a more // invasive rewrite. We also need to ensure that this limit pushdown rule will not eventually // introduce limits on both sides if it is applied multiple times. 
Therefore: // - If one side is already limited, stack another limit on top if the new limit is smaller. // The redundant limit will be collapsed by the CombineLimits rule. case LocalLimit(exp, join: Join) => LocalLimit(exp, pushLocalLimitThroughJoin(exp, join)) // There is a Project between LocalLimit and Join if they do not have the same output. case LocalLimit(exp, project @ Project(_, join: Join)) => LocalLimit(exp, project.copy(child = pushLocalLimitThroughJoin(exp, join))) // Push down limit 1 through Aggregate and turn Aggregate into Project if it is group only. case Limit(le @ IntegerLiteral(1), a: Aggregate) if a.groupOnly => Limit(le, Project(a.aggregateExpressions, LocalLimit(le, a.child))) case Limit(le @ IntegerLiteral(1), p @ Project(_, a: Aggregate)) if a.groupOnly => Limit(le, p.copy(child = Project(a.aggregateExpressions, LocalLimit(le, a.child)))) } } /** * Pushes Project operator to both sides of a Union operator. * Operations that are safe to pushdown are listed as follows. * Union: * Right now, Union means UNION ALL, which does not de-duplicate rows. So, it is * safe to pushdown Filters and Projections through it. Filter pushdown is handled by another * rule PushDownPredicates. Once we add UNION DISTINCT, we will not be able to pushdown Projections. */ object PushProjectionThroughUnion extends Rule[LogicalPlan] with PredicateHelper { /** * Maps Attributes from the left side to the corresponding Attribute on the right side. */ private def buildRewrites(left: LogicalPlan, right: LogicalPlan): AttributeMap[Attribute] = { assert(left.output.size == right.output.size) AttributeMap(left.output.zip(right.output)) } /** * Rewrites an expression so that it can be pushed to the right side of a * Union or Except operator. This method relies on the fact that the output attributes * of a union/intersect/except are always equal to the left child's output. 
   */
  private def pushToRight[A <: Expression](e: A, rewrites: AttributeMap[Attribute]) = {
    val result = e transform {
      case a: Attribute => rewrites(a)
    } match {
      // Make sure exprId is unique in each child of Union.
      case Alias(child, alias) => Alias(child, alias)()
      case other => other
    }

    // We must promise the compiler that we did not discard the names in the case of project
    // expressions. This is safe since the only transformation is from Attribute => Attribute.
    result.asInstanceOf[A]
  }

  /**
   * Pushes the given project list into every child of the union: the first child keeps the list
   * as-is, every other child gets a copy rewritten against its own output attributes.
   */
  def pushProjectionThroughUnion(projectList: Seq[NamedExpression], u: Union): Seq[LogicalPlan] = {
    val newFirstChild = Project(projectList, u.children.head)
    val newOtherChildren = u.children.tail.map { child =>
      val rewrites = buildRewrites(u.children.head, child)
      Project(projectList.map(pushToRight(_, rewrites)), child)
    }
    newFirstChild +: newOtherChildren
  }

  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsAllPatterns(UNION, PROJECT)) {
    // Push down deterministic projection through UNION ALL
    case Project(projectList, u: Union)
        if projectList.forall(_.deterministic) && u.children.nonEmpty =>
      u.copy(children = pushProjectionThroughUnion(projectList, u))
  }
}

/**
 * Attempts to eliminate the reading of unneeded columns from the query plan.
 *
 * Since adding Project before Filter conflicts with PushPredicatesThroughProject, this rule will
 * remove the Project p2 in the following pattern:
 *
 *   p1 @ Project(_, Filter(_, p2 @ Project(_, child))) if p2.outputSet.subsetOf(p2.inputSet)
 *
 * p2 is usually inserted by this rule and useless, p1 could prune the columns anyway.
*/ object ColumnPruning extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = removeProjectBeforeFilter( plan.transformWithPruning(AlwaysProcess.fn, ruleId) { // Prunes the unused columns from project list of Project/Aggregate/Expand case p @ Project(_, p2: Project) if !p2.outputSet.subsetOf(p.references) => p.copy(child = p2.copy(projectList = p2.projectList.filter(p.references.contains))) case p @ Project(_, a: Aggregate) if !a.outputSet.subsetOf(p.references) => p.copy( child = a.copy(aggregateExpressions = a.aggregateExpressions.filter(p.references.contains))) case a @ Project(_, e @ Expand(_, _, grandChild)) if !e.outputSet.subsetOf(a.references) => val newOutput = e.output.filter(a.references.contains(_)) val newProjects = e.projections.map { proj => proj.zip(e.output).filter { case (_, a) => newOutput.contains(a) }.unzip._1 } a.copy(child = Expand(newProjects, newOutput, grandChild)) // Prune and drop AttachDistributedSequence if the produced attribute is not referred. 
case p @ Project(_, a @ AttachDistributedSequence(_, grandChild)) if !p.references.contains(a.sequenceAttr) => p.copy(child = prunedChild(grandChild, p.references)) // Prunes the unused columns from child of `DeserializeToObject` case d @ DeserializeToObject(_, _, child) if !child.outputSet.subsetOf(d.references) => d.copy(child = prunedChild(child, d.references)) // Prunes the unused columns from child of Aggregate/Expand/Generate/ScriptTransformation case a @ Aggregate(_, _, child) if !child.outputSet.subsetOf(a.references) => a.copy(child = prunedChild(child, a.references)) case f @ FlatMapGroupsInPandas(_, _, _, child) if !child.outputSet.subsetOf(f.references) => f.copy(child = prunedChild(child, f.references)) case e @ Expand(_, _, child) if !child.outputSet.subsetOf(e.references) => e.copy(child = prunedChild(child, e.references)) // prune unrequired references case p @ Project(_, g: Generate) if p.references != g.outputSet => val requiredAttrs = p.references -- g.producedAttributes ++ g.generator.references val newChild = prunedChild(g.child, requiredAttrs) val unrequired = g.generator.references -- p.references val unrequiredIndices = newChild.output.zipWithIndex.filter(t => unrequired.contains(t._1)) .map(_._2) p.copy(child = g.copy(child = newChild, unrequiredChildIndex = unrequiredIndices)) // prune unrequired nested fields from `Generate`. case GeneratorNestedColumnAliasing(rewrittenPlan) => rewrittenPlan // Eliminate unneeded attributes from right side of a Left Existence Join. case j @ Join(_, right, LeftExistence(_), _, _) => j.copy(right = prunedChild(right, j.references)) // all the columns will be used to compare, so we can't prune them case p @ Project(_, _: SetOperation) => p case p @ Project(_, _: Distinct) => p // Eliminate unneeded attributes from children of Union. 
case p @ Project(_, u: Union) => if (!u.outputSet.subsetOf(p.references)) { val firstChild = u.children.head val newOutput = prunedChild(firstChild, p.references).output // pruning the columns of all children based on the pruned first child. val newChildren = u.children.map { p => val selected = p.output.zipWithIndex.filter { case (a, i) => newOutput.contains(firstChild.output(i)) }.map(_._1) Project(selected, p) } p.copy(child = u.withNewChildren(newChildren)) } else { p } // Prune unnecessary window expressions case p @ Project(_, w: Window) if !w.windowOutputSet.subsetOf(p.references) => p.copy(child = w.copy( windowExpressions = w.windowExpressions.filter(p.references.contains))) // Prune WithCTE case p @ Project(_, w: WithCTE) => if (!w.outputSet.subsetOf(p.references)) { p.copy(child = w.withNewPlan(prunedChild(w.plan, p.references))) } else { p } // Can't prune the columns on LeafNode case p @ Project(_, _: LeafNode) => p case NestedColumnAliasing(rewrittenPlan) => rewrittenPlan // for all other logical plans that inherits the output from it's children // Project over project is handled by the first case, skip it here. case p @ Project(_, child) if !child.isInstanceOf[Project] => val required = child.references ++ p.references if (!child.inputSet.subsetOf(required)) { val newChildren = child.children.map(c => prunedChild(c, required)) p.copy(child = child.withNewChildren(newChildren)) } else { p } }) /** Applies a projection only when the child is producing unnecessary attributes */ private def prunedChild(c: LogicalPlan, allReferences: AttributeSet) = if (!c.outputSet.subsetOf(allReferences)) { Project(c.output.filter(allReferences.contains), c) } else { c } /** * The Project before Filter is not necessary but conflict with PushPredicatesThroughProject, * so remove it. Since the Projects have been added top-down, we need to remove in bottom-up * order, otherwise lower Projects can be missed. 
   */
  private def removeProjectBeforeFilter(plan: LogicalPlan): LogicalPlan = plan transformUp {
    case p1 @ Project(_, f @ Filter(_, p2 @ Project(_, child)))
        if p2.outputSet.subsetOf(child.outputSet) &&
          // We only remove attribute-only project.
          p2.projectList.forall(_.isInstanceOf[AttributeReference]) =>
      p1.copy(child = f.copy(child = child))
  }
}

/**
 * Combines two [[Project]] operators into one and perform alias substitution,
 * merging the expressions into one single expression for the following cases.
 * 1. When two [[Project]] operators are adjacent.
 * 2. When two [[Project]] operators have LocalLimit/Sample/Repartition operator between them
 *    and the upper project consists of the same number of columns which is equal or aliasing.
 *    `GlobalLimit(LocalLimit)` pattern is also considered.
 */
object CollapseProject extends Rule[LogicalPlan] with AliasHelper {

  def apply(plan: LogicalPlan): LogicalPlan = {
    val alwaysInline = conf.getConf(SQLConf.COLLAPSE_PROJECT_ALWAYS_INLINE)
    plan.transformUpWithPruning(_.containsPattern(PROJECT), ruleId) {
      case p1 @ Project(_, p2: Project)
          if canCollapseExpressions(p1.projectList, p2.projectList, alwaysInline) =>
        p2.copy(projectList = buildCleanedProjectList(p1.projectList, p2.projectList))

      case p @ Project(_, agg: Aggregate)
          if canCollapseExpressions(p.projectList, agg.aggregateExpressions, alwaysInline) &&
            canCollapseAggregate(p, agg) =>
        agg.copy(aggregateExpressions = buildCleanedProjectList(
          p.projectList, agg.aggregateExpressions))

      case Project(l1, g @ GlobalLimit(_, limit @ LocalLimit(_, p2 @ Project(l2, _))))
          if isRenaming(l1, l2) =>
        val newProjectList = buildCleanedProjectList(l1, l2)
        g.copy(child = limit.copy(child = p2.copy(projectList = newProjectList)))

      case Project(l1, limit @ LocalLimit(_, p2 @ Project(l2, _))) if isRenaming(l1, l2) =>
        val newProjectList = buildCleanedProjectList(l1, l2)
        limit.copy(child = p2.copy(projectList = newProjectList))

      case Project(l1, r @ Repartition(_, _, p @ Project(l2, _))) if isRenaming(l1, l2) =>
        r.copy(child = p.copy(projectList = buildCleanedProjectList(l1, p.projectList)))

      case Project(l1, s @ Sample(_, _, _, _, p2 @ Project(l2, _))) if isRenaming(l1, l2) =>
        s.copy(child = p2.copy(projectList = buildCleanedProjectList(l1, p2.projectList)))
    }
  }

  /**
   * Check if we can collapse expressions safely.
   *
   * `consumers` are the upper operator's expressions; `producers` are the lower
   * Project's named expressions that the consumers may reference.
   */
  def canCollapseExpressions(
      consumers: Seq[Expression],
      producers: Seq[NamedExpression],
      alwaysInline: Boolean): Boolean = {
    canCollapseExpressions(consumers, getAliasMap(producers), alwaysInline)
  }

  /**
   * Check if we can collapse expressions safely.
   */
  def canCollapseExpressions(
      consumers: Seq[Expression],
      producerMap: Map[Attribute, Expression],
      alwaysInline: Boolean = false): Boolean = {
    // We can only collapse expressions if all input expressions meet the following criteria:
    // - The input is deterministic.
    // - The input is only consumed once OR the underlying input expression is cheap.
    consumers.flatMap(collectReferences)
      .groupBy(identity)
      .mapValues(_.size)
      .forall {
        case (reference, count) =>
          // Attributes not produced by the lower operator map to themselves (always cheap).
          val producer = producerMap.getOrElse(reference, reference)
          producer.deterministic && (count == 1 || alwaysInline || {
            val relatedConsumers = consumers.filter(_.references.contains(reference))
            val extractOnly = relatedConsumers.forall(isExtractOnly(_, reference))
            shouldInline(producer, extractOnly)
          })
      }
  }

  // True if `expr` touches `ref` only through chains of Alias/ExtractValue
  // (i.e. pure field/element extraction such as struct field access).
  @scala.annotation.tailrec
  private def isExtractOnly(expr: Expression, ref: Attribute): Boolean = expr match {
    case a: Alias => isExtractOnly(a.child, ref)
    case e: ExtractValue => isExtractOnly(e.children.head, ref)
    case a: Attribute => a.semanticEquals(ref)
    case _ => false
  }

  /**
   * A project cannot be collapsed with an aggregate when there are correlated scalar
   * subqueries in the project list, because currently we only allow correlated subqueries
   * in aggregate if they are also part of the grouping expressions. Otherwise the plan
   * after subquery rewrite will not be valid.
   */
  private def canCollapseAggregate(p: Project, a: Aggregate): Boolean = {
    p.projectList.forall(_.collect {
      case s: ScalarSubquery if s.outerAttrs.nonEmpty => s
    }.isEmpty)
  }

  def buildCleanedProjectList(
      upper: Seq[NamedExpression],
      lower: Seq[NamedExpression]): Seq[NamedExpression] = {
    // Substitute lower aliases into the upper list while preserving the upper names.
    val aliases = getAliasMap(lower)
    upper.map(replaceAliasButKeepName(_, aliases))
  }

  /**
   * Check if the given expression is cheap that we can inline it.
   */
  private def shouldInline(e: Expression, extractOnlyConsumer: Boolean): Boolean = e match {
    case _: Attribute | _: OuterReference => true
    case _ if e.foldable => true
    // PythonUDF is handled by the rule ExtractPythonUDFs
    case _: PythonUDF => true
    // Alias and ExtractValue are very cheap.
    case _: Alias | _: ExtractValue => e.children.forall(shouldInline(_, extractOnlyConsumer))
    // These collection create functions are not cheap, but we have optimizer rules that can
    // optimize them out if they are only consumed by ExtractValue, so we need to allow to inline
    // them to avoid perf regression. As an example:
    //   Project(s.a, s.b, Project(create_struct(a, b, c) as s, child))
    // We should collapse these two projects and eventually get Project(a, b, child)
    case _: CreateNamedStruct | _: CreateArray | _: CreateMap | _: UpdateFields =>
      extractOnlyConsumer
    case _ => false
  }

  /**
   * Return all the references of the given expression without deduplication, which is different
   * from `Expression.references`.
   */
  private def collectReferences(e: Expression): Seq[Attribute] = e.collect {
    case a: Attribute => a
  }

  // True when the two lists have the same length and each upper expression either
  // semantically equals the corresponding lower one, or merely re-aliases an attribute
  // of the same name (with empty metadata), i.e. the upper project only renames.
  private def isRenaming(list1: Seq[NamedExpression], list2: Seq[NamedExpression]): Boolean = {
    list1.length == list2.length && list1.zip(list2).forall {
      case (e1, e2) if e1.semanticEquals(e2) => true
      case (Alias(a: Attribute, _), b) if a.metadata == Metadata.empty && a.name == b.name => true
      case _ => false
    }
  }
}

/**
 * Combines adjacent [[RepartitionOperation]] and [[RebalancePartitions]] operators
 */
object CollapseRepartition extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformUpWithPruning(
    _.containsAnyPattern(REPARTITION_OPERATION, REBALANCE_PARTITIONS), ruleId) {
    // Case 1: When a Repartition has a child of Repartition or RepartitionByExpression,
    // 1) When the top node does not enable the shuffle (i.e., coalesce API), but the child
    //    enables the shuffle. Returns the child node if the last numPartitions is bigger;
    //    otherwise, keep unchanged.
    // 2) In the other cases, returns the top node with the child's child
    case r @ Repartition(_, _, child: RepartitionOperation) => (r.shuffle, child.shuffle) match {
      case (false, true) => if (r.numPartitions >= child.numPartitions) child else r
      case _ => r.copy(child = child.child)
    }
    // Case 2: When a RepartitionByExpression has a child of global Sort, Repartition or
    // RepartitionByExpression we can remove the child.
    case r @ RepartitionByExpression(_, child @ (Sort(_, true, _) | _: RepartitionOperation), _) =>
      r.withNewChildren(child.children)
    // Case 3: When a RebalancePartitions has a child of local or global Sort, Repartition or
    // RepartitionByExpression we can remove the child.
    case r @ RebalancePartitions(_, child @ (_: Sort | _: RepartitionOperation)) =>
      r.withNewChildren(child.children)
    // Case 4: When a RebalancePartitions has a child of RebalancePartitions we can remove the
    // child.
    case r @ RebalancePartitions(_, child: RebalancePartitions) =>
      r.withNewChildren(child.children)
  }
}

/**
 * Replace RepartitionByExpression numPartitions to 1 if all partition expressions are foldable
 * and user not specify.
 */
object OptimizeRepartition extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(REPARTITION_OPERATION), ruleId) {
    // All partition keys are constants, so every row lands in the same partition anyway;
    // only applies when the user did not explicitly request a partition count.
    case r @ RepartitionByExpression(partitionExpressions, _, numPartitions)
      if partitionExpressions.nonEmpty && partitionExpressions.forall(_.foldable) &&
        numPartitions.isEmpty =>
      r.copy(optNumPartitions = Some(1))
  }
}

/**
 * Replaces first(col) to nth_value(col, 1) for better performance.
 */
object OptimizeWindowFunctions extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.resolveExpressionsWithPruning(
    _.containsPattern(WINDOW_EXPRESSION), ruleId) {
    // Only safe for a ROW frame starting at UNBOUNDED PRECEDING and ending at either
    // CURRENT ROW or UNBOUNDED FOLLOWING, so the frame always contains the partition's
    // first row and nth_value(col, 1) matches first(col).
    case we @ WindowExpression(AggregateExpression(first: First, _, _, _, _),
        WindowSpecDefinition(_, orderSpec, frameSpecification: SpecifiedWindowFrame))
      if orderSpec.nonEmpty && frameSpecification.frameType == RowFrame &&
        frameSpecification.lower == UnboundedPreceding &&
        (frameSpecification.upper == UnboundedFollowing ||
          frameSpecification.upper == CurrentRow) =>
      we.copy(windowFunction = NthValue(first.child, Literal(1), first.ignoreNulls))
  }
}

/**
 * Collapse Adjacent Window Expression.
 * - If the partition specs and order specs are the same and the window expression are
 *   independent and are of the same window function type, collapse into the parent.
 */
object CollapseWindow extends Rule[LogicalPlan] {
  private def windowsCompatible(w1: Window, w2: Window): Boolean = {
    w1.partitionSpec == w2.partitionSpec &&
      w1.orderSpec == w2.orderSpec &&
      w1.references.intersect(w2.windowOutputSet).isEmpty &&
      w1.windowExpressions.nonEmpty && w2.windowExpressions.nonEmpty &&
      // This assumes Window contains the same type of window expressions. This is ensured
      // by ExtractWindowFunctions.
      WindowFunctionType.functionType(w1.windowExpressions.head) ==
        WindowFunctionType.functionType(w2.windowExpressions.head)
  }

  def apply(plan: LogicalPlan): LogicalPlan = plan.transformUpWithPruning(
    _.containsPattern(WINDOW), ruleId) {
    case w1 @ Window(we1, _, _, w2 @ Window(we2, _, _, grandChild))
        if windowsCompatible(w1, w2) =>
      w1.copy(windowExpressions = we2 ++ we1, child = grandChild)

    // Also collapse across an intervening Project, re-adding its project list on top.
    case w1 @ Window(we1, _, _, Project(pl, w2 @ Window(we2, _, _, grandChild)))
        if windowsCompatible(w1, w2) && w1.references.subsetOf(grandChild.outputSet) =>
      Project(
        pl ++ w1.windowOutputSet,
        w1.copy(windowExpressions = we2 ++ we1, child = grandChild))
  }
}

/**
 * Transpose Adjacent Window Expressions.
 * - If the partition spec of the parent Window expression is compatible with the partition spec
 *   of the child window expression, transpose them.
 */
object TransposeWindow extends Rule[LogicalPlan] {
  // ps1 is compatible when it is strictly shorter than ps2 and semantically matches
  // some permutation of ps2's prefix of the same length.
  private def compatiblePartitions(ps1 : Seq[Expression], ps2: Seq[Expression]): Boolean = {
    ps1.length < ps2.length && ps2.take(ps1.length).permutations.exists(ps1.zip(_).forall {
      case (l, r) => l.semanticEquals(r)
    })
  }

  private def windowsCompatible(w1: Window, w2: Window): Boolean = {
    w1.references.intersect(w2.windowOutputSet).isEmpty &&
      w1.expressions.forall(_.deterministic) &&
      w2.expressions.forall(_.deterministic) &&
      compatiblePartitions(w1.partitionSpec, w2.partitionSpec)
  }

  def apply(plan: LogicalPlan): LogicalPlan = plan.transformUpWithPruning(
    _.containsPattern(WINDOW), ruleId) {
    case w1 @ Window(_, _, _, w2 @ Window(_, _, _, grandChild))
      if windowsCompatible(w1, w2) =>
      Project(w1.output, w2.copy(child = w1.copy(child = grandChild)))

    case w1 @ Window(_, _, _, Project(pl, w2 @ Window(_, _, _, grandChild)))
      if windowsCompatible(w1, w2) && w1.references.subsetOf(grandChild.outputSet) =>
      Project(
        pl ++ w1.windowOutputSet,
        w2.copy(child = w1.copy(child = grandChild)))
  }
}

/**
 * Infers filters from [[Generate]], such that rows that would have been removed
 * by this [[Generate]] can be removed earlier - before joins and in data sources.
 */
object InferFiltersFromGenerate extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformUpWithPruning(
    _.containsPattern(GENERATE)) {
    // Only non-outer generates (third field `false`) can drop rows, so only those
    // justify inferring filters.
    case generate @ Generate(g, _, false, _, _, _) if canInferFilters(g) =>
      assert(g.children.length == 1)
      val input = g.children.head
      // Generating extra predicates here has overheads/risks:
      //   - We may evaluate expensive input expressions multiple times.
      //   - We may infer too many constraints later.
      //   - The input expression may fail to be evaluated under ANSI mode. If we reorder the
      //     predicates and evaluate the input expression first, we may fail the query
      //     unexpectedly.
      // To be safe, here we only generate extra predicates if the input is an attribute.
      // Note that, foldable input is also excluded here, to avoid constant filters like
      // 'size([1, 2, 3]) > 0'. These do not show up in child's constraints and then the
      // idempotence will break.
      if (input.isInstanceOf[Attribute]) {
        // Exclude child's constraints to guarantee idempotency
        val inferredFilters = ExpressionSet(
          Seq(GreaterThan(Size(input), Literal(0)), IsNotNull(input))
        ) -- generate.child.constraints

        if (inferredFilters.nonEmpty) {
          generate.copy(child = Filter(inferredFilters.reduce(And), generate.child))
        } else {
          generate
        }
      } else {
        generate
      }
  }

  // Filters are inferred only for explode-like and inline generators.
  private def canInferFilters(g: Generator): Boolean = g match {
    case _: ExplodeBase => true
    case _: Inline => true
    case _ => false
  }
}

/**
 * Generate a list of additional filters from an operator's existing constraint but remove those
 * that are either already part of the operator's condition or are part of the operator's child
 * constraints. These filters are currently inserted to the existing conditions in the Filter
 * operators and on either side of Join operators.
 *
 * Note: While this optimization is applicable to a lot of types of join, it primarily benefits
 * Inner and LeftSemi joins.
 */
object InferFiltersFromConstraints extends Rule[LogicalPlan]
  with PredicateHelper with ConstraintHelper {

  def apply(plan: LogicalPlan): LogicalPlan = {
    // Constraint propagation can be disabled via config; in that case this rule is a no-op.
    if (conf.constraintPropagationEnabled) {
      inferFilters(plan)
    } else {
      plan
    }
  }

  private def inferFilters(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsAnyPattern(FILTER, JOIN)) {
    case filter @ Filter(condition, child) =>
      // Only add constraints that are neither implied by the child nor already
      // conjuncts of the existing condition.
      val newFilters = filter.constraints --
        (child.constraints ++ splitConjunctivePredicates(condition))
      if (newFilters.nonEmpty) {
        Filter(And(newFilters.reduce(And), condition), child)
      } else {
        filter
      }

    case join @ Join(left, right, joinType, conditionOpt, _) =>
      joinType match {
        // For inner join, we can infer additional filters for both sides. LeftSemi is kind of an
        // inner join, it just drops the right side in the final output.
        case _: InnerLike | LeftSemi =>
          val allConstraints = getAllConstraints(left, right, conditionOpt)
          val newLeft = inferNewFilter(left, allConstraints)
          val newRight = inferNewFilter(right, allConstraints)
          join.copy(left = newLeft, right = newRight)

        // For right outer join, we can only infer additional filters for left side.
        case RightOuter =>
          val allConstraints = getAllConstraints(left, right, conditionOpt)
          val newLeft = inferNewFilter(left, allConstraints)
          join.copy(left = newLeft)

        // For left join, we can only infer additional filters for right side.
        case LeftOuter | LeftAnti =>
          val allConstraints = getAllConstraints(left, right, conditionOpt)
          val newRight = inferNewFilter(right, allConstraints)
          join.copy(right = newRight)

        case _ => join
      }
  }

  // Union of both children's constraints, the join condition's conjuncts, and any
  // constraints derivable from that combined set.
  private def getAllConstraints(
      left: LogicalPlan,
      right: LogicalPlan,
      conditionOpt: Option[Expression]): ExpressionSet = {
    val baseConstraints = left.constraints.union(right.constraints)
      .union(ExpressionSet(conditionOpt.map(splitConjunctivePredicates).getOrElse(Nil)))
    baseConstraints.union(inferAdditionalConstraints(baseConstraints))
  }

  // Wraps `plan` in a Filter of the deterministic predicates that are fully bound to
  // `plan`'s output and not already guaranteed by `plan`'s own constraints.
  private def inferNewFilter(plan: LogicalPlan, constraints: ExpressionSet): LogicalPlan = {
    val newPredicates = constraints
      .union(constructIsNotNullConstraints(constraints, plan.output))
      .filter { c =>
        c.references.nonEmpty && c.references.subsetOf(plan.outputSet) && c.deterministic
      } -- plan.constraints
    if (newPredicates.isEmpty) {
      plan
    } else {
      Filter(newPredicates.reduce(And), plan)
    }
  }
}

/**
 * Combines all adjacent [[Union]] operators into a single [[Union]].
 */
object CombineUnions extends Rule[LogicalPlan] {
  import CollapseProject.{buildCleanedProjectList, canCollapseExpressions}
  import PushProjectionThroughUnion.pushProjectionThroughUnion

  def apply(plan: LogicalPlan): LogicalPlan = plan.transformDownWithPruning(
    _.containsAnyPattern(UNION, DISTINCT_LIKE), ruleId) {
    case u: Union => flattenUnion(u, false)
    case Distinct(u: Union) => Distinct(flattenUnion(u, true))
    // Only handle distinct-like 'Deduplicate', where the keys == output
    case Deduplicate(keys: Seq[Attribute], u: Union) if AttributeSet(keys) == u.outputSet =>
      Deduplicate(keys, flattenUnion(u, true))
  }

  // Iteratively flattens nested unions (and distinct-like wrappers when
  // `flattenDistinct` is set) into a single Union's child list.
  private def flattenUnion(union: Union, flattenDistinct: Boolean): Union = {
    val topByName = union.byName
    val topAllowMissingCol = union.allowMissingCol

    val stack = mutable.Stack[LogicalPlan](union)
    val flattened = mutable.ArrayBuffer.empty[LogicalPlan]
    // Note that we should only flatten the unions with same byName and allowMissingCol.
// Although we do `UnionCoercion` at analysis phase, we manually run `CombineUnions` // in some places like `Dataset.union`. Flattening unions with different resolution // rules (by position and by name) could cause incorrect results. while (stack.nonEmpty) { stack.pop() match { case p1 @ Project(_, p2: Project) if canCollapseExpressions(p1.projectList, p2.projectList, alwaysInline = false) => val newProjectList = buildCleanedProjectList(p1.projectList, p2.projectList) stack.pushAll(Seq(p2.copy(projectList = newProjectList))) case Distinct(Union(children, byName, allowMissingCol)) if flattenDistinct && byName == topByName && allowMissingCol == topAllowMissingCol => stack.pushAll(children.reverse) // Only handle distinct-like 'Deduplicate', where the keys == output case Deduplicate(keys: Seq[Attribute], u: Union) if flattenDistinct && u.byName == topByName && u.allowMissingCol == topAllowMissingCol && AttributeSet(keys) == u.outputSet => stack.pushAll(u.children.reverse) case Union(children, byName, allowMissingCol) if byName == topByName && allowMissingCol == topAllowMissingCol => stack.pushAll(children.reverse) // Push down projection through Union and then push pushed plan to Stack if // there is a Project. 
case Project(projectList, Distinct(u @ Union(children, byName, allowMissingCol))) if projectList.forall(_.deterministic) && children.nonEmpty && flattenDistinct && byName == topByName && allowMissingCol == topAllowMissingCol => stack.pushAll(pushProjectionThroughUnion(projectList, u).reverse) case Project(projectList, Deduplicate(keys: Seq[Attribute], u: Union)) if projectList.forall(_.deterministic) && flattenDistinct && u.byName == topByName && u.allowMissingCol == topAllowMissingCol && AttributeSet(keys) == u.outputSet => stack.pushAll(pushProjectionThroughUnion(projectList, u).reverse) case Project(projectList, u @ Union(children, byName, allowMissingCol)) if projectList.forall(_.deterministic) && children.nonEmpty && byName == topByName && allowMissingCol == topAllowMissingCol => stack.pushAll(pushProjectionThroughUnion(projectList, u).reverse) case child => flattened += child } } union.copy(children = flattened.toSeq) } } /** * Combines two adjacent [[Filter]] operators into one, merging the non-redundant conditions into * one conjunctive predicate. */ object CombineFilters extends Rule[LogicalPlan] with PredicateHelper { def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning( _.containsPattern(FILTER), ruleId)(applyLocally) val applyLocally: PartialFunction[LogicalPlan, LogicalPlan] = { // The query execution/optimization does not guarantee the expressions are evaluated in order. // We only can combine them if and only if both are deterministic. 
case Filter(fc, nf @ Filter(nc, grandChild)) if nc.deterministic => val (combineCandidates, nonDeterministic) = splitConjunctivePredicates(fc).partition(_.deterministic) val mergedFilter = (ExpressionSet(combineCandidates) -- ExpressionSet(splitConjunctivePredicates(nc))).reduceOption(And) match { case Some(ac) => Filter(And(nc, ac), grandChild) case None => nf } nonDeterministic.reduceOption(And).map(c => Filter(c, mergedFilter)).getOrElse(mergedFilter) } } /** * Removes Sort operations if they don't affect the final output ordering. * Note that changes in the final output ordering may affect the file size (SPARK-32318). * This rule handles the following cases: * 1) if the sort order is empty or the sort order does not have any reference * 2) if the Sort operator is a local sort and the child is already sorted * 3) if there is another Sort operator separated by 0...n Project, Filter, Repartition or * RepartitionByExpression, RebalancePartitions (with deterministic expressions) operators * 4) if the Sort operator is within Join separated by 0...n Project, Filter, Repartition or * RepartitionByExpression, RebalancePartitions (with deterministic expressions) operators only * and the Join condition is deterministic * 5) if the Sort operator is within GroupBy separated by 0...n Project, Filter, Repartition or * RepartitionByExpression, RebalancePartitions (with deterministic expressions) operators only * and the aggregate function is order irrelevant */ object EliminateSorts extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning( _.containsPattern(SORT))(applyLocally) private val applyLocally: PartialFunction[LogicalPlan, LogicalPlan] = { case s @ Sort(orders, _, child) if orders.isEmpty || orders.exists(_.child.foldable) => val newOrders = orders.filterNot(_.child.foldable) if (newOrders.isEmpty) { applyLocally.lift(child).getOrElse(child) } else { s.copy(order = newOrders) } case Sort(orders, false, child) if 
SortOrder.orderingSatisfies(child.outputOrdering, orders) => applyLocally.lift(child).getOrElse(child) case s @ Sort(_, _, child) => s.copy(child = recursiveRemoveSort(child)) case j @ Join(originLeft, originRight, _, cond, _) if cond.forall(_.deterministic) => j.copy(left = recursiveRemoveSort(originLeft), right = recursiveRemoveSort(originRight)) case g @ Aggregate(_, aggs, originChild) if isOrderIrrelevantAggs(aggs) => g.copy(child = recursiveRemoveSort(originChild)) } private def recursiveRemoveSort(plan: LogicalPlan): LogicalPlan = { if (!plan.containsPattern(SORT)) { return plan } plan match { case Sort(_, _, child) => recursiveRemoveSort(child) case other if canEliminateSort(other) => other.withNewChildren(other.children.map(recursiveRemoveSort)) case _ => plan } } private def canEliminateSort(plan: LogicalPlan): Boolean = plan match { case p: Project => p.projectList.forall(_.deterministic) case f: Filter => f.condition.deterministic case r: RepartitionByExpression => r.partitionExpressions.forall(_.deterministic) case r: RebalancePartitions => r.partitionExpressions.forall(_.deterministic) case _: Repartition => true case _ => false } private def isOrderIrrelevantAggs(aggs: Seq[NamedExpression]): Boolean = { def isOrderIrrelevantAggFunction(func: AggregateFunction): Boolean = func match { case _: Min | _: Max | _: Count | _: BitAggregate => true // Arithmetic operations for floating-point values are order-sensitive // (they are not associative). 
case _: Sum | _: Average | _: CentralMomentAgg => !Seq(FloatType, DoubleType).exists(_.sameType(func.children.head.dataType)) case _ => false } def checkValidAggregateExpression(expr: Expression): Boolean = expr match { case _: AttributeReference => true case ae: AggregateExpression => isOrderIrrelevantAggFunction(ae.aggregateFunction) case _: UserDefinedExpression => false case e => e.children.forall(checkValidAggregateExpression) } aggs.forall(checkValidAggregateExpression) } } /** * Removes filters that can be evaluated trivially. This can be done through the following ways: * 1) by eliding the filter for cases where it will always evaluate to `true`. * 2) by substituting a dummy empty relation when the filter will always evaluate to `false`. * 3) by eliminating the always-true conditions given the constraints on the child's output. */ object PruneFilters extends Rule[LogicalPlan] with PredicateHelper { def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning( _.containsPattern(FILTER), ruleId) { // If the filter condition always evaluate to true, remove the filter. case Filter(Literal(true, BooleanType), child) => child // If the filter condition always evaluate to null or false, // replace the input with an empty relation. 
case Filter(Literal(null, _), child) => LocalRelation(child.output, data = Seq.empty, isStreaming = plan.isStreaming) case Filter(Literal(false, BooleanType), child) => LocalRelation(child.output, data = Seq.empty, isStreaming = plan.isStreaming) // If any deterministic condition is guaranteed to be true given the constraints on the child's // output, remove the condition case f @ Filter(fc, p: LogicalPlan) => val (prunedPredicates, remainingPredicates) = splitConjunctivePredicates(fc).partition { cond => cond.deterministic && p.constraints.contains(cond) } if (prunedPredicates.isEmpty) { f } else if (remainingPredicates.isEmpty) { p } else { val newCond = remainingPredicates.reduce(And) Filter(newCond, p) } } } /** * The unified version for predicate pushdown of normal operators and joins. * This rule improves performance of predicate pushdown for cascading joins such as: * Filter-Join-Join-Join. Most predicates can be pushed down in a single pass. */ object PushDownPredicates extends Rule[LogicalPlan] with PredicateHelper { def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning( _.containsAnyPattern(FILTER, JOIN)) { CombineFilters.applyLocally .orElse(PushPredicateThroughNonJoin.applyLocally) .orElse(PushPredicateThroughJoin.applyLocally) } } /** * Pushes [[Filter]] operators through many operators iff: * 1) the operator is deterministic * 2) the predicate is deterministic and the operator will not change any of rows. * * This heuristic is valid assuming the expression evaluation cost is minimal. */ object PushPredicateThroughNonJoin extends Rule[LogicalPlan] with PredicateHelper { def apply(plan: LogicalPlan): LogicalPlan = plan transform applyLocally val applyLocally: PartialFunction[LogicalPlan, LogicalPlan] = { // SPARK-13473: We can't push the predicate down when the underlying projection output non- // deterministic field(s). Non-deterministic expressions are essentially stateful. 
This // implies that, for a given input row, the output are determined by the expression's initial // state and all the input rows processed before. In another word, the order of input rows // matters for non-deterministic expressions, while pushing down predicates changes the order. // This also applies to Aggregate. case Filter(condition, project @ Project(fields, grandChild)) if fields.forall(_.deterministic) && canPushThroughCondition(grandChild, condition) => val aliasMap = getAliasMap(project) project.copy(child = Filter(replaceAlias(condition, aliasMap), grandChild)) case filter @ Filter(condition, aggregate: Aggregate) if aggregate.aggregateExpressions.forall(_.deterministic) && aggregate.groupingExpressions.nonEmpty => val aliasMap = getAliasMap(aggregate) // For each filter, expand the alias and check if the filter can be evaluated using // attributes produced by the aggregate operator's child operator. val (candidates, nonDeterministic) = splitConjunctivePredicates(condition).partition(_.deterministic) val (pushDown, rest) = candidates.partition { cond => val replaced = replaceAlias(cond, aliasMap) cond.references.nonEmpty && replaced.references.subsetOf(aggregate.child.outputSet) } val stayUp = rest ++ nonDeterministic if (pushDown.nonEmpty) { val pushDownPredicate = pushDown.reduce(And) val replaced = replaceAlias(pushDownPredicate, aliasMap) val newAggregate = aggregate.copy(child = Filter(replaced, aggregate.child)) // If there is no more filter to stay up, just eliminate the filter. // Otherwise, create "Filter(stayUp) <- Aggregate <- Filter(pushDownPredicate)". if (stayUp.isEmpty) newAggregate else Filter(stayUp.reduce(And), newAggregate) } else { filter } // Push [[Filter]] operators through [[Window]] operators. Parts of the predicate that can be // pushed beneath must satisfy the following conditions: // 1. All the expressions are part of window partitioning key. The expressions can be compound. // 2. Deterministic. // 3. 
Placed before any non-deterministic predicates. case filter @ Filter(condition, w: Window) if w.partitionSpec.forall(_.isInstanceOf[AttributeReference]) => val partitionAttrs = AttributeSet(w.partitionSpec.flatMap(_.references)) val (candidates, nonDeterministic) = splitConjunctivePredicates(condition).partition(_.deterministic) val (pushDown, rest) = candidates.partition { cond => cond.references.subsetOf(partitionAttrs) } val stayUp = rest ++ nonDeterministic if (pushDown.nonEmpty) { val pushDownPredicate = pushDown.reduce(And) val newWindow = w.copy(child = Filter(pushDownPredicate, w.child)) if (stayUp.isEmpty) newWindow else Filter(stayUp.reduce(And), newWindow) } else { filter } case filter @ Filter(condition, union: Union) => // Union could change the rows, so non-deterministic predicate can't be pushed down val (pushDown, stayUp) = splitConjunctivePredicates(condition).partition(_.deterministic) if (pushDown.nonEmpty) { val pushDownCond = pushDown.reduceLeft(And) val output = union.output val newGrandChildren = union.children.map { grandchild => val newCond = pushDownCond transform { case e if output.exists(_.semanticEquals(e)) => grandchild.output(output.indexWhere(_.semanticEquals(e))) } assert(newCond.references.subsetOf(grandchild.outputSet)) Filter(newCond, grandchild) } val newUnion = union.withNewChildren(newGrandChildren) if (stayUp.nonEmpty) { Filter(stayUp.reduceLeft(And), newUnion) } else { newUnion } } else { filter } case filter @ Filter(condition, watermark: EventTimeWatermark) => val (pushDown, stayUp) = splitConjunctivePredicates(condition).partition { p => p.deterministic && !p.references.contains(watermark.eventTime) } if (pushDown.nonEmpty) { val pushDownPredicate = pushDown.reduceLeft(And) val newWatermark = watermark.copy(child = Filter(pushDownPredicate, watermark.child)) // If there is no more filter to stay up, just eliminate the filter. // Otherwise, create "Filter(stayUp) <- watermark <- Filter(pushDownPredicate)". 
if (stayUp.isEmpty) newWatermark else Filter(stayUp.reduceLeft(And), newWatermark) } else { filter } case filter @ Filter(_, u: UnaryNode) if canPushThrough(u) && u.expressions.forall(_.deterministic) => pushDownPredicate(filter, u.child) { predicate => u.withNewChildren(Seq(Filter(predicate, u.child))) } } def canPushThrough(p: UnaryNode): Boolean = p match { // Note that some operators (e.g. project, aggregate, union) are being handled separately // (earlier in this rule). case _: AppendColumns => true case _: Distinct => true case _: Generate => true case _: Pivot => true case _: RepartitionByExpression => true case _: Repartition => true case _: RebalancePartitions => true case _: ScriptTransformation => true case _: Sort => true case _: BatchEvalPython => true case _: ArrowEvalPython => true case _: Expand => true case _ => false } private def pushDownPredicate( filter: Filter, grandchild: LogicalPlan)(insertFilter: Expression => LogicalPlan): LogicalPlan = { // Only push down the predicates that is deterministic and all the referenced attributes // come from grandchild. // TODO: non-deterministic predicates could be pushed through some operators that do not change // the rows. val (candidates, nonDeterministic) = splitConjunctivePredicates(filter.condition).partition(_.deterministic) val (pushDown, rest) = candidates.partition { cond => cond.references.subsetOf(grandchild.outputSet) } val stayUp = rest ++ nonDeterministic if (pushDown.nonEmpty) { val newChild = insertFilter(pushDown.reduceLeft(And)) if (stayUp.nonEmpty) { Filter(stayUp.reduceLeft(And), newChild) } else { newChild } } else { filter } } /** * Check if we can safely push a filter through a projection, by making sure that predicate * subqueries in the condition do not contain the same attributes as the plan they are moved * into. This can happen when the plan and predicate subquery have the same source. 
*/ private def canPushThroughCondition(plan: LogicalPlan, condition: Expression): Boolean = { val attributes = plan.outputSet val matched = condition.find { case s: SubqueryExpression => s.plan.outputSet.intersect(attributes).nonEmpty case _ => false } matched.isEmpty } } /** * Pushes down [[Filter]] operators where the `condition` can be * evaluated using only the attributes of the left or right side of a join. Other * [[Filter]] conditions are moved into the `condition` of the [[Join]]. * * And also pushes down the join filter, where the `condition` can be evaluated using only the * attributes of the left or right side of sub query when applicable. * * Check https://cwiki.apache.org/confluence/display/Hive/OuterJoinBehavior for more details */ object PushPredicateThroughJoin extends Rule[LogicalPlan] with PredicateHelper { /** * Splits join condition expressions or filter predicates (on a given join's output) into three * categories based on the attributes required to evaluate them. Note that we explicitly exclude * non-deterministic (i.e., stateful) condition expressions in canEvaluateInLeft or * canEvaluateInRight to prevent pushing these predicates on either side of the join. 
* * @return (canEvaluateInLeft, canEvaluateInRight, haveToEvaluateInBoth) */ private def split(condition: Seq[Expression], left: LogicalPlan, right: LogicalPlan) = { val (pushDownCandidates, nonDeterministic) = condition.partition(_.deterministic) val (leftEvaluateCondition, rest) = pushDownCandidates.partition(_.references.subsetOf(left.outputSet)) val (rightEvaluateCondition, commonCondition) = rest.partition(expr => expr.references.subsetOf(right.outputSet)) (leftEvaluateCondition, rightEvaluateCondition, commonCondition ++ nonDeterministic) } private def canPushThrough(joinType: JoinType): Boolean = joinType match { case _: InnerLike | LeftSemi | RightOuter | LeftOuter | LeftAnti | ExistenceJoin(_) => true case _ => false } def apply(plan: LogicalPlan): LogicalPlan = plan transform applyLocally val applyLocally: PartialFunction[LogicalPlan, LogicalPlan] = { // push the where condition down into join filter case f @ Filter(filterCondition, Join(left, right, joinType, joinCondition, hint)) if canPushThrough(joinType) => val (leftFilterConditions, rightFilterConditions, commonFilterCondition) = split(splitConjunctivePredicates(filterCondition), left, right) joinType match { case _: InnerLike => // push down the single side `where` condition into respective sides val newLeft = leftFilterConditions. reduceLeftOption(And).map(Filter(_, left)).getOrElse(left) val newRight = rightFilterConditions. reduceLeftOption(And).map(Filter(_, right)).getOrElse(right) val (newJoinConditions, others) = commonFilterCondition.partition(canEvaluateWithinJoin) val newJoinCond = (newJoinConditions ++ joinCondition).reduceLeftOption(And) val join = Join(newLeft, newRight, joinType, newJoinCond, hint) if (others.nonEmpty) { Filter(others.reduceLeft(And), join) } else { join } case RightOuter => // push down the right side only `where` condition val newLeft = left val newRight = rightFilterConditions. 
reduceLeftOption(And).map(Filter(_, right)).getOrElse(right) val newJoinCond = joinCondition val newJoin = Join(newLeft, newRight, RightOuter, newJoinCond, hint) (leftFilterConditions ++ commonFilterCondition). reduceLeftOption(And).map(Filter(_, newJoin)).getOrElse(newJoin) case LeftOuter | LeftExistence(_) => // push down the left side only `where` condition val newLeft = leftFilterConditions. reduceLeftOption(And).map(Filter(_, left)).getOrElse(left) val newRight = right val newJoinCond = joinCondition val newJoin = Join(newLeft, newRight, joinType, newJoinCond, hint) (rightFilterConditions ++ commonFilterCondition). reduceLeftOption(And).map(Filter(_, newJoin)).getOrElse(newJoin) case other => throw new IllegalStateException(s"Unexpected join type: $other") } // push down the join filter into sub query scanning if applicable case j @ Join(left, right, joinType, joinCondition, hint) if canPushThrough(joinType) => val (leftJoinConditions, rightJoinConditions, commonJoinCondition) = split(joinCondition.map(splitConjunctivePredicates).getOrElse(Nil), left, right) joinType match { case _: InnerLike | LeftSemi => // push down the single side only join filter for both sides sub queries val newLeft = leftJoinConditions. reduceLeftOption(And).map(Filter(_, left)).getOrElse(left) val newRight = rightJoinConditions. reduceLeftOption(And).map(Filter(_, right)).getOrElse(right) val newJoinCond = commonJoinCondition.reduceLeftOption(And) Join(newLeft, newRight, joinType, newJoinCond, hint) case RightOuter => // push down the left side only join filter for left side sub query val newLeft = leftJoinConditions. 
reduceLeftOption(And).map(Filter(_, left)).getOrElse(left) val newRight = right val newJoinCond = (rightJoinConditions ++ commonJoinCondition).reduceLeftOption(And) Join(newLeft, newRight, RightOuter, newJoinCond, hint) case LeftOuter | LeftAnti | ExistenceJoin(_) => // push down the right side only join filter for right sub query val newLeft = left val newRight = rightJoinConditions. reduceLeftOption(And).map(Filter(_, right)).getOrElse(right) val newJoinCond = (leftJoinConditions ++ commonJoinCondition).reduceLeftOption(And) Join(newLeft, newRight, joinType, newJoinCond, hint) case other => throw new IllegalStateException(s"Unexpected join type: $other") } } } /** * This rule is applied by both normal and AQE Optimizer, and optimizes Limit operators by: * 1. Eliminate [[Limit]]/[[GlobalLimit]] operators if it's child max row <= limit. * 2. Combines two adjacent [[Limit]] operators into one, merging the * expressions into one single expression. */ object EliminateLimits extends Rule[LogicalPlan] { private def canEliminate(limitExpr: Expression, child: LogicalPlan): Boolean = { limitExpr.foldable && child.maxRows.exists { _ <= limitExpr.eval().asInstanceOf[Int] } } def apply(plan: LogicalPlan): LogicalPlan = plan.transformDownWithPruning( _.containsPattern(LIMIT), ruleId) { case Limit(l, child) if canEliminate(l, child) => child case GlobalLimit(l, child) if canEliminate(l, child) => child case GlobalLimit(le, GlobalLimit(ne, grandChild)) => GlobalLimit(Literal(Least(Seq(ne, le)).eval().asInstanceOf[Int]), grandChild) case LocalLimit(le, LocalLimit(ne, grandChild)) => LocalLimit(Literal(Least(Seq(ne, le)).eval().asInstanceOf[Int]), grandChild) case Limit(le, Limit(ne, grandChild)) => Limit(Literal(Least(Seq(ne, le)).eval().asInstanceOf[Int]), grandChild) } } /** * Check if there any cartesian products between joins of any type in the optimized plan tree. * Throw an error if a cartesian product is found without an explicit cross join specified. 
 * This rule is effectively disabled if the CROSS_JOINS_ENABLED flag is true.
 *
 * This rule must be run AFTER the ReorderJoin rule since the join conditions for each join must be
 * collected before checking if it is a cartesian product. If you have
 * SELECT * from R, S where R.r = S.s,
 * the join between R and S is not a cartesian product and therefore should be allowed.
 * The predicate R.r = S.s is not recognized as a join condition until the ReorderJoin rule.
 *
 * This rule must be run AFTER the batch "LocalRelation", since a join with empty relation should
 * not be a cartesian product.
 */
object CheckCartesianProducts extends Rule[LogicalPlan] with PredicateHelper {
  /**
   * Check if a join is a cartesian product. Returns true if
   * there are no join conditions involving references from both left and right.
   */
  def isCartesianProduct(join: Join): Boolean = {
    val conditions = join.condition.map(splitConjunctivePredicates).getOrElse(Nil)

    conditions match {
      // A constant-false (or null) join condition produces no rows, so it is deliberately
      // not treated as a cartesian product here.
      case Seq(Literal.FalseLiteral) | Seq(Literal(null, BooleanType)) => false
      // Cartesian iff no single conjunct references attributes from BOTH sides.
      case _ => !conditions.map(_.references).exists(refs =>
        refs.exists(join.left.outputSet.contains) && refs.exists(join.right.outputSet.contains))
    }
  }

  def apply(plan: LogicalPlan): LogicalPlan =
    if (conf.crossJoinEnabled) {
      plan
    } else plan.transformWithPruning(_.containsAnyPattern(INNER_LIKE_JOIN, OUTER_JOIN)) {
      case j @ Join(left, right, Inner | LeftOuter | RightOuter | FullOuter, _, _)
        if isCartesianProduct(j) =>
          throw QueryCompilationErrors.joinConditionMissingOrTrivialError(j, left, right)
    }
}

/**
 * Speeds up aggregates on fixed-precision decimals by executing them on unscaled Long values.
 *
 * This uses the same rules for increasing the precision and scale of the output as
 * [[org.apache.spark.sql.catalyst.analysis.DecimalPrecision]].
 */
object DecimalAggregates extends Rule[LogicalPlan] {
  import Decimal.MAX_LONG_DIGITS

  /** Maximum number of decimal digits representable precisely in a Double */
  private val MAX_DOUBLE_DIGITS = 15

  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsAnyPattern(SUM, AVERAGE), ruleId) {
    case q: LogicalPlan => q.transformExpressionsDownWithPruning(
      _.containsAnyPattern(SUM, AVERAGE), ruleId) {
      // Window aggregates: rewrite the inner aggregate function but keep the WindowExpression.
      case we @ WindowExpression(ae @ AggregateExpression(af, _, _, _, _), _) => af match {
        // SUM over decimals: sum the unscaled Long values; `prec + 10` extra digits of headroom
        // must still fit into a Long (MAX_LONG_DIGITS) for this to be safe.
        case Sum(e @ DecimalType.Expression(prec, scale), _) if prec + 10 <= MAX_LONG_DIGITS =>
          MakeDecimal(we.copy(windowFunction = ae.copy(aggregateFunction = Sum(UnscaledValue(e)))),
            prec + 10, scale)

        // AVG over decimals: average the unscaled values as Double, then rescale by 10^scale
        // and cast back to a decimal with 4 extra digits of precision and scale.
        case Average(e @ DecimalType.Expression(prec, scale), _) if prec + 4 <= MAX_DOUBLE_DIGITS =>
          val newAggExpr =
            we.copy(windowFunction = ae.copy(aggregateFunction = Average(UnscaledValue(e))))
          Cast(
            Divide(newAggExpr, Literal.create(math.pow(10.0, scale), DoubleType)),
            DecimalType(prec + 4, scale + 4), Option(conf.sessionLocalTimeZone))

        case _ => we
      }
      // Plain (non-window) aggregates: same rewrites as above.
      case ae @ AggregateExpression(af, _, _, _, _) => af match {
        case Sum(e @ DecimalType.Expression(prec, scale), _) if prec + 10 <= MAX_LONG_DIGITS =>
          MakeDecimal(ae.copy(aggregateFunction = Sum(UnscaledValue(e))), prec + 10, scale)

        case Average(e @ DecimalType.Expression(prec, scale), _) if prec + 4 <= MAX_DOUBLE_DIGITS =>
          val newAggExpr = ae.copy(aggregateFunction = Average(UnscaledValue(e)))
          Cast(
            Divide(newAggExpr, Literal.create(math.pow(10.0, scale), DoubleType)),
            DecimalType(prec + 4, scale + 4), Option(conf.sessionLocalTimeZone))

        case _ => ae
      }
    }
  }
}

/**
 * Converts local operations (i.e. ones that don't require data exchange) on `LocalRelation` to
 * another `LocalRelation`.
 */
object ConvertToLocalRelation extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(LOCAL_RELATION), ruleId) {
    // Evaluate a Project over in-memory rows eagerly at optimization time.
    case Project(projectList, LocalRelation(output, data, isStreaming))
        if !projectList.exists(hasUnevaluableExpr) =>
      val projection = new InterpretedMutableProjection(projectList, output)
      // Initialize with a dummy partition index (0) before local evaluation.
      projection.initialize(0)
      // The projection reuses a mutable row, so each result must be copied.
      LocalRelation(projectList.map(_.toAttribute), data.map(projection(_).copy()), isStreaming)

    case Limit(IntegerLiteral(limit), LocalRelation(output, data, isStreaming)) =>
      LocalRelation(output, data.take(limit), isStreaming)

    // Evaluate a Filter over in-memory rows eagerly at optimization time.
    case Filter(condition, LocalRelation(output, data, isStreaming))
        if !hasUnevaluableExpr(condition) =>
      val predicate = Predicate.create(condition, output)
      predicate.initialize(0)
      LocalRelation(output, data.filter(row => predicate.eval(row)), isStreaming)
  }

  // An expression cannot be evaluated locally if it contains an Unevaluable node other than a
  // plain attribute reference (attributes are bound to the relation's output instead).
  private def hasUnevaluableExpr(expr: Expression): Boolean = {
    expr.find(e => e.isInstanceOf[Unevaluable] && !e.isInstanceOf[AttributeReference]).isDefined
  }
}

/**
 * Replaces logical [[Distinct]] operator with an [[Aggregate]] operator.
 * {{{
 *   SELECT DISTINCT f1, f2 FROM t  ==>  SELECT f1, f2 FROM t GROUP BY f1, f2
 * }}}
 */
object ReplaceDistinctWithAggregate extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(DISTINCT_LIKE), ruleId) {
    // Group by all output columns and project them unchanged.
    case Distinct(child) => Aggregate(child.output, child.output, child)
  }
}

/**
 * Replaces logical [[Deduplicate]] operator with an [[Aggregate]] operator.
 */
object ReplaceDeduplicateWithAggregate extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan transformUpWithNewOutput {
    case d @ Deduplicate(keys, child) if !child.isStreaming =>
      val keyExprIds = keys.map(_.exprId)
      // Key columns are grouped on directly; every non-key column is reduced with FIRST so one
      // arbitrary representative row per key survives.
      val aggCols = child.output.map { attr =>
        if (keyExprIds.contains(attr.exprId)) {
          attr
        } else {
          Alias(new First(attr).toAggregateExpression(), attr.name)()
        }
      }
      // SPARK-22951: Physical aggregate operators distinguishes global aggregation and grouping
      // aggregations by checking the number of grouping keys. The key difference here is that a
      // global aggregation always returns at least one row even if there are no input rows. Here
      // we append a literal when the grouping key list is empty so that the result aggregate
      // operator is properly treated as a grouping aggregation.
      val nonemptyKeys = if (keys.isEmpty) Literal(1) :: Nil else keys
      val newAgg = Aggregate(nonemptyKeys, aggCols, child)
      // Report the old-output -> new-output attribute mapping so references above are rewritten.
      val attrMapping = d.output.zip(newAgg.output)
      newAgg -> attrMapping
  }
}

/**
 * Replaces logical [[Intersect]] operator with a left-semi [[Join]] operator.
 * {{{
 *   SELECT a1, a2 FROM Tab1 INTERSECT SELECT b1, b2 FROM Tab2
 *   ==>  SELECT DISTINCT a1, a2 FROM Tab1 LEFT SEMI JOIN Tab2 ON a1<=>b1 AND a2<=>b2
 * }}}
 *
 * Note:
 * 1. This rule is only applicable to INTERSECT DISTINCT. Do not use it for INTERSECT ALL.
 * 2. This rule has to be done after de-duplicating the attributes; otherwise, the generated
 *    join conditions will be incorrect.
 */
object ReplaceIntersectWithSemiJoin extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(INTERSECT), ruleId) {
    // `false` is the isAll flag: only INTERSECT DISTINCT is rewritten here.
    case Intersect(left, right, false) =>
      assert(left.output.size == right.output.size)
      // EqualNullSafe (<=>) so NULL keys compare equal, matching set-intersection semantics.
      val joinCond = left.output.zip(right.output).map { case (l, r) => EqualNullSafe(l, r) }
      Distinct(Join(left, right, LeftSemi, joinCond.reduceLeftOption(And), JoinHint.NONE))
  }
}

/**
 * Replaces logical [[Except]] operator with a left-anti [[Join]] operator.
 * {{{
 *   SELECT a1, a2 FROM Tab1 EXCEPT SELECT b1, b2 FROM Tab2
 *   ==>  SELECT DISTINCT a1, a2 FROM Tab1 LEFT ANTI JOIN Tab2 ON a1<=>b1 AND a2<=>b2
 * }}}
 *
 * Note:
 * 1. This rule is only applicable to EXCEPT DISTINCT. Do not use it for EXCEPT ALL.
 * 2. This rule has to be done after de-duplicating the attributes; otherwise, the generated
 *    join conditions will be incorrect.
 */
object ReplaceExceptWithAntiJoin extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(EXCEPT), ruleId) {
    // `false` is the isAll flag: only EXCEPT DISTINCT is rewritten here.
    case Except(left, right, false) =>
      assert(left.output.size == right.output.size)
      // EqualNullSafe (<=>) so NULL keys compare equal, matching set-difference semantics.
      val joinCond = left.output.zip(right.output).map { case (l, r) => EqualNullSafe(l, r) }
      Distinct(Join(left, right, LeftAnti, joinCond.reduceLeftOption(And), JoinHint.NONE))
  }
}

/**
 * Replaces logical [[Except]] operator using a combination of Union, Aggregate
 * and Generate operator.
 *
 * Input Query :
 * {{{
 *    SELECT c1 FROM ut1 EXCEPT ALL SELECT c1 FROM ut2
 * }}}
 *
 * Rewritten Query:
 * {{{
 *   SELECT c1
 *   FROM (
 *     SELECT replicate_rows(sum_val, c1)
 *       FROM (
 *         SELECT c1, sum_val
 *           FROM (
 *             SELECT c1, sum(vcol) AS sum_val
 *               FROM (
 *                 SELECT 1L as vcol, c1 FROM ut1
 *                 UNION ALL
 *                 SELECT -1L as vcol, c1 FROM ut2
 *              ) AS union_all
 *            GROUP BY union_all.c1
 *          )
 *        WHERE sum_val > 0
 *       )
 *   )
 * }}}
 */
object RewriteExceptAll extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(EXCEPT), ruleId) {
    // `true` is the isAll flag: only EXCEPT ALL is rewritten here.
    case Except(left, right, true) =>
      assert(left.output.size == right.output.size)

      // Tag left rows with +1 and right rows with -1; per-group sum of the tag is then the
      // multiplicity of that row in the EXCEPT ALL result.
      val newColumnLeft = Alias(Literal(1L), "vcol")()
      val newColumnRight = Alias(Literal(-1L), "vcol")()
      val modifiedLeftPlan = Project(Seq(newColumnLeft) ++ left.output, left)
      val modifiedRightPlan = Project(Seq(newColumnRight) ++ right.output, right)
      val unionPlan = Union(modifiedLeftPlan, modifiedRightPlan)
      val aggSumCol =
        Alias(AggregateExpression(Sum(unionPlan.output.head.toAttribute), Complete, false), "sum")()
      val aggOutputColumns = left.output ++ Seq(aggSumCol)
      val aggregatePlan = Aggregate(left.output, aggOutputColumns, unionPlan)
      // Keep only rows with positive multiplicity, then replicate each row that many times.
      val filteredAggPlan = Filter(GreaterThan(aggSumCol.toAttribute, Literal(0L)), aggregatePlan)
      val genRowPlan = Generate(
        ReplicateRows(Seq(aggSumCol.toAttribute) ++ left.output),
        unrequiredChildIndex = Nil,
        outer = false,
        qualifier = None,
        left.output,
        filteredAggPlan
      )
      Project(left.output, genRowPlan)
  }
}

/**
 * Replaces logical [[Intersect]] operator using a combination of Union, Aggregate
 * and Generate operator.
 *
 * Input Query :
 * {{{
 *    SELECT c1 FROM ut1 INTERSECT ALL SELECT c1 FROM ut2
 * }}}
 *
 * Rewritten Query:
 * {{{
 *   SELECT c1
 *   FROM (
 *        SELECT replicate_rows(min_count, c1)
 *        FROM (
 *             SELECT c1, If (vcol1_cnt > vcol2_cnt, vcol2_cnt, vcol1_cnt) AS min_count
 *             FROM (
 *                  SELECT   c1, count(vcol1) as vcol1_cnt, count(vcol2) as vcol2_cnt
 *                  FROM (
 *                       SELECT true as vcol1, null as vcol2, c1 FROM ut1
 *                       UNION ALL
 *                       SELECT null as vcol1, true as vcol2, c1 FROM ut2
 *                       ) AS union_all
 *                  GROUP BY c1
 *                  HAVING vcol1_cnt >= 1 AND vcol2_cnt >= 1
 *                  )
 *             )
 *        )
 * }}}
 */
object RewriteIntersectAll extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(INTERSECT), ruleId) {
    // `true` is the isAll flag: only INTERSECT ALL is rewritten here.
    case Intersect(left, right, true) =>
      assert(left.output.size == right.output.size)

      // Virtual marker columns: vcol1 is non-null only for left rows, vcol2 only for right
      // rows, so count(vcol1)/count(vcol2) give the per-side multiplicities of each row.
      val trueVcol1 = Alias(Literal(true), "vcol1")()
      val nullVcol1 = Alias(Literal(null, BooleanType), "vcol1")()

      val trueVcol2 = Alias(Literal(true), "vcol2")()
      val nullVcol2 = Alias(Literal(null, BooleanType), "vcol2")()

      // Add a projection on the top of left and right plans to project out
      // the additional virtual columns.
      val leftPlanWithAddedVirtualCols = Project(Seq(trueVcol1, nullVcol2) ++ left.output, left)
      val rightPlanWithAddedVirtualCols = Project(Seq(nullVcol1, trueVcol2) ++ right.output, right)

      val unionPlan = Union(leftPlanWithAddedVirtualCols, rightPlanWithAddedVirtualCols)

      // Expressions to compute count and minimum of both the counts.
      val vCol1AggrExpr =
        Alias(AggregateExpression(Count(unionPlan.output(0)), Complete, false), "vcol1_count")()
      val vCol2AggrExpr =
        Alias(AggregateExpression(Count(unionPlan.output(1)), Complete, false), "vcol2_count")()
      val ifExpression = Alias(If(
        GreaterThan(vCol1AggrExpr.toAttribute, vCol2AggrExpr.toAttribute),
        vCol2AggrExpr.toAttribute,
        vCol1AggrExpr.toAttribute
      ), "min_count")()

      val aggregatePlan = Aggregate(left.output,
        Seq(vCol1AggrExpr, vCol2AggrExpr) ++ left.output, unionPlan)
      // Keep only rows present on both sides (both counts >= 1).
      val filterPlan = Filter(And(GreaterThanOrEqual(vCol1AggrExpr.toAttribute, Literal(1L)),
        GreaterThanOrEqual(vCol2AggrExpr.toAttribute, Literal(1L))), aggregatePlan)
      val projectMinPlan = Project(left.output ++ Seq(ifExpression), filterPlan)

      // Apply the replicator to replicate rows based on min_count
      val genRowPlan = Generate(
        ReplicateRows(Seq(ifExpression.toAttribute) ++ left.output),
        unrequiredChildIndex = Nil,
        outer = false,
        qualifier = None,
        left.output,
        projectMinPlan
      )
      Project(left.output, genRowPlan)
  }
}

/**
 * Removes literals from group expressions in [[Aggregate]], as they have no effect to the result
 * but only makes the grouping key bigger.
 */
object RemoveLiteralFromGroupExpressions extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
    _.containsPattern(AGGREGATE), ruleId) {
    case a @ Aggregate(grouping, _, _) if grouping.nonEmpty =>
      val newGrouping = grouping.filter(!_.foldable)
      if (newGrouping.nonEmpty) {
        a.copy(groupingExpressions = newGrouping)
      } else {
        // All grouping expressions are literals. We should not drop them all, because this can
        // change the return semantics when the input of the Aggregate is empty (SPARK-17114). We
        // instead replace this by single, easy to hash/sort, literal expression.
        a.copy(groupingExpressions = Seq(Literal(0, IntegerType)))
      }
  }
}

/**
 * Prunes unnecessary fields from a [[Generate]] if it is under a project which does not refer
 * any generated attributes, .e.g., count-like aggregation on an exploded array.
 */
object GenerateOptimization extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan.transformDownWithPruning(
    _.containsAllPatterns(PROJECT, GENERATE), ruleId) {
    // p.references.isEmpty means the project uses none of the generated columns, so only the
    // row count of the explode matters and we can explode the cheapest struct field instead.
    case p @ Project(_, g: Generate) if p.references.isEmpty
        && g.generator.isInstanceOf[ExplodeBase] =>
      g.generator.children.head.dataType match {
        case ArrayType(StructType(fields), containsNull) if fields.length > 1 =>
          // Try to pick up smallest field
          val sortedFields = fields.zipWithIndex.sortBy(f => f._1.dataType.defaultSize)
          val extractor = GetArrayStructFields(g.generator.children.head, sortedFields(0)._1,
            sortedFields(0)._2, fields.length, containsNull || sortedFields(0)._1.nullable)

          val rewrittenG = g.transformExpressions {
            case e: ExplodeBase =>
              e.withNewChildren(Seq(extractor))
          }
          // As we change the child of the generator, its output data type must be updated.
          val updatedGeneratorOutput = rewrittenG.generatorOutput
            .zip(rewrittenG.generator.elementSchema.toAttributes)
            .map { case (oldAttr, newAttr) =>
              // Keep the original expression ids and names so references above remain valid.
              newAttr.withExprId(oldAttr.exprId).withName(oldAttr.name) }
          assert(updatedGeneratorOutput.length == rewrittenG.generatorOutput.length,
            "Updated generator output must have the same length " +
              "with original generator output.")
          val updatedGenerate = rewrittenG.copy(generatorOutput = updatedGeneratorOutput)

          p.withNewChildren(Seq(updatedGenerate))
        case _ => p
      }
  }
}

/**
 * Removes repetition from group expressions in [[Aggregate]], as they have no effect to the result
 * but only makes the grouping key bigger.
*/ object RemoveRepetitionFromGroupExpressions extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning( _.containsPattern(AGGREGATE), ruleId) { case a @ Aggregate(grouping, _, _) if grouping.size > 1 => val newGrouping = ExpressionSet(grouping).toSeq if (newGrouping.size == grouping.size) { a } else { a.copy(groupingExpressions = newGrouping) } } } /** * Replaces GlobalLimit 0 and LocalLimit 0 nodes (subtree) with empty Local Relation, as they don't * return any rows. */ object OptimizeLimitZero extends Rule[LogicalPlan] { // returns empty Local Relation corresponding to given plan private def empty(plan: LogicalPlan) = LocalRelation(plan.output, data = Seq.empty, isStreaming = plan.isStreaming) def apply(plan: LogicalPlan): LogicalPlan = plan.transformUpWithPruning( _.containsAllPatterns(LIMIT, LITERAL)) { // Nodes below GlobalLimit or LocalLimit can be pruned if the limit value is zero (0). // Any subtree in the logical plan that has GlobalLimit 0 or LocalLimit 0 as its root is // semantically equivalent to an empty relation. // // In such cases, the effects of Limit 0 can be propagated through the Logical Plan by replacing // the (Global/Local) Limit subtree with an empty LocalRelation, thereby pruning the subtree // below and triggering other optimization rules of PropagateEmptyRelation to propagate the // changes up the Logical Plan. // // Replace Global Limit 0 nodes with empty Local Relation case gl @ GlobalLimit(IntegerLiteral(0), _) => empty(gl) // Note: For all SQL queries, if a LocalLimit 0 node exists in the Logical Plan, then a // GlobalLimit 0 node would also exist. Thus, the above case would be sufficient to handle // almost all cases. However, if a user explicitly creates a Logical Plan with LocalLimit 0 node // then the following rule will handle that case as well. // // Replace Local Limit 0 nodes with empty Local Relation case ll @ LocalLimit(IntegerLiteral(0), _) => empty(ll) } }
vinodkc/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
Scala
apache-2.0
100,019
/*
 * #%L
 * Core runtime for OOXOO
 * %%
 * Copyright (C) 2006 - 2017 Open Design Flow
 * %%
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 * #L%
 */
package com.idyria.osi.ooxoo.core.buffers.datatypes

import com.idyria.osi.ooxoo.core.buffers.structural.AbstractDataBuffer
import java.net.URI
import scala.language.implicitConversions
import org.odfi.tea.env.EnvStrToStr

/**
 * Data buffer holding a [[java.net.URI]] value and converting it to/from its
 * string form for (de)serialisation within the OOXOO buffer framework.
 */
class URIBuffer extends AbstractDataBuffer[URI] {

  // Constructors
  //-------------------

  /** Builds the buffer by parsing `str` as a URI (delegates to [[dataFromString]]). */
  def this(str: String) = { this(); dataFromString(str) }

  /** Builds the buffer around an already-constructed URI. */
  def this(uri: URI) = {
    this()
    this.data = uri
  }

  /** Serialises the current URI; an unset (null) value becomes the empty string. */
  def dataToString: String = {
    this.data match {
      case null => ""
      case other => data.toString()
    }
  }

  /**
   * Set provided string to actual data
   *
   * Note: delegates to `URI.create`, which throws `IllegalArgumentException`
   * for a syntactically invalid URI.
   */
  def dataFromString(str: String): URI = {
    this.data = URI.create(str);
    data
  }

  override def toString: String = {
    dataToString
  }

  // NOTE(review): the two `equals` overloads below do NOT override
  // `Any.equals(Any)` and `hashCode` is untouched, so `==` and hash-based
  // collections will not use them. They also throw NPE when `data` is null.
  // Kept unchanged to preserve behaviour for existing explicit callers.
  def equals(comp: URIBuffer): Boolean = {

    //println("Called equals to xsdstringbuffer")
    this.data.equals(comp.data)
  }

  def equals(comp: URI): Boolean = {

    //println("Called equals to String")
    this.data.equals(comp)
  }

  def compareTo(comp: URI): Int = {

    //println("Called compare to to xsdstringbuffer")
    this.data.compareTo(comp)
  }

  // Utils
  //---------------

  /**
   * Does not edit the source data
   *
   * Expands environment variables in the string form and normalises
   * backslash sequences to forward slashes, returning a fresh URI.
   * NOTE(review): as written, the raw regex matches a doubled backslash —
   * verify whether a single backslash was intended.
   */
  def toReplacedEnvironment = {
    new URI(EnvStrToStr(this.dataToString).replaceAll("""\\\\""","/"))
  }

  /** In-place variant: rewrites this buffer's data with backslash sequences replaced. */
  def normalizeString = {
    this.dataFromString(this.dataToString.replaceAll("""\\\\""","/"))
  }
}

/**
 * Companion providing factories and implicit conversions between
 * `String`/`URI` and [[URIBuffer]].
 */
object URIBuffer {

  def apply() = new URIBuffer

  def convertFromString(str: String): URIBuffer = new URIBuffer(str)

  implicit def convertFromStringToURIBuffer(str: String): URIBuffer = new URIBuffer(str)

  implicit def convertFromURIBufferToString(uri: URIBuffer): String = uri.dataToString

  implicit def convertFromURItoURIBuffer(uri: URI): URIBuffer = new URIBuffer(uri)

  // NOTE(review): name contains a typo ("URItBufferoURI" -> "URIBufferToURI");
  // renaming would break any explicit callers, so it is left unchanged here.
  implicit def convertFromURItBufferoURI(uri: URIBuffer): URI = uri.data
}
richnou/ooxoo-core
ooxoo-core/src/main/scala/com/idyria/osi/ooxoo/core/buffers/datatypes/URIBuffer.scala
Scala
agpl-3.0
2,750
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.spark.deploy.k8s.features

import java.util.UUID

import io.fabric8.kubernetes.api.model.{ContainerBuilder, HasMetadata, PodBuilder, VolumeBuilder, VolumeMountBuilder}

import org.apache.spark.deploy.k8s.{KubernetesConf, SparkPod}
import org.apache.spark.deploy.k8s.Config._

/**
 * Feature step that wires Spark's local (scratch) directories into the pod:
 * one emptyDir volume per configured directory, a mount exposing it at the
 * directory's path, and the SPARK_LOCAL_DIRS environment variable listing all
 * of them for the Spark process.
 */
private[spark] class LocalDirsFeatureStep(
    conf: KubernetesConf,
    defaultLocalDir: String = s"/var/data/spark-${UUID.randomUUID}")
  extends KubernetesFeatureConfigStep {

  // Cannot use Utils.getConfiguredLocalDirs because that will default to the Java system
  // property - we want to instead default to mounting an emptydir volume that doesn't already
  // exist in the image.
  // We could make utils.getConfiguredLocalDirs opinionated about Kubernetes, as it is already
  // a bit opinionated about YARN and Mesos.
  private val resolvedLocalDirs = Option(conf.sparkConf.getenv("SPARK_LOCAL_DIRS"))
    .orElse(conf.getOption("spark.local.dir"))
    .getOrElse(defaultLocalDir)
    .split(",")

  private val useLocalDirTmpFs = conf.get(KUBERNETES_LOCAL_DIRS_TMPFS)

  override def configurePod(pod: SparkPod): SparkPod = {
    // Build every local dir's emptyDir volume together with the mount that exposes it,
    // keeping volume names 1-based ("spark-local-dir-1", "spark-local-dir-2", ...).
    val volumesWithMounts = resolvedLocalDirs.zipWithIndex.map { case (dirPath, idx) =>
      val volume = new VolumeBuilder()
        .withName(s"spark-local-dir-${idx + 1}")
        .withNewEmptyDir()
          .withMedium(if (useLocalDirTmpFs) "Memory" else null)
        .endEmptyDir()
        .build()
      val mount = new VolumeMountBuilder()
        .withName(volume.getName)
        .withMountPath(dirPath)
        .build()
      (volume, mount)
    }
    val (volumes, mounts) = volumesWithMounts.unzip

    val podWithVolumes = new PodBuilder(pod.pod)
      .editSpec()
        .addToVolumes(volumes: _*)
      .endSpec()
      .build()

    // Expose the resolved directory list to Spark and attach every mount.
    val containerWithMounts = new ContainerBuilder(pod.container)
      .addNewEnv()
        .withName("SPARK_LOCAL_DIRS")
        .withValue(resolvedLocalDirs.mkString(","))
      .endEnv()
      .addToVolumeMounts(mounts: _*)
      .build()

    SparkPod(podWithVolumes, containerWithMounts)
  }
}
WindCanDie/spark
resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/LocalDirsFeatureStep.scala
Scala
apache-2.0
3,039
package scala.meta
package tokens

import scala.math.ScalaNumber
import scala.meta.internal.tokens._
import scala.meta.inputs._
import scala.meta.classifiers._
import scala.meta.prettyprinters._
import scala.meta.internal.prettyprinters._

// NOTE: `start` and `end` are String.substring-style,
// i.e. `start` is inclusive and `end` is not.
// Therefore Token.end can point to the last character plus one.
// Btw, Token.start can also point to the last character plus one if it's an EOF token.
/**
 * A single lexical token produced by the scalameta tokenizer.
 * Concrete token classes below are generated via the `@fixed`/`@freeform`
 * macro annotations; `@branch` marks abstract classification traits.
 */
@root trait Token extends InternalToken {
  def input: Input
  def dialect: Dialect
  def start: Int
  def end: Int
  def pos: Position
  def text: String = pos.text
}

object Token {

  // Literals (include some keywords from above, constants, interpolations and xml)
  @branch trait Literal extends Token
  // A literal carrying a concrete value of type A (e.g. an integer constant).
  @branch abstract class Constant[A] extends Literal { val value: A }
  @branch abstract class NumericConstant[A <: ScalaNumber] extends Constant[A]

  // Classification traits: keywords, trivia (whitespace/comments), symbols,
  // punctuation and delimiters. Each concrete token extends one of these.
  @branch trait Keyword extends Token
  @branch trait ModifierKeyword extends Keyword
  @branch trait Trivia extends Token
  @branch trait Whitespace extends Trivia
  @branch trait HSpace extends Whitespace
  @branch trait AtEOL extends Whitespace
  @branch trait EOL extends AtEOL
  @branch trait Symbolic extends Token
  @branch trait SymbolicKeyword extends Symbolic
  @branch trait Punct extends Symbolic
  @branch trait OpenDelim extends Punct
  @branch trait CloseDelim extends Punct

  // Identifiers
  @freeform("identifier") class Ident(value: String) extends Token

  // Alphanumeric keywords
  @fixed("abstract") class KwAbstract extends ModifierKeyword
  @fixed("case") class KwCase extends Keyword
  @fixed("catch") class KwCatch extends Keyword
  @fixed("class") class KwClass extends Keyword
  @fixed("def") class KwDef extends Keyword
  @fixed("do") class KwDo extends Keyword
  @fixed("else") class KwElse extends Keyword
  @fixed("enum") class KwEnum extends Keyword
  @fixed("export") class KwExport extends Keyword
  @fixed("extends") class KwExtends extends Keyword
  @fixed("false") class KwFalse extends Literal
  @fixed("final") class KwFinal extends ModifierKeyword
  @fixed("finally") class KwFinally extends Keyword
  @fixed("for") class KwFor extends Keyword
  @fixed("forSome") class KwForsome extends Keyword
  @fixed("given") class KwGiven extends Keyword
  @fixed("if") class KwIf extends Keyword
  @fixed("implicit") class KwImplicit extends ModifierKeyword
  @fixed("import") class KwImport extends Keyword
  @fixed("lazy") class KwLazy extends ModifierKeyword
  @fixed("match") class KwMatch extends Keyword
  @fixed("macro") class KwMacro extends Keyword
  @fixed("new") class KwNew extends Keyword
  @fixed("null") class KwNull extends Literal
  @fixed("object") class KwObject extends Keyword
  @fixed("override") class KwOverride extends ModifierKeyword
  @fixed("package") class KwPackage extends Keyword
  @fixed("private") class KwPrivate extends ModifierKeyword
  @fixed("protected") class KwProtected extends ModifierKeyword
  @fixed("return") class KwReturn extends Keyword
  @fixed("sealed") class KwSealed extends ModifierKeyword
  @fixed("super") class KwSuper extends Keyword
  @fixed("then") class KwThen extends Keyword
  @fixed("this") class KwThis extends Keyword
  @fixed("throw") class KwThrow extends Keyword
  @fixed("trait") class KwTrait extends Keyword
  @fixed("true") class KwTrue extends Literal
  @fixed("try") class KwTry extends Keyword
  @fixed("type") class KwType extends Keyword
  @fixed("val") class KwVal extends Keyword
  @fixed("var") class KwVar extends Keyword
  @fixed("while") class KwWhile extends Keyword
  @fixed("with") class KwWith extends Keyword
  @fixed("yield") class KwYield extends Keyword

  // Symbolic keywords
  @fixed("#") class Hash extends SymbolicKeyword
  @fixed(":") class Colon extends SymbolicKeyword
  @fixed("<%") class Viewbound extends SymbolicKeyword
  @freeform("<-") class LeftArrow extends SymbolicKeyword
  @fixed("<:") class Subtype extends SymbolicKeyword
  @fixed("=") class Equals extends SymbolicKeyword
  @freeform("=>") class RightArrow extends SymbolicKeyword
  @fixed(">:") class Supertype extends SymbolicKeyword
  @fixed("@") class At extends SymbolicKeyword
  @fixed("_") class Underscore extends SymbolicKeyword
  @fixed("=>>") class TypeLambdaArrow extends SymbolicKeyword
  @fixed("?=>") class ContextArrow extends SymbolicKeyword
  @fixed("'") class MacroQuote extends SymbolicKeyword
  @fixed("$") class MacroSplice extends SymbolicKeyword

  // Delimiters
  @fixed("(") class LeftParen extends OpenDelim
  @fixed(")") class RightParen extends CloseDelim
  @fixed(",") class Comma extends Punct
  @fixed(".") class Dot extends Punct
  @fixed(";") class Semicolon extends Punct
  @fixed("[") class LeftBracket extends OpenDelim
  @fixed("]") class RightBracket extends CloseDelim
  @fixed("{") class LeftBrace extends OpenDelim
  @fixed("}") class RightBrace extends CloseDelim

  // Literal constants. Int/Long share BigInt and Float/Double share BigDecimal
  // so out-of-range source literals can still be represented losslessly.
  object Constant {
    @freeform("integer constant") class Int(value: BigInt) extends NumericConstant[BigInt]
    @freeform("long constant") class Long(value: BigInt) extends NumericConstant[BigInt]
    @freeform("float constant") class Float(value: BigDecimal) extends NumericConstant[BigDecimal]
    @freeform("double constant") class Double(value: BigDecimal) extends NumericConstant[BigDecimal]
    @freeform("character constant") class Char(value: scala.Char) extends Constant[scala.Char]
    @freeform("symbol constant") class Symbol(value: scala.Symbol) extends Constant[scala.Symbol]
    @freeform("string constant") class String(value: Predef.String) extends Constant[Predef.String]
  }

  // NOTE: Here's example tokenization of q"${foo}bar".
  // BOF, Id(q)<"q">, Start<"\\"">, Part("")<"">, SpliceStart<"$">, {, foo, }, SpliceEnd<"">, Part("bar")<"bar">, End("\\""), EOF.
  // As you can see, SpliceEnd is always empty, but I still decided to expose it for consistency reasons.
  object Interpolation {
    @freeform("interpolation id") class Id(value: String) extends Token
    @freeform("interpolation start") class Start extends Token
    @freeform("interpolation part") class Part(value: String) extends Token
    @freeform("splice start") class SpliceStart extends Token
    @freeform("splice end") class SpliceEnd extends Token
    @freeform("interpolation end") class End extends Token
  }

  // Tokens for embedded XML literals, mirroring the interpolation scheme above.
  object Xml {
    @freeform("xml start") class Start extends Token
    @freeform("xml part") class Part(value: String) extends Token
    @freeform("xml splice start") class SpliceStart extends Token
    @freeform("xml splice end") class SpliceEnd extends Token
    @freeform("xml end") class End extends Token
  }

  // Synthetic tokens marking significant-indentation regions (Scala 3).
  object Indentation {
    @freeform("indent") class Indent extends Token
    @freeform("outdent") class Outdent extends Token
  }

  // Trivia
  @fixed(" ") class Space extends HSpace
  @fixed("\\t") class Tab extends HSpace
  @fixed("\\r") class CR extends AtEOL
  @fixed("\\n") class LF extends EOL
  @fixed("\\f") class FF extends EOL
  @freeform("comment") class Comment(value: String) extends Trivia

  // Zero-width sentinel at position 0; `end == start` so it spans no characters.
  @freeform("beginning of file") class BOF extends Token {
    def this(input: Input, dialect: Dialect) = this(input, dialect, 0)
    def end = start
  }
  // Zero-width sentinel positioned one past the last character of the input.
  @freeform("end of file") class EOF extends Token {
    def this(input: Input, dialect: Dialect) = this(input, dialect, input.chars.length)
    def end = start
  }

  // NOTE: in order to maintain conceptual compatibility with scala.reflect's implementation,
  // Ellipsis.rank = 1 means .., Ellipsis.rank = 2 means ..., etc
  @freeform("\\n\\n") private[meta] class LFLF extends AtEOL
  @freeform("ellipsis") private[meta] class Ellipsis(rank: Int) extends Token
  @freeform("unquote") private[meta] class Unquote extends Token

  // Typeclass instances: classification, structural printing and syntax printing.
  implicit def classifiable[T <: Token]: Classifiable[T] = null
  implicit def showStructure[T <: Token]: Structure[T] = TokenStructure.apply[T]
  implicit def showSyntax[T <: Token](implicit dialect: Dialect): Syntax[T] = TokenSyntax.apply[T](dialect)
}
scalameta/scalameta
scalameta/tokens/shared/src/main/scala/scala/meta/tokens/Token.scala
Scala
bsd-3-clause
8,031
/*
 * Copyright (c) 2014 Snowplow Analytics Ltd. All rights reserved.
 *
 * This program is licensed to you under the Apache License Version 2.0,
 * and you may not use this file except in compliance with the Apache License Version 2.0.
 * You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the Apache License Version 2.0 is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
 */
package com.snowplowanalytics.iglu.client
package validation

// Scalaz
import scalaz._
import Scalaz._

// Specs2
import org.specs2.Specification
import org.specs2.matcher.DataTables
import org.specs2.scalaz.ValidationMatchers

// Specification for SchemaCriterion: matching of SchemaKeys against a
// (model, revision?, addition?) criterion, and parsing of criterion URIs.
// NOTE(review): internal line breaks of the s2 literal were lost in the dump
// and are reconstructed conventionally here -- verify against upstream.
class SchemaCriterionSpec extends Specification with DataTables with ValidationMatchers { def is = s2"""

  This is a specification to test the SchemaCriterion class

  if the payload, vendor, and format aren't identical, the schema should be rejected  $e1
  correctly validate schemas  $e2
  parse schema criterion  $e3
  """

  // Vendor mismatch ("wrong" vs the criterion's vendor) must reject the key.
  def e1 = {
    val actual = SchemaKey("wrong", "payload_data", "jsonschema", "2-3-4")
    SchemaCriterion("io.acuminous.cumulo", "payload_data", "jsonschema", 1).matches(actual) must beFalse
  }

  // Table rows: matching demands equal MODEL; based on the rows below, a key
  // matches when its revision is lower than the criterion's, or the revision
  // is equal and the addition does not exceed the criterion's.
  def e2 =
    "SPEC NAME"                                      || "Criterion"           | "SchemaVer" | "EXPECTED OUTPUT" |
    "Correct model"                                  !! (2, None, None)       ! "2-3-4"     ! true              |
    "Incorrect model version"                        !! (2, None, None)       ! "1-0-0"     ! false             |
    "Correct revision"                               !! (2, Some(3), None)    ! "2-3-0"     ! true              |
    "Correct revision and addition"                  !! (2, Some(3), None)    ! "2-0-9"     ! true              |
    "Incorrect revision"                             !! (2, Some(3), None)    ! "2-4-0"     ! false             |
    "Correct model, revision, and addition"          !! (2, Some(3), Some(4)) ! "2-3-4"     ! true              |
    "Correct model and revision, higher addition"    !! (2, Some(3), Some(4)) ! "2-3-9"     ! false             |
    "Correct model, lower revision, higher addition" !! (2, Some(3), Some(4)) ! "2-0-9"     ! true              |> { (_, criterion, version, expected) =>
      SchemaCriterion("io.acuminous.cumulo", "payload_data", "jsonschema", criterion._1, criterion._2, criterion._3)
        .matches(SchemaKey("io.acuminous.cumulo", "payload_data", "jsonschema", version))
        .must_==(expected)
    }

  // A trailing "*" in the addition slot parses to addition = None (wildcard).
  def e3 = {
    val criterion = SchemaCriterion.parse("iglu:com.snowplowanalytics.snowplow/mobile_context/jsonschema/1-0-*")
    criterion must beSuccessful(SchemaCriterion("com.snowplowanalytics.snowplow", "mobile_context", "jsonschema", 1, Some(0), None))
  }
}
jramos/iglu-scala-client
src/test/scala/com.snowplowanalytics.iglu.client/SchemaCriterionSpec.scala
Scala
apache-2.0
3,077
import scala.tools.partest._ // Type constructors for FunctionN and TupleN should not be considered as function type / tuple types. object Test extends DirectTest { def code = "" def show(): Unit = { val global = newCompiler() new global.Run() import global._, definitions._ val function0TC = FunctionClass(0).typeConstructor val tuple1TC = TupleClass(1).typeConstructor FunctionClass.seq.foreach { sym => val tc = sym.typeConstructor assert(!isFunctionType(tc), s"$tc") assert(!isFunctionTypeDirect(tc), s"$tc (direct)") } TupleClass.seq.foreach { sym => val tc = sym.typeConstructor assert(!isTupleType(tc), s"$sym") assert(!isTupleTypeDirect(tc), s"$tc (direct)") } } }
scala/scala
test/files/run/t7876.scala
Scala
apache-2.0
754
object BobAndBombs extends App { val in = io.Source.stdin.getLines() val t = in.next.toInt for(_ <- (1 to t)){ val mapping = " " + in.next + " " var count = 0 for (i <- (2 until mapping.size - 2)){ if(mapping.charAt(i) == 'W'){ if(mapping.substring(i-2, i+3).exists(_ == 'B')) count += 1 } } println(count) } }
clemus90/competitive-programming
hackerEarth/practice/basicProgramming/implementation/BobAndBombs.scala
Scala
mit
362
package io.scalac.amqp import com.typesafe.config.ConfigFactory import org.scalatest.{FlatSpec, Matchers} import scala.collection.immutable.Seq import scala.concurrent.duration._ class ConnectionSettingsSpec extends FlatSpec with Matchers { val referenceSettings = ConnectionSettings( addresses = Seq(Address(host = "localhost", port = 5672)), virtualHost = "/", username = "guest", password = "guest", heartbeat = None, timeout = Duration.Inf, automaticRecovery = false, recoveryInterval = 5.seconds, ssl = None) "apply" should "be able to load configuration from TypeSafe Config" in { val settings = ConnectionSettings(ConfigFactory.load("application.conf")) if(settings.addresses.head.host == "localhost") settings shouldBe referenceSettings else settings shouldBe referenceSettings.copy(addresses = referenceSettings.addresses.map(_.copy(host = "boot2docker"))) def parseAndLoad(s: String) = ConfigFactory.load(ConfigFactory.parseString(s)) ConnectionSettings(parseAndLoad("amqp.heartbeat = 5 seconds")).heartbeat shouldBe Some(5.seconds) ConnectionSettings(parseAndLoad("amqp.timeout = 10 seconds")).timeout shouldBe 10.seconds } }
davidwrpayne/reactive-rabbit
src/test/scala/io/scalac/amqp/ConnectionSettingsSpec.scala
Scala
apache-2.0
1,303
/* * Copyright 2007-2008 WorldWide Conferencing, LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions * and limitations under the License. */ package net.liftweb.util import _root_.org.specs._ import _root_.net.liftweb.util.Box._ import _root_.org.specs.runner._ import _root_.org.specs.Sugar._ class BoxSpecTest extends Runner(BoxSpec) with JUnit with Console object BoxSpec extends Specification { "A Box" can { "be created from a Option. It is Empty if the option is None" in { Box(None) mustBe Empty } "be created from a Option. It is Full(x) if the option is Some(x)" in { Box(Some(1)) must_== Full(1) } "be created from a List containing one element. It is Empty if the list is empty" in { Box(Nil) mustBe Empty } "be created from a List containing one element. It is Full(x) if the list is List(x)" in { Box(List(1)) must_== Full(1) } "be created from a List containing more than one element. It is Full(x) if the list is x::rest" in { Box(List(1, 2, 3)) must_== Full(1) } "be used as an iterable" in { Full(1) reduceLeft {(x: Int, y: Int) => x + y} must_== 1 } "be used as an Option" in { Full(1).get must_== 1 Empty.isDefined must beFalse } "be implicitly defined from an Option. The open_! method can be used on an Option for example" in { Some(1).open_! must_== 1 } "be defined from some legacy code (possibly passing null values). If the passed value is not null, a Full(value) is returned" in { Box.legacyNullTest("s") must_== Full("s") } "be defined from some legacy code (possibly passing null values). 
If the passed value is null, an Empty is returned" in { Box.legacyNullTest(null) must_== Empty } } "A Box" should { "provide a 'choice' method to either apply a function to the Box value or return another default can" in { def gotIt = (x: Int) => Full("got it: " + x.toString) Full(1).choice(gotIt)(Full("nothing")) must_== Full("got it: 1") Empty.choice(gotIt)(Full("nothing")) must_== Full("nothing") } } "A Full Box" should { "not beEmpty" in { Full(1).isEmpty must beFalse } "be defined" in { Full(1).isDefined must beTrue } "return its value when opened" in { Full(1).open_! mustBe 1 } "return its value when opened with openOr(default value)" in { Full(1) openOr 0 mustBe 1 } "return itself when or'ed with another Box" in { Full(1) or Full(2) must_== Full(1) } "define an 'exists' method returning true if the Box value verifies the function" in { Full(1) exists {_ > 0} must beTrue } "define an exists method returning false if the Box value doesn't verify the function" in { Full(0) exists {_ > 0} must beFalse } "define a 'filter' method, returning a Full Box if the filter is satisfied" in { Full(1) filter {_ > 0} must_== Full(1) } "define a 'filter' method, returning Empty if the filter is not satisfied" in { Full(1) filter {_ == 0} mustBe Empty } "define a 'filterMsg' method, returning a Failure if the filter predicate is not satisfied" in { Full(1).filterMsg("not equal to 0")(_ == 0) must_== Failure("not equal to 0", Empty, Empty) } "define a 'foreach' method using its value (to display it for instance)" in { var total = 0 Full(1) foreach { total += _ } total must_== 1 } "define a 'map' method to transform its value" in { Full(1) map { _.toString } must_== Full("1") } "define a 'flatMap' method transforming its value in another Box. If the value is transformed in a Full can, the total result is a Full can" in { Full(1) flatMap { x: Int => if (x > 0) Full("full") else Empty } must_== Full("full") } "define a 'flatMap' method transforming its value in another Box. 
If the value is transformed in an Empty can, the total result is an Empty can" in { Full(0) flatMap { x: Int => if (x > 0) Full("full") else Empty } mustBe Empty } "define an 'elements' method returning an iterator containing its value" in { Full(1).elements.next must_== 1 } "define a 'toList' method returning a List containing its value" in { Full(1).toList must_== List(1) } "define a 'toOption' method returning a Some object containing its value" in { Full(1).toOption must_== Some(1) } "return itself if asked for its status with the operator ?~" in { Full(1) ?~ "error" must_== Full(1) } "return itself if asked for its status with the operator ?~!" in { Full(1) ?~! "error" must_== Full(1) } "define a 'pass' method passing the can to a function and returning itself (alias: $)" in { var empty = false def emptyString(s: Box[String]) = s foreach {c: String => empty = c.isEmpty} Full("") $ emptyString _ empty must beTrue } "define a 'run' method either returning a default value or applying a user-defined function on it" in { def appendToString(s: String, x: Int) = s + x.toString Full(1).run("string")(appendToString) must_== "string1" } "define a 'isA' method returning a Full(value) if the value is the instance of a given class" in { Full("s").isA(classOf[String]) must_== Full("s") } "define a 'isA' method returning Empty if the value is not the instance of a given class" in { Full("s").isA(classOf[Double]) must_== Empty } "define a 'asA' method returning a Full(value) if the value is the instance of a given type" in { Full("s").asA[String] must_== Full("s") } "define a 'asA' method returning Empty if the value is not the instance of a given type" in { Full("s").asA[Double] must_== Empty } } "An Empty Box" should { "beEmpty" in { Empty.isEmpty must beTrue } "not be defined" in { Empty.isDefined must beFalse } "throw an exception if opened" in { {Empty.open_!; ()} must throwA[NullPointerException] } "return a default value if opened with openOr" in { Empty.openOr(1) 
mustBe 1 } "return the other Box if or'ed with another Box" in { Empty.or(Full(1)) must_== Full(1) } "return itself if filtered with a predicate" in { val empty: Box[Int] = Empty empty.filter {_ > 0} mustBe Empty } "define an 'exists' method returning false" in { val empty: Box[Int] = Empty empty exists {_ > 0} must beFalse } "define a 'filter' method, returning Empty" in { val empty: Box[Int] = Empty empty filter {_ > 0} mustBe Empty } "define a 'filterMsg' method, returning a Failure" in { Empty.filterMsg("not equal to 0")(_ == 0) must_== Failure("not equal to 0", Empty, Empty) } "define a 'foreach' doing nothing" in { var total = 0 val empty: Box[Int] = Empty empty foreach { total += _ } total must_== 0 } "define a 'map' method returning Empty" in { Empty map { _.toString } mustBe Empty } "define a 'flatMap' method returning Empty" in { Empty flatMap { x: Int => Full("full") } mustBe Empty } "define an 'elements' method returning an empty iterator" in { Empty.elements.hasNext must beFalse } "define a 'toList' method returning Nil" in { Empty.toList must_== Nil } "define a 'toOption' method returning None" in { Empty.toOption must_== None } "return a failure with a message if asked for its status with the operator ?~" in { Empty ?~ "nothing" must_== Failure("nothing", Empty, Empty) } "return a failure with a message if asked for its status with the operator ?~!" in { Empty ?~! 
"nothing" must_== Failure("nothing", Empty, Empty) } "define a 'isA' method returning Empty" in { Empty.isA(classOf[Double]) must_== Empty } "define a 'asA' method returning Empty" in { Empty.asA[Double] must_== Empty } } "A Failure is an Empty Box which" can { "return its cause as an exception" in { case class LiftException(m: String) extends Exception Failure("error", Full(new LiftException("broken")), Empty).exception.get must_== new LiftException("broken") } "return a chained list of causes" in { Failure("error", Full(new Exception("broken")), Full(Failure("nested cause", Empty, Empty))).chain must_== Full(Failure("nested cause", Empty, Empty)) } } "A Failure is an Empty Box which" should { "return itself if mapped or flatmapped" in { Failure("error", Empty, Empty) map {_.toString} must_== Failure("error", Empty, Empty) Failure("error", Empty, Empty) flatMap {x: String => Full(x.toString)} must_== Failure("error", Empty, Empty) } "return a itself when asked for its status with the operator ?~" in { Failure("error", Empty, Empty) ?~ "nothing" must_== Failure("error", Empty, Empty) } "create a new failure with a chained message if asked for its status with the operator ?~!" in { Failure("error", Empty, Empty) ?~! "error2" must_== Failure("error2", Empty, Full(Failure("error", Empty, Empty))) } } }
beni55/liftweb
lift-util/src/test/scala/net/liftweb/util/BoxSpec.scala
Scala
apache-2.0
9,702
package multithreading /** * Created by Om Prakash C on 16-06-2017. */ object ParallelCollection extends App { def fib(n: Int): Int = if (n<2) 1 else fib(n-1)+fib(n-2) for (i <- (30 to 15 by -1).par) { println(fib(i)) } }
comprakash/learning-scala
concurrency/src/main/scala/multithreading/ParallelCollection.scala
Scala
gpl-3.0
238
package org.jetbrains.plugins.scala.lang.psi.impl import com.intellij.psi.search.{GlobalSearchScope, LocalSearchScope, PackageScope, SearchScope} import com.intellij.psi.util.PsiTreeUtil import com.intellij.psi.{PsiElement, PsiNamedElement, PsiPackage, PsiReference} import org.jetbrains.plugins.scala.extensions._ import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil import org.jetbrains.plugins.scala.lang.psi.api.{ScFile, ScalaFile, ScalaPsiElement} import org.jetbrains.plugins.scala.lang.psi.api.base.ScPrimaryConstructor import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScCaseClause import org.jetbrains.plugins.scala.lang.psi.api.expr._ import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScClassParameter, ScParameter} import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScMember, ScObject, ScTypeDefinition} import org.jetbrains.plugins.scala.lang.psi.api.toplevel.{ScModifierListOwner, ScNamedElement} import scala.annotation.tailrec private object ScalaUseScope { def apply(baseUseScope: SearchScope, element: ScalaPsiElement): SearchScope = { val useScope = element.containingScalaFile.fold(baseUseScope)(apply(baseUseScope, _)) val narrowScope = element match { case cp: ScClassParameter => classParameterScope(cp) case p: ScParameter => Option(parameterScope(p)) case n: ScNamedElement => namedScope(n) case m: ScMember => memberScope(m) case _ => None } intersect(useScope, narrowScope) } def apply(baseUseScope: SearchScope, file: ScalaFile): SearchScope = { if (file.isWorksheetFile || file.isScriptFile) { // elements from worksheets (including scratch files) can only be used in that files file match { case ScFile.VirtualFile(virtualFile) => GlobalSearchScope.fileScope(file.getProject, virtualFile) case _ => baseUseScope } } else { baseUseScope } } private def intersect(scope: SearchScope, scopeOption: Option[SearchScope]): SearchScope = 
scopeOption.fold(scope)(_.intersectWith(scope)) private def intersectOptions(scope1: Option[SearchScope], scope2: Option[SearchScope]): Option[SearchScope] = scope1.map(intersect(_, scope2)).orElse(scope2) private def parameterScope(parameter: ScParameter): SearchScope = parameter.getDeclarationScope match { case null => GlobalSearchScope.EMPTY_SCOPE case expr: ScFunctionExpr => safeLocalScope(expr) case td: ScTypeDefinition => intersect(td.getUseScope, namedScope(parameter)) //class parameters case d => d.getUseScope //named parameters } private def namedScope(named: ScNamedElement): Option[SearchScope] = named.nameContext match { case member: ScMember if member.isLocal => localDefinitionScope(member) case member: ScMember if member != named => Some(member.getUseScope) case member: ScMember => memberScope(member) case caseClause: ScCaseClause => Some(safeLocalScope(caseClause)) case elem@(_: ScForBinding | _: ScGenerator) => localDefinitionScope(elem) case _ => None } private def localDefinitionScope(elem: PsiElement): Option[LocalSearchScope] = elem.parentOfType(Seq(classOf[ScFor], classOf[ScBlock], classOf[ScMember])) .map(safeLocalScope) private def safeLocalScope(elem: PsiElement): LocalSearchScope = if (elem.isValid && elem.getContainingFile != null) new LocalSearchScope(elem) else LocalSearchScope.EMPTY private def classParameterScope(cp: ScClassParameter): Option[SearchScope] = { val asNamedArgument = Option(PsiTreeUtil.getContextOfType(cp, classOf[ScPrimaryConstructor])) .map(_.getUseScope) .getOrElse(LocalSearchScope.EMPTY) val classScope = Option(cp.containingClass).map(_.getUseScope) val asMember = intersectOptions(byAccessModifier(cp), classScope) asMember.map(_.union(asNamedArgument)) } private def memberScope(member: ScMember): Option[SearchScope] = { syntheticMethodScope(member) .orElse(byAccessModifier(member)) .orElse(fromContainingBlockOrMember(member)) } private def byAccessModifier(member: ScMember): Option[SearchScope] = 
fromUnqualifiedOrThisPrivate(member) orElse fromQualifiedPrivate(member) private def localSearchScope(typeDefinition: ScTypeDefinition, withCompanion: Boolean = true): SearchScope = { val scope = safeLocalScope(typeDefinition) if (withCompanion) { typeDefinition.baseCompanion match { case Some(td) => scope.union(safeLocalScope(td)) case _ => scope } } else scope } //private top level classes may be used in the same package private def forTopLevelPrivate(modifierListOwner: ScModifierListOwner) = modifierListOwner match { case td: ScTypeDefinition if td.isTopLevel => for { qName <- Option(td.qualifiedName) parentPackage <- ScalaPsiUtil.parentPackage(qName, td.getProject) } yield new PackageScope(parentPackage, /*includeSubpackages*/ true, /*includeLibraries*/ true) case _ => None } @tailrec private def fromContainingBlockOrMember(elem: PsiElement): Option[SearchScope] = { val blockOrMember = PsiTreeUtil.getContextOfType(elem, true, classOf[ScBlock], classOf[ScMember]) blockOrMember match { case null => None case b: ScBlock => Some(safeLocalScope(b)) case o: ScObject => Some(o.getUseScope) case td: ScTypeDefinition => //can't use td.getUseScope because of inheritance fromUnqualifiedOrThisPrivate(td) match { case None => fromContainingBlockOrMember(td) case scope => scope } case member: ScMember => Some(member.getUseScope) } } //should be checked only for the member itself //member of a qualified private class may escape it's package with inheritance private def fromQualifiedPrivate(member: ScMember): Option[SearchScope] = { def resolve(reference: PsiReference): Option[PsiNamedElement] = reference match { case ResolvesTo(target: PsiNamedElement) => target match { case o: ScObject if o.isPackageObject => val pName = o.qualifiedName.stripSuffix(".`package`") Some(ScPackageImpl.findPackage(o.getProject, pName)) case _ => Some(target) } case _ => None } val maybeTarget = for { list <- Option(member.getModifierList) modifier <- list.accessModifier if modifier.isPrivate && 
!modifier.isUnqualifiedPrivateOrThis target <- resolve(modifier.getReference).orElse { modifier.parentOfType(classOf[ScTypeDefinition]) } } yield target maybeTarget.collect { case p: PsiPackage => new PackageScope(p, /*includeSubpackages*/ true, /*includeLibraries*/ true) case td: ScTypeDefinition => localSearchScope(td) } } private def fromUnqualifiedOrThisPrivate(owner: ScMember) = for { list <- Option(owner.getModifierList) modifier <- list.accessModifier if modifier.isUnqualifiedPrivateOrThis scope <- forTopLevelPrivate(owner).orElse { owner.containingClass match { case definition: ScTypeDefinition => Some(localSearchScope(definition, withCompanion = !modifier.isThis)) case _ => owner.containingFile.map(safeLocalScope(_)) } } } yield scope private def syntheticMethodScope(member: ScMember): Option[SearchScope] = { member match { case fun: ScFunction if fun.isSynthetic => Some(fun.syntheticNavigationElement.getUseScope) case _ => None } } }
JetBrains/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/impl/ScalaUseScope.scala
Scala
apache-2.0
7,807
import java.util.Collections import java.util.concurrent._ /** * Created by yangguo on 15/9/28. */ class Task(val id:Int) extends Runnable{ override def equals(obj: scala.Any): Boolean = { var res=false if(obj!=null&&obj.isInstanceOf[Task]) { val _tmp=obj.asInstanceOf[Task] if(_tmp.id==this.id) res=true } res } override def run(): Unit = println(id) } class KeyTask(val id:String){ override def hashCode(): Int = id.hashCode override def equals(obj: scala.Any): Boolean = { var res=false if(obj!=null&&obj.isInstanceOf[KeyTask]) if(obj.asInstanceOf[KeyTask].id.equals(this.id)) res=true res } } object TestExcutores { def main(args: Array[String]) { // val queue= val pool = new ThreadPoolExecutor(2, 2, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue[Runnable]()) val run4=new Task(4) val run3=new Task(3) val run2=new Task(2) { override def run(): Unit = { Thread.sleep(3000) println("2"+","+pool.getQueue.size()) // pool.remove(run3) // pool.remove(run4) } } val run1=new Task(1) { override def run(): Unit = { Thread.sleep(1000) println(1) } } // pool.execute(run1) pool.execute(run2) Thread.sleep(1000) pool.remove(run2) // pool.execute(run3) // pool.execute(run4) val map=new ConcurrentHashMap[KeyTask,AnyRef]()//.get() map.put(new KeyTask("1"),"2") map.put(new KeyTask("2"),"3") println(map.get(new KeyTask("2"))) } } abstract class QueueLink[T] extends LinkedBlockingQueue[T]{ override def remove(o: scala.Any): Boolean = super.remove(o) }
guoyang2011/flashbird
src/test/scala-2.10/TestExcutores.scala
Scala
apache-2.0
1,662
package io.udash.web.homepage import io.udash._ class RoutingRegistryDef extends RoutingRegistry[RoutingState] { def matchUrl(url: Url): RoutingState = url2State.applyOrElse("/" + url.value.stripPrefix("/").stripSuffix("/"), (_: String) => ErrorState) def matchState(state: RoutingState): Url = Url(state2Url.apply(state)) private val (url2State, state2Url) = bidirectional { case "/" => HelloState case "/demo" / "select" => SelectState } }
UdashFramework/udash-core
guide/homepage/.js/src/main/scala/io/udash/web/homepage/RoutingRegistryDef.scala
Scala
apache-2.0
469
package org.jetbrains.plugins.scala.lang.psi.api.expr import com.intellij.openapi.project.DumbService import com.intellij.psi.{PsiAnnotation, ResolveResult} import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElement import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement import org.jetbrains.plugins.scala.lang.psi.api.base.{ScConstructor, ScPrimaryConstructor} import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTypeDefinition import org.jetbrains.plugins.scala.lang.psi.impl.base.ScStableCodeReferenceElementImpl import org.jetbrains.plugins.scala.lang.psi.stubs.elements.ScStubElementType import org.jetbrains.plugins.scala.lang.resolve.processor.ResolveProcessor /** * @author Alexander Podkhalyuzin * Date: 07.03.2008 */ trait ScAnnotation extends ScalaPsiElement with PsiAnnotation { /** * Return full annotation only without @ token. * @return annotation expression */ def annotationExpr: ScAnnotationExpr = findChildByClassScala(classOf[ScAnnotationExpr]) /** * Return constructor element af annotation expression. For example * if annotation is <code>@Nullable</code> then method returns <code> * Nullable</code> psiElement. * @return constructor element */ def constructor: ScConstructor = annotationExpr.constr def typeElement: ScTypeElement }
gtache/intellij-lsp
intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/psi/api/expr/ScAnnotation.scala
Scala
apache-2.0
1,331
package com.stripe.bonsai package layout import java.io.{ DataOutput, DataInput } case class DisjunctionLayout[A, B, C]( leftLayout: Layout[A], rightLayout: Layout[B], unpack: C => Either[A, B], mkLeft: A => C, mkRight: B => C ) extends Layout[C] { def newBuilder: DisjunctionBuilder[A, B, C] = new DisjunctionBuilder[A, B, C]( leftLayout.newBuilder, rightLayout.newBuilder, unpack, mkLeft, mkRight ) def write(vec: Vec[C], out: DataOutput): Unit = { val DisjunctionVec(bitset, left, right, _, _) = recast(vec) out.writeByte(DisjunctionLayout.SplitEncoding) leftLayout.write(left, out) rightLayout.write(right, out) IndexedBitSet.write(bitset, out) } def read(in: DataInput): Vec[C] = { in.readByte() match { case DisjunctionLayout.SplitEncoding => val left = leftLayout.read(in) val right = rightLayout.read(in) val bitset = IndexedBitSet.read(in) DisjunctionVec(bitset, left, right, mkLeft, mkRight) case _ => throw new java.io.IOException("unsupported encoding for disjunction layout") } } def isSafeToCast(vec: Vec[_]): Boolean = vec match { case DisjunctionVec(_, left, right, _, _) => leftLayout.isSafeToCast(left) && rightLayout.isSafeToCast(right) case _ => false } private def recast(vec: Vec[C]): DisjunctionVec[A, B, C] = { if (isSafeToCast(vec)) { vec.asInstanceOf[DisjunctionVec[A, B, C]] } else { (newBuilder ++= vec).result() } } } object DisjunctionLayout { final val SplitEncoding = 1.toByte } class DisjunctionBuilder[A, B, C]( leftBldr: VecBuilder[A], rightBldr: VecBuilder[B], unpack: C => Either[A, B], mkLeft: A => C, mkRight: B => C ) extends VecBuilder[C] { val bitsetBldr = new IndexedBitSetBuilder def +=(that: C) = { unpack(that) match { case Left(a) => bitsetBldr += true leftBldr += a case Right(b) => bitsetBldr += false rightBldr += b } this } def clear(): Unit = { bitsetBldr.clear() leftBldr.clear() rightBldr.clear() } def result(): DisjunctionVec[A, B, C] = new DisjunctionVec[A, B, C]( bitsetBldr.result(), leftBldr.result(), rightBldr.result(), mkLeft, mkRight ) 
} case class DisjunctionVec[A, B, C]( bitset: IndexedBitSet, left: Vec[A], right: Vec[B], mkLeft: A => C, mkRight: B => C ) extends Vec[C] { def size: Int = left.size + right.size def apply(index: Int): C = { if (bitset(index)) { mkLeft(left(bitset.rank(index) - 1)) } else { mkRight(right(index - bitset.rank(index))) } } }
stripe/bonsai
bonsai-core/src/main/scala/com/stripe/bonsai/layout/DisjunctionLayout.scala
Scala
mit
2,669
package org.jetbrains.plugins.scala.lang.psi.implicits import com.intellij.psi.{PsiElement, PsiFile} import org.jetbrains.plugins.scala.extensions.{PsiElementExt, childOf} import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile import org.jetbrains.plugins.scala.lang.psi.api.base.{ScMethodLike, ScPrimaryConstructor} import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScParameter, ScParameters} import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScPackaging import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.ScImportStmt import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScTemplateParents import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScMember /** * @author Nikolay.Tropin */ trait ImplicitSearchScope object ImplicitSearchScope { private case class ImplicitSearchScopeImpl(file: PsiFile, upperBorder: Option[PsiElement]) extends ImplicitSearchScope //should be different for two elements if they have different sets of available implicit names def forElement(e: PsiElement): ImplicitSearchScope = { e.getContainingFile match { case scalaFile: ScalaFile => ImplicitSearchScopeImpl(scalaFile, findBorderUp(e)) case file => ImplicitSearchScopeImpl(file, None) } } private def findBorderUp(e: PsiElement): Option[PsiElement] = { e.contexts .takeWhile(e => e != null && !e.isInstanceOf[PsiFile]) .flatMap(_.prevSiblings) .find(isImplicitSearchBorder) } private def isImplicitSearchBorder(elem: PsiElement): Boolean = elem match { case _: ScImportStmt | _: ScPackaging => true case (_: ScParameters) childOf (m: ScMethodLike) => hasImplicitClause(m) case pc: ScPrimaryConstructor => hasImplicitClause(pc) case p: ScParameter => p.isImplicitParameter case m: ScMember => m.hasModifierProperty("implicit") case _: ScTemplateParents => true case _ => false } private def hasImplicitClause(m: ScMethodLike): Boolean = m.effectiveParameterClauses.exists(_.isImplicit) }
ilinum/intellij-scala
src/org/jetbrains/plugins/scala/lang/psi/implicits/ImplicitSearchScope.scala
Scala
apache-2.0
2,043
package breeze.polynomial import breeze.numerics._ import breeze.generic._ import breeze.linalg.{DenseVector, DenseMatrix} import spire.math._ import spire.math.poly._ import spire.algebra._ import spire.implicits._ trait DensePolynomial { object densePolyval extends UFunc { implicit object doubleImpl extends Impl2[PolyDenseUFuncWrapper,Double,Double] { def apply(k: PolyDenseUFuncWrapper, v: Double) = k.p(v) } implicit object denseVectorImpl extends Impl2[PolyDenseUFuncWrapper,DenseVector[Double],DenseVector[Double]] { /* This implementation uses Horner's Algorithm: * http://en.wikipedia.org/wiki/Horner's_method * * Iterating over the polynomial coefficients first and the * vector coefficients second is about 3x faster than * the other way around. */ def apply(k: PolyDenseUFuncWrapper, v: DenseVector[Double]) = { val coeffs: Array[Double] = k.p.coeffs var i = coeffs.length - 1 var result = DenseVector.fill[Double](v.size, coeffs(i)) while (i > 0) { i -= 1 val c = coeffs(i) cfor(0)(j => j < result.size, j => j+1)( j => { result.unsafeUpdate(j, result.unsafeValueAt(j)*v.unsafeValueAt(j)+c) }) } result } } implicit object denseMatrixImpl extends Impl2[PolyDenseUFuncWrapper,DenseMatrix[Double],DenseMatrix[Double]] { /* This implementation uses Horner's Algorithm: * http://en.wikipedia.org/wiki/Horner's_method * * Iterating over the polynomial coefficients first and the * vector coefficients second is about 3x faster than * the other way around. 
*/ def apply(k: PolyDenseUFuncWrapper, v: DenseMatrix[Double]) = { if (v.rows != v.cols) { throw new IllegalArgumentException("Can only apply polynomial to square matrix.") } val n = v.rows val coeffs: Array[Double] = k.p.coeffs var i = coeffs.length - 1 var result = DenseMatrix.eye[Double](n) * coeffs(i) while (i > 0) { i -= 1 result = result*v //WILDLY INEFFICIENT, FIGURE OUT IN PLACE MULTIPLY val c = coeffs(i) cfor(0)(i => i < n, i => i+1)(i => { result.update(i,i, result(i,i)+c) }) } result } } } implicit class PolyDenseUFuncWrapper(val p: PolyDense[Double]) extends VariableUFunc[densePolyval.type,PolyDenseUFuncWrapper] }
wavelets/breeze
src/main/scala/breeze/polynomial/DensePolynomial.scala
Scala
apache-2.0
2,511
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.samza.container import java.util import java.util.concurrent.atomic.AtomicReference import org.apache.samza.Partition import org.apache.samza.config.{ClusterManagerConfig, Config, MapConfig} import org.apache.samza.context.{ApplicationContainerContext, ContainerContext} import org.apache.samza.coordinator.JobModelManager import org.apache.samza.coordinator.server.{HttpServer, JobServlet} import org.apache.samza.job.model.{ContainerModel, JobModel, TaskModel} import org.apache.samza.metrics.Gauge import org.apache.samza.storage.ContainerStorageManager import org.apache.samza.system._ import org.junit.Assert._ import org.junit.{Before, Test} import org.mockito.Matchers.{any, notNull} import org.mockito.Mockito._ import org.mockito.invocation.InvocationOnMock import org.mockito.stubbing.Answer import org.mockito.{Mock, Mockito, MockitoAnnotations} import org.scalatest.junit.AssertionsForJUnit import org.scalatest.mockito.MockitoSugar import scala.collection.JavaConversions._ import scala.collection.JavaConverters._ class TestSamzaContainer extends AssertionsForJUnit with MockitoSugar { private val TASK_NAME = new TaskName("taskName") @Mock private var config: Config = null @Mock 
private var taskInstance: TaskInstance = null @Mock private var runLoop: Runnable = null @Mock private var systemAdmins: SystemAdmins = null @Mock private var consumerMultiplexer: SystemConsumers = null @Mock private var producerMultiplexer: SystemProducers = null @Mock private var metrics: SamzaContainerMetrics = null @Mock private var localityManager: LocalityManager = null @Mock private var containerContext: ContainerContext = null @Mock private var applicationContainerContext: ApplicationContainerContext = null @Mock private var samzaContainerListener: SamzaContainerListener = null @Mock private var containerStorageManager: ContainerStorageManager = null private var samzaContainer: SamzaContainer = null @Before def setup(): Unit = { MockitoAnnotations.initMocks(this) setupSamzaContainer(Some(this.applicationContainerContext)) when(this.metrics.containerStartupTime).thenReturn(mock[Gauge[Long]]) } @Test def testExceptionInTaskInitShutsDownTask() { when(this.taskInstance.initTask).thenThrow(new RuntimeException("Trigger a shutdown, please.")) this.samzaContainer.run verify(this.taskInstance).shutdownTask assertEquals(SamzaContainerStatus.FAILED, this.samzaContainer.getStatus()) verify(this.samzaContainerListener).beforeStart() verify(this.samzaContainerListener, never()).afterStart() verify(this.samzaContainerListener, never()).afterStop() verify(this.samzaContainerListener).afterFailure(notNull(classOf[Exception])) verifyZeroInteractions(this.runLoop) } @Test def testErrorInTaskInitShutsDownTask(): Unit = { when(this.taskInstance.initTask).thenThrow(new NoSuchMethodError("Trigger a shutdown, please.")) this.samzaContainer.run verify(this.taskInstance).shutdownTask assertEquals(SamzaContainerStatus.FAILED, this.samzaContainer.getStatus()) verify(this.samzaContainerListener).beforeStart() verify(this.samzaContainerListener, never()).afterStart() verify(this.samzaContainerListener, never()).afterStop() 
verify(this.samzaContainerListener).afterFailure(notNull(classOf[Exception])) verifyZeroInteractions(this.runLoop) } @Test def testExceptionInTaskProcessRunLoop() { when(this.runLoop.run()).thenThrow(new RuntimeException("Trigger a shutdown, please.")) this.samzaContainer.run verify(this.taskInstance).shutdownTask assertEquals(SamzaContainerStatus.FAILED, this.samzaContainer.getStatus()) verify(this.samzaContainerListener).beforeStart() verify(this.samzaContainerListener).afterStart() verify(this.samzaContainerListener, never()).afterStop() verify(this.samzaContainerListener).afterFailure(notNull(classOf[Exception])) verify(this.runLoop).run() } @Test def testShutDownSequenceForStandbyContainers() { class ShutDownSignal(container: SamzaContainer) extends Runnable { def run(): Unit = { Thread.sleep(2000) container.shutdown(); } } this.samzaContainer = new SamzaContainer( this.config, Map.empty[TaskName, TaskInstance], Map.empty[TaskName, TaskInstanceMetrics], this.runLoop, this.systemAdmins, this.consumerMultiplexer, this.producerMultiplexer, this.metrics, localityManager = this.localityManager, containerContext = this.containerContext, applicationContainerContextOption = Some(this.applicationContainerContext), externalContextOption = None, containerStorageManager = containerStorageManager) this.samzaContainer.setContainerListener(this.samzaContainerListener) new ShutDownSignal(samzaContainer).run(); this.samzaContainer.run verify(this.samzaContainerListener).beforeStart() verify(this.samzaContainerListener).afterStart() verify(this.samzaContainerListener).afterStop() verify(this.runLoop, never()).run() verify(this.systemAdmins).stop() verify(this.containerStorageManager).shutdown() } @Test def testCleanRun(): Unit = { doNothing().when(this.runLoop).run() // run loop completes successfully this.samzaContainer.run verify(this.taskInstance).shutdownTask assertEquals(SamzaContainerStatus.STOPPED, this.samzaContainer.getStatus()) 
verify(this.samzaContainerListener).beforeStart() verify(this.samzaContainerListener).afterStart() verify(this.samzaContainerListener).afterStop() verify(this.samzaContainerListener, never()).afterFailure(any()) verify(this.runLoop).run() } @Test def testInterruptDuringStoreRestorationShutdownContainer(): Unit = { when(this.containerStorageManager.start()) .thenAnswer(new Answer[Void] { override def answer(mock: InvocationOnMock): Void = { Thread.sleep(1000) throw new InterruptedException("Injecting interrupt into container storage manager") } }) this.samzaContainer.run assertEquals(SamzaContainerStatus.STOPPED, this.samzaContainer.getStatus()) verify(this.samzaContainerListener).beforeStart() verify(this.samzaContainerListener).afterStop() verify(this.samzaContainerListener, never()).afterFailure(any()) verify(this.runLoop, times(0)).run() } @Test def testInterruptDuringStoreRestorationWithErrorsDuringContainerShutdown(): Unit = { when(this.containerStorageManager.start()) .thenAnswer(new Answer[Void] { override def answer(mock: InvocationOnMock): Void = { Thread.sleep(1000) throw new InterruptedException("Injecting interrupt into container storage manager") } }) when(this.taskInstance.shutdownTask).thenThrow(new RuntimeException("Trigger a shutdown, please.")) this.samzaContainer.run assertEquals(SamzaContainerStatus.FAILED, this.samzaContainer.getStatus()) verify(this.samzaContainerListener).beforeStart() verify(this.samzaContainerListener).afterFailure(any()) verify(this.samzaContainerListener, never()).afterStop() verify(this.runLoop, times(0)).run() } @Test def testFailureDuringShutdown(): Unit = { doNothing().when(this.runLoop).run() // run loop completes successfully when(this.taskInstance.shutdownTask).thenThrow(new RuntimeException("Trigger a shutdown, please.")) this.samzaContainer.run verify(this.taskInstance).shutdownTask assertEquals(SamzaContainerStatus.FAILED, this.samzaContainer.getStatus()) verify(this.samzaContainerListener).beforeStart() 
verify(this.samzaContainerListener).afterStart() verify(this.samzaContainerListener, never()).afterStop() verify(this.samzaContainerListener).afterFailure(notNull(classOf[Exception])) verify(this.runLoop).run() } @Test def testApplicationContainerContext() { val orderVerifier = inOrder(this.applicationContainerContext, this.runLoop) this.samzaContainer.run orderVerifier.verify(this.applicationContainerContext).start() orderVerifier.verify(this.runLoop).run() orderVerifier.verify(this.applicationContainerContext).stop() } @Test def testNullApplicationContainerContextFactory() { setupSamzaContainer(None) this.samzaContainer.run verify(this.runLoop).run() // applicationContainerContext is not even wired into the container anymore, but just double check it is not used verifyZeroInteractions(this.applicationContainerContext) } @Test def testReadJobModel() { val config = new MapConfig(Map("a" -> "b").asJava) val offsets = new util.HashMap[SystemStreamPartition, String]() offsets.put(new SystemStreamPartition("system","stream", new Partition(0)), "1") val tasks = Map( new TaskName("t1") -> new TaskModel(new TaskName("t1"), offsets.keySet(), new Partition(0)), new TaskName("t2") -> new TaskModel(new TaskName("t2"), offsets.keySet(), new Partition(0))) val containers = Map( "0" -> new ContainerModel("0", tasks), "1" -> new ContainerModel("1", tasks)) val jobModel = new JobModel(config, containers) def jobModelGenerator(): JobModel = jobModel val server = new HttpServer val coordinator = new JobModelManager(jobModel, server) JobModelManager.jobModelRef.set(jobModelGenerator()) coordinator.server.addServlet("/*", new JobServlet(JobModelManager.jobModelRef)) try { coordinator.start assertEquals(jobModel, SamzaContainer.readJobModel(server.getUrl.toString)) } finally { coordinator.stop } } @Test def testReadJobModelWithTimeouts() { val config = new MapConfig(Map("a" -> "b").asJava) val offsets = new util.HashMap[SystemStreamPartition, String]() offsets.put(new 
SystemStreamPartition("system","stream", new Partition(0)), "1") val tasks = Map( new TaskName("t1") -> new TaskModel(new TaskName("t1"), offsets.keySet(), new Partition(0)), new TaskName("t2") -> new TaskModel(new TaskName("t2"), offsets.keySet(), new Partition(0))) val containers = Map( "0" -> new ContainerModel("0", tasks), "1" -> new ContainerModel("1", tasks)) val jobModel = new JobModel(config, containers) def jobModelGenerator(): JobModel = jobModel val server = new HttpServer val coordinator = new JobModelManager(jobModel, server) JobModelManager.jobModelRef.set(jobModelGenerator()) val mockJobServlet = new MockJobServlet(2, JobModelManager.jobModelRef) coordinator.server.addServlet("/*", mockJobServlet) try { coordinator.start assertEquals(jobModel, SamzaContainer.readJobModel(server.getUrl.toString)) } finally { coordinator.stop } assertEquals(2, mockJobServlet.exceptionCount) } @Test def testGetChangelogSSPsForContainer() { val taskName0 = new TaskName("task0") val taskName1 = new TaskName("task1") val taskModel0 = new TaskModel(taskName0, Set(new SystemStreamPartition("input", "stream", new Partition(0))), new Partition(10)) val taskModel1 = new TaskModel(taskName1, Set(new SystemStreamPartition("input", "stream", new Partition(1))), new Partition(11)) val containerModel = new ContainerModel("processorId", Map(taskName0 -> taskModel0, taskName1 -> taskModel1)) val changeLogSystemStreams = Map("store0" -> new SystemStream("changelogSystem0", "store0-changelog"), "store1" -> new SystemStream("changelogSystem1", "store1-changelog")) val expected = Set(new SystemStreamPartition("changelogSystem0", "store0-changelog", new Partition(10)), new SystemStreamPartition("changelogSystem1", "store1-changelog", new Partition(10)), new SystemStreamPartition("changelogSystem0", "store0-changelog", new Partition(11)), new SystemStreamPartition("changelogSystem1", "store1-changelog", new Partition(11))) assertEquals(expected, 
SamzaContainer.getChangelogSSPsForContainer(containerModel, changeLogSystemStreams)) } @Test def testGetChangelogSSPsForContainerNoChangelogs() { val taskName0 = new TaskName("task0") val taskName1 = new TaskName("task1") val taskModel0 = new TaskModel(taskName0, Set(new SystemStreamPartition("input", "stream", new Partition(0))), new Partition(10)) val taskModel1 = new TaskModel(taskName1, Set(new SystemStreamPartition("input", "stream", new Partition(1))), new Partition(11)) val containerModel = new ContainerModel("processorId", Map(taskName0 -> taskModel0, taskName1 -> taskModel1)) assertEquals(Set(), SamzaContainer.getChangelogSSPsForContainer(containerModel, Map())) } @Test def testStoreContainerLocality():Unit = { this.config = new MapConfig(Map(ClusterManagerConfig.JOB_HOST_AFFINITY_ENABLED -> "true")) setupSamzaContainer(None) // re-init with an actual config val containerModel: ContainerModel = Mockito.mock[ContainerModel](classOf[ContainerModel]) val testContainerId = "1" Mockito.when(containerModel.getId).thenReturn(testContainerId) Mockito.when(this.containerContext.getContainerModel).thenReturn(containerModel) this.samzaContainer.storeContainerLocality Mockito.verify(this.localityManager).writeContainerToHostMapping(any(), any()) } private def setupSamzaContainer(applicationContainerContext: Option[ApplicationContainerContext]) { this.samzaContainer = new SamzaContainer( this.config, Map(TASK_NAME -> this.taskInstance), Map(TASK_NAME -> new TaskInstanceMetrics), this.runLoop, this.systemAdmins, this.consumerMultiplexer, this.producerMultiplexer, this.metrics, localityManager = this.localityManager, containerContext = this.containerContext, applicationContainerContextOption = applicationContainerContext, externalContextOption = None, containerStorageManager = containerStorageManager) this.samzaContainer.setContainerListener(this.samzaContainerListener) } class MockJobServlet(exceptionLimit: Int, jobModelRef: AtomicReference[JobModel]) extends 
JobServlet(jobModelRef) { var exceptionCount = 0 override protected def getObjectToWrite(): JobModel = { if (exceptionCount < exceptionLimit) { exceptionCount += 1 throw new java.io.IOException("Throwing exception") } else { val jobModel = jobModelRef.get() jobModel } } } }
abhishekshivanna/samza
samza-core/src/test/scala/org/apache/samza/container/TestSamzaContainer.scala
Scala
apache-2.0
15,365
package io.youi.form import org.scalajs.dom.{Event, html} class FormInput(formSupport: FormSupport, val element: html.Element) { val error: FieldError = formSupport.createFieldError(this) element.addEventListener("focus", (_: Event) => { if (formSupport.clearErrorOnFocus) { error.clear() } }) element.addEventListener("blur", (_: Event) => { validate(ValidationMode.Blur) }) element.addEventListener("change", (_: Event) => { validate(ValidationMode.ValueChange) }) def name: String = element match { case i: html.Input => i.name case i: html.TextArea => i.name case i: html.Select => i.name case _ => element.getAttribute("name") } def focus(): Unit = element.focus() def blur(): Unit = element.blur() def select(): Unit = element match { case i: html.Input => i.select() case i: html.TextArea => i.select() case _ => throw new RuntimeException(s"Unsupported select() in FormInput for: ${element.outerHTML}") } def option: Option[String] = value match { case s if s != null && s.trim.nonEmpty => Some(s) case _ => None } def value: String = element match { case i: html.Input => i.value case i: html.TextArea => i.value case i: html.Select => i.value case _ => throw new RuntimeException(s"Unsupported getting value to FormInput for: ${element.outerHTML}") } def value_=(v: String): Unit = element match { case i: html.Input => i.value = v case i: html.TextArea => i.value = v case i: html.Select => i.value = v case _ => throw new RuntimeException(s"Unsupported setting value to FormInput for: ${element.outerHTML}") } def checked: Boolean = element match { case i: html.Input => i.checked case _ => throw new RuntimeException(s"Unsupported getting checked from FormInput for: ${element.outerHTML}") } def checked_=(b: Boolean): Unit = element match { case i: html.Input => i.checked = b case _ => throw new RuntimeException(s"Unsupported setting checked to FormInput for: ${element.outerHTML}") } def disabled: Boolean = element match { case i: html.Input => i.disabled case i: html.TextArea => 
i.disabled case i: html.Select => i.disabled case _ => throw new RuntimeException(s"Unsupported disabled in FormInput for: ${element.outerHTML}") } def disabled_=(b: Boolean): Unit = element match { case i: html.Input => i.disabled = b case i: html.TextArea => i.disabled = b case i: html.Select => i.disabled = b case _ => throw new RuntimeException(s"Unsupported disabled in FormInput for: ${element.outerHTML}") } def text: String = option.getOrElse(throw new RuntimeException(s"Value is empty for ${element.id}")) def show(): Unit = element.style.display = "inline" def hide(): Unit = element.style.display = "none" def clear(): Unit = value = "" object validation { private var list = List.empty[FormValidation] def apply(validation: Validation, modes: ValidationMode*): FormInput = { val m = if (modes.isEmpty) ValidationMode.all else modes.toSet list = list ::: List(FormValidation(validation, m)) FormInput.this } def all(): List[FormValidation] = list } def validate(mode: ValidationMode): Boolean = { val results = validation.all().flatMap(_.validate(mode, this).asOption) error.clear() val message = results.mkString("<br/>") if (results.nonEmpty) { error.show(message) } results.isEmpty } }
outr/youi
dom/src/main/scala/io/youi/form/FormInput.scala
Scala
mit
3,493
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.datasources import java.util.Locale import scala.collection.mutable import org.apache.hadoop.fs.Path import org.apache.spark.internal.Logging import org.apache.spark.rdd.RDD import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.{CatalystTypeConverters, InternalRow, QualifiedTableName} import org.apache.spark.sql.catalyst.CatalystTypeConverters.convertToScala import org.apache.spark.sql.catalyst.analysis._ import org.apache.spark.sql.catalyst.catalog._ import org.apache.spark.sql.catalyst.encoders.RowEncoder import org.apache.spark.sql.catalyst.expressions import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.planning.ScanOperation import org.apache.spark.sql.catalyst.plans.logical.{InsertIntoDir, InsertIntoStatement, LogicalPlan, Project} import org.apache.spark.sql.catalyst.rules.Rule import org.apache.spark.sql.execution.{RowDataSourceScanExec, SparkPlan} import org.apache.spark.sql.execution.command._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.internal.SQLConf.StoreAssignmentPolicy import org.apache.spark.sql.sources._ import org.apache.spark.sql.types._ import 
org.apache.spark.unsafe.types.UTF8String /** * Replaces generic operations with specific variants that are designed to work with Spark * SQL Data Sources. * * Note that, this rule must be run after `PreprocessTableCreation` and * `PreprocessTableInsertion`. */ case class DataSourceAnalysis(conf: SQLConf) extends Rule[LogicalPlan] with CastSupport { def resolver: Resolver = conf.resolver // Visible for testing. def convertStaticPartitions( sourceAttributes: Seq[Attribute], providedPartitions: Map[String, Option[String]], targetAttributes: Seq[Attribute], targetPartitionSchema: StructType): Seq[NamedExpression] = { assert(providedPartitions.exists(_._2.isDefined)) val staticPartitions = providedPartitions.flatMap { case (partKey, Some(partValue)) => (partKey, partValue) :: Nil case (_, None) => Nil } // The sum of the number of static partition columns and columns provided in the SELECT // clause needs to match the number of columns of the target table. if (staticPartitions.size + sourceAttributes.size != targetAttributes.size) { throw new AnalysisException( s"The data to be inserted needs to have the same number of " + s"columns as the target table: target table has ${targetAttributes.size} " + s"column(s) but the inserted data has ${sourceAttributes.size + staticPartitions.size} " + s"column(s), which contain ${staticPartitions.size} partition column(s) having " + s"assigned constant values.") } if (providedPartitions.size != targetPartitionSchema.fields.size) { throw new AnalysisException( s"The data to be inserted needs to have the same number of " + s"partition columns as the target table: target table " + s"has ${targetPartitionSchema.fields.size} partition column(s) but the inserted " + s"data has ${providedPartitions.size} partition columns specified.") } staticPartitions.foreach { case (partKey, partValue) => if (!targetPartitionSchema.fields.exists(field => resolver(field.name, partKey))) { throw new AnalysisException( s"$partKey is not a partition column. 
Partition columns are " + s"${targetPartitionSchema.fields.map(_.name).mkString("[", ",", "]")}") } } val partitionList = targetPartitionSchema.fields.map { field => val potentialSpecs = staticPartitions.filter { case (partKey, partValue) => resolver(field.name, partKey) } if (potentialSpecs.isEmpty) { None } else if (potentialSpecs.size == 1) { val partValue = potentialSpecs.head._2 conf.storeAssignmentPolicy match { // SPARK-30844: try our best to follow StoreAssignmentPolicy for static partition // values but not completely follow because we can't do static type checking due to // the reason that the parser has erased the type info of static partition values // and converted them to string. case StoreAssignmentPolicy.ANSI | StoreAssignmentPolicy.STRICT => Some(Alias(AnsiCast(Literal(partValue), field.dataType, Option(conf.sessionLocalTimeZone)), field.name)()) case _ => Some(Alias(cast(Literal(partValue), field.dataType), field.name)()) } } else { throw new AnalysisException( s"Partition column ${field.name} have multiple values specified, " + s"${potentialSpecs.mkString("[", ", ", "]")}. Please only specify a single value.") } } // We first drop all leading static partitions using dropWhile and check if there is // any static partition appear after dynamic partitions. partitionList.dropWhile(_.isDefined).collectFirst { case Some(_) => throw new AnalysisException( s"The ordering of partition columns is " + s"${targetPartitionSchema.fields.map(_.name).mkString("[", ",", "]")}. 
" + "All partition columns having constant values need to appear before other " + "partition columns that do not have an assigned constant value.") } assert(partitionList.take(staticPartitions.size).forall(_.isDefined)) val projectList = sourceAttributes.take(targetAttributes.size - targetPartitionSchema.fields.size) ++ partitionList.take(staticPartitions.size).map(_.get) ++ sourceAttributes.takeRight(targetPartitionSchema.fields.size - staticPartitions.size) projectList } override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators { case CreateTable(tableDesc, mode, None) if DDLUtils.isDatasourceTable(tableDesc) => CreateDataSourceTableCommand(tableDesc, ignoreIfExists = mode == SaveMode.Ignore) case CreateTable(tableDesc, mode, Some(query)) if query.resolved && DDLUtils.isDatasourceTable(tableDesc) => CreateDataSourceTableAsSelectCommand(tableDesc, mode, query, query.output.map(_.name)) case InsertIntoStatement(l @ LogicalRelation(_: InsertableRelation, _, _, _), parts, query, overwrite, false) if parts.isEmpty => InsertIntoDataSourceCommand(l, query, overwrite) case InsertIntoDir(_, storage, provider, query, overwrite) if provider.isDefined && provider.get.toLowerCase(Locale.ROOT) != DDLUtils.HIVE_PROVIDER => val outputPath = new Path(storage.locationUri.get) if (overwrite) DDLUtils.verifyNotReadPath(query, outputPath) InsertIntoDataSourceDirCommand(storage, provider.get, query, overwrite) case i @ InsertIntoStatement( l @ LogicalRelation(t: HadoopFsRelation, _, table, _), parts, query, overwrite, _) => // If the InsertIntoTable command is for a partitioned HadoopFsRelation and // the user has specified static partitions, we add a Project operator on top of the query // to include those constant column values in the query result. 
// // Example: // Let's say that we have a table "t", which is created by // CREATE TABLE t (a INT, b INT, c INT) USING parquet PARTITIONED BY (b, c) // The statement of "INSERT INTO TABLE t PARTITION (b=2, c) SELECT 1, 3" // will be converted to "INSERT INTO TABLE t PARTITION (b, c) SELECT 1, 2, 3". // // Basically, we will put those partition columns having a assigned value back // to the SELECT clause. The output of the SELECT clause is organized as // normal_columns static_partitioning_columns dynamic_partitioning_columns. // static_partitioning_columns are partitioning columns having assigned // values in the PARTITION clause (e.g. b in the above example). // dynamic_partitioning_columns are partitioning columns that do not assigned // values in the PARTITION clause (e.g. c in the above example). val actualQuery = if (parts.exists(_._2.isDefined)) { val projectList = convertStaticPartitions( sourceAttributes = query.output, providedPartitions = parts, targetAttributes = l.output, targetPartitionSchema = t.partitionSchema) Project(projectList, query) } else { query } // Sanity check if (t.location.rootPaths.size != 1) { throw new AnalysisException("Can only write data to relations with a single path.") } val outputPath = t.location.rootPaths.head val mode = if (overwrite) SaveMode.Overwrite else SaveMode.Append val partitionSchema = actualQuery.resolve( t.partitionSchema, t.sparkSession.sessionState.analyzer.resolver) val staticPartitions = parts.filter(_._2.nonEmpty).map { case (k, v) => k -> v.get } val insertCommand = InsertIntoHadoopFsRelationCommand( outputPath, staticPartitions, i.ifPartitionNotExists, partitionSchema, t.bucketSpec, t.fileFormat, t.options, actualQuery, mode, table, Some(t.location), actualQuery.output.map(_.name)) // For dynamic partition overwrite, we do not delete partition directories ahead. // We write to staging directories and move to final partition directories after writing // job is done. 
So it is ok to have outputPath try to overwrite inputpath. if (overwrite && !insertCommand.dynamicPartitionOverwrite) { DDLUtils.verifyNotReadPath(actualQuery, outputPath) } insertCommand } } /** * Replaces [[UnresolvedCatalogRelation]] with concrete relation logical plans. * * TODO: we should remove the special handling for hive tables after completely making hive as a * data source. */ class FindDataSourceTable(sparkSession: SparkSession) extends Rule[LogicalPlan] { private def readDataSourceTable(table: CatalogTable): LogicalPlan = { val qualifiedTableName = QualifiedTableName(table.database, table.identifier.table) val catalog = sparkSession.sessionState.catalog catalog.getCachedPlan(qualifiedTableName, () => { val pathOption = table.storage.locationUri.map("path" -> CatalogUtils.URIToString(_)) val dataSource = DataSource( sparkSession, // In older version(prior to 2.1) of Spark, the table schema can be empty and should be // inferred at runtime. We should still support it. userSpecifiedSchema = if (table.schema.isEmpty) None else Some(table.schema), partitionColumns = table.partitionColumnNames, bucketSpec = table.bucketSpec, className = table.provider.get, options = table.storage.properties ++ pathOption, catalogTable = Some(table)) LogicalRelation(dataSource.resolveRelation(checkFilesExist = false), table) }) } override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators { case i @ InsertIntoStatement(UnresolvedCatalogRelation(tableMeta), _, _, _, _) if DDLUtils.isDatasourceTable(tableMeta) => i.copy(table = readDataSourceTable(tableMeta)) case i @ InsertIntoStatement(UnresolvedCatalogRelation(tableMeta), _, _, _, _) => i.copy(table = DDLUtils.readHiveTable(tableMeta)) case UnresolvedCatalogRelation(tableMeta) if DDLUtils.isDatasourceTable(tableMeta) => readDataSourceTable(tableMeta) case UnresolvedCatalogRelation(tableMeta) => DDLUtils.readHiveTable(tableMeta) } } /** * A Strategy for planning scans over data sources defined using the sources 
API. */ case class DataSourceStrategy(conf: SQLConf) extends Strategy with Logging with CastSupport { import DataSourceStrategy._ def apply(plan: LogicalPlan): Seq[execution.SparkPlan] = plan match { case ScanOperation(projects, filters, l @ LogicalRelation(t: CatalystScan, _, _, _)) => pruneFilterProjectRaw( l, projects, filters, (requestedColumns, allPredicates, _) => toCatalystRDD(l, requestedColumns, t.buildScan(requestedColumns, allPredicates))) :: Nil case ScanOperation(projects, filters, l @ LogicalRelation(t: PrunedFilteredScan, _, _, _)) => pruneFilterProject( l, projects, filters, (a, f) => toCatalystRDD(l, a, t.buildScan(a.map(_.name).toArray, f))) :: Nil case ScanOperation(projects, filters, l @ LogicalRelation(t: PrunedScan, _, _, _)) => pruneFilterProject( l, projects, filters, (a, _) => toCatalystRDD(l, a, t.buildScan(a.map(_.name).toArray))) :: Nil case l @ LogicalRelation(baseRelation: TableScan, _, _, _) => RowDataSourceScanExec( l.output, l.output.indices, Set.empty, Set.empty, toCatalystRDD(l, baseRelation.buildScan()), baseRelation, None) :: Nil case _ => Nil } // Based on Public API. private def pruneFilterProject( relation: LogicalRelation, projects: Seq[NamedExpression], filterPredicates: Seq[Expression], scanBuilder: (Seq[Attribute], Array[Filter]) => RDD[InternalRow]) = { pruneFilterProjectRaw( relation, projects, filterPredicates, (requestedColumns, _, pushedFilters) => { scanBuilder(requestedColumns, pushedFilters.toArray) }) } // Based on Catalyst expressions. The `scanBuilder` function accepts three arguments: // // 1. A `Seq[Attribute]`, containing all required column attributes. Used to handle relation // traits that support column pruning (e.g. `PrunedScan` and `PrunedFilteredScan`). // // 2. A `Seq[Expression]`, containing all gathered Catalyst filter expressions, only used for // `CatalystScan`. // // 3. 
A `Seq[Filter]`, containing all data source `Filter`s that are converted from (possibly a // subset of) Catalyst filter expressions and can be handled by `relation`. Used to handle // relation traits (`CatalystScan` excluded) that support filter push-down (e.g. // `PrunedFilteredScan` and `HadoopFsRelation`). // // Note that 2 and 3 shouldn't be used together. private def pruneFilterProjectRaw( relation: LogicalRelation, projects: Seq[NamedExpression], filterPredicates: Seq[Expression], scanBuilder: (Seq[Attribute], Seq[Expression], Seq[Filter]) => RDD[InternalRow]): SparkPlan = { val projectSet = AttributeSet(projects.flatMap(_.references)) val filterSet = AttributeSet(filterPredicates.flatMap(_.references)) val candidatePredicates = filterPredicates.map { _ transform { case a: AttributeReference => relation.attributeMap(a) // Match original case of attributes. }} val (unhandledPredicates, pushedFilters, handledFilters) = selectFilters(relation.relation, candidatePredicates) // Combines all Catalyst filter `Expression`s that are either not convertible to data source // `Filter`s or cannot be handled by `relation`. val filterCondition = unhandledPredicates.reduceLeftOption(expressions.And) if (projects.map(_.toAttribute) == projects && projectSet.size == projects.size && filterSet.subsetOf(projectSet)) { // When it is possible to just use column pruning to get the right projection and // when the columns of this projection are enough to evaluate all filter conditions, // just do a scan followed by a filter, with no extra project. val requestedColumns = projects // Safe due to if above. .asInstanceOf[Seq[Attribute]] // Match original case of attributes. 
.map(relation.attributeMap) val scan = RowDataSourceScanExec( relation.output, requestedColumns.map(relation.output.indexOf), pushedFilters.toSet, handledFilters, scanBuilder(requestedColumns, candidatePredicates, pushedFilters), relation.relation, relation.catalogTable.map(_.identifier)) filterCondition.map(execution.FilterExec(_, scan)).getOrElse(scan) } else { // A set of column attributes that are only referenced by pushed down filters. We can // eliminate them from requested columns. val handledSet = { val handledPredicates = filterPredicates.filterNot(unhandledPredicates.contains) val unhandledSet = AttributeSet(unhandledPredicates.flatMap(_.references)) AttributeSet(handledPredicates.flatMap(_.references)) -- (projectSet ++ unhandledSet).map(relation.attributeMap) } // Don't request columns that are only referenced by pushed filters. val requestedColumns = (projectSet ++ filterSet -- handledSet).map(relation.attributeMap).toSeq val scan = RowDataSourceScanExec( relation.output, requestedColumns.map(relation.output.indexOf), pushedFilters.toSet, handledFilters, scanBuilder(requestedColumns, candidatePredicates, pushedFilters), relation.relation, relation.catalogTable.map(_.identifier)) execution.ProjectExec( projects, filterCondition.map(execution.FilterExec(_, scan)).getOrElse(scan)) } } /** * Convert RDD of Row into RDD of InternalRow with objects in catalyst types */ private[this] def toCatalystRDD( relation: LogicalRelation, output: Seq[Attribute], rdd: RDD[Row]): RDD[InternalRow] = { DataSourceStrategy.toCatalystRDD(relation.relation, output, rdd) } /** * Convert RDD of Row into RDD of InternalRow with objects in catalyst types */ private[this] def toCatalystRDD(relation: LogicalRelation, rdd: RDD[Row]): RDD[InternalRow] = { toCatalystRDD(relation, relation.output, rdd) } } object DataSourceStrategy { /** * The attribute name may differ from the one in the schema if the query analyzer * is case insensitive. 
We should change attribute names to match the ones in the schema, * so we do not need to worry about case sensitivity anymore. */ protected[sql] def normalizeExprs( exprs: Seq[Expression], attributes: Seq[AttributeReference]): Seq[Expression] = { exprs.map { e => e transform { case a: AttributeReference => a.withName(attributes.find(_.semanticEquals(a)).getOrElse(a).name) } } } private def translateLeafNodeFilter(predicate: Expression): Option[Filter] = predicate match { case expressions.EqualTo(PushableColumn(name), Literal(v, t)) => Some(sources.EqualTo(name, convertToScala(v, t))) case expressions.EqualTo(Literal(v, t), PushableColumn(name)) => Some(sources.EqualTo(name, convertToScala(v, t))) case expressions.EqualNullSafe(PushableColumn(name), Literal(v, t)) => Some(sources.EqualNullSafe(name, convertToScala(v, t))) case expressions.EqualNullSafe(Literal(v, t), PushableColumn(name)) => Some(sources.EqualNullSafe(name, convertToScala(v, t))) case expressions.GreaterThan(PushableColumn(name), Literal(v, t)) => Some(sources.GreaterThan(name, convertToScala(v, t))) case expressions.GreaterThan(Literal(v, t), PushableColumn(name)) => Some(sources.LessThan(name, convertToScala(v, t))) case expressions.LessThan(PushableColumn(name), Literal(v, t)) => Some(sources.LessThan(name, convertToScala(v, t))) case expressions.LessThan(Literal(v, t), PushableColumn(name)) => Some(sources.GreaterThan(name, convertToScala(v, t))) case expressions.GreaterThanOrEqual(PushableColumn(name), Literal(v, t)) => Some(sources.GreaterThanOrEqual(name, convertToScala(v, t))) case expressions.GreaterThanOrEqual(Literal(v, t), PushableColumn(name)) => Some(sources.LessThanOrEqual(name, convertToScala(v, t))) case expressions.LessThanOrEqual(PushableColumn(name), Literal(v, t)) => Some(sources.LessThanOrEqual(name, convertToScala(v, t))) case expressions.LessThanOrEqual(Literal(v, t), PushableColumn(name)) => Some(sources.GreaterThanOrEqual(name, convertToScala(v, t))) case 
expressions.InSet(e @ PushableColumn(name), set) => val toScala = CatalystTypeConverters.createToScalaConverter(e.dataType) Some(sources.In(name, set.toArray.map(toScala))) // Because we only convert In to InSet in Optimizer when there are more than certain // items. So it is possible we still get an In expression here that needs to be pushed // down. case expressions.In(e @ PushableColumn(name), list) if list.forall(_.isInstanceOf[Literal]) => val hSet = list.map(_.eval(EmptyRow)) val toScala = CatalystTypeConverters.createToScalaConverter(e.dataType) Some(sources.In(name, hSet.toArray.map(toScala))) case expressions.IsNull(PushableColumn(name)) => Some(sources.IsNull(name)) case expressions.IsNotNull(PushableColumn(name)) => Some(sources.IsNotNull(name)) case expressions.StartsWith(PushableColumn(name), Literal(v: UTF8String, StringType)) => Some(sources.StringStartsWith(name, v.toString)) case expressions.EndsWith(PushableColumn(name), Literal(v: UTF8String, StringType)) => Some(sources.StringEndsWith(name, v.toString)) case expressions.Contains(PushableColumn(name), Literal(v: UTF8String, StringType)) => Some(sources.StringContains(name, v.toString)) case expressions.Literal(true, BooleanType) => Some(sources.AlwaysTrue) case expressions.Literal(false, BooleanType) => Some(sources.AlwaysFalse) case _ => None } /** * Tries to translate a Catalyst [[Expression]] into data source [[Filter]]. * * @return a `Some[Filter]` if the input [[Expression]] is convertible, otherwise a `None`. */ protected[sql] def translateFilter(predicate: Expression): Option[Filter] = { translateFilterWithMapping(predicate, None) } /** * Tries to translate a Catalyst [[Expression]] into data source [[Filter]]. * * @param predicate The input [[Expression]] to be translated as [[Filter]] * @param translatedFilterToExpr An optional map from leaf node filter expressions to its * translated [[Filter]]. The map is used for rebuilding * [[Expression]] from [[Filter]]. 
* @return a `Some[Filter]` if the input [[Expression]] is convertible, otherwise a `None`. */ protected[sql] def translateFilterWithMapping( predicate: Expression, translatedFilterToExpr: Option[mutable.HashMap[sources.Filter, Expression]]) : Option[Filter] = { predicate match { case expressions.And(left, right) => // See SPARK-12218 for detailed discussion // It is not safe to just convert one side if we do not understand the // other side. Here is an example used to explain the reason. // Let's say we have (a = 2 AND trim(b) = 'blah') OR (c > 0) // and we do not understand how to convert trim(b) = 'blah'. // If we only convert a = 2, we will end up with // (a = 2) OR (c > 0), which will generate wrong results. // Pushing one leg of AND down is only safe to do at the top level. // You can see ParquetFilters' createFilter for more details. for { leftFilter <- translateFilterWithMapping(left, translatedFilterToExpr) rightFilter <- translateFilterWithMapping(right, translatedFilterToExpr) } yield sources.And(leftFilter, rightFilter) case expressions.Or(left, right) => for { leftFilter <- translateFilterWithMapping(left, translatedFilterToExpr) rightFilter <- translateFilterWithMapping(right, translatedFilterToExpr) } yield sources.Or(leftFilter, rightFilter) case expressions.Not(child) => translateFilterWithMapping(child, translatedFilterToExpr).map(sources.Not) case other => val filter = translateLeafNodeFilter(other) if (filter.isDefined && translatedFilterToExpr.isDefined) { translatedFilterToExpr.get(filter.get) = predicate } filter } } protected[sql] def rebuildExpressionFromFilter( filter: Filter, translatedFilterToExpr: mutable.HashMap[sources.Filter, Expression]): Expression = { filter match { case sources.And(left, right) => expressions.And(rebuildExpressionFromFilter(left, translatedFilterToExpr), rebuildExpressionFromFilter(right, translatedFilterToExpr)) case sources.Or(left, right) => expressions.Or(rebuildExpressionFromFilter(left, 
translatedFilterToExpr), rebuildExpressionFromFilter(right, translatedFilterToExpr)) case sources.Not(pred) => expressions.Not(rebuildExpressionFromFilter(pred, translatedFilterToExpr)) case other => translatedFilterToExpr.getOrElse(other, throw new AnalysisException( s"Fail to rebuild expression: missing key $filter in `translatedFilterToExpr`")) } } /** * Selects Catalyst predicate [[Expression]]s which are convertible into data source [[Filter]]s * and can be handled by `relation`. * * @return A triplet of `Seq[Expression]`, `Seq[Filter]`, and `Seq[Filter]` . The first element * contains all Catalyst predicate [[Expression]]s that are either not convertible or * cannot be handled by `relation`. The second element contains all converted data source * [[Filter]]s that will be pushed down to the data source. The third element contains * all [[Filter]]s that are completely filtered at the DataSource. */ protected[sql] def selectFilters( relation: BaseRelation, predicates: Seq[Expression]): (Seq[Expression], Seq[Filter], Set[Filter]) = { // For conciseness, all Catalyst filter expressions of type `expressions.Expression` below are // called `predicate`s, while all data source filters of type `sources.Filter` are simply called // `filter`s. // A map from original Catalyst expressions to corresponding translated data source filters. // If a predicate is not in this map, it means it cannot be pushed down. val translatedMap: Map[Expression, Filter] = predicates.flatMap { p => translateFilter(p).map(f => p -> f) }.toMap val pushedFilters: Seq[Filter] = translatedMap.values.toSeq // Catalyst predicate expressions that cannot be converted to data source filters. val nonconvertiblePredicates = predicates.filterNot(translatedMap.contains) // Data source filters that cannot be handled by `relation`. An unhandled filter means // the data source cannot guarantee the rows returned can pass the filter. // As a result we must return it so Spark can plan an extra filter operator. 
val unhandledFilters = relation.unhandledFilters(translatedMap.values.toArray).toSet val unhandledPredicates = translatedMap.filter { case (p, f) => unhandledFilters.contains(f) }.keys val handledFilters = pushedFilters.toSet -- unhandledFilters (nonconvertiblePredicates ++ unhandledPredicates, pushedFilters, handledFilters) } /** * Convert RDD of Row into RDD of InternalRow with objects in catalyst types */ private[sql] def toCatalystRDD( relation: BaseRelation, output: Seq[Attribute], rdd: RDD[Row]): RDD[InternalRow] = { if (relation.needConversion) { val converters = RowEncoder(StructType.fromAttributes(output)) rdd.mapPartitions { iterator => iterator.map(converters.toRow) } } else { rdd.asInstanceOf[RDD[InternalRow]] } } } /** * Find the column name of an expression that can be pushed down. */ object PushableColumn { def unapply(e: Expression): Option[String] = { def helper(e: Expression) = e match { case a: Attribute => Some(a.name) case _ => None } helper(e) } }
goldmedal/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala
Scala
apache-2.0
28,823
package org.apache.datacommons.prepbuddy.examples import org.apache.datacommons.prepbuddy.smoothers.SimpleMovingAverageMethod import org.apache.datacommons.prepbuddy.types.CSV import org.apache.spark.rdd.RDD import org.apache.spark.{SparkConf, SparkContext} object SmootherMain { def main(args: Array[String]) { if (args.length != 2) { System.out.println("--> FilePath with Assertion Result is Need To Be Specified") System.exit(0) } val conf: SparkConf = new SparkConf().setAppName("Smoother Main") val sc: SparkContext = new SparkContext(conf) val filePath: String = args(0) val expectedResultPath: String = args(1) val csvInput: RDD[String] = sc.textFile(filePath, 1) val movingAverage: SimpleMovingAverageMethod = new SimpleMovingAverageMethod(3) val smooth: Array[Double] = movingAverage.smooth(csvInput.map(CSV.parse(_).select(3))).collect() println("Smoother Count" + smooth.length) println("=========================================================") val expected: Array[Double] = sc.textFile(expectedResultPath).map(_.toDouble).collect() if (smooth sameElements expected) println("Assertion Successful") else println("Assertion Failed") sc.stop() } }
blpabhishek/prep-buddy
src/main/scala/org/apache/datacommons/prepbuddy/examples/SmootherMain.scala
Scala
apache-2.0
1,308
package com.portia import com.portia.algorithms.PageRank /** * Ranker main program * @author duytd */ object RankerMain { def main(args:Array[String]): Unit = { val ranker = new PageRank ranker.run() } }
duytd/blackspider
src/main/scala/com/portia/RankerMain.scala
Scala
gpl-3.0
221
package filodb.gateway.conversion

import com.typesafe.scalalogging.StrictLogging
import debox.Buffer

import filodb.core.binaryrecord2.RecordBuilder
import filodb.core.metadata.{Schema, Schemas}
import filodb.memory.BinaryRegion
import filodb.memory.format.UnsafeUtils
import filodb.memory.format.vectors.{CustomBuckets, LongHistogram}

/**
 * Base trait for common shard calculation and debug logic for all Influx Line Protocol based records for FiloDB.
 * Implementations carry the raw (already-unescaped) line bytes plus precomputed delimiter offsets for tags
 * and fields, and compute shard/partition hashes once at construction time.
 */
trait InfluxRecord extends InputRecord {
  def bytes: Array[Byte]          // raw unescaped bytes of the whole Influx line
  def kpiLen: Int                 // length in bytes of the measurement (metric) name at offset 0
  def tagDelims: Buffer[Int]      // delimiter offsets, one per tag key=value pair
  def fieldDelims: Buffer[Int]    // delimiter offsets, one per field key=value pair
  def fieldEnd: Int               // end offset (exclusive) of the fields section
  def schema: Schema
  def ts: Long                    // record timestamp, UNIX epoch millis

  import InfluxProtocolParser._

  // One byte before the first field's key is the last byte of the tags section.
  protected def endOfTags: Int = keyOffset(fieldDelims(0)) - 1

  // Iterate through the tags for shard keys, extract values and calculate shard hash
  private var tagsShardHash = 7
  val nonMetricShardValues = new collection.mutable.ArrayBuffer[String]

  // Initializer block: runs once at construction, AFTER nonMetricShardValues is assigned above,
  // scanning tags in order and folding matching shard-key values into tagsShardHash.
  {
    var nonMetricIndex = 0
    parseKeyValues(bytes, tagDelims, endOfTags, new KVVisitor {
      def apply(bytes: Array[Byte], keyIndex: Int, keyLen: Int, valueIndex: Int, valueLen: Int): Unit = {
        if (nonMetricIndex < schema.options.nonMetricShardColumns.length) {
          val keyToCompare = schema.options.nonMetricShardKeyBytes(nonMetricIndex)
          if (BinaryRegion.equalBytes(bytes, keyIndex, keyLen, keyToCompare)) {
            // key match.  Add value to nonMetricShardValues
            nonMetricShardValues += new String(bytes, valueIndex, valueLen)
            // calculate hash too
            nonMetricIndex += 1
            val valueHash = BinaryRegion.hasher32.hash(bytes, valueIndex, valueLen, BinaryRegion.Seed)
            tagsShardHash = RecordBuilder.combineHash(tagsShardHash, valueHash)
          }
        }
      }
    })
  }

  // WARNING: lots of allocation happening here
  override def toString: String = {
    s"""{
       | measurement: ${new String(bytes, 0, kpiLen)}
       | ${tagDelims.length} tags:
       |${debugKeyValues(bytes, tagDelims, endOfTags)}
       | fields:
       |${debugKeyValues(bytes, fieldDelims, fieldEnd)}
       | time: $ts (${new org.joda.time.DateTime(ts)})
       |}""".stripMargin
  }

  final def getMetric: String = new String(bytes, 0, kpiLen)

  // Combines the metric-name hash with the shard-key tag hash computed at construction.
  final def shardKeyHash: Int = {
    val kpiHash = BinaryRegion.hasher32.hash(bytes, 0, kpiLen, BinaryRegion.Seed)
    RecordBuilder.combineHash(tagsShardHash, kpiHash)
  }

  // since they are sorted, just hash the entire tags together including delimiters
  val partitionKeyHash = {
    val firstTagIndex = keyOffset(tagDelims(0))
    BinaryRegion.hasher32.hash(bytes, firstTagIndex, endOfTags - firstTagIndex, BinaryRegion.Seed)
  }
}

// Adds the parsed double, ignoring the field name, to the RecordBuilder, or NaN if it is not a double
class SimpleDoubleAdder(builder: RecordBuilder) extends InfluxFieldVisitor {
  def doubleValue(bytes: Array[Byte], keyIndex: Int, keyLen: Int, value: Double): Unit =
    builder.addDouble(value)
  def stringValue(bytes: Array[Byte], keyIndex: Int, keyLen: Int, valueOffset: Int, valueLen: Int): Unit =
    builder.addDouble(Double.NaN)
}

/**
 * A Prom counter or gauge outputted using Telegraf Influx format with just one field
 * NOTE: Telegraf always sorts the tags so we don't need to do this
 *
 * We deduce counter or gauge based on the field name.  Counters will have "counter" as the field name.
 *
 * @param bytes unescaped(parsed) bytes from raw text bytes
 * @param kpiLen the number of bytes taken by the KPI or Influx "measurement" field
 * @param tagDelims Buffer of tag delimiter offsets, one per tag value
 * @param fieldDelims Buffer of field delimiter offsets; must contain exactly one entry
 * @param fieldEnd the end offset of the fields
 * @param ts the UNIX epoch Long timestamp from the Influx record
 */
final case class InfluxPromSingleRecord(bytes: Array[Byte],
                                        kpiLen: Int,
                                        tagDelims: Buffer[Int],
                                        fieldDelims: Buffer[Int],
                                        fieldEnd: Int,
                                        ts: Long) extends InfluxRecord {
  require(fieldDelims.length == 1, s"Cannot use ${getClass.getName} with fieldDelims of length ${fieldDelims.length}")

  final def addToBuilder(builder: RecordBuilder): Unit = {
    // Add the timestamp and value first
    builder.startNewRecord(schema)
    builder.addLong(ts)
    InfluxProtocolParser.parseKeyValues(bytes, fieldDelims, fieldEnd, new SimpleDoubleAdder(builder))

    // Add metric name, then the map/tags
    builder.addBlob(bytes, UnsafeUtils.arayOffset, kpiLen)
    builder.startMap()
    InfluxProtocolParser.parseKeyValues(bytes, tagDelims, endOfTags, new MapBuilderVisitor(builder))
    builder.updatePartitionHash(partitionKeyHash)
    builder.endMap(false)
    builder.endRecord()
  }

  // Counter vs gauge is decided by whether the single field is named "counter".
  lazy val schema = {
    val counter = InfluxProtocolParser.firstKeyEquals(bytes, fieldDelims, InfluxProtocolParser.CounterKey)
    if (counter) Schemas.promCounter else Schemas.gauge
  }
}

object InfluxHistogramRecord extends StrictLogging {
  // Byte constants for well-known field names / histogram suffixes.
  val sumLabel = "sum".getBytes
  val countLabel = "count".getBytes
  val infLabel = "+Inf".getBytes
  val leKey = "le".getBytes
  val leHash = BinaryRegion.hash32(leKey)
  val bucketSuffix = "bucket".getBytes
  val Underscore = '_'.toByte

  def copyMetricToBuffer(sourceBytes: Array[Byte], metricLen: Int): Unit = {
    // Only copy enough of metric to fit in underscore and longest suffix into metric buffer
    val bytesToCopy = Math.min(metricBufferSize - 1 - bucketSuffix.size, metricLen)
    System.arraycopy(sourceBytes, 0, metricBuffer, 0, bytesToCopy)
    metricBuffer(bytesToCopy) = Underscore
  }

  val _log = logger

  // Appends `suffix` after the underscore written by copyMetricToBuffer, then adds
  // the whole mangled name as a blob to the builder.
  def addSuffixToMetricAndBuild(builder: RecordBuilder, baseMetricLen: Int, suffix: Array[Byte]): Unit = {
    BinaryRegion.copyArray(suffix, metricBuffer, baseMetricLen + 1)
    builder.addBlob(metricBuffer, UnsafeUtils.arayOffset, baseMetricLen + 1 + suffix.size)
  }

  // Per-thread buffer for metric name mangling
  val metricBufferLocal = new ThreadLocal[Array[Byte]]()
  val metricBufferSize = 256
  def metricBuffer: Array[Byte] = {
    //scalastyle:off
    metricBufferLocal.get match {
      case null =>
        val newBuf = new Array[Byte](metricBufferSize)
        metricBufferLocal.set(newBuf)
        newBuf
      case buffer: Array[Byte] => buffer
    }
    //scalastyle:on
  }
}

// Parses and sorts fields assuming they are buckets for histograms, to prepare for histogram
// encoding and writing to BinaryRecord.  The sum and count are also extracted.
// To conserve memory, we keep arrays and do sorted insertion in place
class HistogramFieldVisitor(numFields: Int) extends InfluxFieldVisitor {
  import InfluxHistogramRecord._
  require(numFields >= 3, s"Not enough fields ($numFields) for histogram schema")

  var gotInf = false
  var sum = Double.NaN
  var count = Double.NaN

  // numFields - 2 buckets: every field except "sum" and "count" is assumed to be a bucket.
  val bucketTops = new Array[Double](numFields - 2)
  val bucketVals = new Array[Long](numFields - 2)
  var numBuckets = 0

  def doubleValue(bytes: Array[Byte], keyIndex: Int, keyLen: Int, value: Double): Unit = {
    if (BinaryRegion.equalBytes(bytes, keyIndex, keyLen, sumLabel)) {
      sum = value
    } else if (BinaryRegion.equalBytes(bytes, keyIndex, keyLen, countLabel)) {
      count = value
    } else {
      // Assume it is a bucket.  Convert the field bytes to a number
      val top = if (BinaryRegion.equalBytes(bytes, keyIndex, keyLen, infLabel)) {
        gotInf = true
        Double.PositiveInfinity
      } else {
        InfluxProtocolParser.parseDouble(bytes, keyIndex, keyLen)
      }

      // Find position to insert top and value in bucket.  Buckets must be sorted
      val pos = if (numBuckets == 0) 0 else {
        val binSearchRes = java.util.Arrays.binarySearch(bucketTops, 0, numBuckets, top)
        // binarySearch returns -(insertionPoint) - 1 when the key is absent.
        if (binSearchRes < 0) (-binSearchRes - 1) else binSearchRes
      }

      // insert/shift over array elements and insert
      assert(numBuckets < (numFields - 2))
      if (numBuckets > pos) {
        System.arraycopy(bucketTops, pos, bucketTops, pos + 1, numBuckets - pos)
        System.arraycopy(bucketVals, pos, bucketVals, pos + 1, numBuckets - pos)
      }
      bucketTops(pos) = top
      bucketVals(pos) = value.toLong
      numBuckets += 1
    }
  }

  def stringValue(bytes: Array[Byte], keyIndex: Int, keyLen: Int, valueOffset: Int, valueLen: Int): Unit = {
    _log.warn(s"Got non numeric field in histogram record: key=${new String(bytes, keyIndex, keyLen)} " +
      s"value=[${new String(bytes, valueOffset, valueLen)}]\\nline=[${new String(bytes, 0, valueOffset + valueLen)}]")
  }
}

/**
 * A histogram outputted using Influx Line Protocol.  One record contains multiple fields, one for each bucket
 * plus sum and count.
 * Much more efficient than Prom WriteRequest since it only has to compute shard hashes once.
 * Will be encoded as a single efficient BinaryRecord using FiloDB histogram schema.
 */
final case class InfluxHistogramRecord(bytes: Array[Byte],
                                       kpiLen: Int,
                                       tagDelims: Buffer[Int],
                                       fieldDelims: Buffer[Int],
                                       fieldEnd: Int,
                                       ts: Long) extends InfluxRecord {
  final def addToBuilder(builder: RecordBuilder): Unit = {
    // do some preprocessing: copy metric name to the thread local buffer
    // TODO: this would be needed in case it is not a histogram and we need to write a summary
    // InfluxPromHistogramRecord.copyMetricToBuffer(bytes, kpiLen)

    // Parse sorted buckets, sum, count from fields
    val visitor = new HistogramFieldVisitor(fieldDelims.length)
    InfluxProtocolParser.parseKeyValues(bytes, fieldDelims, fieldEnd, visitor)

    // Only create histogram record if we are able to parse above and it contains +Inf bucket
    // This also ensures that it's not a blank histogram, which cannot be ingested
    if (visitor.gotInf) {
      val buckets = CustomBuckets(visitor.bucketTops)
      val hist = LongHistogram(buckets, visitor.bucketVals)

      // Now, write out histogram
      builder.startNewRecord(Schemas.promHistogram)
      builder.addLong(ts)
      builder.addDouble(visitor.sum)
      builder.addDouble(visitor.count)
      builder.addBlob(hist.serialize())

      // Add metric name, then the map/tags
      builder.addBlob(bytes, UnsafeUtils.arayOffset, kpiLen)
      builder.startMap()
      InfluxProtocolParser.parseKeyValues(bytes, tagDelims, endOfTags, new MapBuilderVisitor(builder))
      builder.updatePartitionHash(partitionKeyHash)
      builder.endMap(false)
      builder.endRecord()
    }
  }
  def schema: Schema = Schemas.promHistogram
}
tuplejump/FiloDB
gateway/src/main/scala/filodb/gateway/conversion/InfluxRecord.scala
Scala
apache-2.0
10,977
/***********************************************************************
 * Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Apache License, Version 2.0
 * which accompanies this distribution and is available at
 * http://www.opensource.org/licenses/apache2.0.php.
 ***********************************************************************/

package org.locationtech.geomesa.parquet

import java.nio.ByteBuffer
import java.util.{Date, UUID}

import com.vividsolutions.jts.geom.Point
import org.apache.parquet.io.api.{Binary, RecordConsumer}
import org.locationtech.geomesa.features.serialization.ObjectType
import org.locationtech.geomesa.features.serialization.ObjectType.ObjectType
import org.opengis.feature.`type`.AttributeDescriptor

/**
 * Writes a simple feature attribute to a Parquet file
 */
trait AttributeWriter {

  /**
   * Writes an attribute for the ith feature
   * @param recordConsumer the Parquet record consumer
   * @param value attribute value to write
   */
  def apply(recordConsumer: RecordConsumer, value: AnyRef): Unit
}

object AttributeWriter {

  /**
   * Builds a writer from a feature-type descriptor: resolves the attribute's
   * object-type bindings and delegates to the binding-based factory below.
   */
  def apply(descriptor: AttributeDescriptor, index: Int): AttributeWriter = {
    val name = descriptor.getLocalName
    val classBinding = descriptor.getType.getBinding
    val bindings = ObjectType.selectType(classBinding, descriptor.getUserData)
    apply(name, index, bindings)
  }

  /**
   * Selects the concrete writer for the head binding; for LIST/MAP the trailing
   * bindings carry the element / key+value types.
   */
  def apply(name: String, index: Int, bindings: Seq[ObjectType]): AttributeWriter = bindings.head match {
    // TODO linestrings and polygons https://geomesa.atlassian.net/browse/GEOMESA-1936
    case ObjectType.GEOMETRY => new PointAttributeWriter(name, index)
    case ObjectType.DATE     => new DateWriter(name, index)
    case ObjectType.DOUBLE   => new DoubleWriter(name, index)
    case ObjectType.FLOAT    => new FloatWriter(name, index)
    case ObjectType.INT      => new IntegerWriter(name, index)
    case ObjectType.LONG     => new LongWriter(name, index)
    case ObjectType.STRING   => new StringWriter(name, index)
    case ObjectType.LIST     => new ListWriter(name, index, bindings(1))
    case ObjectType.MAP      => new MapWriter(name, index, bindings(1), bindings(2))
    case ObjectType.UUID     => new UUIDWriter(name, index)
  }

  /**
   * Template base: wraps the concrete write in startField/endField and skips
   * null values entirely (no field is written for them).
   */
  abstract class AbstractAttributeWriter(fieldName: String, fieldIndex: Int) extends AttributeWriter {

    def write(recordConsumer: RecordConsumer, value: AnyRef): Unit

    override def apply(recordConsumer: RecordConsumer, value: AnyRef): Unit = {
      if (value != null) {
        recordConsumer.startField(fieldName, fieldIndex)
        write(recordConsumer, value)
        recordConsumer.endField(fieldName, fieldIndex)
      }
    }
  }

  // Writes a Point as a group of two doubles: "x" (field 0) and "y" (field 1).
  class PointAttributeWriter(fieldName: String, fieldIndex: Int) extends AbstractAttributeWriter(fieldName, fieldIndex) {
    override def write(recordConsumer: RecordConsumer, value: AnyRef): Unit = {
      val pt = value.asInstanceOf[Point]
      recordConsumer.startGroup()
      recordConsumer.startField("x", 0)
      recordConsumer.addDouble(pt.getX)
      recordConsumer.endField("x", 0)
      recordConsumer.startField("y", 1)
      recordConsumer.addDouble(pt.getY)
      recordConsumer.endField("y", 1)
      recordConsumer.endGroup()
    }
  }

  // Dates are stored as epoch-millis longs.
  class DateWriter(fieldName: String, fieldIndex: Int) extends AbstractAttributeWriter(fieldName, fieldIndex) {
    override def write(recordConsumer: RecordConsumer, value: AnyRef): Unit = {
      recordConsumer.addLong(value.asInstanceOf[Date].getTime)
    }
  }

  class DoubleWriter(fieldName: String, fieldIndex: Int) extends AbstractAttributeWriter(fieldName, fieldIndex) {
    override def write(recordConsumer: RecordConsumer, value: AnyRef): Unit = {
      recordConsumer.addDouble(value.asInstanceOf[java.lang.Double])
    }
  }

  class FloatWriter(fieldName: String, fieldIndex: Int) extends AbstractAttributeWriter(fieldName, fieldIndex) {
    override def write(recordConsumer: RecordConsumer, value: AnyRef): Unit = {
      recordConsumer.addFloat(value.asInstanceOf[java.lang.Float])
    }
  }

  class IntegerWriter(fieldName: String, fieldIndex: Int) extends AbstractAttributeWriter(fieldName, fieldIndex) {
    override def write(recordConsumer: RecordConsumer, value: AnyRef): Unit = {
      recordConsumer.addInteger(value.asInstanceOf[java.lang.Integer])
    }
  }

  class LongWriter(fieldName: String, fieldIndex: Int) extends AbstractAttributeWriter(fieldName, fieldIndex) {
    override def write(recordConsumer: RecordConsumer, value: AnyRef): Unit = {
      recordConsumer.addLong(value.asInstanceOf[Long])
    }
  }

  class StringWriter(fieldName: String, fieldIndex: Int) extends AbstractAttributeWriter(fieldName, fieldIndex) {
    override def write(recordConsumer: RecordConsumer, value: AnyRef): Unit = {
      recordConsumer.addBinary(Binary.fromString(value.asInstanceOf[String]))
    }
  }

  // NOTE(review): casts to scala.List — will ClassCastException if the attribute is
  // backed by a java.util.List; confirm against how GeoMesa materializes list attributes.
  class ListWriter(fieldName: String, fieldIndex: Int, valueType: ObjectType)
      extends AbstractAttributeWriter(fieldName, fieldIndex) {

    val elementWriter = AttributeWriter("element", 0, Seq(valueType))

    override def write(recordConsumer: RecordConsumer, value: AnyRef): Unit = {
      recordConsumer.startGroup()
      val thelist = value.asInstanceOf[List[AnyRef]]
      if (thelist != null && thelist.nonEmpty) {
        recordConsumer.startField(fieldName, 0)
        thelist.foreach { e =>
          recordConsumer.startGroup()
          // null elements still emit an (empty) group so list positions are preserved
          if (e != null) {
            elementWriter(recordConsumer, e)
          }
          recordConsumer.endGroup()
        }
        recordConsumer.endField(fieldName, 0)
      }
      recordConsumer.endGroup()
    }
  }

  // NOTE(review): same scala.Map vs java.util.Map concern as ListWriter above.
  class MapWriter(fieldName: String, fieldIndex: Int, keyType: ObjectType, valueType: ObjectType)
      extends AbstractAttributeWriter(fieldName, fieldIndex) {

    val keyWriter = AttributeWriter("key", 0, Seq(keyType))
    val valueWriter = AttributeWriter("value", 1, Seq(valueType))

    override def write(recordConsumer: RecordConsumer, value: AnyRef): Unit = {
      recordConsumer.startGroup()
      val themap = value.asInstanceOf[Map[AnyRef, AnyRef]]
      if (themap != null && themap.nonEmpty) {
        recordConsumer.startField(fieldName, 0)
        themap.foreach { case (k, v) =>
          recordConsumer.startGroup()
          keyWriter(recordConsumer, k)
          // keys are always written; values only when non-null
          if (v != null) {
            valueWriter(recordConsumer, v)
          }
          recordConsumer.endGroup()
        }
        recordConsumer.endField(fieldName, 0)
      }
      recordConsumer.endGroup()
    }
  }

  // UUIDs are stored as 16-byte binaries: most-significant long then least-significant long.
  class UUIDWriter(fieldName: String, fieldIndex: Int) extends AbstractAttributeWriter(fieldName, fieldIndex) {
    override def write(recordConsumer: RecordConsumer, value: AnyRef): Unit = {
      val uuid = value.asInstanceOf[UUID]
      val bb = ByteBuffer.wrap(new Array[Byte](16))
      bb.putLong(uuid.getMostSignificantBits)
      bb.putLong(uuid.getLeastSignificantBits)
      recordConsumer.addBinary(Binary.fromConstantByteArray(bb.array()))
    }
  }
}
jahhulbert-ccri/geomesa
geomesa-fs/geomesa-fs-storage/geomesa-fs-storage-parquet/src/main/scala/org/locationtech/geomesa/parquet/AttributeWriter.scala
Scala
apache-2.0
7,189
package org.scaladebugger.api.profiles.traits.info.events

import com.sun.jdi.{Location, ObjectReference, ThreadReference, VirtualMachine}
import com.sun.jdi.event._
import com.sun.jdi.request.EventRequest
import org.scaladebugger.test.helpers.ParallelMockFunSpec

/**
 * Verifies that MonitorEvent correctly wraps the four JDI monitor event types
 * (contended-entered, contended-enter, waited, wait), rejecting any other
 * LocatableEvent and delegating monitor/thread/location/VM/request accessors
 * to the wrapped event.
 */
class MonitorEventSpec extends ParallelMockFunSpec {
  describe("MonitorEvent") {
    describe("#constructor") {
      it("should throw an exception if locatable is not a monitor event type") {
        // A generic LocatableEvent is not one of the four supported monitor events.
        intercept[IllegalArgumentException] {
          new MonitorEvent(mock[LocatableEvent])
        }
      }
    }

    describe("#monitor") {
      // One case per supported monitor event type: monitor() must delegate to the
      // wrapped event's own monitor accessor.
      it("should return MonitorContendedEnteredEvent's monitor") {
        val expected = mock[ObjectReference]
        val mockMonitorContendedEnteredEvent = mock[MonitorContendedEnteredEvent]
        val monitorEvent = new MonitorEvent(mockMonitorContendedEnteredEvent)

        (mockMonitorContendedEnteredEvent.monitor _).expects()
          .returning(expected).once()

        val actual = monitorEvent.monitor()

        actual should be (expected)
      }

      it("should return MonitorContendedEnterEvent's monitor") {
        val expected = mock[ObjectReference]
        val mockMonitorContendedEnterEvent = mock[MonitorContendedEnterEvent]
        val monitorEvent = new MonitorEvent(mockMonitorContendedEnterEvent)

        (mockMonitorContendedEnterEvent.monitor _).expects()
          .returning(expected).once()

        val actual = monitorEvent.monitor()

        actual should be (expected)
      }

      it("should return MonitorWaitedEvent's monitor") {
        val expected = mock[ObjectReference]
        val mockMonitorWaitedEvent = mock[MonitorWaitedEvent]
        val monitorEvent = new MonitorEvent(mockMonitorWaitedEvent)

        (mockMonitorWaitedEvent.monitor _).expects()
          .returning(expected).once()

        val actual = monitorEvent.monitor()

        actual should be (expected)
      }

      it("should return MonitorWaitEvent's monitor") {
        val expected = mock[ObjectReference]
        val mockMonitorWaitEvent = mock[MonitorWaitEvent]
        val monitorEvent = new MonitorEvent(mockMonitorWaitEvent)

        (mockMonitorWaitEvent.monitor _).expects()
          .returning(expected).once()

        val actual = monitorEvent.monitor()

        actual should be (expected)
      }
    }

    describe("#thread") {
      // Same delegation contract as #monitor, but for the owning thread.
      it("should return MonitorContendedEnteredEvent's thread") {
        val expected = mock[ThreadReference]
        val mockMonitorContendedEnteredEvent = mock[MonitorContendedEnteredEvent]
        val threadEvent = new MonitorEvent(mockMonitorContendedEnteredEvent)

        (mockMonitorContendedEnteredEvent.thread _).expects()
          .returning(expected).once()

        val actual = threadEvent.thread()

        actual should be (expected)
      }

      it("should return MonitorContendedEnterEvent's thread") {
        val expected = mock[ThreadReference]
        val mockMonitorContendedEnterEvent = mock[MonitorContendedEnterEvent]
        val threadEvent = new MonitorEvent(mockMonitorContendedEnterEvent)

        (mockMonitorContendedEnterEvent.thread _).expects()
          .returning(expected).once()

        val actual = threadEvent.thread()

        actual should be (expected)
      }

      it("should return MonitorWaitedEvent's thread") {
        val expected = mock[ThreadReference]
        val mockMonitorWaitedEvent = mock[MonitorWaitedEvent]
        val threadEvent = new MonitorEvent(mockMonitorWaitedEvent)

        (mockMonitorWaitedEvent.thread _).expects()
          .returning(expected).once()

        val actual = threadEvent.thread()

        actual should be (expected)
      }

      it("should return MonitorWaitEvent's thread") {
        val expected = mock[ThreadReference]
        val mockMonitorWaitEvent = mock[MonitorWaitEvent]
        val threadEvent = new MonitorEvent(mockMonitorWaitEvent)

        (mockMonitorWaitEvent.thread _).expects()
          .returning(expected).once()

        val actual = threadEvent.thread()

        actual should be (expected)
      }
    }

    describe("#location") {
      it("should return the locatable's location") {
        val expected = mock[Location]

        // NOTE: Need to provide a valid event type
        val mockMonitorWaitEvent = mock[MonitorWaitEvent]
        val threadEvent = new MonitorEvent(mockMonitorWaitEvent)

        (mockMonitorWaitEvent.location _).expects()
          .returning(expected).once()

        val actual = threadEvent.location()

        actual should be (expected)
      }
    }

    describe("#virtualMachine") {
      it("should return the locatable's virtual machine") {
        val expected = mock[VirtualMachine]

        // NOTE: Need to provide a valid event type
        val mockMonitorWaitEvent = mock[MonitorWaitEvent]
        val threadEvent = new MonitorEvent(mockMonitorWaitEvent)

        (mockMonitorWaitEvent.virtualMachine _).expects()
          .returning(expected).once()

        val actual = threadEvent.virtualMachine()

        actual should be (expected)
      }
    }

    describe("#request") {
      it("should return the locatable's request") {
        val expected = mock[EventRequest]

        // NOTE: Need to provide a valid event type
        val mockMonitorWaitEvent = mock[MonitorWaitEvent]
        val threadEvent = new MonitorEvent(mockMonitorWaitEvent)

        (mockMonitorWaitEvent.request _).expects()
          .returning(expected).once()

        val actual = threadEvent.request()

        actual should be (expected)
      }
    }
  }
}
ensime/scala-debugger
scala-debugger-api/src/test/scala/org/scaladebugger/api/profiles/traits/info/events/MonitorEventSpec.scala
Scala
apache-2.0
5,597
package glasskey.spray

import glasskey.model.OAuthErrorHelper
import spray.httpx.SprayJsonSupport
import spray.routing.HttpService

/**
 * Mixin that converts OAuth-related spray rejections and route timeouts into
 * JSON ValidationError responses with a 500 status.
 */
trait OAuthRejectionUnwrapper extends HttpService with SprayJsonSupport with OAuthErrorHelper {

  import glasskey.model.ValidationError
  import glasskey.spray.model.OAuthRejection
  import glasskey.spray.resource.validation.PingValidationJsonProtocol._
  import spray.http.StatusCodes.InternalServerError
  import spray.httpx.marshalling._
  import spray.routing.{RejectionHandler, Route}

  // Handles only the first rejection when it is an OAuthRejection; the wrapped
  // exception is converted (implicitly, via OAuthErrorHelper) to a ValidationError
  // and marshalled as the JSON body of a 500 response.  Headers carried by the
  // rejection are currently ignored.
  implicit val tokenRejectionHandler = RejectionHandler {
    case OAuthRejection(wrappedEx, headers) :: _ =>
      val error: ValidationError = wrappedEx
      complete((InternalServerError, error))
  }

  //  def jsonify(response: HttpResponse): HttpResponse = {
  //    HttpResponse(status = StatusCodes.InternalServerError, entity = marshalUnsafe(
  //      new ValidationError("server_error", response.entity.asString, Some(response.status.intValue))))
  //  }

  //  implicit val apiRejectionHandler = RejectionHandler{
  //    case rejections => mapHttpResponse(jsonify) {
  //      RejectionHandler.Default(rejections)
  //    }
  //  }

  // Replaces spray's default timeout response with a JSON ValidationError.
  override def timeoutRoute: Route = complete(
    (InternalServerError,
      marshal(new ValidationError("timeout", "spray-can timed out servicing the request.", Some(500)))))
}
MonsantoCo/glass-key
glass-key-spray/src/main/scala/glasskey/spray/OAuthRejectionUnwrapper.scala
Scala
bsd-3-clause
1,345
/**
 * Copyright 2012-2014 Jorge Aliss (jaliss at gmail dot com) - twitter: @jaliss
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package securesocial.core

import play.api.data.Form
import play.api.data.Forms._
import securesocial.core._
import play.api.mvc.{ Result, Results, Request }
import play.api.{ Logger, Play, Application }
import Play.current
import org.joda.time.DateTime
import scala.concurrent.{ Await, Future }
import play.api.libs.ws.WSResponse
import play.Plugin
import service.txbitsUserService
import models.{ LogEvent, LogType, LogModel }

/**
 * Configuration constants and the shared login form for username/password
 * authentication.  All settings are read from the Play configuration.
 */
object UsernamePasswordProvider {

  // Whether SecureSocial should require SSL; warns loudly when running in
  // production without it, since credentials would travel in the clear.
  val sslEnabled: Boolean = {
    import Play.current
    val result = current.configuration.getBoolean("securesocial.ssl").getOrElse(false)
    if (!result && Play.isProd) {
      Logger.warn(
        "[securesocial] IMPORTANT: Play is running in production mode but you did not turn SSL on for SecureSocial." +
          "Not using SSL can make it really easy for an attacker to steal your users' credentials and/or the " +
          "authenticator cookie and gain access to the system."
      )
    }
    result
  }

  // Timeout used when awaiting asynchronous authentication calls.
  val secondsToWait = {
    import scala.concurrent.duration._
    10.seconds
  }

  private val SendWelcomeEmailKey = "securesocial.userpass.sendWelcomeEmail"
  private val EnableTokenJob = "securesocial.userpass.enableTokenJob"

  // Simple credentials form: both fields are required (non-empty).
  val loginForm = Form(
    tuple(
      "username" -> nonEmptyText,
      "password" -> nonEmptyText
    )
  )

  lazy val sendWelcomeEmail = current.configuration.getBoolean(SendWelcomeEmailKey).getOrElse(true)
  lazy val enableTokenJob = current.configuration.getBoolean(EnableTokenJob).getOrElse(true)
  lazy val signupSkipLogin = false
}

/**
 * A token used for reset password and sign up operations
 *
 * @param uuid the token id
 * @param email the user email
 * @param creationTime the creation time
 * @param expirationTime the expiration time
 * @param isSignUp a boolean indicating whether the token was created for a sign up action or not
 * @param language the language the token's email flow should use
 */
case class Token(uuid: String, email: String, creationTime: DateTime, expirationTime: DateTime, isSignUp: Boolean, language: String) {
  // True once the expiration instant has passed (evaluated against "now").
  def isExpired = expirationTime.isBeforeNow
}
txbits/txbits
txbits/app/securesocial/core/UsernamePasswordProvider.scala
Scala
agpl-3.0
2,702
import java.io.File

/** Mixin exposing the shared marker file used by this scripted test. */
trait Base {
  /** Handle to a file named "marker" in the current working directory. */
  val marker: File = new File("marker")
}
mdedetrich/sbt
sbt/src/sbt-test/tests/test-quick/src/test/scala/Base.scala
Scala
bsd-3-clause
69
package org.jetbrains.plugins.scala.worksheet.processor

import com.intellij.openapi.editor.Document
import com.intellij.psi._
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile

/**
 * Walks the top-level PSI elements of a worksheet file that still need to be
 * interpreted, starting after the last already-processed line (if any).
 * Yields `Left` for a [[PsiErrorElement]] (and stops afterwards) and `Right`
 * for every other element.
 */
private class WorksheetInterpretExprsIterator(
  file: ScalaFile,
  document: Document,
  lastProcessedLine: Option[Int]
) extends Iterator[Either[PsiErrorElement, PsiElement]] {

  import WorksheetInterpretExprsIterator._

  // PSI access must happen inside a read action.
  private val firstElement = inReadAction {
    firstElementToProcess(lastProcessedLine)(file, document)
  }

  var current: PsiElement = firstElement.orNull

  override def hasNext: Boolean = current != null

  override def next(): Either[PsiErrorElement, PsiElement] = {
    val element = current
    element match {
      case err: PsiErrorElement =>
        // A syntax error terminates the iteration: siblings are not visited.
        current = null
        Left(err)
      case ok =>
        current = ok.getNextSibling
        Right(ok)
    }
  }
}

private object WorksheetInterpretExprsIterator {

  /**
   * The first element to interpret: either the sibling following the top-most
   * element at the last processed line, or the file's first child when
   * nothing has been processed yet.
   */
  private def firstElementToProcess(lastProcessedLine: Option[Int])
                                   (implicit file: PsiFile, document: Document): Option[PsiElement] =
    lastProcessedLine.fold(Option(file.getFirstChild)) { line =>
      topMostElementAtLine(line).flatMap(_.nextSibling)
    }

  /** Highest ancestor (within the file) of the element found at the given line. */
  private def topMostElementAtLine(lastProcessedLine: Int)
                                  (implicit file: PsiFile, document: Document): Option[PsiElement] =
    findElementAtLine(lastProcessedLine)
      .flatMap(_.withParentsInFile.lastOption)

  /** Element at the middle offset of the given line, if any. */
  private def findElementAtLine(line: Int)
                               (implicit file: PsiFile, document: Document): Option[PsiElement] = {
    val (lineStart, lineEnd) = document.lineRange(line)
    val midOffset = (lineStart + lineEnd) / 2
    Option(file.findElementAt(midOffset))
  }

  implicit class DocumentExt(private val document: Document) extends AnyVal {
    /** (startOffset, endOffset) of the given 0-based line. */
    def lineRange(lineIdx: Int): (Int, Int) =
      (document.getLineStartOffset(lineIdx), document.getLineEndOffset(lineIdx))
  }
}
JetBrains/intellij-scala
scala/worksheet/src/org/jetbrains/plugins/scala/worksheet/processor/WorksheetInterpretExprsIterator.scala
Scala
apache-2.0
2,106
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.streaming.api.scala

import org.apache.flink.annotation.{PublicEvolving, Public}
import org.apache.flink.api.common.functions.CoGroupFunction
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.functions.KeySelector
import org.apache.flink.api.java.typeutils.ResultTypeQueryable
import org.apache.flink.streaming.api.datastream.{CoGroupedStreams => JavaCoGroupedStreams}
import org.apache.flink.streaming.api.windowing.assigners.WindowAssigner
import org.apache.flink.streaming.api.windowing.evictors.Evictor
import org.apache.flink.streaming.api.windowing.triggers.Trigger
import org.apache.flink.streaming.api.windowing.windows.Window
import org.apache.flink.util.Collector

import scala.collection.JavaConverters._

/**
 * `CoGroupedStreams` represents two [[DataStream]]s that have been co-grouped.
 * A streaming co-group operation is evaluated over elements in a window.
 *
 * To finalize the co-group operation you also need to specify a [[KeySelector]] for
 * both the first and second input and a [[WindowAssigner]]
 *
 * Note: Right now, the groups are being built in memory so you need to ensure that they don't
 * get too big. Otherwise the JVM might crash.
 *
 * Example:
 *
 * {{{
 * val one: DataStream[(String, Int)]  = ...
 * val two: DataStream[(String, Int)] = ...
 *
 * val result = one.coGroup(two)
 *     .where(new MyFirstKeySelector())
 *     .equalTo(new MyFirstKeySelector())
 *     .window(TumblingEventTimeWindows.of(Time.of(5, TimeUnit.SECONDS)))
 *     .apply(new MyCoGroupFunction())
 * }
 * }}}
 */
@Public
class CoGroupedStreams[T1, T2](input1: DataStream[T1], input2: DataStream[T2]) {

  /**
   * Specifies a [[KeySelector]] for elements from the first input.
   */
  def where[KEY: TypeInformation](keySelector: T1 => KEY): Where[KEY] = {
    val cleanFun = clean(keySelector)
    val keyType = implicitly[TypeInformation[KEY]]
    // Wrap the Scala function in a Java KeySelector that can also report its
    // produced type to the runtime.
    val javaSelector = new KeySelector[T1, KEY] with ResultTypeQueryable[KEY] {
      def getKey(in: T1) = cleanFun(in)
      override def getProducedType: TypeInformation[KEY] = keyType
    }
    new Where[KEY](javaSelector, keyType)
  }

  /**
   * A co-group operation that has [[KeySelector]]s defined for the first input.
   *
   * You need to specify a [[KeySelector]] for the second input using [[equalTo()]]
   * before you can proceed with specifying a [[WindowAssigner]] using [[EqualTo.window()]].
   *
   * @tparam KEY Type of the key. This must be the same for both inputs
   */
  class Where[KEY](keySelector1: KeySelector[T1, KEY], keyType: TypeInformation[KEY]) {

    /**
     * Specifies a [[KeySelector]] for elements from the second input.
     */
    def equalTo(keySelector: T2 => KEY): EqualTo = {
      val cleanFun = clean(keySelector)
      // Copied to a local val so the anonymous KeySelector below captures the
      // TypeInformation value rather than the enclosing Where instance —
      // NOTE(review): presumably to keep the closure serializable; confirm.
      val localKeyType = keyType
      val javaSelector = new KeySelector[T2, KEY] with ResultTypeQueryable[KEY] {
        def getKey(in: T2) = cleanFun(in)
        override def getProducedType: TypeInformation[KEY] = localKeyType
      }
      new EqualTo(javaSelector)
    }

    /**
     * A co-group operation that a [[KeySelector]] defined for the first and the second input.
     *
     * A window can now be specified using [[window()]].
     */
    class EqualTo(keySelector2: KeySelector[T2, KEY]) {

      /**
       * Specifies the window on which the co-group operation works.
       */
      @PublicEvolving
      def window[W <: Window](
          assigner: WindowAssigner[_ >: JavaCoGroupedStreams.TaggedUnion[T1, T2], W])
          : WithWindow[W] = {
        if (keySelector1 == null || keySelector2 == null) {
          throw new UnsupportedOperationException(
            "You first need to specify KeySelectors for both inputs using where() and equalTo().")
        }
        // trigger and evictor start out as null; they can be set later via
        // trigger()/evictor() and are forwarded as-is to the Java API.
        new WithWindow[W](clean(assigner), null, null)
      }

      /**
       * A co-group operation that has [[KeySelector]]s defined for both inputs as
       * well as a [[WindowAssigner]].
       *
       * @tparam W Type of { @link Window} on which the co-group operation works.
       */
      @PublicEvolving
      class WithWindow[W <: Window](
          windowAssigner: WindowAssigner[_ >: JavaCoGroupedStreams.TaggedUnion[T1, T2], W],
          trigger: Trigger[_ >: JavaCoGroupedStreams.TaggedUnion[T1, T2], _ >: W],
          evictor: Evictor[_ >: JavaCoGroupedStreams.TaggedUnion[T1, T2], _ >: W]) {

        /**
         * Sets the [[Trigger]] that should be used to trigger window emission.
         */
        @PublicEvolving
        def trigger(newTrigger: Trigger[_ >: JavaCoGroupedStreams.TaggedUnion[T1, T2], _ >: W])
            : WithWindow[W] = {
          // Builder style: returns a new immutable WithWindow with the trigger replaced.
          new WithWindow[W](windowAssigner, newTrigger, evictor)
        }

        /**
         * Sets the [[Evictor]] that should be used to evict elements from a window before
         * emission.
         *
         * Note: When using an evictor window performance will degrade significantly, since
         * pre-aggregation of window results cannot be used.
         */
        @PublicEvolving
        def evictor(
            newEvictor: Evictor[_ >: JavaCoGroupedStreams.TaggedUnion[T1, T2], _ >: W])
            : WithWindow[W] = {
          new WithWindow[W](windowAssigner, trigger, newEvictor)
        }

        /**
         * Completes the co-group operation with the user function that is executed
         * for windowed groups.
         */
        def apply[O: TypeInformation](
            fun: (Iterator[T1], Iterator[T2]) => O): DataStream[O] = {
          require(fun != null, "CoGroup function must not be null.")

          // Adapt the 2-arg Scala function (which returns one value per group)
          // to Flink's CoGroupFunction interface.
          val coGrouper = new CoGroupFunction[T1, T2, O] {
            val cleanFun = clean(fun)
            def coGroup(
                left: java.lang.Iterable[T1],
                right: java.lang.Iterable[T2], out: Collector[O]) = {
              out.collect(cleanFun(left.iterator().asScala, right.iterator().asScala))
            }
          }
          apply(coGrouper)
        }

        /**
         * Completes the co-group operation with the user function that is executed
         * for windowed groups.
         */
        def apply[O: TypeInformation](
            fun: (Iterator[T1], Iterator[T2], Collector[O]) => Unit): DataStream[O] = {
          require(fun != null, "CoGroup function must not be null.")

          // Adapt the 3-arg Scala function (which emits via the Collector) to
          // Flink's CoGroupFunction interface.
          val coGrouper = new CoGroupFunction[T1, T2, O] {
            val cleanFun = clean(fun)
            def coGroup(
                left: java.lang.Iterable[T1],
                right: java.lang.Iterable[T2],
                out: Collector[O]) = {
              cleanFun(left.iterator.asScala, right.iterator.asScala, out)
            }
          }
          apply(coGrouper)
        }

        /**
         * Completes the co-group operation with the user function that is executed
         * for windowed groups.
         */
        def apply[T: TypeInformation](function: CoGroupFunction[T1, T2, T]): DataStream[T] = {

          // Delegate to the Java-API builder, wiring through the keys, window
          // assigner, and the (possibly null) trigger/evictor collected so far.
          val coGroup = new JavaCoGroupedStreams[T1, T2](input1.javaStream, input2.javaStream)

          asScalaStream(coGroup
            .where(keySelector1)
            .equalTo(keySelector2)
            .window(windowAssigner)
            .trigger(trigger)
            .evictor(evictor)
            .apply(clean(function), implicitly[TypeInformation[T]]))
        }
      }
    }
  }

  /**
   * Returns a "closure-cleaned" version of the given function. Cleans only if closure cleaning
   * is not disabled in the [[org.apache.flink.api.common.ExecutionConfig]].
   */
  private[flink] def clean[F <: AnyRef](f: F): F = {
    new StreamExecutionEnvironment(input1.javaStream.getExecutionEnvironment).scalaClean(f)
  }
}
WangTaoTheTonic/flink
flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/CoGroupedStreams.scala
Scala
apache-2.0
8,456