code
stringlengths
5
1M
repo_name
stringlengths
5
109
path
stringlengths
6
208
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
5
1M
package models.db

import com.ponkotuy.tool.DiffCalc
import scalikejdbc._
import com.ponkotuy.data

/**
 * Snapshot of a member's materials (resources and consumables) at a point in time.
 *
 * @param instant : Instant Construction
 * @param develop : Development Material
 * @param revamping : Revamping(Upgrade) Material
 * @author ponkotuy
 * Date: 14/02/19.
 */
case class Material(
    id: Long, memberId: Long,
    fuel: Int, ammo: Int, steel: Int, bauxite: Int,
    instant: Int, bucket: Int, develop: Int, revamping: Int,
    created: Long) {

  /** Persist this instance's values into its existing row (delegates to [[Material.save]]). */
  def save()(implicit session: DBSession = Material.autoSession): Material = Material.save(this)

  /**
   * Weighted relative difference against an incoming API snapshot.
   *
   * Resources (fuel/ammo/steel/bauxite) are scaled by 10,000 and consumables
   * (instant/bucket/develop/revamping) by 100, so a change of the same absolute
   * size weighs more heavily for the scarcer consumable items.
   */
  def diff(x: data.Material): Double = {
    import DiffCalc._
    Iterator(
      diffRatio(10000.0)(fuel, x.fuel),
      diffRatio(10000.0)(ammo, x.ammo),
      diffRatio(10000.0)(steel, x.steel),
      diffRatio(10000.0)(bauxite, x.bauxite),
      diffRatio(100.0)(instant, x.instant),
      diffRatio(100.0)(bucket, x.bucket),
      diffRatio(100.0)(develop, x.develop),
      diffRatio(100.0)(revamping, x.revamping)
    ).sum
  }

  /** Delete this row by primary key (delegates to [[Material.destroy]]). */
  def destroy()(implicit session: DBSession = Material.autoSession): Unit =
    Material.destroy(id)(session)
}

object Material extends SQLSyntaxSupport[Material] {
  lazy val m = Material.syntax("m")

  def apply(m: SyntaxProvider[Material])(rs: WrappedResultSet): Material = apply(m.resultName)(rs)

  def apply(m: ResultName[Material])(rs: WrappedResultSet): Material = autoConstruct(rs, m)

  /**
   * Update the row identified by `m.id` with the values carried by `m`.
   *
   * BUG FIX: the previous version had no WHERE clause, so it rewrote EVERY row
   * in the table, and it never called `.apply()`, so the built statement was
   * never executed at all (every other query in this object executes via
   * `.apply()`).
   *
   * @return the same instance `m`, for call chaining
   */
  def save(m: Material)(implicit session: DBSession = Material.autoSession): Material = {
    withSQL {
      update(Material).set(
        column.memberId -> m.memberId,
        column.fuel -> m.fuel,
        column.ammo -> m.ammo,
        column.steel -> m.steel,
        column.bauxite -> m.bauxite,
        column.instant -> m.instant,
        column.bucket -> m.bucket,
        column.develop -> m.develop,
        column.revamping -> m.revamping
      ).where.eq(column.id, m.id)
    }.update().apply()
    m
  }

  /**
   * Insert a new snapshot for `memberId`, stamped with the current wall-clock time.
   *
   * @return the auto-generated primary key of the inserted row
   */
  def create(m: data.Material, memberId: Long)(
      implicit session: DBSession = Material.autoSession): Long = {
    val created = System.currentTimeMillis()
    withSQL {
      insert.into(Material).namedValues(
        column.memberId -> memberId,
        column.fuel -> m.fuel,
        column.ammo -> m.ammo,
        column.steel -> m.steel,
        column.bauxite -> m.bauxite,
        column.instant -> m.instant,
        column.bucket -> m.bucket,
        column.develop -> m.develop,
        column.revamping -> m.revamping,
        column.created -> created
      )
    }.updateAndReturnGeneratedKey().apply()
  }

  /** Fetch the single most recent snapshot for the given member, if any. */
  def findByUser(memberId: Long)(
      implicit session: DBSession = Material.autoSession): Option[Material] =
    withSQL {
      select.from(Material as m)
        .where.eq(m.memberId, memberId)
        .orderBy(m.created).desc
        .limit(1)
    }.map(Material(m)).toOption().apply()

  /** All snapshots for `memberId` whose `created` falls within [from, to], oldest first. */
  def findAllByUser(memberId: Long, from: Long = 0, to: Long = Long.MaxValue)(
      implicit session: DBSession = autoSession): List[Material] = {
    withSQL {
      select.from(Material as m)
        .where.eq(m.memberId, memberId).and.between(m.created, from, to)
        .orderBy(m.created).asc
    }.map(Material(m)).list().apply()
  }

  /** Delete the row with the given primary key. */
  def destroy(id: Long)(implicit session: DBSession = autoSession): Unit = applyUpdate {
    delete.from(Material).where.eq(column.id, id)
  }
}
nekoworkshop/MyFleetGirls
server/app/models/db/Material.scala
Scala
mit
3,231
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution import org.apache.spark.rdd.RDD import org.apache.spark.sql.{execution, DataFrame, Row} import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans._ import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan, Range, Repartition, RepartitionOperation, Union} import org.apache.spark.sql.catalyst.plans.physical._ import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanHelper, DisableAdaptiveExecution} import org.apache.spark.sql.execution.aggregate.{HashAggregateExec, ObjectHashAggregateExec, SortAggregateExec} import org.apache.spark.sql.execution.columnar.{InMemoryRelation, InMemoryTableScanExec} import org.apache.spark.sql.execution.exchange.{EnsureRequirements, REPARTITION_BY_COL, ReusedExchangeExec, ShuffleExchangeExec} import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, SortMergeJoinExec} import org.apache.spark.sql.execution.reuse.ReuseExchangeAndSubquery import org.apache.spark.sql.functions._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types._ class 
PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper { import testImplicits._ setupTestData() private val EnsureRequirements = new EnsureRequirements() private def testPartialAggregationPlan(query: LogicalPlan): Unit = { val planner = spark.sessionState.planner import planner._ val plannedOption = Aggregation(query).headOption val planned = plannedOption.getOrElse( fail(s"Could query play aggregation query $query. Is it an aggregation query?")) val aggregations = planned.collect { case n if n.nodeName contains "Aggregate" => n } // For the new aggregation code path, there will be four aggregate operator for // distinct aggregations. assert( aggregations.size == 2 || aggregations.size == 4, s"The plan of query $query does not have partial aggregations.") } test("count is partially aggregated") { val query = testData.groupBy('value).agg(count('key)).queryExecution.analyzed testPartialAggregationPlan(query) } test("count distinct is partially aggregated") { val query = testData.groupBy('value).agg(count_distinct('key)).queryExecution.analyzed testPartialAggregationPlan(query) } test("mixed aggregates are partially aggregated") { val query = testData.groupBy('value).agg(count('value), count_distinct('key)).queryExecution.analyzed testPartialAggregationPlan(query) } test("mixed aggregates with same distinct columns") { def assertNoExpand(plan: SparkPlan): Unit = { assert(plan.collect { case e: ExpandExec => e }.isEmpty) } withTempView("v") { Seq((1, 1.0, 1.0), (1, 2.0, 2.0)).toDF("i", "j", "k").createTempView("v") // one distinct column val query1 = sql("SELECT sum(DISTINCT j), max(DISTINCT j) FROM v GROUP BY i") assertNoExpand(query1.queryExecution.executedPlan) // 2 distinct columns val query2 = sql("SELECT corr(DISTINCT j, k), count(DISTINCT j, k) FROM v GROUP BY i") assertNoExpand(query2.queryExecution.executedPlan) // 2 distinct columns with different order val query3 = sql("SELECT corr(DISTINCT j, k), count(DISTINCT k, j) FROM v GROUP BY i") 
assertNoExpand(query3.queryExecution.executedPlan) } } test("sizeInBytes estimation of limit operator for broadcast hash join optimization") { def checkPlan(fieldTypes: Seq[DataType]): Unit = { withTempView("testLimit") { val fields = fieldTypes.zipWithIndex.map { case (dataType, index) => StructField(s"c${index}", dataType, true) } :+ StructField("key", IntegerType, true) val schema = StructType(fields) val row = Row.fromSeq(Seq.fill(fields.size)(null)) val rowRDD = sparkContext.parallelize(row :: Nil) spark.createDataFrame(rowRDD, schema).createOrReplaceTempView("testLimit") val planned = sql( """ |SELECT l.a, l.b |FROM testData2 l JOIN (SELECT * FROM testLimit LIMIT 1) r ON (l.a = r.key) """.stripMargin).queryExecution.sparkPlan val broadcastHashJoins = planned.collect { case join: BroadcastHashJoinExec => join } val sortMergeJoins = planned.collect { case join: SortMergeJoinExec => join } assert(broadcastHashJoins.size === 1, "Should use broadcast hash join") assert(sortMergeJoins.isEmpty, "Should not use sort merge join") } } val simpleTypes = NullType :: BooleanType :: ByteType :: ShortType :: IntegerType :: LongType :: FloatType :: DoubleType :: DecimalType(10, 5) :: DecimalType.SYSTEM_DEFAULT :: DateType :: TimestampType :: StringType :: BinaryType :: Nil withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "16434") { checkPlan(simpleTypes) } val complexTypes = ArrayType(DoubleType, true) :: ArrayType(StringType, false) :: MapType(IntegerType, StringType, true) :: MapType(IntegerType, ArrayType(DoubleType), false) :: StructType(Seq( StructField("a", IntegerType, nullable = true), StructField("b", ArrayType(DoubleType), nullable = false), StructField("c", DoubleType, nullable = false))) :: Nil withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "901617") { checkPlan(complexTypes) } } test("InMemoryRelation statistics propagation") { withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "81920") { withTempView("tiny") { 
testData.limit(3).createOrReplaceTempView("tiny") sql("CACHE TABLE tiny") val a = testData.as("a") val b = spark.table("tiny").as("b") val planned = a.join(b, $"a.key" === $"b.key").queryExecution.sparkPlan val broadcastHashJoins = planned.collect { case join: BroadcastHashJoinExec => join } val sortMergeJoins = planned.collect { case join: SortMergeJoinExec => join } assert(broadcastHashJoins.size === 1, "Should use broadcast hash join") assert(sortMergeJoins.isEmpty, "Should not use shuffled hash join") spark.catalog.clearCache() } } } test("SPARK-11390 explain should print PushedFilters of PhysicalRDD") { withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "parquet") { withTempPath { file => val path = file.getCanonicalPath testData.write.parquet(path) val df = spark.read.parquet(path) df.createOrReplaceTempView("testPushed") withTempView("testPushed") { val exp = sql("select * from testPushed where key = 15").queryExecution.sparkPlan assert(exp.toString.contains("PushedFilters: [IsNotNull(key), EqualTo(key,15)]")) } } } } test("efficient terminal limit -> sort should use TakeOrderedAndProject") { val query = testData.select('key, 'value).sort('key).limit(2) val planned = query.queryExecution.executedPlan assert(planned.isInstanceOf[execution.TakeOrderedAndProjectExec]) assert(planned.output === testData.select('key, 'value).logicalPlan.output) } test("terminal limit -> project -> sort should use TakeOrderedAndProject") { val query = testData.select('key, 'value).sort('key).select('value, 'key).limit(2) val planned = query.queryExecution.executedPlan assert(planned.isInstanceOf[execution.TakeOrderedAndProjectExec]) assert(planned.output === testData.select('value, 'key).logicalPlan.output) } test("terminal limits that are not handled by TakeOrderedAndProject should use CollectLimit") { val query = testData.select('value).limit(2) val planned = query.queryExecution.sparkPlan assert(planned.isInstanceOf[CollectLimitExec]) assert(planned.output === 
testData.select('value).logicalPlan.output) } test("TakeOrderedAndProject can appear in the middle of plans") { val query = testData.select('key, 'value).sort('key).limit(2).filter('key === 3) val planned = query.queryExecution.executedPlan assert(planned.find(_.isInstanceOf[TakeOrderedAndProjectExec]).isDefined) } test("CollectLimit can appear in the middle of a plan when caching is used") { val query = testData.select('key, 'value).limit(2).cache() val planned = query.queryExecution.optimizedPlan.asInstanceOf[InMemoryRelation] assert(planned.cachedPlan.isInstanceOf[CollectLimitExec]) } test("TakeOrderedAndProjectExec appears only when number of limit is below the threshold.") { withSQLConf(SQLConf.TOP_K_SORT_FALLBACK_THRESHOLD.key -> "1000") { val query0 = testData.select('value).orderBy('key).limit(100) val planned0 = query0.queryExecution.executedPlan assert(planned0.find(_.isInstanceOf[TakeOrderedAndProjectExec]).isDefined) val query1 = testData.select('value).orderBy('key).limit(2000) val planned1 = query1.queryExecution.executedPlan assert(planned1.find(_.isInstanceOf[TakeOrderedAndProjectExec]).isEmpty) } } test("PartitioningCollection") { withTempView("normal", "small", "tiny") { testData.createOrReplaceTempView("normal") testData.limit(10).createOrReplaceTempView("small") testData.limit(3).createOrReplaceTempView("tiny") // Disable broadcast join withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { { val plan = sql( """ |SELECT * |FROM | normal JOIN small ON (normal.key = small.key) | JOIN tiny ON (small.key = tiny.key) """.stripMargin ).queryExecution.executedPlan val numExchanges = collect(plan) { case exchange: ShuffleExchangeExec => exchange }.length assert(numExchanges === 5) } { val plan = sql( """ |SELECT * |FROM | normal JOIN small ON (normal.key = small.key) | JOIN tiny ON (normal.key = tiny.key) """.stripMargin ).queryExecution.executedPlan // This second query joins on different keys: val numExchanges = collect(plan) { case exchange: 
ShuffleExchangeExec => exchange }.length assert(numExchanges === 5) } } } } test("collapse adjacent repartitions") { val doubleRepartitioned = testData.repartition(10).repartition(20).coalesce(5) def countRepartitions(plan: LogicalPlan): Int = plan.collect { case r: Repartition => r }.length assert(countRepartitions(doubleRepartitioned.queryExecution.analyzed) === 3) assert(countRepartitions(doubleRepartitioned.queryExecution.optimizedPlan) === 2) doubleRepartitioned.queryExecution.optimizedPlan match { case Repartition (numPartitions, shuffle, Repartition(_, shuffleChild, _)) => assert(numPartitions === 5) assert(shuffle === false) assert(shuffleChild) } } /////////////////////////////////////////////////////////////////////////// // Unit tests of EnsureRequirements for Exchange /////////////////////////////////////////////////////////////////////////// // When it comes to testing whether EnsureRequirements properly ensures distribution requirements, // there two dimensions that need to be considered: are the child partitionings compatible and // do they satisfy the distribution requirements? As a result, we need at least four test cases. 
private def assertDistributionRequirementsAreSatisfied(outputPlan: SparkPlan): Unit = { if (outputPlan.children.length > 1) { val childPartitionings = outputPlan.children.zip(outputPlan.requiredChildDistribution) .filter { case (_, UnspecifiedDistribution) => false case (_, _: BroadcastDistribution) => false case _ => true }.map(_._1.outputPartitioning) if (childPartitionings.map(_.numPartitions).toSet.size > 1) { fail(s"Partitionings doesn't have same number of partitions: $childPartitionings") } } outputPlan.children.zip(outputPlan.requiredChildDistribution).foreach { case (child, requiredDist) => assert(child.outputPartitioning.satisfies(requiredDist), s"$child output partitioning does not satisfy $requiredDist:\\n$outputPlan") } } test("EnsureRequirements with child partitionings with different numbers of output partitions") { val clustering = Literal(1) :: Nil val distribution = ClusteredDistribution(clustering) val inputPlan = DummySparkPlan( children = Seq( DummySparkPlan(outputPartitioning = HashPartitioning(clustering, 1)), DummySparkPlan(outputPartitioning = HashPartitioning(clustering, 2)) ), requiredChildDistribution = Seq(distribution, distribution), requiredChildOrdering = Seq(Seq.empty, Seq.empty) ) val outputPlan = EnsureRequirements.apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) } test("EnsureRequirements with compatible child partitionings that do not satisfy distribution") { val distribution = ClusteredDistribution(Literal(1) :: Nil) // The left and right inputs have compatible partitionings but they do not satisfy the // distribution because they are clustered on different columns. Thus, we need to shuffle. 
val childPartitioning = HashPartitioning(Literal(2) :: Nil, 1) assert(!childPartitioning.satisfies(distribution)) val inputPlan = DummySparkPlan( children = Seq( DummySparkPlan(outputPartitioning = childPartitioning), DummySparkPlan(outputPartitioning = childPartitioning) ), requiredChildDistribution = Seq(distribution, distribution), requiredChildOrdering = Seq(Seq.empty, Seq.empty) ) val outputPlan = EnsureRequirements.apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) if (outputPlan.collect { case e: ShuffleExchangeExec => true }.isEmpty) { fail(s"Exchange should have been added:\\n$outputPlan") } } test("EnsureRequirements with compatible child partitionings that satisfy distribution") { // In this case, all requirements are satisfied and no exchange should be added. val distribution = ClusteredDistribution(Literal(1) :: Nil) val childPartitioning = HashPartitioning(Literal(1) :: Nil, 5) assert(childPartitioning.satisfies(distribution)) val inputPlan = DummySparkPlan( children = Seq( DummySparkPlan(outputPartitioning = childPartitioning), DummySparkPlan(outputPartitioning = childPartitioning) ), requiredChildDistribution = Seq(distribution, distribution), requiredChildOrdering = Seq(Seq.empty, Seq.empty) ) val outputPlan = EnsureRequirements.apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) if (outputPlan.collect { case e: ShuffleExchangeExec => true }.nonEmpty) { fail(s"Exchange should not have been added:\\n$outputPlan") } } // This is a regression test for SPARK-9703 test("EnsureRequirements should not repartition if only ordering requirement is unsatisfied") { // Consider an operator that imposes both output distribution and ordering requirements on its // children, such as sort merge join. If the distribution requirements are satisfied but // the output ordering requirements are unsatisfied, then the planner should only add sorts and // should not need to add additional shuffles / exchanges. 
val outputOrdering = Seq(SortOrder(Literal(1), Ascending)) val distribution = ClusteredDistribution(Literal(1) :: Nil) val inputPlan = DummySparkPlan( children = Seq( DummySparkPlan(outputPartitioning = SinglePartition), DummySparkPlan(outputPartitioning = SinglePartition) ), requiredChildDistribution = Seq(distribution, distribution), requiredChildOrdering = Seq(outputOrdering, outputOrdering) ) val outputPlan = EnsureRequirements.apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) if (outputPlan.collect { case e: ShuffleExchangeExec => true }.nonEmpty) { fail(s"No Exchanges should have been added:\\n$outputPlan") } } test("EnsureRequirements eliminates Exchange if child has same partitioning") { val distribution = ClusteredDistribution(Literal(1) :: Nil) val partitioning = HashPartitioning(Literal(1) :: Nil, 5) assert(partitioning.satisfies(distribution)) val inputPlan = ShuffleExchangeExec( partitioning, DummySparkPlan(outputPartitioning = partitioning)) val outputPlan = EnsureRequirements.apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) if (outputPlan.collect { case e: ShuffleExchangeExec => true }.size == 2) { fail(s"Topmost Exchange should have been eliminated:\\n$outputPlan") } } test("EnsureRequirements does not eliminate Exchange with different partitioning") { val distribution = ClusteredDistribution(Literal(1) :: Nil) val partitioning = HashPartitioning(Literal(2) :: Nil, 5) assert(!partitioning.satisfies(distribution)) val inputPlan = ShuffleExchangeExec( partitioning, DummySparkPlan(outputPartitioning = partitioning), REPARTITION_BY_COL) val outputPlan = EnsureRequirements.apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) if (outputPlan.collect { case e: ShuffleExchangeExec => true }.size == 1) { fail(s"Topmost Exchange should not have been eliminated:\\n$outputPlan") } } test("EnsureRequirements should respect ClusteredDistribution's num partitioning") { val distribution = 
ClusteredDistribution(Literal(1) :: Nil, Some(13)) // Number of partitions differ val finalPartitioning = HashPartitioning(Literal(1) :: Nil, 13) val childPartitioning = HashPartitioning(Literal(1) :: Nil, 5) assert(!childPartitioning.satisfies(distribution)) val inputPlan = DummySparkPlan( children = DummySparkPlan(outputPartitioning = childPartitioning) :: Nil, requiredChildDistribution = Seq(distribution), requiredChildOrdering = Seq(Seq.empty)) val outputPlan = EnsureRequirements.apply(inputPlan) val shuffle = outputPlan.collect { case e: ShuffleExchangeExec => e } assert(shuffle.size === 1) assert(shuffle.head.outputPartitioning === finalPartitioning) } test("Reuse exchanges") { val distribution = ClusteredDistribution(Literal(1) :: Nil) val finalPartitioning = HashPartitioning(Literal(1) :: Nil, 5) val childPartitioning = HashPartitioning(Literal(2) :: Nil, 5) assert(!childPartitioning.satisfies(distribution)) val shuffle = ShuffleExchangeExec(finalPartitioning, DummySparkPlan( children = DummySparkPlan(outputPartitioning = childPartitioning) :: Nil, requiredChildDistribution = Seq(distribution), requiredChildOrdering = Seq(Seq.empty))) val inputPlan = SortMergeJoinExec( Literal(1) :: Nil, Literal(1) :: Nil, Inner, None, shuffle, shuffle.copy()) val outputPlan = ReuseExchangeAndSubquery.apply(inputPlan) if (outputPlan.collect { case e: ReusedExchangeExec => true }.size != 1) { fail(s"Should re-use the shuffle:\\n$outputPlan") } if (outputPlan.collect { case e: ShuffleExchangeExec => true }.size != 1) { fail(s"Should have only one shuffle:\\n$outputPlan") } // nested exchanges val inputPlan2 = SortMergeJoinExec( Literal(1) :: Nil, Literal(1) :: Nil, Inner, None, ShuffleExchangeExec(finalPartitioning, inputPlan), ShuffleExchangeExec(finalPartitioning, inputPlan)) val outputPlan2 = ReuseExchangeAndSubquery.apply(inputPlan2) if (outputPlan2.collect { case e: ReusedExchangeExec => true }.size != 2) { fail(s"Should re-use the two shuffles:\\n$outputPlan2") } if 
(outputPlan2.collect { case e: ShuffleExchangeExec => true }.size != 2) { fail(s"Should have only two shuffles:\\n$outputPlan") } } /////////////////////////////////////////////////////////////////////////// // Unit tests of EnsureRequirements for Sort /////////////////////////////////////////////////////////////////////////// private val exprA = Literal(1) private val exprB = Literal(2) private val exprC = Literal(3) private val orderingA = SortOrder(exprA, Ascending) private val orderingB = SortOrder(exprB, Ascending) private val orderingC = SortOrder(exprC, Ascending) private val planA = DummySparkPlan(outputOrdering = Seq(orderingA), outputPartitioning = HashPartitioning(exprA :: Nil, 5)) private val planB = DummySparkPlan(outputOrdering = Seq(orderingB), outputPartitioning = HashPartitioning(exprB :: Nil, 5)) private val planC = DummySparkPlan(outputOrdering = Seq(orderingC), outputPartitioning = HashPartitioning(exprC :: Nil, 5)) assert(orderingA != orderingB && orderingA != orderingC && orderingB != orderingC) private def assertSortRequirementsAreSatisfied( childPlan: SparkPlan, requiredOrdering: Seq[SortOrder], shouldHaveSort: Boolean): Unit = { val inputPlan = DummySparkPlan( children = childPlan :: Nil, requiredChildOrdering = Seq(requiredOrdering), requiredChildDistribution = Seq(UnspecifiedDistribution) ) val outputPlan = EnsureRequirements.apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) if (shouldHaveSort) { if (outputPlan.collect { case s: SortExec => true }.isEmpty) { fail(s"Sort should have been added:\\n$outputPlan") } } else { if (outputPlan.collect { case s: SortExec => true }.nonEmpty) { fail(s"No sorts should have been added:\\n$outputPlan") } } } test("EnsureRequirements skips sort when either side of join keys is required after inner SMJ") { Seq(Inner, Cross).foreach { joinType => val innerSmj = SortMergeJoinExec(exprA :: Nil, exprB :: Nil, joinType, None, planA, planB) // Both left and right keys should be sorted 
after the SMJ. Seq(orderingA, orderingB).foreach { ordering => assertSortRequirementsAreSatisfied( childPlan = innerSmj, requiredOrdering = Seq(ordering), shouldHaveSort = false) } } } test("EnsureRequirements skips sort when key order of a parent SMJ is propagated from its " + "child SMJ") { Seq(Inner, Cross).foreach { joinType => val childSmj = SortMergeJoinExec(exprA :: Nil, exprB :: Nil, joinType, None, planA, planB) val parentSmj = SortMergeJoinExec(exprB :: Nil, exprC :: Nil, joinType, None, childSmj, planC) // After the second SMJ, exprA, exprB and exprC should all be sorted. Seq(orderingA, orderingB, orderingC).foreach { ordering => assertSortRequirementsAreSatisfied( childPlan = parentSmj, requiredOrdering = Seq(ordering), shouldHaveSort = false) } } } test("EnsureRequirements for sort operator after left outer sort merge join") { // Only left key is sorted after left outer SMJ (thus doesn't need a sort). val leftSmj = SortMergeJoinExec(exprA :: Nil, exprB :: Nil, LeftOuter, None, planA, planB) Seq((orderingA, false), (orderingB, true)).foreach { case (ordering, needSort) => assertSortRequirementsAreSatisfied( childPlan = leftSmj, requiredOrdering = Seq(ordering), shouldHaveSort = needSort) } } test("EnsureRequirements for sort operator after right outer sort merge join") { // Only right key is sorted after right outer SMJ (thus doesn't need a sort). val rightSmj = SortMergeJoinExec(exprA :: Nil, exprB :: Nil, RightOuter, None, planA, planB) Seq((orderingA, true), (orderingB, false)).foreach { case (ordering, needSort) => assertSortRequirementsAreSatisfied( childPlan = rightSmj, requiredOrdering = Seq(ordering), shouldHaveSort = needSort) } } test("EnsureRequirements adds sort after full outer sort merge join") { // Neither keys is sorted after full outer SMJ, so they both need sorts. 
val fullSmj = SortMergeJoinExec(exprA :: Nil, exprB :: Nil, FullOuter, None, planA, planB) Seq(orderingA, orderingB).foreach { ordering => assertSortRequirementsAreSatisfied( childPlan = fullSmj, requiredOrdering = Seq(ordering), shouldHaveSort = true) } } test("EnsureRequirements adds sort when there is no existing ordering") { assertSortRequirementsAreSatisfied( childPlan = DummySparkPlan(outputOrdering = Seq.empty), requiredOrdering = Seq(orderingB), shouldHaveSort = true) } test("EnsureRequirements skips sort when required ordering is prefix of existing ordering") { assertSortRequirementsAreSatisfied( childPlan = DummySparkPlan(outputOrdering = Seq(orderingA, orderingB)), requiredOrdering = Seq(orderingA), shouldHaveSort = false) } test("EnsureRequirements skips sort when required ordering is semantically equal to " + "existing ordering") { val exprId: ExprId = NamedExpression.newExprId val attribute1 = AttributeReference( name = "col1", dataType = LongType, nullable = false ) (exprId = exprId, qualifier = Seq("col1_qualifier") ) val attribute2 = AttributeReference( name = "col1", dataType = LongType, nullable = false ) (exprId = exprId) val orderingA1 = SortOrder(attribute1, Ascending) val orderingA2 = SortOrder(attribute2, Ascending) assert(orderingA1 != orderingA2, s"$orderingA1 should NOT equal to $orderingA2") assert(orderingA1.semanticEquals(orderingA2), s"$orderingA1 should be semantically equal to $orderingA2") assertSortRequirementsAreSatisfied( childPlan = DummySparkPlan(outputOrdering = Seq(orderingA1)), requiredOrdering = Seq(orderingA2), shouldHaveSort = false) } // This is a regression test for SPARK-11135 test("EnsureRequirements adds sort when required ordering isn't a prefix of existing ordering") { assertSortRequirementsAreSatisfied( childPlan = DummySparkPlan(outputOrdering = Seq(orderingA)), requiredOrdering = Seq(orderingA, orderingB), shouldHaveSort = true) } test("SPARK-24242: RangeExec should have correct output ordering and 
partitioning") { val df = spark.range(10) val rangeExec = df.queryExecution.executedPlan.collect { case r: RangeExec => r } val range = df.queryExecution.optimizedPlan.collect { case r: Range => r } assert(rangeExec.head.outputOrdering == range.head.outputOrdering) assert(rangeExec.head.outputPartitioning == RangePartitioning(rangeExec.head.outputOrdering, df.rdd.getNumPartitions)) val rangeInOnePartition = spark.range(1, 10, 1, 1) val rangeExecInOnePartition = rangeInOnePartition.queryExecution.executedPlan.collect { case r: RangeExec => r } assert(rangeExecInOnePartition.head.outputPartitioning == SinglePartition) val rangeInZeroPartition = spark.range(-10, -9, -20, 1) val rangeExecInZeroPartition = rangeInZeroPartition.queryExecution.executedPlan.collect { case r: RangeExec => r } assert(rangeExecInZeroPartition.head.outputPartitioning == UnknownPartitioning(0)) } test("SPARK-24495: EnsureRequirements can return wrong plan when reusing the same key in join") { val plan1 = DummySparkPlan(outputOrdering = Seq(orderingA), outputPartitioning = HashPartitioning(exprA :: exprA :: Nil, 5)) val plan2 = DummySparkPlan(outputOrdering = Seq(orderingB), outputPartitioning = HashPartitioning(exprB :: Nil, 5)) val smjExec = SortMergeJoinExec( exprA :: exprA :: Nil, exprB :: exprC :: Nil, Inner, None, plan1, plan2) val outputPlan = EnsureRequirements.apply(smjExec) outputPlan match { case SortMergeJoinExec(leftKeys, rightKeys, _, _, _, _, _) => assert(leftKeys == Seq(exprA, exprA)) assert(rightKeys == Seq(exprB, exprC)) case _ => fail() } } test("SPARK-27485: EnsureRequirements.reorder should handle duplicate expressions") { val plan1 = DummySparkPlan( outputPartitioning = HashPartitioning(exprA :: exprB :: exprA :: Nil, 5)) val plan2 = DummySparkPlan() val smjExec = SortMergeJoinExec( leftKeys = exprA :: exprB :: exprB :: Nil, rightKeys = exprA :: exprC :: exprC :: Nil, joinType = Inner, condition = None, left = plan1, right = plan2) val outputPlan = 
EnsureRequirements.apply(smjExec) outputPlan match { case SortMergeJoinExec(leftKeys, rightKeys, _, _, SortExec(_, _, ShuffleExchangeExec(HashPartitioning(leftPartitioningExpressions, _), _, _), _), SortExec(_, _, ShuffleExchangeExec(HashPartitioning(rightPartitioningExpressions, _), _, _), _), _) => assert(leftKeys === smjExec.leftKeys) assert(rightKeys === smjExec.rightKeys) assert(leftKeys === leftPartitioningExpressions) assert(rightKeys === rightPartitioningExpressions) case _ => fail(outputPlan.toString) } } test("SPARK-24500: create union with stream of children") { val df = Union(Stream( Range(1, 1, 1, 1), Range(1, 2, 1, 1))) df.queryExecution.executedPlan.execute() } test("SPARK-25278: physical nodes should be different instances for same logical nodes") { val range = Range(1, 1, 1, 1) val df = Union(range, range) val ranges = df.queryExecution.optimizedPlan.collect { case r: Range => r } assert(ranges.length == 2) val execRanges = df.queryExecution.sparkPlan.collect { case r: RangeExec => r } assert(execRanges.length == 2) // Ensure the two RangeExec instances are different instances assert(!execRanges.head.eq(execRanges.last)) } test("SPARK-24556: always rewrite output partitioning in ReusedExchangeExec " + "and InMemoryTableScanExec", DisableAdaptiveExecution("Reuse is dynamic in AQE")) { def checkOutputPartitioningRewrite( plans: Seq[SparkPlan], expectedPartitioningClass: Class[_]): Unit = { assert(plans.size == 1) val plan = plans.head val partitioning = plan.outputPartitioning assert(partitioning.getClass == expectedPartitioningClass) val partitionedAttrs = partitioning.asInstanceOf[Expression].references assert(partitionedAttrs.subsetOf(plan.outputSet)) } def checkReusedExchangeOutputPartitioningRewrite( df: DataFrame, expectedPartitioningClass: Class[_]): Unit = { val reusedExchange = collect(df.queryExecution.executedPlan) { case r: ReusedExchangeExec => r } checkOutputPartitioningRewrite(reusedExchange, expectedPartitioningClass) } def 
checkInMemoryTableScanOutputPartitioningRewrite( df: DataFrame, expectedPartitioningClass: Class[_]): Unit = { val inMemoryScan = collect(df.queryExecution.executedPlan) { case m: InMemoryTableScanExec => m } checkOutputPartitioningRewrite(inMemoryScan, expectedPartitioningClass) } // when enable AQE, the reusedExchange is inserted when executed. withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { // ReusedExchange is HashPartitioning val df1 = Seq(1 -> "a", 2 -> "b").toDF("i", "j").repartition($"i") val df2 = Seq(1 -> "a", 2 -> "b").toDF("i", "j").repartition($"i") checkReusedExchangeOutputPartitioningRewrite(df1.union(df2), classOf[HashPartitioning]) // ReusedExchange is RangePartitioning val df3 = Seq(1 -> "a", 2 -> "b").toDF("i", "j").orderBy($"i") val df4 = Seq(1 -> "a", 2 -> "b").toDF("i", "j").orderBy($"i") checkReusedExchangeOutputPartitioningRewrite(df3.union(df4), classOf[RangePartitioning]) // InMemoryTableScan is HashPartitioning Seq(1 -> "a", 2 -> "b").toDF("i", "j").repartition($"i").persist() checkInMemoryTableScanOutputPartitioningRewrite( Seq(1 -> "a", 2 -> "b").toDF("i", "j").repartition($"i"), classOf[HashPartitioning]) // InMemoryTableScan is RangePartitioning spark.range(1, 100, 1, 10).toDF().persist() checkInMemoryTableScanOutputPartitioningRewrite( spark.range(1, 100, 1, 10).toDF(), classOf[RangePartitioning]) } // InMemoryTableScan is PartitioningCollection withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { Seq(1 -> "a", 2 -> "b").toDF("i", "j") .join(Seq(1 -> "a", 2 -> "b").toDF("m", "n"), $"i" === $"m").persist() checkInMemoryTableScanOutputPartitioningRewrite( Seq(1 -> "a", 2 -> "b").toDF("i", "j") .join(Seq(1 -> "a", 2 -> "b").toDF("m", "n"), $"i" === $"m"), classOf[PartitioningCollection]) } } test("SPARK-26812: wrong nullability for complex datatypes in union") { def testUnionOutputType(input1: DataType, input2: DataType, output: DataType): Unit = { val query = Union( LocalRelation(StructField("a", input1)), 
LocalRelation(StructField("a", input2))) assert(query.output.head.dataType == output) } // Map testUnionOutputType( MapType(StringType, StringType, valueContainsNull = false), MapType(StringType, StringType, valueContainsNull = true), MapType(StringType, StringType, valueContainsNull = true)) testUnionOutputType( MapType(StringType, StringType, valueContainsNull = true), MapType(StringType, StringType, valueContainsNull = false), MapType(StringType, StringType, valueContainsNull = true)) testUnionOutputType( MapType(StringType, StringType, valueContainsNull = false), MapType(StringType, StringType, valueContainsNull = false), MapType(StringType, StringType, valueContainsNull = false)) // Array testUnionOutputType( ArrayType(StringType, containsNull = false), ArrayType(StringType, containsNull = true), ArrayType(StringType, containsNull = true)) testUnionOutputType( ArrayType(StringType, containsNull = true), ArrayType(StringType, containsNull = false), ArrayType(StringType, containsNull = true)) testUnionOutputType( ArrayType(StringType, containsNull = false), ArrayType(StringType, containsNull = false), ArrayType(StringType, containsNull = false)) // Struct testUnionOutputType( StructType(Seq( StructField("f1", StringType, nullable = false), StructField("f2", StringType, nullable = true), StructField("f3", StringType, nullable = false))), StructType(Seq( StructField("f1", StringType, nullable = true), StructField("f2", StringType, nullable = false), StructField("f3", StringType, nullable = false))), StructType(Seq( StructField("f1", StringType, nullable = true), StructField("f2", StringType, nullable = true), StructField("f3", StringType, nullable = false)))) } test("Do not analyze subqueries twice") { // Analyzing the subquery twice will result in stacked // CheckOverflow & PromotePrecision expressions. 
val df = sql( """ |SELECT id, | (SELECT 1.3000000 * AVG(CAST(id AS DECIMAL(10, 3))) FROM range(13)) AS ref |FROM range(5) |""".stripMargin) val Seq(subquery) = stripAQEPlan(df.queryExecution.executedPlan).subqueriesAll subquery.foreach { node => node.expressions.foreach { expression => expression.foreach { case PromotePrecision(_: PromotePrecision) => fail(s"$expression contains stacked PromotePrecision expressions.") case CheckOverflow(_: CheckOverflow, _, _) => fail(s"$expression contains stacked CheckOverflow expressions.") case _ => // Ok } } } } test("aliases in the project should not introduce extra shuffle") { withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { withTempView("df1", "df2") { spark.range(10).selectExpr("id AS key", "0").repartition($"key").createTempView("df1") spark.range(20).selectExpr("id AS key", "0").repartition($"key").createTempView("df2") val planned = sql( """ |SELECT * FROM | (SELECT key AS k from df1) t1 |INNER JOIN | (SELECT key AS k from df2) t2 |ON t1.k = t2.k """.stripMargin).queryExecution.executedPlan val exchanges = collect(planned) { case s: ShuffleExchangeExec => s } assert(exchanges.size == 2) } } } test("SPARK-33399: aliases should be handled properly in PartitioningCollection output" + " partitioning") { withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { withTempView("t1", "t2", "t3") { spark.range(10).repartition($"id").createTempView("t1") spark.range(20).repartition($"id").createTempView("t2") spark.range(30).repartition($"id").createTempView("t3") val planned = sql( """ |SELECT t3.id as t3id |FROM ( | SELECT t1.id as t1id, t2.id as t2id | FROM t1, t2 | WHERE t1.id = t2.id |) t12, t3 |WHERE t1id = t3.id """.stripMargin).queryExecution.executedPlan val exchanges = collect(planned) { case s: ShuffleExchangeExec => s } assert(exchanges.size == 3) val projects = collect(planned) { case p: ProjectExec => p } assert(projects.exists(_.outputPartitioning match { case HashPartitioning(Seq(k1: 
AttributeReference), _) if k1.name == "t1id" => true case _ => false })) } } } test("SPARK-33399: aliases should be handled properly in HashPartitioning") { withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { withTempView("t1", "t2", "t3") { spark.range(10).repartition($"id").createTempView("t1") spark.range(20).repartition($"id").createTempView("t2") spark.range(30).repartition($"id").createTempView("t3") val planned = sql( """ |SELECT t1id, t3.id as t3id |FROM ( | SELECT t1.id as t1id | FROM t1 LEFT SEMI JOIN t2 | ON t1.id = t2.id |) t12 INNER JOIN t3 |WHERE t1id = t3.id """.stripMargin).queryExecution.executedPlan val exchanges = collect(planned) { case s: ShuffleExchangeExec => s } assert(exchanges.size == 3) val projects = collect(planned) { case p: ProjectExec => p } assert(projects.exists(_.outputPartitioning match { case HashPartitioning(Seq(a: AttributeReference), _) => a.name == "t1id" case _ => false })) } } } test("SPARK-33399: alias handling should happen properly for RangePartitioning") { withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { val df = spark.range(1, 100) .select(col("id").as("id1")).groupBy("id1").count() // Plan for this will be Range -> ProjectWithAlias -> HashAggregate -> HashAggregate // if Project normalizes alias in its Range outputPartitioning, then no Exchange should come // in between HashAggregates val planned = df.queryExecution.executedPlan val exchanges = collect(planned) { case s: ShuffleExchangeExec => s } assert(exchanges.isEmpty) val projects = collect(planned) { case p: ProjectExec => p } assert(projects.exists(_.outputPartitioning match { case RangePartitioning(Seq(SortOrder(ar: AttributeReference, _, _, _)), _) => ar.name == "id1" case _ => false })) } } test("SPARK-33399: aliased should be handled properly " + "for partitioning and sortorder involving complex expressions") { withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { withTempView("t1", "t2", "t3") { 
spark.range(10).select(col("id").as("id1")).createTempView("t1") spark.range(20).select(col("id").as("id2")).createTempView("t2") spark.range(30).select(col("id").as("id3")).createTempView("t3") val planned = sql( """ |SELECT t3.id3 as t3id |FROM ( | SELECT t1.id1 as t1id, t2.id2 as t2id | FROM t1, t2 | WHERE t1.id1 * 10 = t2.id2 * 10 |) t12, t3 |WHERE t1id * 10 = t3.id3 * 10 """.stripMargin).queryExecution.executedPlan val sortNodes = collect(planned) { case s: SortExec => s } assert(sortNodes.size == 3) val exchangeNodes = collect(planned) { case e: ShuffleExchangeExec => e } assert(exchangeNodes.size == 3) val projects = collect(planned) { case p: ProjectExec => p } assert(projects.exists(_.outputPartitioning match { case HashPartitioning(Seq(Multiply(ar1: AttributeReference, _, _)), _) => ar1.name == "t1id" case _ => false })) } } } test("SPARK-33399: alias handling should happen properly for SinglePartition") { withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { val df = spark.range(1, 100, 1, 1) .select(col("id").as("id1")).groupBy("id1").count() val planned = df.queryExecution.executedPlan val exchanges = collect(planned) { case s: ShuffleExchangeExec => s } assert(exchanges.isEmpty) val projects = collect(planned) { case p: ProjectExec => p } assert(projects.exists(_.outputPartitioning match { case SinglePartition => true case _ => false })) } } test("SPARK-33399: No extra exchanges in case of" + " [Inner Join -> Project with aliases -> HashAggregate]") { withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { withTempView("t1", "t2") { spark.range(10).repartition($"id").createTempView("t1") spark.range(20).repartition($"id").createTempView("t2") val planned = sql( """ |SELECT t1id, t2id |FROM ( | SELECT t1.id as t1id, t2.id as t2id | FROM t1 INNER JOIN t2 | WHERE t1.id = t2.id |) t12 |GROUP BY t1id, t2id """.stripMargin).queryExecution.executedPlan val exchanges = collect(planned) { case s: ShuffleExchangeExec => s } 
assert(exchanges.size == 2) val projects = collect(planned) { case p: ProjectExec => p } assert(projects.exists(_.outputPartitioning match { case PartitioningCollection(Seq(HashPartitioning(Seq(k1: AttributeReference), _), HashPartitioning(Seq(k2: AttributeReference), _))) => k1.name == "t1id" && k2.name == "t2id" case _ => false })) } } } test("SPARK-33400: Normalization of sortOrder should take care of sameOrderExprs") { withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { withTempView("t1", "t2", "t3") { spark.range(10).repartition($"id").createTempView("t1") spark.range(20).repartition($"id").createTempView("t2") spark.range(30).repartition($"id").createTempView("t3") val planned = sql( """ |SELECT t2id, t3.id as t3id |FROM ( | SELECT t1.id as t1id, t2.id as t2id | FROM t1, t2 | WHERE t1.id = t2.id |) t12, t3 |WHERE t2id = t3.id """.stripMargin).queryExecution.executedPlan val sortNodes = collect(planned) { case s: SortExec => s } assert(sortNodes.size == 3) val projects = collect(planned) { case p: ProjectExec => p } assert(projects.exists(_.outputOrdering match { case Seq(SortOrder(_, Ascending, NullsFirst, sameOrderExprs)) => sameOrderExprs.size == 1 && sameOrderExprs.head.isInstanceOf[AttributeReference] && sameOrderExprs.head.asInstanceOf[AttributeReference].name == "t2id" case _ => false })) } } } test("sort order doesn't have repeated expressions") { withSQLConf( SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1", SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") { withTempView("t1", "t2") { spark.range(10).repartition($"id").createTempView("t1") spark.range(20).repartition($"id").createTempView("t2") val planned = sql( """ | SELECT t12.id, t1.id | FROM (SELECT t1.id FROM t1, t2 WHERE t1.id * 2 = t2.id) t12, t1 | where 2 * t12.id = t1.id """.stripMargin).queryExecution.executedPlan // t12 is already sorted on `t1.id * 2`. and we need to sort it on `2 * t12.id` // for 2nd join. 
So sorting on t12 can be avoided val sortNodes = planned.collect { case s: SortExec => s } assert(sortNodes.size == 3) val outputOrdering = planned.outputOrdering assert(outputOrdering.size == 1) // Sort order should have 3 childrens, not 4. This is because t1.id*2 and 2*t1.id are same assert(outputOrdering.head.children.size == 3) assert(outputOrdering.head.children.count(_.isInstanceOf[AttributeReference]) == 2) assert(outputOrdering.head.children.count(_.isInstanceOf[Multiply]) == 1) } } } test("aliases to expressions should not be replaced") { withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { withTempView("df1", "df2") { spark.range(10).selectExpr("id AS key", "0").repartition($"key").createTempView("df1") spark.range(20).selectExpr("id AS key", "0").repartition($"key").createTempView("df2") val planned = sql( """ |SELECT * FROM | (SELECT key + 1 AS k1 from df1) t1 |INNER JOIN | (SELECT key + 1 AS k2 from df2) t2 |ON t1.k1 = t2.k2 |""".stripMargin).queryExecution.executedPlan val exchanges = collect(planned) { case s: ShuffleExchangeExec => s } // Make sure aliases to an expression (key + 1) are not replaced. 
Seq("k1", "k2").foreach { alias => assert(exchanges.exists(_.outputPartitioning match { case HashPartitioning(Seq(a: AttributeReference), _) => a.name == alias case _ => false })) } } } } test("aliases in the aggregate expressions should not introduce extra shuffle") { withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { val t1 = spark.range(10).selectExpr("floor(id/4) as k1") val t2 = spark.range(20).selectExpr("floor(id/4) as k2") val agg1 = t1.groupBy("k1").agg(count(lit("1")).as("cnt1")) val agg2 = t2.groupBy("k2").agg(count(lit("1")).as("cnt2")).withColumnRenamed("k2", "k3") val planned = agg1.join(agg2, $"k1" === $"k3").queryExecution.executedPlan assert(collect(planned) { case h: HashAggregateExec => h }.nonEmpty) val exchanges = collect(planned) { case s: ShuffleExchangeExec => s } assert(exchanges.size == 2) } } test("aliases in the object hash/sort aggregate expressions should not introduce extra shuffle") { withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { Seq(true, false).foreach { useObjectHashAgg => withSQLConf(SQLConf.USE_OBJECT_HASH_AGG.key -> useObjectHashAgg.toString) { val t1 = spark.range(10).selectExpr("floor(id/4) as k1") val t2 = spark.range(20).selectExpr("floor(id/4) as k2") val agg1 = t1.groupBy("k1").agg(collect_list("k1")) val agg2 = t2.groupBy("k2").agg(collect_list("k2")).withColumnRenamed("k2", "k3") val planned = agg1.join(agg2, $"k1" === $"k3").queryExecution.executedPlan if (useObjectHashAgg) { assert(collect(planned) { case o: ObjectHashAggregateExec => o }.nonEmpty) } else { assert(collect(planned) { case s: SortAggregateExec => s }.nonEmpty) } val exchanges = collect(planned) { case s: ShuffleExchangeExec => s } assert(exchanges.size == 2) } } } } test("aliases in the sort aggregate expressions should not introduce extra sort") { withSQLConf( SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1", SQLConf.USE_OBJECT_HASH_AGG.key -> "false") { val t1 = spark.range(10).selectExpr("floor(id/4) as k1") val t2 = 
spark.range(20).selectExpr("floor(id/4) as k2") val agg1 = t1.groupBy("k1").agg(collect_list("k1")).withColumnRenamed("k1", "k3") val agg2 = t2.groupBy("k2").agg(collect_list("k2")) val planned = agg1.join(agg2, $"k3" === $"k2").queryExecution.executedPlan assert(collect(planned) { case s: SortAggregateExec => s }.nonEmpty) // We expect two SortExec nodes on each side of join. val sorts = collect(planned) { case s: SortExec => s } assert(sorts.size == 4) } } testWithWholeStageCodegenOnAndOff("Change the number of partitions to zero " + "when a range is empty") { _ => val range = spark.range(1, 1, 1, 1000) val numPartitions = range.rdd.getNumPartitions assert(numPartitions == 0) } test("SPARK-33758: Prune unnecessary output partitioning") { withSQLConf( SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1", SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") { withTempView("t1", "t2") { spark.range(10).repartition($"id").createTempView("t1") spark.range(20).repartition($"id").createTempView("t2") val planned = sql( """ | SELECT t1.id as t1id, t2.id as t2id | FROM t1, t2 | WHERE t1.id = t2.id """.stripMargin).queryExecution.executedPlan assert(planned.outputPartitioning match { case PartitioningCollection(Seq(HashPartitioning(Seq(k1: AttributeReference), _), HashPartitioning(Seq(k2: AttributeReference), _))) => k1.name == "t1id" && k2.name == "t2id" }) val planned2 = sql( """ | SELECT t1.id as t1id | FROM t1, t2 | WHERE t1.id = t2.id """.stripMargin).queryExecution.executedPlan assert(planned2.outputPartitioning match { case HashPartitioning(Seq(k1: AttributeReference), _) if k1.name == "t1id" => true }) } } } test("SPARK-34919: Change partitioning to SinglePartition if partition number is 1") { def checkSinglePartitioning(df: DataFrame): Unit = { assert( df.queryExecution.analyzed.collect { case r: RepartitionOperation => r }.size == 1) assert( collect(df.queryExecution.executedPlan) { case s: ShuffleExchangeExec if s.outputPartitioning == SinglePartition => s }.size == 
1) } checkSinglePartitioning(sql("SELECT /*+ REPARTITION(1) */ * FROM VALUES(1),(2),(3) AS t(c)")) checkSinglePartitioning(sql("SELECT /*+ REPARTITION(1, c) */ * FROM VALUES(1),(2),(3) AS t(c)")) } } // Used for unit-testing EnsureRequirements private case class DummySparkPlan( override val children: Seq[SparkPlan] = Nil, override val outputOrdering: Seq[SortOrder] = Nil, override val outputPartitioning: Partitioning = UnknownPartitioning(0), override val requiredChildDistribution: Seq[Distribution] = Nil, override val requiredChildOrdering: Seq[Seq[SortOrder]] = Nil ) extends SparkPlan { override protected def doExecute(): RDD[InternalRow] = throw new UnsupportedOperationException override def output: Seq[Attribute] = Seq.empty override protected def withNewChildrenInternal(newChildren: IndexedSeq[SparkPlan]): SparkPlan = copy(children = newChildren) }
hvanhovell/spark
sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala
Scala
apache-2.0
53,116
package io.questions.model.questionnaire

import java.time._

import io.questions.QuestionsSpec
import io.questions.model.questionnaire.PrimitiveAnswer.{ DateTimeAnswer, EnumerationAnswer, StringAnswer }
import org.scalacheck.Gen

/** Property-based tests for [[PrimitiveAnswer]]:
  *
  *   - `valueEquals` / `typeCheck` behaviour across answer types, including
  *     the deliberate String/Enumeration cross-compatibility used when
  *     building predicates;
  *   - the `DateTimeAnswer` conversion accessors (`asLocalDate`,
  *     `asLocalTime`, `asLocalDateTime`, `asOffsetTime`, `asZonedDateTime`).
  *
  * NOTE(review): the generators used below (`primitiveAnswerGen`,
  * `stringAnswerGen`, `enumerationAnswerGen`, `dateTimeAnswerGen`,
  * `localDateGen`, `localTimeGen`, `zoneTimeGen`) are not defined in this
  * file — presumably inherited from `QuestionsSpec`; confirm there.
  */
class PrimitiveAnswerSpec extends QuestionsSpec {
  "valueEquals" - {
    "answers equal as expected" in {
      forAll(primitiveAnswerGen, primitiveAnswerGen) { (a1, a2) ⇒
        // only exercise pairs of *different* concrete answer types
        whenever(a1.getClass !== a2.getClass) {
          // every answer is value-equal to itself
          PrimitiveAnswer.valueEquals(a1, a1).right.value mustBe true
          PrimitiveAnswer.valueEquals(a2, a2).right.value mustBe true
          // we must consider a special case used for predicate construction
          (a1, a2) match {
            case (s1: StringAnswer, s2: EnumerationAnswer) ⇒
              PrimitiveAnswer.valueEquals(a1, a2).right.value mustBe (s1.answer === s2.answer)
            case (s1: EnumerationAnswer, s2: StringAnswer) ⇒
              PrimitiveAnswer.valueEquals(a1, a2).right.value mustBe (s1.answer === s2.answer)
            case _ ⇒
              // any other mixed-type comparison is reported as an error
              PrimitiveAnswer.valueEquals(a1, a2).left.value mustBe s"Mismatched types in valueEquals: ${a1.getClass} && ${a2.getClass}"
          }
        }
      }
    }

    "string and enumeration answers can be compared via valueEquals to facilitate predicate construction" in {
      forAll(stringAnswerGen, enumerationAnswerGen) { (a1, a2) ⇒
        // the String/Enumeration comparison is symmetric
        PrimitiveAnswer.valueEquals(a1, a2).right.value mustBe (a1.answer === a2.answer)
        PrimitiveAnswer.valueEquals(a2, a1).right.value mustBe (a2.answer === a1.answer)
      }
    }

    "DateTime answers" - {
      "Two date time answers with different values are not equal" in {
        forAll(dateTimeAnswerGen, dateTimeAnswerGen) { (a1, a2) ⇒
          PrimitiveAnswer.valueEquals(a1, a2).right.value mustBe (a1 === a2)
        }
      }
    }
  }

  "typeCheck" - {
    "answers typecheck as expected" in {
      forAll(primitiveAnswerGen, primitiveAnswerGen) { (a1, a2) ⇒
        whenever(a1.getClass !== a2.getClass) {
          // an answer always typechecks against its own type
          PrimitiveAnswer.typeCheck(a1, a1).right.value mustBe true
          PrimitiveAnswer.typeCheck(a2, a2).right.value mustBe true
          // we must consider a special case used for predicate construction
          (a1, a2) match {
            case (_: StringAnswer, _: EnumerationAnswer) ⇒
              PrimitiveAnswer.typeCheck(a1, a2).right.value mustBe true
            case (_: EnumerationAnswer, _: StringAnswer) ⇒
              PrimitiveAnswer.typeCheck(a1, a2).right.value mustBe true
            case _ ⇒
              PrimitiveAnswer.typeCheck(a1, a2).left.value mustBe s"Mismatched types in typeCheck: ${a1.getClass} && ${a2.getClass}"
          }
        }
      }
    }

    "string and enumeration answers typecheck to facilitate predicate construction" in {
      forAll(stringAnswerGen, enumerationAnswerGen) { (a1, a2) ⇒
        PrimitiveAnswer.typeCheck(a1, a2).right.value mustBe true
        PrimitiveAnswer.typeCheck(a2, a1).right.value mustBe true
      }
    }
  }

  "DateTimeAnswer" - {
    // DateTimeAnswer is built here from an optional date, optional time and
    // optional zone; each accessor below is Left unless the parts it needs
    // are present.
    "asLocalDate" - {
      "is left if we have no local date" in {
        forAll(Gen.option(localTimeGen), Gen.option(zoneTimeGen)) { (time, zone) ⇒
          val answer = DateTimeAnswer(None, time, zone)
          answer.asLocalDate.left.value mustBe "No LocalDate available"
        }
      }

      "is right if we have a local date, and returns the local date" in {
        forAll(localDateGen, Gen.option(localTimeGen), Gen.option(zoneTimeGen)) { (date, time, zone) ⇒
          val answer = DateTimeAnswer(Some(date), time, zone)
          answer.asLocalDate.right.value mustBe date
        }
      }
    }

    "asLocalTime" - {
      "is left if we have no local time" in {
        forAll(Gen.option(localDateGen), Gen.option(zoneTimeGen)) { (date, zone) ⇒
          val answer = DateTimeAnswer(date, None, zone)
          answer.asLocalTime.left.value mustBe "No LocalTime available"
        }
      }

      "is right if we have a local time, and returns the local time" in {
        forAll(Gen.option(localDateGen), localTimeGen, Gen.option(zoneTimeGen)) { (date, time, zone) ⇒
          val answer = DateTimeAnswer(date, Some(time), zone)
          answer.asLocalTime.right.value mustBe time
        }
      }
    }

    "asLocalDateTime" - {
      "is left if we have no local date" in {
        forAll(Gen.option(localTimeGen), Gen.option(zoneTimeGen)) { (time, zone) ⇒
          val answer = DateTimeAnswer(None, time, zone)
          answer.asLocalDateTime.left.value mustBe "Can't construct a LocalDateTime without a LocalDate or a LocalTime"
        }
      }

      "is left if we have no local time" in {
        forAll(Gen.option(localDateGen), Gen.option(zoneTimeGen)) { (date, zone) ⇒
          val answer = DateTimeAnswer(date, None, zone)
          answer.asLocalDateTime.left.value mustBe "Can't construct a LocalDateTime without a LocalDate or a LocalTime"
        }
      }

      "is right if we have a local date an time, and returns the local date time" in {
        forAll(localDateGen, localTimeGen, Gen.option(zoneTimeGen)) { (date, time, zone) ⇒
          val answer = DateTimeAnswer(Some(date), Some(time), zone)
          answer.asLocalDateTime.right.value mustBe LocalDateTime.of(date, time)
        }
      }
    }

    "asOffsetTime" - {
      "is left if we have no local date" in {
        forAll(Gen.option(localTimeGen), Gen.option(zoneTimeGen)) { (time, zone) ⇒
          val answer = DateTimeAnswer(None, time, zone)
          answer.asOffsetTime.left.value mustBe "Can't construct an OffsetTime without a LocalDate, LocalTime, or a ZoneId"
        }
      }

      "is left if we have no local time" in {
        forAll(Gen.option(localDateGen), Gen.option(zoneTimeGen)) { (date, zone) ⇒
          val answer = DateTimeAnswer(date, None, zone)
          answer.asOffsetTime.left.value mustBe "Can't construct an OffsetTime without a LocalDate, LocalTime, or a ZoneId"
        }
      }

      "is left if we have no zone id" in {
        forAll(Gen.option(localDateGen), Gen.option(localTimeGen)) { (date, time) ⇒
          val answer = DateTimeAnswer(date, time, None)
          answer.asOffsetTime.left.value mustBe "Can't construct an OffsetTime without a LocalDate, LocalTime, or a ZoneId"
        }
      }

      "is right if we have a local time an zone id, and returns the offset time" in {
        forAll(localDateGen, localTimeGen, zoneTimeGen) { (date, time, zone) ⇒
          val answer = DateTimeAnswer(Some(date), Some(time), Some(zone))
          // the date is needed to resolve the zone's offset at that instant
          answer.asOffsetTime.right.value mustBe OffsetTime.of(time, zone.getRules.getOffset(LocalDateTime.of(date, time)))
        }
      }
    }

    "asZonedDateTime" - {
      "is left if we have no local date" in {
        forAll(Gen.option(localTimeGen), Gen.option(zoneTimeGen)) { (time, zone) ⇒
          val answer = DateTimeAnswer(None, time, zone)
          answer.asZonedDateTime.left.value mustBe "Can't construct a ZonedDateTime without a LocalDate, LocalTime, or a ZoneId"
        }
      }

      "is left if we have no local time" in {
        forAll(Gen.option(localDateGen), Gen.option(zoneTimeGen)) { (date, zone) ⇒
          val answer = DateTimeAnswer(date, None, zone)
          answer.asZonedDateTime.left.value mustBe "Can't construct a ZonedDateTime without a LocalDate, LocalTime, or a ZoneId"
        }
      }

      "is left if we have no zone id" in {
        forAll(Gen.option(localDateGen), Gen.option(localTimeGen)) { (date, time) ⇒
          val answer = DateTimeAnswer(date, time, None)
          answer.asZonedDateTime.left.value mustBe "Can't construct a ZonedDateTime without a LocalDate, LocalTime, or a ZoneId"
        }
      }

      "is right if we have all required data, and returns the ZonedDateTime" in {
        forAll(localDateGen, localTimeGen, zoneTimeGen) { (date, time, zone) ⇒
          val answer = DateTimeAnswer(Some(date), Some(time), Some(zone))
          answer.asZonedDateTime.right.value mustBe ZonedDateTime.of(date, time, zone.getRules.getOffset(LocalDateTime.of(date, time)))
        }
      }
    }
  }
}
channingwalton/qanda
questionnaire/src/test/scala/io/questions/model/questionnaire/PrimitiveAnswerSpec.scala
Scala
mit
8,144
//
// Copyright 2013, Martin Pokorny <martin@truffulatree.org>
//
// This Source Code Form is subject to the terms of the Mozilla Public License,
// v. 2.0. If a copy of the MPL was not distributed with this file, You can
// obtain one at http://mozilla.org/MPL/2.0/.
//
package org.truffulatree.scampi2

import org.bridj.Pointer

/** MPI error-handling helpers: conversion between error codes, classes and
  * strings, plus registration of user-defined error classes, codes and
  * strings.
  *
  * Mixed into the library cake via the `mpi2` self-type; every operation
  * delegates to the native MPI library through `mpi2.lib` and is
  * error-checked with `mpi2.mpiCall`.
  */
trait Errors {
  mpi2: Scampi2 with Mpi2LibraryComponent =>

  /** Returns the error class associated with `errorcode`
    * (wraps `MPI_Error_class`).
    */
  def errorClass(errorcode: Int): Int =
    withOutVar { result: Pointer[Int] =>
      mpi2.mpiCall(mpi2.lib.MPI_Error_class(errorcode, result))
      result(0)
    }

  /** Returns the descriptive string associated with `errorcode`
    * (wraps `MPI_Error_string`; buffer sized to `MPI_MAX_ERROR_STRING`).
    */
  def errorString(errorcode: Int): String =
    getString(mpi2.lib.MPI_MAX_ERROR_STRING) { (len, buffer) =>
      withOutVar { strlen: Pointer[Int] =>
        mpi2.mpiCall(mpi2.lib.MPI_Error_string(errorcode, buffer, strlen))
      }
    }

  /** Allocates a new user-defined error class
    * (wraps `MPI_Add_error_class`).
    */
  def addErrorClass: Int =
    withOutVar { result: Pointer[Int] =>
      mpi2.mpiCall(mpi2.lib.MPI_Add_error_class(result))
      result(0)
    }

  /** Allocates a new error code within the given `errorclass`
    * (wraps `MPI_Add_error_code`).
    */
  def addErrorCode(errorclass: Int): Int =
    withOutVar { result: Pointer[Int] =>
      mpi2.mpiCall(mpi2.lib.MPI_Add_error_code(errorclass, result))
      result(0)
    }

  /** Associates `string` with the user-defined `errorcode`
    * (wraps `MPI_Add_error_string`).
    *
    * Fix: was declared with deprecated procedure syntax
    * (`def addErrorString(...) { ... }`); now has an explicit `: Unit =`
    * result type, consistent with the other members of this trait.
    *
    * @throws IllegalArgumentException if `errorcode` is not greater than
    *         `MPI_ERR_LASTCODE` (strings may only be attached to
    *         user-defined codes)
    */
  def addErrorString(errorcode: Int, string: String): Unit = {
    require(
      errorcode > mpi2.lib.MPI_ERR_LASTCODE,
      s"Error string for ${errorcode} cannot be set")
    mpi2.mpiCall(
      mpi2.lib.MPI_Add_error_string(
        errorcode,
        Pointer.pointerToCString(string).as(classOf[Byte])))
  }

  // Common error-message text shared across the library.
  val indexOutOfRangeErrorMsg = "Index out of range"
  val invalidValueErrorMsg = "Invalid value"
  val bufferCompatibilityErrorMsg =
    "Datatype is incompatible with provided buffer and offset"
  val countExceedsLengthErrorMsg = "Element count exceeds buffer length"
  val numBlocksUnequalToSizeErrorMsg =
    "Number of blocks not equal to communicator size"
  val structBlocksAlignmentErrorMsg =
    "Displacements in StructBlock are improperly aligned"
  val scatterBufferSizeErrorMsg =
    "Scatter buffer size is incompatible with communicator size"
  val gatherBufferSizeErrorMsg =
    "Gather buffer size is incompatible with communicator size"
  val reduceBufferLengthErrorMsg =
    "Unequal buffer lengths in call to reduction function"
  val structBlockLengthErrorMsg = "StructBlock length must be non-negative"
}
mpokorny/scampi
src/main/scala/org/truffulatree/scampi2/Errors.scala
Scala
mpl-2.0
2,245
/** * Copyright 2013 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package rx.lang.scala import rx.functions.FuncN import rx.lang.scala.observables.ConnectableObservable import scala.concurrent.duration import java.util import collection.JavaConversions._ import scala.collection.generic.CanBuildFrom import scala.annotation.unchecked.uncheckedVariance import scala.collection.{Iterable, Traversable, immutable} import scala.collection.mutable.ArrayBuffer import scala.reflect.ClassTag /** * The Observable interface that implements the Reactive Pattern. * * @define subscribeObserverMain * Call this method to subscribe an [[rx.lang.scala.Observer]] for receiving * items and notifications from the Observable. * * A typical implementation of `subscribe` does the following: * * It stores a reference to the Observer in a collection object, such as a `List[T]` object. * * It returns a reference to the [[rx.lang.scala.Subscription]] interface. This enables Observers to * unsubscribe, that is, to stop receiving items and notifications before the Observable stops * sending them, which also invokes the Observer's [[rx.lang.scala.Observer.onCompleted onCompleted]] method. * * An `Observable[T]` instance is responsible for accepting all subscriptions * and notifying all Observers. 
Unless the documentation for a particular * `Observable[T]` implementation indicates otherwise, Observers should make no * assumptions about the order in which multiple Observers will receive their notifications. * * @define subscribeObserverParamObserver * the observer * @define subscribeObserverParamScheduler * the [[rx.lang.scala.Scheduler]] on which Observers subscribe to the Observable * * @define subscribeSubscriberMain * Call this method to subscribe an [[Subscriber]] for receiving items and notifications from the [[Observable]]. * * A typical implementation of `subscribe` does the following: * * It stores a reference to the Observer in a collection object, such as a `List[T]` object. * * It returns a reference to the [[rx.lang.scala.Subscription]] interface. This enables [[Subscriber]]s to * unsubscribe, that is, to stop receiving items and notifications before the Observable stops * sending them, which also invokes the Subscriber's [[rx.lang.scala.Observer.onCompleted onCompleted]] method. * * An [[Observable]] instance is responsible for accepting all subscriptions * and notifying all [[Subscriber]]s. Unless the documentation for a particular * [[Observable]] implementation indicates otherwise, [[Subscriber]]s should make no * assumptions about the order in which multiple [[Subscriber]]s will receive their notifications. * * @define subscribeSubscriberParamObserver * the [[Subscriber]] * @define subscribeSubscriberParamScheduler * the [[rx.lang.scala.Scheduler]] on which [[Subscriber]]s subscribe to the Observable * * @define subscribeAllReturn * a [[rx.lang.scala.Subscription]] reference whose `unsubscribe` method can be called to stop receiving items * before the Observable has finished sending them * * @define subscribeCallbacksMainWithNotifications * Call this method to receive items and notifications from this observable. * * @define subscribeCallbacksMainNoNotifications * Call this method to receive items from this observable. 
 *
 * @define subscribeCallbacksParamOnNext
 *         this function will be called whenever the Observable emits an item
 * @define subscribeCallbacksParamOnError
 *         this function will be called if an error occurs
 * @define subscribeCallbacksParamOnComplete
 *         this function will be called when this Observable has finished emitting items
 * @define subscribeCallbacksParamScheduler
 *         the scheduler to use
 *
 * @define debounceVsThrottle
 * Information on debounce vs throttle:
 *  - [[http://drupalmotion.com/article/debounce-and-throttle-visual-explanation]]
 *  - [[http://unscriptable.com/2009/03/20/debouncing-javascript-methods/]]
 *  - [[http://www.illyriad.co.uk/blog/index.php/2011/09/javascript-dont-spam-your-server-debounce-and-throttle/]]
 *
 */
trait Observable[+T] {

  import scala.collection.JavaConverters._
  import scala.collection.Seq
  import scala.concurrent.duration.{Duration, TimeUnit, MILLISECONDS}
  import scala.collection.mutable
  import rx.functions._
  import rx.lang.scala.observables.BlockingObservable
  import ImplicitFunctionConversions._
  import JavaConversions._

  // The wrapped RxJava observable. Declared `_ <: T` because rx.Observable (Java)
  // is invariant, while this Scala facade is covariant in T.
  private [scala] val asJavaObservable: rx.Observable[_ <: T]

  /**
   * $subscribeObserverMain
   *
   * @return $subscribeAllReturn
   */
  def subscribe(): Subscription = {
    asJavaObservable.subscribe()
  }

  /**
   * $subscribeObserverMain
   *
   * @param observer $subscribeObserverParamObserver
   * @return $subscribeAllReturn
   */
  def subscribe(observer: Observer[T]): Subscription = {
    asJavaObservable.subscribe(observer.asJavaObserver)
  }

  /**
   * $subscribeObserverMain
   *
   * @param observer $subscribeObserverParamObserver
   * @return $subscribeAllReturn
   */
  def apply(observer: Observer[T]): Subscription = subscribe(observer)

  /**
   * $subscribeSubscriberMain
   *
   * @param subscriber $subscribeSubscriberParamObserver
   * @return $subscribeAllReturn
   */
  def subscribe(subscriber: Subscriber[T]): Subscription = {
    // Add the casting to avoid compile error "ambiguous reference to overloaded definition"
    val thisJava = asJavaObservable.asInstanceOf[rx.Observable[T]]
    thisJava.subscribe(subscriber.asJavaSubscriber)
  }

  /**
   * Subscribe to Observable and invoke `OnSubscribe` function without any
   * contract protection, error handling, unsubscribe, or execution hooks.
   *
   * This should only be used for implementing an `Operator` that requires nested subscriptions.
   *
   * Normal use should use [[Observable.subscribe]] which ensures the Rx contract and other functionality.
   *
   * @param subscriber
   * @return [[Subscription]] which is the Subscriber passed in
   * @since 0.17
   */
  def unsafeSubscribe(subscriber: Subscriber[T]): Subscription = {
    asJavaObservable.unsafeSubscribe(subscriber.asJavaSubscriber)
  }

  /**
   * $subscribeSubscriberMain
   *
   * @param subscriber $subscribeSubscriberParamObserver
   * @return $subscribeAllReturn
   */
  def apply(subscriber: Subscriber[T]): Subscription = subscribe(subscriber)

  /**
   * $subscribeCallbacksMainNoNotifications
   *
   * @param onNext $subscribeCallbacksParamOnNext
   * @return $subscribeAllReturn
   */
  def subscribe(onNext: T => Unit): Subscription = {
    asJavaObservable.subscribe(scalaFunction1ProducingUnitToAction1(onNext))
  }

  /**
   * $subscribeCallbacksMainWithNotifications
   *
   * @param onNext $subscribeCallbacksParamOnNext
   * @param onError $subscribeCallbacksParamOnError
   * @return $subscribeAllReturn
   */
  def subscribe(onNext: T => Unit, onError: Throwable => Unit): Subscription = {
    asJavaObservable.subscribe(
      scalaFunction1ProducingUnitToAction1(onNext),
      scalaFunction1ProducingUnitToAction1(onError)
    )
  }

  /**
   * $subscribeCallbacksMainWithNotifications
   *
   * @param onNext $subscribeCallbacksParamOnNext
   * @param onError $subscribeCallbacksParamOnError
   * @param onCompleted $subscribeCallbacksParamOnComplete
   * @return $subscribeAllReturn
   */
  def subscribe(onNext: T => Unit, onError: Throwable => Unit, onCompleted: () => Unit): Subscription = {
    asJavaObservable.subscribe(
      scalaFunction1ProducingUnitToAction1(onNext),
      scalaFunction1ProducingUnitToAction1(onError),
      scalaFunction0ProducingUnitToAction0(onCompleted)
    )
  }

  /**
   * Returns a [[ConnectableObservable]] that, when its `connect` method is called, causes the
   * source Observable to push results into the specified subject.
   *
   * NOTE(review): the previous scaladoc described a "(start function, Observable) pair", which
   * does not match the `ConnectableObservable[R]` return type below; rewritten to match the code.
   *
   * @param subject
   *            the `rx.lang.scala.subjects.Subject` to push source items into
   * @return a [[ConnectableObservable]] that pushes results into the specified Subject once connected
   */
  def multicast[R >: T](subject: rx.lang.scala.Subject[R]): ConnectableObservable[R] = {
    val s: rx.subjects.Subject[_ >: T, _<: R] = subject.asJavaSubject
    new ConnectableObservable[R](asJavaObservable.multicast(s))
  }

  /**
   * Returns an Observable that emits items produced by multicasting the source Observable within a selector function.
   *
   * @param subjectFactory the `Subject` factory
   * @param selector the selector function, which can use the multicasted source Observable subject to the policies
   *                 enforced by the created `Subject`
   * @return an Observable that emits the items produced by multicasting the source Observable within a selector function
   */
  def multicast[R >: T, U](subjectFactory: () => rx.lang.scala.Subject[R], selector: Observable[R] => Observable[U]): Observable[U] = {
    val subjectFactoryJava: Func0[rx.subjects.Subject[_ >: T, _ <: R]] = () => subjectFactory().asJavaSubject
    val selectorJava: Func1[rx.Observable[R], rx.Observable[U]] =
      (jo: rx.Observable[R]) => selector(toScalaObservable[R](jo)).asJavaObservable.asInstanceOf[rx.Observable[U]]
    toScalaObservable[U](asJavaObservable.multicast[R, U](subjectFactoryJava, selectorJava))
  }

  /**
   * Returns an Observable that first emits the items emitted by `this`, and then `elem`.
   *
   * @param elem the item to be appended
   * @return an Observable that first emits the items emitted by `this`, and then `elem`.
   */
  def :+[U >: T](elem: U): Observable[U] = {
    this ++ Observable.items(elem)
  }

  /**
   * Returns an Observable that first emits the items emitted by `this`, and then the items emitted
   * by `that`.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/concat.png">
   *
   * @param that
   *            an Observable to be appended
   * @return an Observable that emits items that are the result of combining the items emitted by
   *         this and that, one after the other
   */
  def ++[U >: T](that: Observable[U]): Observable[U] = {
    val o1: rx.Observable[_ <: U] = this.asJavaObservable
    val o2: rx.Observable[_ <: U] = that.asJavaObservable
    toScalaObservable(rx.Observable.concat(o1, o2))
  }

  /**
   * Returns an Observable that emits a specified item before it begins to emit items emitted by the source Observable.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/startWith.png">
   *
   * @param elem the item to emit
   * @return an Observable that emits the specified item before it begins to emit items emitted by the source Observable
   */
  def +:[U >: T](elem: U): Observable[U] = {
    val thisJava = this.asJavaObservable.asInstanceOf[rx.Observable[U]]
    toScalaObservable(thisJava.startWith(elem))
  }

  /**
   * Returns an Observable that emits the items emitted by several Observables, one after the
   * other.
   *
   * This operation is only available if `this` is of type `Observable[Observable[U]]` for some `U`,
   * otherwise you'll get a compilation error.
   *
   * @usecase def concat[U]: Observable[U]
   */
  def concat[U](implicit evidence: Observable[T] <:< Observable[Observable[U]]): Observable[U] = {
    val o2: Observable[Observable[U]] = this
    val o3: Observable[rx.Observable[_ <: U]] = o2.map(_.asJavaObservable)
    val o4: rx.Observable[_ <: rx.Observable[_ <: U]] = o3.asJavaObservable
    val o5 = rx.Observable.concat[U](o4)
    toScalaObservable[U](o5)
  }

  /**
   * Returns a new Observable that emits items resulting from applying a function that you supply to each item
   * emitted by the source Observable, where that function returns an Observable, and then emitting the items
   * that result from concatenating those resulting Observables.
   *
   * <img width="640" height="305" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/concatMap.png">
   *
   * @param f a function that, when applied to an item emitted by the source Observable, returns an Observable
   * @return an Observable that emits the result of applying the transformation function to each item emitted
   *         by the source Observable and concatenating the Observables obtained from this transformation
   */
  def concatMap[R](f: T => Observable[R]): Observable[R] = {
    toScalaObservable[R](asJavaObservable.concatMap[R](new Func1[T, rx.Observable[_ <: R]] {
      def call(t1: T): rx.Observable[_ <: R] = {
        f(t1).asJavaObservable
      }
    }))
  }

  /**
   * Wraps this Observable in another Observable that ensures that the resulting
   * Observable is chronologically well-behaved.
   *
   * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/synchronize.png">
   *
   * A well-behaved Observable does not interleave its invocations of the [[rx.lang.scala.Observer.onNext onNext]], [[rx.lang.scala.Observer.onCompleted onCompleted]], and [[rx.lang.scala.Observer.onError onError]] methods of
   * its [[rx.lang.scala.Observer]]s; it invokes `onCompleted` or `onError` only once; and it never invokes `onNext` after invoking either `onCompleted` or `onError`.
   * [[Observable.serialize serialize]] enforces this, and the Observable it returns invokes `onNext` and `onCompleted` or `onError` synchronously.
   *
   * @return an Observable that is a chronologically well-behaved version of the source
   *         Observable, and that synchronously notifies its [[rx.lang.scala.Observer]]s
   */
  def serialize: Observable[T] = {
    toScalaObservable[T](asJavaObservable.serialize)
  }

  /**
   * Wraps each item emitted by a source Observable in a timestamped tuple.
   *
   * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/timestamp.png">
   *
   * @return an Observable that emits timestamped items from the source Observable
   */
  def timestamp: Observable[(Long, T)] = {
    // Unwrap RxJava's Timestamped wrapper into a plain Scala (millis, value) tuple.
    toScalaObservable[rx.schedulers.Timestamped[_ <: T]](asJavaObservable.timestamp())
      .map((t: rx.schedulers.Timestamped[_ <: T]) => (t.getTimestampMillis, t.getValue))
  }

  /**
   * Wraps each item emitted by a source Observable in a timestamped tuple
   * with timestamps provided by the given Scheduler.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/timestamp.s.png">
   *
   * @param scheduler [[rx.lang.scala.Scheduler]] to use as a time source.
   * @return an Observable that emits timestamped items from the source
   *         Observable with timestamps provided by the given Scheduler
   */
  def timestamp(scheduler: Scheduler): Observable[(Long, T)] = {
    toScalaObservable[rx.schedulers.Timestamped[_ <: T]](asJavaObservable.timestamp(scheduler))
      .map((t: rx.schedulers.Timestamped[_ <: T]) => (t.getTimestampMillis, t.getValue))
  }

  /**
   * Returns an Observable formed from this Observable and another Observable by combining
   * corresponding elements in pairs.
   * The number of `onNext` invocations of the resulting `Observable[(T, U)]`
   * is the minimum of the number of `onNext` invocations of `this` and `that`.
   */
  def zip[U](that: Observable[U]): Observable[(T, U)] = {
    zipWith(that, (t: T, u: U) => (t, u))
  }

  /**
   * Returns an Observable formed from `this` Observable and `other` Iterable by combining
   * corresponding elements in pairs.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/zip.i.png">
   * <p>
   * Note that the `other` Iterable is evaluated as items are observed from the source Observable; it is
   * not pre-consumed. This allows you to zip infinite streams on either side.
   *
   * @param other the Iterable sequence
   * @return an Observable that pairs up values from the source Observable and the `other` Iterable.
   */
  def zip[U](other: Iterable[U]): Observable[(T, U)] = {
    zipWith(other, (t: T, u: U) => (t, u))
  }

  /**
   * Returns an Observable that emits items that are the result of applying a specified function to pairs of
   * values, one each from the source Observable and a specified Iterable sequence.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/zip.i.png">
   * <p>
   * Note that the `other` Iterable is evaluated as items are observed from the source Observable; it is
   * not pre-consumed. This allows you to zip infinite streams on either side.
   *
   * @param other the Iterable sequence
   * @param selector a function that combines the pairs of items from the Observable and the Iterable to generate
   *                 the items to be emitted by the resulting Observable
   * @return an Observable that pairs up values from the source Observable and the `other` Iterable
   *         sequence and emits the results of `selector` applied to these pairs
   */
  def zipWith[U, R](other: Iterable[U], selector: (T, U) => R): Observable[R] = {
    val thisJava = asJavaObservable.asInstanceOf[rx.Observable[T]]
    toScalaObservable[R](thisJava.zip(other.asJava, selector))
  }

  /**
   * Returns an Observable formed from this Observable and another Observable by combining
   * corresponding elements using the selector function.
* The number of `onNext` invocations of the resulting `Observable[(T, U)]` * is the minumum of the number of `onNext` invocations of `this` and `that`. */ def zipWith[U, R](that: Observable[U], selector: (T,U) => R): Observable[R] = { toScalaObservable[R](rx.Observable.zip[T, U, R](this.asJavaObservable, that.asJavaObservable, selector)) } /** * Zips this Observable with its indices. * * @return An Observable emitting pairs consisting of all elements of this Observable paired with * their index. Indices start at 0. */ def zipWithIndex: Observable[(T, Int)] = { zip(0 until Int.MaxValue) } /** * Creates an Observable which produces buffers of collected values. * * This Observable produces buffers. Buffers are created when the specified `openings` * Observable produces an object. That object is used to construct an Observable to emit buffers, feeding it into `closings` function. * Buffers are emitted when the created Observable produces an object. * * @param openings * The [[rx.lang.scala.Observable]] which, when it produces an object, will cause * another buffer to be created. * @param closings * The function which is used to produce an [[rx.lang.scala.Observable]] for every buffer created. * When this [[rx.lang.scala.Observable]] produces an object, the associated buffer * is emitted. * @return * An [[rx.lang.scala.Observable]] which produces buffers which are created and emitted when the specified [[rx.lang.scala.Observable]]s publish certain objects. 
   */
  def buffer[Opening](openings: Observable[Opening], closings: Opening => Observable[Any]): Observable[Seq[T]] = {
    val opening: rx.Observable[_ <: Opening] = openings.asJavaObservable
    val closing: Func1[_ >: Opening, _ <: rx.Observable[_ <: Any]] = (o: Opening) => closings(o).asJavaObservable
    val jObs: rx.Observable[_ <: java.util.List[_]] = asJavaObservable.buffer[Opening, Any](opening, closing)
    Observable.jObsOfListToScObsOfSeq(jObs.asInstanceOf[rx.Observable[_ <: java.util.List[T]]])
  }

  /**
   * Creates an Observable which produces buffers of collected values.
   *
   * This Observable produces connected non-overlapping buffers, each containing `count`
   * elements. When the source Observable completes or encounters an error, the current
   * buffer is emitted, and the event is propagated.
   *
   * @param count
   *            The maximum size of each buffer before it should be emitted.
   * @return
   *         An [[rx.lang.scala.Observable]] which produces connected non-overlapping buffers containing at most
   *         `count` produced values.
   */
  def buffer(count: Int): Observable[Seq[T]] = {
    val oJava: rx.Observable[_ <: java.util.List[_]] = asJavaObservable.buffer(count)
    Observable.jObsOfListToScObsOfSeq(oJava.asInstanceOf[rx.Observable[_ <: java.util.List[T]]])
  }

  /**
   * Creates an Observable which produces buffers of collected values.
   *
   * This Observable produces buffers every `skip` values, each containing `count`
   * elements. When the source Observable completes or encounters an error, the current
   * buffer is emitted, and the event is propagated.
   *
   * @param count
   *            The maximum size of each buffer before it should be emitted.
   * @param skip
   *            How many produced values need to be skipped before starting a new buffer. Note that when `skip` and
   *            `count` are equal that this is the same operation as `buffer(int)`.
   * @return
   *         An [[rx.lang.scala.Observable]] which produces buffers every `skip` values containing at most
   *         `count` produced values.
   */
  def buffer(count: Int, skip: Int): Observable[Seq[T]] = {
    val oJava: rx.Observable[_ <: java.util.List[_]] = asJavaObservable.buffer(count, skip)
    Observable.jObsOfListToScObsOfSeq(oJava.asInstanceOf[rx.Observable[_ <: java.util.List[T]]])
  }

  /**
   * Creates an Observable which produces buffers of collected values.
   *
   * This Observable produces connected non-overlapping buffers, each of a fixed duration
   * specified by the `timespan` argument. When the source Observable completes or encounters
   * an error, the current buffer is emitted and the event is propagated.
   *
   * @param timespan
   *            The period of time each buffer is collecting values before it should be emitted, and
   *            replaced with a new buffer.
   * @return
   *         An [[rx.lang.scala.Observable]] which produces connected non-overlapping buffers with a fixed duration.
   */
  def buffer(timespan: Duration): Observable[Seq[T]] = {
    val oJava: rx.Observable[_ <: java.util.List[_]] = asJavaObservable.buffer(timespan.length, timespan.unit)
    Observable.jObsOfListToScObsOfSeq(oJava.asInstanceOf[rx.Observable[_ <: java.util.List[T]]])
  }

  /**
   * Creates an Observable which produces buffers of collected values.
   *
   * This Observable produces connected non-overlapping buffers, each of a fixed duration
   * specified by the `timespan` argument. When the source Observable completes or encounters
   * an error, the current buffer is emitted and the event is propagated.
   *
   * @param timespan
   *            The period of time each buffer is collecting values before it should be emitted, and
   *            replaced with a new buffer.
   * @param scheduler
   *            The [[rx.lang.scala.Scheduler]] to use when determining the end and start of a buffer.
   * @return
   *         An [[rx.lang.scala.Observable]] which produces connected non-overlapping buffers with a fixed duration.
   */
  def buffer(timespan: Duration, scheduler: Scheduler): Observable[Seq[T]] = {
    val oJava: rx.Observable[_ <: java.util.List[_]] = asJavaObservable.buffer(timespan.length, timespan.unit, scheduler)
    Observable.jObsOfListToScObsOfSeq(oJava.asInstanceOf[rx.Observable[_ <: java.util.List[T]]])
  }

  /**
   * Creates an Observable which produces buffers of collected values. This Observable produces connected
   * non-overlapping buffers, each of a fixed duration specified by the `timespan` argument or a maximum size
   * specified by the `count` argument (which ever is reached first). When the source Observable completes
   * or encounters an error, the current buffer is emitted and the event is propagated.
   *
   * @param timespan
   *            The period of time each buffer is collecting values before it should be emitted, and
   *            replaced with a new buffer.
   * @param count
   *            The maximum size of each buffer before it should be emitted.
   * @return
   *         An [[rx.lang.scala.Observable]] which produces connected non-overlapping buffers which are emitted after
   *         a fixed duration or when the buffer has reached maximum capacity (which ever occurs first).
   */
  def buffer(timespan: Duration, count: Int): Observable[Seq[T]] = {
    val oJava: rx.Observable[_ <: java.util.List[_]] = asJavaObservable.buffer(timespan.length, timespan.unit, count)
    Observable.jObsOfListToScObsOfSeq(oJava.asInstanceOf[rx.Observable[_ <: java.util.List[T]]])
  }

  /**
   * Creates an Observable which produces buffers of collected values. This Observable produces connected
   * non-overlapping buffers, each of a fixed duration specified by the `timespan` argument or a maximum size
   * specified by the `count` argument (which ever is reached first). When the source Observable completes
   * or encounters an error, the current buffer is emitted and the event is propagated.
   *
   * @param timespan
   *            The period of time each buffer is collecting values before it should be emitted, and
   *            replaced with a new buffer.
   * @param count
   *            The maximum size of each buffer before it should be emitted.
   * @param scheduler
   *            The [[rx.lang.scala.Scheduler]] to use when determining the end and start of a buffer.
   * @return
   *         An [[rx.lang.scala.Observable]] which produces connected non-overlapping buffers which are emitted after
   *         a fixed duration or when the buffer has reached maximum capacity (which ever occurs first).
   */
  def buffer(timespan: Duration, count: Int, scheduler: Scheduler): Observable[Seq[T]] = {
    val oJava: rx.Observable[_ <: java.util.List[_]] = asJavaObservable.buffer(timespan.length, timespan.unit, count, scheduler)
    Observable.jObsOfListToScObsOfSeq(oJava.asInstanceOf[rx.Observable[_ <: java.util.List[T]]])
  }

  /**
   * Creates an Observable which produces buffers of collected values. This Observable starts a new buffer
   * periodically, which is determined by the `timeshift` argument. Each buffer is emitted after a fixed timespan
   * specified by the `timespan` argument. When the source Observable completes or encounters an error, the
   * current buffer is emitted and the event is propagated.
   *
   * @param timespan
   *            The period of time each buffer is collecting values before it should be emitted.
   * @param timeshift
   *            The period of time after which a new buffer will be created.
   * @return
   *         An [[rx.lang.scala.Observable]] which produces new buffers periodically, and these are emitted after
   *         a fixed timespan has elapsed.
   */
  def buffer(timespan: Duration, timeshift: Duration): Observable[Seq[T]] = {
    val span: Long = timespan.length
    // The underlying RxJava call takes a single TimeUnit, so `timeshift` is converted
    // into `timespan`'s unit (this may truncate sub-unit precision).
    val shift: Long = timespan.unit.convert(timeshift.length, timeshift.unit)
    val unit: TimeUnit = timespan.unit
    val oJava: rx.Observable[_ <: java.util.List[_]] = asJavaObservable.buffer(span, shift, unit)
    Observable.jObsOfListToScObsOfSeq(oJava.asInstanceOf[rx.Observable[_ <: java.util.List[T]]])
  }

  /**
   * Creates an Observable which produces buffers of collected values. This Observable starts a new buffer
   * periodically, which is determined by the `timeshift` argument. Each buffer is emitted after a fixed timespan
   * specified by the `timespan` argument. When the source Observable completes or encounters an error, the
   * current buffer is emitted and the event is propagated.
   *
   * @param timespan
   *            The period of time each buffer is collecting values before it should be emitted.
   * @param timeshift
   *            The period of time after which a new buffer will be created.
   * @param scheduler
   *            The [[rx.lang.scala.Scheduler]] to use when determining the end and start of a buffer.
   * @return
   *         An [[rx.lang.scala.Observable]] which produces new buffers periodically, and these are emitted after
   *         a fixed timespan has elapsed.
   */
  def buffer(timespan: Duration, timeshift: Duration, scheduler: Scheduler): Observable[Seq[T]] = {
    val span: Long = timespan.length
    // See the non-scheduler overload: `timeshift` is converted into `timespan`'s unit.
    val shift: Long = timespan.unit.convert(timeshift.length, timeshift.unit)
    val unit: TimeUnit = timespan.unit
    val oJava: rx.Observable[_ <: java.util.List[_]] = asJavaObservable.buffer(span, shift, unit, scheduler)
    Observable.jObsOfListToScObsOfSeq(oJava.asInstanceOf[rx.Observable[_ <: java.util.List[T]]])
  }

  /**
   * Returns an Observable that emits non-overlapping buffered items from the source Observable each time the
   * specified boundary Observable emits an item.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/buffer8.png">
   * <p>
   * Completion of either the source or the boundary Observable causes the returned Observable to emit the
   * latest buffer and complete.
   *
   * @param boundary the boundary Observable. Note: This is a by-name parameter,
   *                 so it is only evaluated when someone subscribes to the returned Observable.
   * @return an Observable that emits buffered items from the source Observable when the boundary Observable
   *         emits an item
   */
  def buffer(boundary: => Observable[Any]): Observable[Seq[T]] = {
    // Wrap the by-name `boundary` in a Func0 so it is evaluated once per subscription.
    val f = new Func0[rx.Observable[_ <: Any]]() {
      override def call(): rx.Observable[_ <: Any] = boundary.asJavaObservable
    }
    toScalaObservable(asJavaObservable.buffer[Any](f)).map(_.asScala)
  }

  /**
   * Returns an Observable that emits non-overlapping buffered items from the source Observable each time the
   * specified boundary Observable emits an item.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/buffer8.png">
   * <p>
   * Completion of either the source or the boundary Observable causes the returned Observable to emit the
   * latest buffer and complete.
   *
   * @param boundary the boundary Observable
   * @param initialCapacity the initial capacity of each buffer chunk
   * @return an Observable that emits buffered items from the source Observable when the boundary Observable
   *         emits an item
   */
  def buffer(boundary: Observable[Any], initialCapacity: Int): Observable[Seq[T]] = {
    val thisJava = this.asJavaObservable.asInstanceOf[rx.Observable[T]]
    toScalaObservable(thisJava.buffer(boundary.asJavaObservable, initialCapacity)).map(_.asScala)
  }

  /**
   * Creates an Observable which produces windows of collected values. This Observable produces connected
   * non-overlapping windows. The boundary of each window is determined by the items emitted from a specified
   * boundary-governing Observable.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/window8.png" />
   *
   * @param boundary an Observable whose emitted items close and open windows. Note: This is a by-name parameter,
   *                 so it is only evaluated when someone subscribes to the returned Observable.
   * @return An Observable which produces connected non-overlapping windows. The boundary of each window is
   *         determined by the items emitted from a specified boundary-governing Observable.
   */
  def window(boundary: => Observable[Any]): Observable[Observable[T]] = {
    // Wrap the by-name `boundary` in a Func0 so it is evaluated once per subscription.
    val func = new Func0[rx.Observable[_ <: Any]]() {
      override def call(): rx.Observable[_ <: Any] = boundary.asJavaObservable
    }
    val jo: rx.Observable[_ <: rx.Observable[_ <: T]] = asJavaObservable.window[Any](func)
    toScalaObservable(jo).map(toScalaObservable[T](_))
  }

  /**
   * Creates an Observable which produces windows of collected values. Chunks are created when the specified `openings`
   * Observable produces an object. That object is used to construct an Observable to emit windows, feeding it into `closings` function.
   * Windows are emitted when the created Observable produces an object.
   *
   * @param openings
   *            The [[rx.lang.scala.Observable]] which when it produces an object, will cause
   *            another window to be created.
   * @param closings
   *            The function which is used to produce an [[rx.lang.scala.Observable]] for every window created.
   *            When this [[rx.lang.scala.Observable]] produces an object, the associated window
   *            is emitted.
   * @return
   *         An [[rx.lang.scala.Observable]] which produces windows which are created and emitted when the specified [[rx.lang.scala.Observable]]s publish certain objects.
   */
  def window[Opening](openings: Observable[Opening], closings: Opening => Observable[Any]) = {
    Observable.jObsOfJObsToScObsOfScObs(
      asJavaObservable.window[Opening, Any](openings.asJavaObservable, (op: Opening) => closings(op).asJavaObservable))
      : Observable[Observable[T]] // SI-7818
  }

  /**
   * Creates an Observable which produces windows of collected values. This Observable produces connected
   * non-overlapping windows, each containing `count` elements. When the source Observable completes or
   * encounters an error, the current window is emitted, and the event is propagated.
   *
   * @param count
   *            The maximum size of each window before it should be emitted.
   * @return
   *         An [[rx.lang.scala.Observable]] which produces connected non-overlapping windows containing at most
   *         `count` produced values.
   */
  def window(count: Int): Observable[Observable[T]] = {
    // this unnecessary ascription is needed because of this bug (without, compiler crashes):
    // https://issues.scala-lang.org/browse/SI-7818
    Observable.jObsOfJObsToScObsOfScObs(asJavaObservable.window(count)) : Observable[Observable[T]]
  }

  /**
   * Creates an Observable which produces windows of collected values. This Observable produces windows every
   * `skip` values, each containing `count` elements. When the source Observable completes or encounters an error,
   * the current window is emitted and the event is propagated.
   *
   * @param count
   *            The maximum size of each window before it should be emitted.
   * @param skip
   *            How many produced values need to be skipped before starting a new window. Note that when `skip` and
   *            `count` are equal that this is the same operation as `window(int)`.
   * @return
   *         An [[rx.lang.scala.Observable]] which produces windows every `skip` values containing at most
   *         `count` produced values.
   */
  def window(count: Int, skip: Int): Observable[Observable[T]] = {
    Observable.jObsOfJObsToScObsOfScObs(asJavaObservable.window(count, skip))
      : Observable[Observable[T]] // SI-7818
  }

  /**
   * Creates an Observable which produces windows of collected values. This Observable produces connected
   * non-overlapping windows, each of a fixed duration specified by the `timespan` argument. When the source
   * Observable completes or encounters an error, the current window is emitted and the event is propagated.
   *
   * @param timespan
   *            The period of time each window is collecting values before it should be emitted, and
   *            replaced with a new window.
   * @return
   *         An [[rx.lang.scala.Observable]] which produces connected non-overlapping windows with a fixed duration.
   */
  def window(timespan: Duration): Observable[Observable[T]] = {
    Observable.jObsOfJObsToScObsOfScObs(asJavaObservable.window(timespan.length, timespan.unit))
      : Observable[Observable[T]] // SI-7818
  }

  /**
   * Creates an Observable which produces windows of collected values. This Observable produces connected
   * non-overlapping windows, each of a fixed duration specified by the `timespan` argument. When the source
   * Observable completes or encounters an error, the current window is emitted and the event is propagated.
   *
   * @param timespan
   *            The period of time each window is collecting values before it should be emitted, and
   *            replaced with a new window.
   * @param scheduler
   *            The [[rx.lang.scala.Scheduler]] to use when determining the end and start of a window.
   * @return
   *         An [[rx.lang.scala.Observable]] which produces connected non-overlapping windows with a fixed duration.
   */
  def window(timespan: Duration, scheduler: Scheduler): Observable[Observable[T]] = {
    Observable.jObsOfJObsToScObsOfScObs(asJavaObservable.window(timespan.length, timespan.unit, scheduler))
      : Observable[Observable[T]] // SI-7818
  }

  /**
   * Creates an Observable which produces windows of collected values. This Observable produces connected
   * non-overlapping windows, each of a fixed duration specified by the `timespan` argument or a maximum size
   * specified by the `count` argument (which ever is reached first). When the source Observable completes
   * or encounters an error, the current window is emitted and the event is propagated.
   *
   * @param timespan
   *            The period of time each window is collecting values before it should be emitted, and
   *            replaced with a new window.
   * @param count
   *            The maximum size of each window before it should be emitted.
   * @return
   *         An [[rx.lang.scala.Observable]] which produces connected non-overlapping windows which are emitted after
   *         a fixed duration or when the window has reached maximum capacity (which ever occurs first).
   */
  def window(timespan: Duration, count: Int): Observable[Observable[T]] = {
    Observable.jObsOfJObsToScObsOfScObs(asJavaObservable.window(timespan.length, timespan.unit, count))
      : Observable[Observable[T]] // SI-7818
  }

  /**
   * Creates an Observable which produces windows of collected values. This Observable produces connected
   * non-overlapping windows, each of a fixed duration specified by the `timespan` argument or a maximum size
   * specified by the `count` argument (which ever is reached first). When the source Observable completes
   * or encounters an error, the current window is emitted and the event is propagated.
   *
   * @param timespan
   *            The period of time each window is collecting values before it should be emitted, and
   *            replaced with a new window.
   * @param count
   *            The maximum size of each window before it should be emitted.
   * @param scheduler
   *            The [[rx.lang.scala.Scheduler]] to use when determining the end and start of a window.
   * @return
   *         An [[rx.lang.scala.Observable]] which produces connected non-overlapping windows which are emitted after
   *         a fixed duration or when the window has reached maximum capacity (which ever occurs first).
   */
  def window(timespan: Duration, count: Int, scheduler: Scheduler): Observable[Observable[T]] = {
    Observable.jObsOfJObsToScObsOfScObs(asJavaObservable.window(timespan.length, timespan.unit, count, scheduler))
      : Observable[Observable[T]] // SI-7818
  }

  /**
   * Creates an Observable which produces windows of collected values. This Observable starts a new window
   * periodically, which is determined by the `timeshift` argument. Each window is emitted after a fixed timespan
   * specified by the `timespan` argument. When the source Observable completes or encounters an error, the
   * current window is emitted and the event is propagated.
   *
   * @param timespan
   *            The period of time each window is collecting values before it should be emitted.
   * @param timeshift
   *            The period of time after which a new window will be created.
   * @return
   *         An [[rx.lang.scala.Observable]] which produces new windows periodically, and these are emitted after
   *         a fixed timespan has elapsed.
   */
  def window(timespan: Duration, timeshift: Duration): Observable[Observable[T]] = {
    val span: Long = timespan.length
    // The underlying RxJava call takes a single TimeUnit, so `timeshift` is converted
    // into `timespan`'s unit (this may truncate sub-unit precision).
    val shift: Long = timespan.unit.convert(timeshift.length, timeshift.unit)
    val unit: TimeUnit = timespan.unit
    Observable.jObsOfJObsToScObsOfScObs(asJavaObservable.window(span, shift, unit))
      : Observable[Observable[T]] // SI-7818
  }

  /**
   * Creates an Observable which produces windows of collected values. This Observable starts a new window
   * periodically, which is determined by the `timeshift` argument. Each window is emitted after a fixed timespan
   * specified by the `timespan` argument. When the source Observable completes or encounters an error, the
   * current window is emitted and the event is propagated.
   *
   * @param timespan
   *            The period of time each window is collecting values before it should be emitted.
   * @param timeshift
   *            The period of time after which a new window will be created.
   * @param scheduler
   *            The [[rx.lang.scala.Scheduler]] to use when determining the end and start of a window.
   * @return
   *         An [[rx.lang.scala.Observable]] which produces new windows periodically, and these are emitted after
   *         a fixed timespan has elapsed.
   */
  def window(timespan: Duration, timeshift: Duration, scheduler: Scheduler): Observable[Observable[T]] = {
    val span: Long = timespan.length
    // See the non-scheduler overload: `timeshift` is converted into `timespan`'s unit.
    val shift: Long = timespan.unit.convert(timeshift.length, timeshift.unit)
    val unit: TimeUnit = timespan.unit
    Observable.jObsOfJObsToScObsOfScObs(asJavaObservable.window(span, shift, unit, scheduler))
      : Observable[Observable[T]] // SI-7818
  }

  /**
   * Returns an Observable which only emits those items for which a given predicate holds.
* * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/filter.png"> * * @param predicate * a function that evaluates the items emitted by the source Observable, returning `true` if they pass the filter * @return an Observable that emits only those items in the original Observable that the filter * evaluates as `true` */ def filter(predicate: T => Boolean): Observable[T] = { toScalaObservable[T](asJavaObservable.filter(predicate)) } /** * Registers an function to be called when this Observable invokes [[rx.lang.scala.Observer.onCompleted onCompleted]] or [[rx.lang.scala.Observer.onError onError]]. * * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/finallyDo.png"> * * @param action * an function to be invoked when the source Observable finishes * @return an Observable that emits the same items as the source Observable, then invokes the function */ def finallyDo(action: => Unit): Observable[T] = { toScalaObservable[T](asJavaObservable.finallyDo(() => action)) } /** * Creates a new Observable by applying a function that you supply to each item emitted by * the source Observable, where that function returns an Observable, and then merging those * resulting Observables and emitting the results of this merger. * * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/flatMap.png"> * * @param f * a function that, when applied to an item emitted by the source Observable, returns * an Observable * @return an Observable that emits the result of applying the transformation function to each * item emitted by the source Observable and merging the results of the Observables * obtained from this transformation. 
*/ def flatMap[R](f: T => Observable[R]): Observable[R] = { toScalaObservable[R](asJavaObservable.flatMap[R](new Func1[T, rx.Observable[_ <: R]]{ def call(t1: T): rx.Observable[_ <: R] = { f(t1).asJavaObservable } })) } /** * Returns an Observable that applies a function to each item emitted or notification raised by the source * Observable and then flattens the Observables returned from these functions and emits the resulting items. * * <img width="640" height="410" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/mergeMap.nce.png"> * * @tparam R the result type * @param onNext a function that returns an Observable to merge for each item emitted by the source Observable * @param onError a function that returns an Observable to merge for an onError notification from the source * Observable * @param onCompleted a function that returns an Observable to merge for an onCompleted notification from the source * Observable * @return an Observable that emits the results of merging the Observables returned from applying the * specified functions to the emissions and notifications of the source Observable */ def flatMap[R](onNext: T => Observable[R], onError: Throwable => Observable[R], onCompleted: () => Observable[R]): Observable[R] = { val jOnNext = new Func1[T, rx.Observable[_ <: R]] { override def call(t: T): rx.Observable[_ <: R] = onNext(t).asJavaObservable } val jOnError = new Func1[Throwable, rx.Observable[_ <: R]] { override def call(e: Throwable): rx.Observable[_ <: R] = onError(e).asJavaObservable } val jOnCompleted = new Func0[rx.Observable[_ <: R]] { override def call(): rx.Observable[_ <: R] = onCompleted().asJavaObservable } toScalaObservable[R](asJavaObservable.mergeMap[R](jOnNext, jOnError, jOnCompleted)) } /** * Returns an Observable that emits the results of a specified function to the pair of values emitted by the * source Observable and a specified collection Observable. 
* * <img width="640" height="390" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/mergeMap.r.png"> * * @tparam U the type of items emitted by the collection Observable * @tparam R the type of items emitted by the resulting Observable * @param collectionSelector a function that returns an Observable for each item emitted by the source Observable * @param resultSelector a function that combines one item emitted by each of the source and collection Observables and * returns an item to be emitted by the resulting Observable * @return an Observable that emits the results of applying a function to a pair of values emitted by the * source Observable and the collection Observable */ def flatMap[U, R](collectionSelector: T => Observable[U], resultSelector: (T, U) => R): Observable[R] = { val jCollectionSelector = new Func1[T, rx.Observable[_ <: U]] { override def call(t: T): rx.Observable[_ <: U] = collectionSelector(t).asJavaObservable } toScalaObservable[R](asJavaObservable.mergeMap[U, R](jCollectionSelector, resultSelector)) } /** * Returns an Observable that merges each item emitted by the source Observable with the values in an * Iterable corresponding to that item that is generated by a selector. 
   *
   * <img width="640" height="310" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/mergeMapIterable.png">
   *
   * @tparam R the type of item emitted by the resulting Observable
   * @param collectionSelector a function that returns an Iterable sequence of values when given an item emitted by the
   *        source Observable
   * @return an Observable that emits the results of merging the items emitted by the source Observable with
   *         the values in the Iterables corresponding to those items, as generated by `collectionSelector`
   */
  def flatMapIterable[R](collectionSelector: T => Iterable[R]): Observable[R] = {
    // Adapt the Scala Iterable-returning selector to the java.lang.Iterable shape RxJava expects.
    val jCollectionSelector = new Func1[T, java.lang.Iterable[_ <: R]] {
      override def call(t: T): java.lang.Iterable[_ <: R] = collectionSelector(t).asJava
    }
    toScalaObservable[R](asJavaObservable.mergeMapIterable[R](jCollectionSelector))
  }

  /**
   * Returns an Observable that emits the results of applying a function to the pair of values from the source
   * Observable and an Iterable corresponding to that item that is generated by a selector.
   *
   * <img width="640" height="390" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/mergeMapIterable.r.png">
   *
   * @tparam U the collection element type
   * @tparam R the type of item emitted by the resulting Observable
   * @param collectionSelector a function that returns an Iterable sequence of values for each item emitted by the source
   *        Observable
   * @param resultSelector a function that returns an item based on the item emitted by the source Observable and the
   *        Iterable returned for that item by the `collectionSelector`
   * @return an Observable that emits the items returned by `resultSelector` for each item in the source Observable
   */
  def flatMapIterable[U, R](collectionSelector: T => Iterable[U], resultSelector: (T, U) => R): Observable[R] = {
    // Adapt the Scala Iterable-returning selector to the java.lang.Iterable shape RxJava expects.
    val jCollectionSelector = new Func1[T, java.lang.Iterable[_ <: U]] {
      override def call(t: T): java.lang.Iterable[_ <: U] = collectionSelector(t).asJava
    }
    toScalaObservable[R](asJavaObservable.mergeMapIterable[U, R](jCollectionSelector, resultSelector))
  }

  /**
   * Returns an Observable that applies the given function to each item emitted by an
   * Observable and emits the result.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/map.png">
   *
   * @param func
   *            a function to apply to each item emitted by the Observable
   * @return an Observable that emits the items from the source Observable, transformed by the
   *         given function
   */
  def map[R](func: T => R): Observable[R] = {
    toScalaObservable[R](asJavaObservable.map[R](new Func1[T,R] {
      def call(t1: T): R = func(t1)
    }))
  }

  /**
   * Turns all of the notifications from a source Observable into [[rx.lang.scala.Observer.onNext onNext]] emissions,
   * and marks them with their original notification types within [[rx.lang.scala.Notification]] objects.
* * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/materialize.png"> * * @return an Observable whose items are the result of materializing the items and * notifications of the source Observable */ def materialize: Observable[Notification[T]] = { toScalaObservable[rx.Notification[_ <: T]](asJavaObservable.materialize()).map(Notification(_)) } /** * Asynchronously subscribes and unsubscribes Observers on the specified [[rx.lang.scala.Scheduler]]. * * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/subscribeOn.png"> * * @param scheduler * the [[rx.lang.scala.Scheduler]] to perform subscription and unsubscription actions on * @return the source Observable modified so that its subscriptions and unsubscriptions happen * on the specified [[rx.lang.scala.Scheduler]] */ def subscribeOn(scheduler: Scheduler): Observable[T] = { toScalaObservable[T](asJavaObservable.subscribeOn(scheduler)) } /** * Asynchronously unsubscribes on the specified [[Scheduler]]. * * @param scheduler the [[Scheduler]] to perform subscription and unsubscription actions on * @return the source Observable modified so that its unsubscriptions happen on the specified [[Scheduler]] * @since 0.17 */ def unsubscribeOn(scheduler: Scheduler): Observable[T] = { toScalaObservable[T](asJavaObservable.unsubscribeOn(scheduler)) } /** * Asynchronously notify [[rx.lang.scala.Observer]]s on the specified [[rx.lang.scala.Scheduler]]. 
* * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/observeOn.png"> * * @param scheduler * the [[rx.lang.scala.Scheduler]] to notify [[rx.lang.scala.Observer]]s on * @return the source Observable modified so that its [[rx.lang.scala.Observer]]s are notified on the * specified [[rx.lang.scala.Scheduler]] */ def observeOn(scheduler: Scheduler): Observable[T] = { toScalaObservable[T](asJavaObservable.observeOn(scheduler)) } /** * Returns an Observable that reverses the effect of [[rx.lang.scala.Observable.materialize]] by * transforming the [[rx.lang.scala.Notification]] objects emitted by the source Observable into the items * or notifications they represent. * * This operation is only available if `this` is of type `Observable[Notification[U]]` for some `U`, * otherwise you will get a compilation error. * * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/dematerialize.png"> * * @return an Observable that emits the items and notifications embedded in the [[rx.lang.scala.Notification]] objects emitted by the source Observable * * @usecase def dematerialize[U]: Observable[U] * @inheritdoc * */ // with =:= it does not work, why? def dematerialize[U](implicit evidence: Observable[T] <:< Observable[Notification[U]]): Observable[U] = { val o1: Observable[Notification[U]] = this val o2: Observable[rx.Notification[_ <: U]] = o1.map(_.asJavaNotification) val o3 = o2.asJavaObservable.dematerialize[U]() toScalaObservable[U](o3) } /** * Instruct an Observable to pass control to another Observable rather than invoking [[rx.lang.scala.Observer.onError onError]] if it encounters an error. 
* * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/onErrorResumeNext.png"> * * By default, when an Observable encounters an error that prevents it from emitting the * expected item to its [[rx.lang.scala.Observer]], the Observable invokes its Observer's * `onError` method, and then quits without invoking any more of its Observer's * methods. The `onErrorResumeNext` method changes this behavior. If you pass a * function that returns an Observable (`resumeFunction`) to * `onErrorResumeNext`, if the original Observable encounters an error, instead of * invoking its Observer's `onError` method, it will instead relinquish control to * the Observable returned from `resumeFunction`, which will invoke the Observer's * [[rx.lang.scala.Observer.onNext onNext]] method if it is able to do so. In such a case, because no * Observable necessarily invokes `onError`, the Observer may never know that an * error happened. * * You can use this to prevent errors from propagating or to supply fallback data should errors * be encountered. * * @param resumeFunction * a function that returns an Observable that will take over if the source Observable * encounters an error * @return the original Observable, with appropriately modified behavior */ def onErrorResumeNext[U >: T](resumeFunction: Throwable => Observable[U]): Observable[U] = { val f: Func1[Throwable, rx.Observable[_ <: U]] = (t: Throwable) => resumeFunction(t).asJavaObservable val f2 = f.asInstanceOf[Func1[Throwable, rx.Observable[Nothing]]] toScalaObservable[U](asJavaObservable.onErrorResumeNext(f2)) } /** * Instruct an Observable to pass control to another Observable rather than invoking [[rx.lang.scala.Observer.onError onError]] if it encounters an error. 
* * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/onErrorResumeNext.png"> * * By default, when an Observable encounters an error that prevents it from emitting the * expected item to its [[rx.lang.scala.Observer]], the Observable invokes its Observer's * `onError` method, and then quits without invoking any more of its Observer's * methods. The `onErrorResumeNext` method changes this behavior. If you pass * another Observable (`resumeSequence`) to an Observable's * `onErrorResumeNext` method, if the original Observable encounters an error, * instead of invoking its Observer's `onError` method, it will instead relinquish * control to `resumeSequence` which will invoke the Observer's [[rx.lang.scala.Observer.onNext onNext]] * method if it is able to do so. In such a case, because no * Observable necessarily invokes `onError`, the Observer may never know that an * error happened. * * You can use this to prevent errors from propagating or to supply fallback data should errors * be encountered. * * @param resumeSequence * a function that returns an Observable that will take over if the source Observable * encounters an error * @return the original Observable, with appropriately modified behavior */ def onErrorResumeNext[U >: T](resumeSequence: Observable[U]): Observable[U] = { val rSeq1: rx.Observable[_ <: U] = resumeSequence.asJavaObservable val rSeq2: rx.Observable[Nothing] = rSeq1.asInstanceOf[rx.Observable[Nothing]] toScalaObservable[U](asJavaObservable.onErrorResumeNext(rSeq2)) } /** * Instruct an Observable to pass control to another Observable rather than invoking [[rx.lang.scala.Observer.onError onError]] if it encounters an error of type `java.lang.Exception`. * * This differs from `Observable.onErrorResumeNext` in that this one does not handle `java.lang.Throwable` or `java.lang.Error` but lets those continue through. 
* * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/onErrorResumeNext.png"> * * By default, when an Observable encounters an error that prevents it from emitting the * expected item to its [[rx.lang.scala.Observer]], the Observable invokes its Observer's * `onError` method, and then quits without invoking any more of its Observer's * methods. The `onErrorResumeNext` method changes this behavior. If you pass * another Observable (`resumeSequence`) to an Observable's * `onErrorResumeNext` method, if the original Observable encounters an error, * instead of invoking its Observer's `onError` method, it will instead relinquish * control to `resumeSequence` which will invoke the Observer's [[rx.lang.scala.Observer.onNext onNext]] * method if it is able to do so. In such a case, because no * Observable necessarily invokes `onError`, the Observer may never know that an * error happened. * * You can use this to prevent errors from propagating or to supply fallback data should errors * be encountered. * * @param resumeSequence * a function that returns an Observable that will take over if the source Observable * encounters an error * @return the original Observable, with appropriately modified behavior */ def onExceptionResumeNext[U >: T](resumeSequence: Observable[U]): Observable[U] = { val rSeq1: rx.Observable[_ <: U] = resumeSequence.asJavaObservable val rSeq2: rx.Observable[Nothing] = rSeq1.asInstanceOf[rx.Observable[Nothing]] toScalaObservable[U](asJavaObservable.onExceptionResumeNext(rSeq2)) } /** * Instruct an Observable to emit an item (returned by a specified function) rather than * invoking [[rx.lang.scala.Observer.onError onError]] if it encounters an error. 
* * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/onErrorReturn.png"> * * By default, when an Observable encounters an error that prevents it from emitting the * expected item to its [[rx.lang.scala.Observer]], the Observable invokes its Observer's * `onError` method, and then quits without invoking any more of its Observer's * methods. The `onErrorReturn` method changes this behavior. If you pass a function * (`resumeFunction`) to an Observable's `onErrorReturn` method, if the * original Observable encounters an error, instead of invoking its Observer's * `onError` method, it will instead pass the return value of * `resumeFunction` to the Observer's [[rx.lang.scala.Observer.onNext onNext]] method. * * You can use this to prevent errors from propagating or to supply fallback data should errors * be encountered. * * @param resumeFunction * a function that returns an item that the new Observable will emit if the source * Observable encounters an error * @return the original Observable with appropriately modified behavior */ def onErrorReturn[U >: T](resumeFunction: Throwable => U): Observable[U] = { val f1: Func1[Throwable, _ <: U] = resumeFunction val f2 = f1.asInstanceOf[Func1[Throwable, Nothing]] toScalaObservable[U](asJavaObservable.onErrorReturn(f2)) } /** * Intercepts `onError` notifications from the source Observable and replaces them with the * `onNext` emissions of an Observable returned by a specified function. This allows the source * sequence to continue even if it issues multiple `onError` notifications. * * <img width="640" height="310" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/onErrorFlatMap.png"> * * @param resumeFunction a function that accepts an `Throwable` and an `Option` associated with this error representing * the Throwable issued by the source Observable, and returns an Observable that emits items * that will be emitted in place of the error. 
If no value is associated with the error, the value
   *        will be `None`.
   * @return the original Observable, with appropriately modified behavior
   */
  def onErrorFlatMap[U >: T](resumeFunction: (Throwable, Option[Any]) => Observable[U]): Observable[U] = {
    val f = new Func1[rx.exceptions.OnErrorThrowable, rx.Observable[_ <: U]] {
      override def call(t: rx.exceptions.OnErrorThrowable): rx.Observable[_ <: U] = {
        // NOTE(review): RxJava 1.x's OnErrorThrowable.isValueNull() is a known misnomer — it returns
        // whether a value IS present (hasValue), so mapping `true` to Some(...) appears intentional.
        // Confirm against rx.exceptions.OnErrorThrowable in the pinned RxJava version before changing.
        val v = if (t.isValueNull) Some(t.getValue) else None
        resumeFunction(t.getCause, v).asJavaObservable
      }
    }
    // Read-only widening of the element type to U >: T for the Java call.
    val thisJava = asJavaObservable.asInstanceOf[rx.Observable[U]]
    toScalaObservable[U](thisJava.onErrorFlatMap(f))
  }

  /**
   * Returns an Observable that applies a function of your choosing to the first item emitted by a
   * source Observable, then feeds the result of that function along with the second item emitted
   * by the source Observable into the same function, and so on until all items have been emitted
   * by the source Observable, and emits the final result from the final call to your function as
   * its sole item.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/reduce.png">
   *
   * This technique, which is called "reduce" or "aggregate" here, is sometimes called "fold,"
   * "accumulate," "compress," or "inject" in other programming contexts. Groovy, for instance,
   * has an `inject` method that does a similar operation on lists.
* * @param accumulator * An accumulator function to be invoked on each item emitted by the source * Observable, whose result will be used in the next accumulator call * @return an Observable that emits a single item that is the result of accumulating the * output from the source Observable */ def reduce[U >: T](accumulator: (U, U) => U): Observable[U] = { val func: Func2[_ >: U, _ >: U, _ <: U] = accumulator val func2 = func.asInstanceOf[Func2[T, T, T]] toScalaObservable[U](asJavaObservable.asInstanceOf[rx.Observable[T]].reduce(func2)) } /** * Returns a pair of a start function and an [[rx.lang.scala.Observable]] that shares a single subscription to the underlying * Observable that will replay all of its items and notifications to any future [[rx.lang.scala.Observer]]. * * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/replay.png"> * * @return a pair of a start function and an [[rx.lang.scala.Observable]] such that when the start function * is called, the Observable starts to emit items to its [[rx.lang.scala.Observer]]s */ def replay: ConnectableObservable[T] = { new ConnectableObservable[T](asJavaObservable.replay()) } /** * Returns an Observable that emits items that are the results of invoking a specified selector on the items * emitted by a `ConnectableObservable` that shares a single subscription to the source Observable. 
* <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/replay.f.png"> * * @param selector the selector function, which can use the multicasted sequence as many times as needed, without * causing multiple subscriptions to the Observable * @return an Observable that emits items that are the results of invoking the selector on a `ConnectableObservable` * that shares a single subscription to the source Observable */ def replay[U >: T, R](selector: Observable[U] => Observable[R]): Observable[R] = { val thisJava = this.asJavaObservable.asInstanceOf[rx.Observable[U]] val fJava: Func1[rx.Observable[U], rx.Observable[R]] = (jo: rx.Observable[U]) => selector(toScalaObservable[U](jo)).asJavaObservable.asInstanceOf[rx.Observable[R]] toScalaObservable[R](thisJava.replay(fJava)) } /** * Returns an Observable that emits items that are the results of invoking a specified selector on items * emitted by a `ConnectableObservable` that shares a single subscription to the source Observable, * replaying `bufferSize` notifications. 
* <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/replay.fn.png"> * * @param selector the selector function, which can use the multicasted sequence as many times as needed, without * causing multiple subscriptions to the Observable * @param bufferSize the buffer size that limits the number of items the connectable observable can replay * @return an Observable that emits items that are the results of invoking the selector on items emitted by * a `ConnectableObservable` that shares a single subscription to the source Observable replaying * no more than `bufferSize` items */ def replay[U >: T, R](selector: Observable[U] => Observable[R], bufferSize: Int): Observable[R] = { val thisJava = this.asJavaObservable.asInstanceOf[rx.Observable[U]] val fJava: Func1[rx.Observable[U], rx.Observable[R]] = (jo: rx.Observable[U]) => selector(toScalaObservable[U](jo)).asJavaObservable.asInstanceOf[rx.Observable[R]] toScalaObservable[R](thisJava.replay(fJava, bufferSize)) } /** * Returns an Observable that emits items that are the results of invoking a specified selector on items * emitted by a `ConnectableObservable` that shares a single subscription to the source Observable, * replaying no more than `bufferSize` items that were emitted within a specified time window. 
* <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/replay.fnt.png"> * * @param selector a selector function, which can use the multicasted sequence as many times as needed, without * causing multiple subscriptions to the Observable * @param bufferSize the buffer size that limits the number of items the connectable observable can replay * @param time the duration of the window in which the replayed items must have been emitted * @return an Observable that emits items that are the results of invoking the selector on items emitted by * a `ConnectableObservable` that shares a single subscription to the source Observable, and * replays no more than `bufferSize` items that were emitted within the window defined by `time` */ def replay[U >: T, R](selector: Observable[U] => Observable[R], bufferSize: Int, time: Duration): Observable[R] = { val thisJava = this.asJavaObservable.asInstanceOf[rx.Observable[U]] val fJava: Func1[rx.Observable[U], rx.Observable[R]] = (jo: rx.Observable[U]) => selector(toScalaObservable[U](jo)).asJavaObservable.asInstanceOf[rx.Observable[R]] toScalaObservable[R](thisJava.replay(fJava, bufferSize, time.length, time.unit)) } /** * Returns an Observable that emits items that are the results of invoking a specified selector on items * emitted by a `ConnectableObservable` that shares a single subscription to the source Observable, * replaying no more than `bufferSize` items that were emitted within a specified time window. 
* <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/replay.fnts.png"> * * @param selector a selector function, which can use the multicasted sequence as many times as needed, without * causing multiple subscriptions to the Observable * @param bufferSize the buffer size that limits the number of items the connectable observable can replay * @param time the duration of the window in which the replayed items must have been emitted * @param scheduler the Scheduler that is the time source for the window * @return an Observable that emits items that are the results of invoking the selector on items emitted by * a `ConnectableObservable` that shares a single subscription to the source Observable, and * replays no more than `bufferSize` items that were emitted within the window defined by `time` * @throws IllegalArgumentException if `bufferSize` is less than zero */ def replay[U >: T, R](selector: Observable[U] => Observable[R], bufferSize: Int, time: Duration, scheduler: Scheduler): Observable[R] = { val thisJava = this.asJavaObservable.asInstanceOf[rx.Observable[U]] val fJava: Func1[rx.Observable[U], rx.Observable[R]] = (jo: rx.Observable[U]) => selector(toScalaObservable[U](jo)).asJavaObservable.asInstanceOf[rx.Observable[R]] toScalaObservable[R](thisJava.replay(fJava, bufferSize, time.length, time.unit, scheduler)) } /** * Returns an Observable that emits items that are the results of invoking a specified selector on items * emitted by a `ConnectableObservable` that shares a single subscription to the source Observable, * replaying a maximum of `bufferSize` items. 
* <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/replay.fns.png"> * * @param selector a selector function, which can use the multicasted sequence as many times as needed, without * causing multiple subscriptions to the Observable * @param bufferSize the buffer size that limits the number of items the connectable observable can replay * @param scheduler the Scheduler on which the replay is observed * @return an Observable that emits items that are the results of invoking the selector on items emitted by * a `ConnectableObservable` that shares a single subscription to the source Observable, * replaying no more than `bufferSize` notifications */ def replay[U >: T, R](selector: Observable[U] => Observable[R], bufferSize: Int, scheduler: Scheduler): Observable[R] = { val thisJava = this.asJavaObservable.asInstanceOf[rx.Observable[U]] val fJava: Func1[rx.Observable[U], rx.Observable[R]] = (jo: rx.Observable[U]) => selector(toScalaObservable[U](jo)).asJavaObservable.asInstanceOf[rx.Observable[R]] toScalaObservable[R](thisJava.replay(fJava, bufferSize, scheduler)) } /** * Returns an Observable that emits items that are the results of invoking a specified selector on items * emitted by a `ConnectableObservable` that shares a single subscription to the source Observable, * replaying all items that were emitted within a specified time window. 
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/replay.ft.png">
   *
   * @param selector a selector function, which can use the multicasted sequence as many times as needed, without
   *        causing multiple subscriptions to the Observable
   * @param time the duration of the window in which the replayed items must have been emitted
   * @param scheduler the Scheduler that is the time source for the window
   * @return an Observable that emits items that are the results of invoking the selector on items emitted by
   *         a `ConnectableObservable` that shares a single subscription to the source Observable,
   *         replaying all items that were emitted within the window defined by `time`
   */
  def replay[U >: T, R](selector: Observable[U] => Observable[R], time: Duration, scheduler: Scheduler): Observable[R] = {
    val thisJava = this.asJavaObservable.asInstanceOf[rx.Observable[U]]
    // Bridge the Scala selector into RxJava's Func1 over raw rx.Observable values.
    val fJava: Func1[rx.Observable[U], rx.Observable[R]] =
      (jo: rx.Observable[U]) => selector(toScalaObservable[U](jo)).asJavaObservable.asInstanceOf[rx.Observable[R]]
    toScalaObservable[R](thisJava.replay(fJava, time.length, time.unit, scheduler))
  }

  /**
   * Returns an Observable that emits items that are the results of invoking a specified selector on items
   * emitted by a `ConnectableObservable` that shares a single subscription to the source Observable.
* <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/replay.fs.png"> * * @param selector a selector function, which can use the multicasted sequence as many times as needed, without * causing multiple subscriptions to the Observable * @param scheduler the Scheduler where the replay is observed * @return an Observable that emits items that are the results of invoking the selector on items emitted by * a `ConnectableObservable` that shares a single subscription to the source Observable, * replaying all items */ def replay[U >: T, R](selector: Observable[U] => Observable[R], scheduler: Scheduler): Observable[R] = { val thisJava = this.asJavaObservable.asInstanceOf[rx.Observable[U]] val fJava: Func1[rx.Observable[U], rx.Observable[R]] = (jo: rx.Observable[U]) => selector(toScalaObservable[U](jo)).asJavaObservable.asInstanceOf[rx.Observable[R]] toScalaObservable[R](thisJava.replay(fJava, scheduler)) } /** * Returns a `ConnectableObservable` that shares a single subscription to the source Observable and * replays at most `bufferSize` items that were emitted during a specified time window. * <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/replay.nt.png"> * * @param bufferSize the buffer size that limits the number of items that can be replayed * @param time the duration of the window in which the replayed items must have been emitted * @return a `ConnectableObservable` that shares a single subscription to the source Observable and * replays at most `bufferSize` items that were emitted during the window defined by `time` */ def replay(bufferSize: Int, time: Duration): ConnectableObservable[T] = { new ConnectableObservable[T](asJavaObservable.replay(bufferSize, time.length, time.unit)) } /** * Returns a `ConnectableObservable` that shares a single subscription to the source Observable and * that replays a maximum of `bufferSize` items that are emitted within a specified time window. 
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/replay.nts.png">
   *
   * @param bufferSize the buffer size that limits the number of items that can be replayed
   * @param time the duration of the window in which the replayed items must have been emitted
   * @param scheduler the scheduler that is used as a time source for the window
   * @return a `ConnectableObservable` that shares a single subscription to the source Observable and
   *         replays at most `bufferSize` items that were emitted during the window defined by `time`
   * @throws IllegalArgumentException if `bufferSize` is less than zero
   */
  def replay(bufferSize: Int, time: Duration, scheduler: Scheduler): ConnectableObservable[T] = {
    new ConnectableObservable[T](asJavaObservable.replay(bufferSize, time.length, time.unit, scheduler))
  }

  /**
   * Returns an Observable that emits items that are the results of invoking a specified selector on items
   * emitted by a `ConnectableObservable` that shares a single subscription to the source Observable,
   * replaying all items that were emitted within a specified time window.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/replay.ft.png">
   *
   * @param selector a selector function, which can use the multicasted sequence as many times as needed, without
   *        causing multiple subscriptions to the Observable
   * @param time the duration of the window in which the replayed items must have been emitted
   * @return an Observable that emits items that are the results of invoking the selector on items emitted by
   *         a `ConnectableObservable` that shares a single subscription to the source Observable,
   *         replaying all items that were emitted within the window defined by `time`
   */
  def replay[U >: T, R](selector: Observable[U] => Observable[R], time: Duration): Observable[R] = {
    val thisJava = this.asJavaObservable.asInstanceOf[rx.Observable[U]]
    // Bridge the Scala selector into RxJava's Func1 over raw rx.Observable values.
    val fJava: Func1[rx.Observable[U], rx.Observable[R]] =
      (jo: rx.Observable[U]) => selector(toScalaObservable[U](jo)).asJavaObservable.asInstanceOf[rx.Observable[R]]
    toScalaObservable[R](thisJava.replay(fJava, time.length, time.unit))
  }

  /**
   * Returns a `ConnectableObservable` that shares a single subscription to the source Observable that
   * replays at most `bufferSize` items emitted by that Observable.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/replay.n.png">
   *
   * @param bufferSize the buffer size that limits the number of items that can be replayed
   * @return a `ConnectableObservable` that shares a single subscription to the source Observable and
   *         replays at most `bufferSize` items emitted by that Observable
   */
  def replay(bufferSize: Int): ConnectableObservable[T] = {
    new ConnectableObservable[T](asJavaObservable.replay(bufferSize))
  }

  /**
   * Returns a `ConnectableObservable` that shares a single subscription to the source Observable and
   * replays at most `bufferSize` items emitted by that Observable.
* <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/replay.ns.png"> * * @param bufferSize the buffer size that limits the number of items that can be replayed * @param scheduler the scheduler on which the Observers will observe the emitted items * @return a `ConnectableObservable` that shares a single subscription to the source Observable and * replays at most `bufferSize` items that were emitted by the Observable */ def replay(bufferSize: Int, scheduler: Scheduler): ConnectableObservable[T] = { new ConnectableObservable[T](asJavaObservable.replay(bufferSize, scheduler)) } /** * Returns a `ConnectableObservable` that shares a single subscription to the source Observable and * replays all items emitted by that Observable within a specified time window. * <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/replay.t.png"> * * @param time the duration of the window in which the replayed items must have been emitted * @return a `ConnectableObservable` that shares a single subscription to the source Observable and * replays the items that were emitted during the window defined by `time` */ def replay(time: Duration): ConnectableObservable[T] = { new ConnectableObservable[T](asJavaObservable.replay(time.length, time.unit)) } /** * Returns a `ConnectableObservable` that shares a single subscription to the source Observable and * replays all items emitted by that Observable within a specified time window. 
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/replay.ts.png">
   *
   * @param time the duration of the window in which the replayed items must have been emitted
   * @param scheduler the Scheduler that is the time source for the window
   * @return a `ConnectableObservable` that shares a single subscription to the source Observable and
   *         replays the items that were emitted during the window defined by `time`
   */
  def replay(time: Duration, scheduler: Scheduler): ConnectableObservable[T] = {
    new ConnectableObservable[T](asJavaObservable.replay(time.length, time.unit, scheduler))
  }

  /**
   * Returns a `ConnectableObservable` that shares a single subscription to the source Observable that
   * will replay all of its items and notifications to any future `Observer` on the given `Scheduler`.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/replay.s.png">
   *
   * @param scheduler the Scheduler on which the Observers will observe the emitted items
   * @return a `ConnectableObservable` that shares a single subscription to the source Observable that
   *         will replay all of its items and notifications to any future `Observer` on the given `Scheduler`
   */
  def replay(scheduler: Scheduler): ConnectableObservable[T] = {
    new ConnectableObservable[T](asJavaObservable.replay(scheduler))
  }

  /**
   * This method has similar behavior to [[rx.lang.scala.Observable.replay]] except that this auto-subscribes to
   * the source Observable rather than returning a start function and an Observable.
   *
   * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/cache.png">
   *
   * This is useful when you want an Observable to cache responses and you can't control the
   * subscribe/unsubscribe behavior of all the [[rx.lang.scala.Observer]]s.
   *
   * When you call `cache`, it does not yet subscribe to the
   * source Observable. This only happens when `subscribe` is called
   * the first time on the Observable returned by `cache()`.
   *
   * Note: You sacrifice the ability to unsubscribe from the origin when you use the
   * `cache()` operator so be careful not to use this operator on Observables that
   * emit an infinite or very large number of items that will use up memory.
   *
   * @return an Observable that when first subscribed to, caches all of its notifications for
   *         the benefit of subsequent subscribers.
   */
  def cache: Observable[T] = {
    toScalaObservable[T](asJavaObservable.cache())
  }

  /**
   * Returns a new [[Observable]] that multicasts (shares) the original [[Observable]]. As long as
   * there is more than 1 [[Subscriber]], this [[Observable]] will be subscribed and emitting data.
   * When all subscribers have unsubscribed it will unsubscribe from the source [[Observable]].
   *
   * This is an alias for `publish().refCount()`
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/publishRefCount.png">
   *
   * @return a [[Observable]] that upon connection causes the source Observable to emit items to its [[Subscriber]]s
   * @since 0.19
   */
  def share: Observable[T] = {
    toScalaObservable[T](asJavaObservable.share())
  }

  /**
   * Returns an Observable that emits a Boolean that indicates whether the source Observable emitted a
   * specified item.
   *
   * Note: this method uses `==` to compare elements. It's a bit different from RxJava which uses `Object.equals`.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/contains.png">
   *
   * @param elem the item to search for in the emissions from the source Observable
   * @return an Observable that emits `true` if the specified item is emitted by the source Observable,
   *         or `false` if the source Observable completes without emitting that item
   */
  def contains[U >: T](elem: U): Observable[Boolean] = {
    exists(_ == elem)
  }

  /**
   * Returns a pair of a start function and an [[rx.lang.scala.Observable]], which waits until the start function is called before it begins emitting
   * items to those [[rx.lang.scala.Observer]]s that have subscribed to it.
   *
   * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/publishConnect.png">
   *
   * @return an [[rx.lang.scala.observables.ConnectableObservable]].
   */
  def publish: ConnectableObservable[T] = {
    new ConnectableObservable[T](asJavaObservable.publish())
  }

  /**
   * Returns an Observable that emits `initialValue` followed by the items emitted by a `ConnectableObservable` that shares a single subscription to the source Observable.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/publishConnect.i.png">
   *
   * @param initialValue the initial value to be emitted by the resulting Observable
   * @return a `ConnectableObservable` that shares a single subscription to the underlying Observable and starts with `initialValue`
   */
  def publish[U >: T](initialValue: U): ConnectableObservable[U] = {
    val thisJava = this.asJavaObservable.asInstanceOf[rx.Observable[U]]
    new ConnectableObservable[U](thisJava.publish(initialValue))
  }

  /**
   * Returns an Observable that emits the results of invoking a specified selector on items emitted by a `ConnectableObservable`
   * that shares a single subscription to the underlying sequence.
* <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/publishConnect.f.png"> * * @param selector a function that can use the multicasted source sequence as many times as needed, without * causing multiple subscriptions to the source sequence. Subscribers to the given source will * receive all notifications of the source from the time of the subscription forward. * @return an Observable that emits the results of invoking the selector on the items emitted by a `ConnectableObservable` * that shares a single subscription to the underlying sequence */ def publish[U >: T, R](selector: Observable[U] => Observable[R]): Observable[R] = { val thisJava = this.asJavaObservable.asInstanceOf[rx.Observable[U]] val fJava: Func1[rx.Observable[U], rx.Observable[R]] = (jo: rx.Observable[U]) => selector(toScalaObservable[U](jo)).asJavaObservable.asInstanceOf[rx.Observable[R]] toScalaObservable[R](thisJava.publish(fJava)) } /** * Returns an Observable that emits `initialValue` followed by the results of invoking a specified * selector on items emitted by a `ConnectableObservable` that shares a single subscription to the * source Observable. * <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/publishConnect.if.png"> * * @param selector a function that can use the multicasted source sequence as many times as needed, without * causing multiple subscriptions to the source Observable. 
Subscribers to the source will * receive all notifications of the source from the time of the subscription forward * @param initialValue the initial value of the underlying `BehaviorSubject` * @return an Observable that emits `initialValue` followed by the results of invoking the selector * on a `ConnectableObservable` that shares a single subscription to the underlying Observable */ def publish[U >: T, R](selector: Observable[U] => Observable[R], initialValue: U): Observable[R] = { val thisJava = this.asJavaObservable.asInstanceOf[rx.Observable[U]] val fJava: Func1[rx.Observable[U], rx.Observable[R]] = (jo: rx.Observable[U]) => selector(toScalaObservable[U](jo)).asJavaObservable.asInstanceOf[rx.Observable[R]] toScalaObservable[R](thisJava.publish(fJava, initialValue)) } /** * Returns a [[ConnectableObservable]] that emits only the last item emitted by the source Observable. * A [[ConnectableObservable]] resembles an ordinary Observable, except that it does not begin emitting items * when it is subscribed to, but only when its `connect` method is called. * <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/publishLast.png"> * * @return a [[ConnectableObservable]] that emits only the last item emitted by the source Observable */ def publishLast: ConnectableObservable[T] = { new ConnectableObservable[T](asJavaObservable.publishLast()) } /** * Returns an Observable that emits an item that results from invoking a specified selector on the last item * emitted by a [[ConnectableObservable]] that shares a single subscription to the source Observable. * <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/publishLast.f.png"> * * @param selector a function that can use the multicasted source sequence as many times as needed, without * causing multiple subscriptions to the source Observable. Subscribers to the source will only * receive the last item emitted by the source. 
* @return an Observable that emits an item that is the result of invoking the selector on a [[ConnectableObservable]] * that shares a single subscription to the source Observable */ def publishLast[R](selector: Observable[T] => Observable[R]): Observable[R] = { val thisJava = this.asJavaObservable.asInstanceOf[rx.Observable[T]] val fJava = new rx.functions.Func1[rx.Observable[T], rx.Observable[R]]() { override def call(jo: rx.Observable[T]): rx.Observable[R] = selector(toScalaObservable[T](jo)).asJavaObservable.asInstanceOf[rx.Observable[R]] } toScalaObservable[R](thisJava.publishLast(fJava)) } // TODO add Scala-like aggregate function /** * Returns an Observable that applies a function of your choosing to the first item emitted by a * source Observable, then feeds the result of that function along with the second item emitted * by an Observable into the same function, and so on until all items have been emitted by the * source Observable, emitting the final result from the final call to your function as its sole * item. * * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/reduceSeed.png"> * * This technique, which is called "reduce" or "aggregate" here, is sometimes called "fold," * "accumulate," "compress," or "inject" in other programming contexts. Groovy, for instance, * has an `inject` method that does a similar operation on lists. 
* * @param initialValue * the initial (seed) accumulator value * @param accumulator * an accumulator function to be invoked on each item emitted by the source * Observable, the result of which will be used in the next accumulator call * @return an Observable that emits a single item that is the result of accumulating the output * from the items emitted by the source Observable */ def foldLeft[R](initialValue: R)(accumulator: (R, T) => R): Observable[R] = { toScalaObservable[R](asJavaObservable.reduce(initialValue, new Func2[R,T,R]{ def call(t1: R, t2: T): R = accumulator(t1,t2) })) } /** * Returns an Observable that emits the results of sampling the items emitted by the source * Observable at a specified time interval. * * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/sample.png"> * * @param duration the sampling rate * @return an Observable that emits the results of sampling the items emitted by the source * Observable at the specified time interval */ def sample(duration: Duration): Observable[T] = { toScalaObservable[T](asJavaObservable.sample(duration.length, duration.unit)) } /** * Returns an Observable that emits the results of sampling the items emitted by the source * Observable at a specified time interval. * * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/sample.png"> * * @param duration the sampling rate * @param scheduler * the [[rx.lang.scala.Scheduler]] to use when sampling * @return an Observable that emits the results of sampling the items emitted by the source * Observable at the specified time interval */ def sample(duration: Duration, scheduler: Scheduler): Observable[T] = { toScalaObservable[T](asJavaObservable.sample(duration.length, duration.unit, scheduler)) } /** * Return an Observable that emits the results of sampling the items emitted by the source Observable * whenever the specified sampler Observable emits an item or completes. 
* * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/sample.o.png"> * * @param sampler * the Observable to use for sampling the source Observable * @return an Observable that emits the results of sampling the items emitted by this Observable whenever * the sampler Observable emits an item or completes */ def sample(sampler: Observable[Any]): Observable[T] = { toScalaObservable[T](asJavaObservable.sample(sampler)) } /** * Returns an Observable that applies a function of your choosing to the first item emitted by a * source Observable, then feeds the result of that function along with the second item emitted * by an Observable into the same function, and so on until all items have been emitted by the * source Observable, emitting the result of each of these iterations. * * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/scanSeed.png"> * * This sort of function is sometimes called an accumulator. * * Note that when you pass a seed to `scan()` the resulting Observable will emit * that seed as its first emitted item. * * @param initialValue * the initial (seed) accumulator value * @param accumulator * an accumulator function to be invoked on each item emitted by the source * Observable, whose result will be emitted to [[rx.lang.scala.Observer]]s via * [[rx.lang.scala.Observer.onNext onNext]] and used in the next accumulator call. 
* @return an Observable that emits the results of each call to the accumulator function */ def scan[R](initialValue: R)(accumulator: (R, T) => R): Observable[R] = { toScalaObservable[R](asJavaObservable.scan(initialValue, new Func2[R,T,R]{ def call(t1: R, t2: T): R = accumulator(t1,t2) })) } /** * Returns an Observable that applies a function of your choosing to the * first item emitted by a source Observable, then feeds the result of that * function along with the second item emitted by an Observable into the * same function, and so on until all items have been emitted by the source * Observable, emitting the result of each of these iterations. * <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/scan.png"> * <p> * * @param accumulator * an accumulator function to be invoked on each item emitted by the source * Observable, whose result will be emitted to [[rx.lang.scala.Observer]]s via * [[rx.lang.scala.Observer.onNext onNext]] and used in the next accumulator call. * @return * an Observable that emits the results of each call to the * accumulator function */ def scan[U >: T](accumulator: (U, U) => U): Observable[U] = { val func: Func2[_ >: U, _ >: U, _ <: U] = accumulator val func2 = func.asInstanceOf[Func2[T, T, T]] toScalaObservable[U](asJavaObservable.asInstanceOf[rx.Observable[T]].scan(func2)) } /** * Returns an Observable that emits a Boolean that indicates whether all of the items emitted by * the source Observable satisfy a condition. 
* * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/all.png"> * * @param predicate * a function that evaluates an item and returns a Boolean * @return an Observable that emits `true` if all items emitted by the source * Observable satisfy the predicate; otherwise, `false` */ def forall(predicate: T => Boolean): Observable[Boolean] = { // type mismatch; found : rx.Observable[java.lang.Boolean] required: rx.Observable[_ <: scala.Boolean] // new Observable[Boolean](asJavaNotification.all(predicate)) // it's more fun in Scala: this.map(predicate).foldLeft(true)(_ && _) } /** * Returns an Observable that skips the first `num` items emitted by the source * Observable and emits the remainder. * * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/skip.png"> * * @param n * the number of items to skip * @return an Observable that is identical to the source Observable except that it does not * emit the first `num` items that the source emits */ def drop(n: Int): Observable[T] = { toScalaObservable[T](asJavaObservable.skip(n)) } /** * Returns an Observable that drops values emitted by the source Observable before a specified time window * elapses. * * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/skip.t.png"> * * @param time the length of the time window to drop * @return an Observable that drops values emitted by the source Observable before the time window defined * by `time` elapses and emits the remainder */ def drop(time: Duration): Observable[T] = { toScalaObservable(asJavaObservable.skip(time.length, time.unit)) } /** * Returns an Observable that drops values emitted by the source Observable before a specified time window * elapses. 
* * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/skip.t.png"> * * @param time the length of the time window to drop * @param scheduler the `Scheduler` on which the timed wait happens * @return an Observable that drops values emitted by the source Observable before the time window defined * by `time` elapses and emits the remainder */ def drop(time: Duration, scheduler: Scheduler): Observable[T] = { toScalaObservable(asJavaObservable.skip(time.length, time.unit, scheduler)) } /** * Returns an Observable that bypasses all items from the source Observable as long as the specified * condition holds true. Emits all further source items as soon as the condition becomes false. * * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/skipWhile.png"> * * @param predicate * A function to test each item emitted from the source Observable for a condition. * @return an Observable that emits all items from the source Observable as soon as the condition * becomes false. */ def dropWhile(predicate: T => Boolean): Observable[T] = { toScalaObservable(asJavaObservable.skipWhile(predicate)) } /** * Returns an Observable that drops a specified number of items from the end of the sequence emitted by the * source Observable. * <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/skipLast.png"> * <p> * This Observer accumulates a queue long enough to store the first `n` items. As more items are * received, items are taken from the front of the queue and emitted by the returned Observable. This causes * such items to be delayed. 
* * @param n number of items to drop from the end of the source sequence * @return an Observable that emits the items emitted by the source Observable except for the dropped ones * at the end * @throws IndexOutOfBoundsException if `n` is less than zero */ def dropRight(n: Int): Observable[T] = { toScalaObservable(asJavaObservable.skipLast(n)) } /** * Returns an Observable that drops items emitted by the source Observable during a specified time window * before the source completes. * <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/skipLast.t.png"> * * Note: this action will cache the latest items arriving in the specified time window. * * @param time the length of the time window * @return an Observable that drops those items emitted by the source Observable in a time window before the * source completes defined by `time` */ def dropRight(time: Duration): Observable[T] = { toScalaObservable(asJavaObservable.skipLast(time.length, time.unit)) } /** * Returns an Observable that drops items emitted by the source Observable during a specified time window * (defined on a specified scheduler) before the source completes. * <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/skipLast.ts.png"> * * Note: this action will cache the latest items arriving in the specified time window. * * @param time the length of the time window * @param scheduler the scheduler used as the time source * @return an Observable that drops those items emitted by the source Observable in a time window before the * source completes defined by `time` and `scheduler` */ def dropRight(time: Duration, scheduler: Scheduler): Observable[T] = { toScalaObservable(asJavaObservable.skipLast(time.length, time.unit, scheduler)) } /** * Returns an Observable that skips items emitted by the source Observable until a second Observable emits an item. 
* <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/skipUntil.png"> * * @param other the second Observable that has to emit an item before the source Observable's elements begin * to be mirrored by the resulting Observable * @return an Observable that skips items from the source Observable until the second Observable emits an * item, then emits the remaining items * @see <a href="https://github.com/Netflix/RxJava/wiki/Filtering-Observables#wiki-skipuntil">RxJava Wiki: skipUntil()</a> * @see <a href="http://msdn.microsoft.com/en-us/library/hh229358.aspx">MSDN: Observable.SkipUntil</a> */ def dropUntil[E](other: Observable[E]): Observable[T] = { toScalaObservable[T](asJavaObservable.skipUntil(other)) } /** * Returns an Observable that emits only the first `num` items emitted by the source * Observable. * * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/take.png"> * * This method returns an Observable that will invoke a subscribing [[rx.lang.scala.Observer]]'s * [[rx.lang.scala.Observer.onNext onNext]] function a maximum of `num` times before invoking * [[rx.lang.scala.Observer.onCompleted onCompleted]]. * * @param n * the number of items to take * @return an Observable that emits only the first `num` items from the source * Observable, or all of the items from the source Observable if that Observable emits * fewer than `num` items */ def take(n: Int): Observable[T] = { toScalaObservable[T](asJavaObservable.take(n)) } /** * Returns an Observable that emits those items emitted by source Observable before a specified time runs out. 
* <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/take.t.png"> * * @param time the length of the time window * @return an Observable that emits those items emitted by the source Observable before the time runs out */ def take(time: Duration): Observable[T] = { toScalaObservable[T](asJavaObservable.take(time.length, time.unit)) } /** * Returns an Observable that emits those items emitted by source Observable before a specified time (on * specified Scheduler) runs out * <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/take.ts.png"> * * @param time the length of the time window * @param scheduler the Scheduler used for time source * @return an Observable that emits those items emitted by the source Observable before the time runs out, * according to the specified Scheduler */ def take(time: Duration, scheduler: Scheduler) { toScalaObservable[T](asJavaObservable.take(time.length, time.unit, scheduler.asJavaScheduler)) } /** * Returns an Observable that emits items emitted by the source Observable so long as a * specified condition is true. * * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/takeWhile.png"> * * @param predicate * a function that evaluates an item emitted by the source Observable and returns a * Boolean * @return an Observable that emits the items from the source Observable so long as each item * satisfies the condition defined by `predicate` */ def takeWhile(predicate: T => Boolean): Observable[T] = { toScalaObservable[T](asJavaObservable.takeWhile(predicate)) } /** * Returns an Observable that emits only the last `count` items emitted by the source * Observable. 
* * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/last.png"> * * @param count * the number of items to emit from the end of the sequence emitted by the source * Observable * @return an Observable that emits only the last `count` items emitted by the source * Observable */ def takeRight(count: Int): Observable[T] = { toScalaObservable[T](asJavaObservable.takeLast(count)) } /** * Return an Observable that emits the items from the source Observable that were emitted in a specified * window of `time` before the Observable completed. * <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/takeLast.t.png"> * * @param time the length of the time window * @return an Observable that emits the items from the source Observable that were emitted in the window of * time before the Observable completed specified by `time` */ def takeRight(time: Duration): Observable[T] = { toScalaObservable[T](asJavaObservable.takeLast(time.length, time.unit)) } /** * Return an Observable that emits the items from the source Observable that were emitted in a specified * window of `time` before the Observable completed, where the timing information is provided by a specified * Scheduler. 
* <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/takeLast.ts.png"> * * @param time the length of the time window * @param scheduler the Scheduler that provides the timestamps for the Observed items * @return an Observable that emits the items from the source Observable that were emitted in the window of * time before the Observable completed specified by `time`, where the timing information is * provided by `scheduler` */ def takeRight(time: Duration, scheduler: Scheduler): Observable[T] = { toScalaObservable[T](asJavaObservable.takeLast(time.length, time.unit, scheduler.asJavaScheduler)) } /** * Return an Observable that emits at most a specified number of items from the source Observable that were * emitted in a specified window of time before the Observable completed. * <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/takeLast.tn.png"> * * @param count the maximum number of items to emit * @param time the length of the time window * @return an Observable that emits at most `count` items from the source Observable that were emitted * in a specified window of time before the Observable completed * @throws IllegalArgumentException if `count` is less than zero */ def takeRight(count: Int, time: Duration): Observable[T] = { toScalaObservable[T](asJavaObservable.takeLast(count, time.length, time.unit)) } /** * Return an Observable that emits at most a specified number of items from the source Observable that were * emitted in a specified window of `time` before the Observable completed, where the timing information is * provided by a given Scheduler. 
* <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/takeLast.tns.png"> * * @param count the maximum number of items to emit * @param time the length of the time window * @param scheduler the Scheduler that provides the timestamps for the observed items * @return an Observable that emits at most `count` items from the source Observable that were emitted * in a specified window of time before the Observable completed, where the timing information is * provided by the given `scheduler` * @throws IllegalArgumentException if `count` is less than zero */ def takeRight(count: Int, time: Duration, scheduler: Scheduler): Observable[T] = { toScalaObservable[T](asJavaObservable.takeLast(count, time.length, time.unit, scheduler.asJavaScheduler)) } /** * Returns an Observable that emits the items from the source Observable only until the * `other` Observable emits an item. * * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/takeUntil.png"> * * @param that * the Observable whose first emitted item will cause `takeUntil` to stop * emitting items from the source Observable * @tparam E * the type of items emitted by `other` * @return an Observable that emits the items of the source Observable until such time as * `other` emits its first item */ def takeUntil[E](that: Observable[E]): Observable[T] = { toScalaObservable[T](asJavaObservable.takeUntil(that.asJavaObservable)) } /** * Returns an Observable that emits a single item, a list composed of all the items emitted by * the source Observable. * * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/toList.png"> * * Normally, an Observable that returns multiple items will do so by invoking its [[rx.lang.scala.Observer]]'s * [[rx.lang.scala.Observer.onNext onNext]] method for each such item. 
You can change * this behavior, instructing the Observable to compose a list of all of these items and then to * invoke the Observer's `onNext` function once, passing it the entire list, by * calling the Observable's `toList` method prior to calling its `Observable.subscribe` method. * * Be careful not to use this operator on Observables that emit infinite or very large numbers * of items, as you do not have the option to unsubscribe. * * @return an Observable that emits a single item: a List containing all of the items emitted by * the source Observable. */ def toSeq: Observable[Seq[T]] = { Observable.jObsOfListToScObsOfSeq(asJavaObservable.toList) : Observable[Seq[T]] // SI-7818 } /** * Groups the items emitted by this Observable according to a specified discriminator function. * * @param f * a function that extracts the key from an item * @tparam K * the type of keys returned by the discriminator function. * @return an Observable that emits `(key, observable)` pairs, where `observable` * contains all items for which `f` returned `key`. */ def groupBy[K](f: T => K): Observable[(K, Observable[T])] = { val o1 = asJavaObservable.groupBy[K](f) : rx.Observable[_ <: rx.observables.GroupedObservable[K, _ <: T]] val func = (o: rx.observables.GroupedObservable[K, _ <: T]) => (o.getKey, toScalaObservable[T](o)) toScalaObservable[(K, Observable[T])](o1.map[(K, Observable[T])](func)) } /** * Groups the items emitted by this Observable according to a specified discriminator function and terminates these groups * according to a function. * * @param f * a function that extracts the key from an item * @param closings * the function that accepts the key of a given group and an observable representing that group, and returns * an observable that emits a single Closing when the group should be closed. * @tparam K * the type of the keys returned by the discriminator function. 
* @return an Observable that emits `(key, observable)` pairs, where `observable` * contains all items for which `f` returned `key` before `closings` emits a value. */ def groupByUntil[K](f: T => K, closings: (K, Observable[T])=>Observable[Any]): Observable[(K, Observable[T])] = { val fclosing: Func1[_ >: rx.observables.GroupedObservable[K, _ <: T], _ <: rx.Observable[_ <: Any]] = (jGrObs: rx.observables.GroupedObservable[K, _ <: T]) => closings(jGrObs.getKey, toScalaObservable[T](jGrObs)).asJavaObservable val o1 = asJavaObservable.groupByUntil[K, Any](f, fclosing) : rx.Observable[_ <: rx.observables.GroupedObservable[K, _ <: T]] val func = (o: rx.observables.GroupedObservable[K, _ <: T]) => (o.getKey, toScalaObservable[T](o)) toScalaObservable[(K, Observable[T])](o1.map[(K, Observable[T])](func)) } /** * Groups the items emitted by an [[Observable]] (transformed by a selector) according to a specified key selector function * until the duration Observable expires for the key. * * <img width="640" height="375" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/groupByUntil.png"> * * <em>Note:</em> The `Observable` in the pair `(K, Observable[V])` will cache the items it is to emit until such time as it * is subscribed to. For this reason, in order to avoid memory leaks, you should not simply ignore those `Observable` that * do not concern you. Instead, you can signal to them that they may discard their buffers by applying an operator like `take(0)` to them. 
  *
   * @param keySelector a function to extract the key for each item
   * @param valueSelector a function to map each item emitted by the source [[Observable]] to an item emitted by one
   *                      of the resulting `Observable[V]`s
   * @param closings a function to signal the expiration of a group
   * @return an [[Observable]] that emits pairs of key and `Observable[V]`, each of which corresponds to a key
   *         value and each of which emits all items emitted by the source [[Observable]] during that
   *         key's duration that share that same key value, transformed by the value selector
   */
  def groupByUntil[K, V](keySelector: T => K, valueSelector: T => V, closings: (K, Observable[V]) => Observable[Any]): Observable[(K, Observable[V])] = {
    // Scala functions convert implicitly to rx Func1 here; the anonymous Func1 below is needed
    // because the duration selector's wildcard type parameters defeat that implicit conversion.
    val jKeySelector: Func1[_ >: T, _ <: K] = keySelector
    val jValueSelector: Func1[_ >: T, _ <: V] = valueSelector
    val jDurationSelector = new Func1[rx.observables.GroupedObservable[_ <: K, _ <: V], rx.Observable[_ <: Any]] {
      // NOTE(review): the Scala Observable[Any] returned by `closings` appears to rely on an
      // implicit conversion to rx.Observable[_ <: Any] — confirm against the wrapper's implicits.
      override def call(jgo: rx.observables.GroupedObservable[_ <: K, _ <: V]): rx.Observable[_ <: Any] = closings(jgo.getKey, toScalaObservable[V](jgo))
    }
    // Re-wrap each group as a Scala (key, Observable) pair before handing it back to callers.
    val f = (o: rx.observables.GroupedObservable[K, _ <: V]) => (o.getKey, toScalaObservable[V](o))
    val jo = asJavaObservable.groupByUntil[K, V, Any](jKeySelector, jValueSelector, jDurationSelector).map[(K, Observable[V])](f)
    toScalaObservable[(K, Observable[V])](jo)
  }

  /**
   * Correlates the items emitted by two Observables based on overlapping durations.
  * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/join_.png">
   *
   * @param other
   *          the second Observable to join items from
   * @param leftDurationSelector
   *          a function to select a duration for each item emitted by the source Observable,
   *          used to determine overlap
   * @param rightDurationSelector
   *          a function to select a duration for each item emitted by the inner Observable,
   *          used to determine overlap
   * @param resultSelector
   *          a function that computes an item to be emitted by the resulting Observable for any
   *          two overlapping items emitted by the two Observables
   * @return
   *          an Observable that emits items correlating to items emitted by the source Observables
   *          that have overlapping durations
   * @see <a href="https://github.com/Netflix/RxJava/wiki/Combining-Observables#join">RxJava Wiki: join()</a>
   * @see <a href="http://msdn.microsoft.com/en-us/library/hh229750.aspx">MSDN: Observable.Join</a>
   */
  def join[S, R] (
      other: Observable[S],
      leftDurationSelector:  T => Observable[Any],
      rightDurationSelector: S => Observable[Any],
      resultSelector: (T,S) => R
  ): Observable[R] = {
    // RxJava's join signature is invariant, so the wildcard-typed wrappers must be cast down
    // to their exact type parameters before the call. The casts are safe here: each value was
    // built from exactly these Scala functions/Observables on the lines above.
    val outer : rx.Observable[_ <: T] = this.asJavaObservable
    val inner : rx.Observable[_ <: S] = other.asJavaObservable
    val left:  Func1[_ >: T, _<: rx.Observable[_ <: Any]] =   (t: T) => leftDurationSelector(t).asJavaObservable
    val right: Func1[_ >: S, _<: rx.Observable[_ <: Any]] =  (s: S) => rightDurationSelector(s).asJavaObservable
    val f: Func2[_>: T, _ >: S, _ <: R] = resultSelector

    toScalaObservable[R](
      outer.asInstanceOf[rx.Observable[T]].join[S, Any, Any, R](
        inner.asInstanceOf[rx.Observable[S]],
        left. asInstanceOf[Func1[T, rx.Observable[Any]]],
        right.asInstanceOf[Func1[S, rx.Observable[Any]]],
        f.asInstanceOf[Func2[T,S,R]])
    )
  }

  /**
   * Returns an Observable that correlates two Observables when they overlap in time and groups the results.
  *
   * <img width="640" height="380" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/groupJoin.png">
   *
   * @param other the other Observable to correlate items from the source Observable with
   * @param leftDuration a function that returns an Observable whose emissions indicate the duration of the values of
   *                     the source Observable
   * @param rightDuration a function that returns an Observable whose emissions indicate the duration of the values of
   *                      the `other` Observable
   * @param resultSelector a function that takes an item emitted by each Observable and returns the value to be emitted
   *                       by the resulting Observable
   * @return an Observable that emits items based on combining those items emitted by the source Observables
   *         whose durations overlap
   */
  def groupJoin[S, R](other: Observable[S], leftDuration: T => Observable[Any], rightDuration: S => Observable[Any], resultSelector: (T, Observable[S]) => R): Observable[R] = {
    // Same invariance workaround as `join` above: cast the wildcard-typed wrappers to their
    // exact type parameters before delegating to RxJava's groupJoin.
    val outer: rx.Observable[_ <: T] = this.asJavaObservable
    val inner: rx.Observable[_ <: S] = other.asJavaObservable
    val left: Func1[_ >: T, _ <: rx.Observable[_ <: Any]] = (t: T) => leftDuration(t).asJavaObservable
    val right: Func1[_ >: S, _ <: rx.Observable[_ <: Any]] = (s: S) => rightDuration(s).asJavaObservable
    // The result selector receives the raw Java group Observable; wrap it before calling the user's function.
    val f: Func2[_ >: T, _ >: rx.Observable[S], _ <: R] = (t: T, o: rx.Observable[S]) => resultSelector(t, toScalaObservable[S](o))
    toScalaObservable[R](
      outer.asInstanceOf[rx.Observable[T]].groupJoin[S, Any, Any, R](
        inner.asInstanceOf[rx.Observable[S]],
        left.asInstanceOf[Func1[T, rx.Observable[Any]]],
        right.asInstanceOf[Func1[S, rx.Observable[Any]]],
        f)
    )
  }

  /**
   * Returns a new Observable by applying a function that you supply to each item emitted by the source
   * Observable that returns an Observable, and then emitting the items emitted by the most recently emitted
   * of these Observables.
  *
   * <img width="640" height="350" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/switchMap.png">
   *
   * @param f a function that, when applied to an item emitted by the source Observable, returns an Observable
   * @return an Observable that emits the items emitted by the Observable returned from applying a function to
   *         the most recently emitted item emitted by the source Observable
   */
  def switchMap[R](f: T => Observable[R]): Observable[R] = {
    // Explicit Func1 instance: the wildcard result type rx.Observable[_ <: R] prevents the
    // usual implicit function conversion from kicking in.
    toScalaObservable[R](asJavaObservable.switchMap[R](new Func1[T, rx.Observable[_ <: R]] {
      def call(t: T): rx.Observable[_ <: R] = f(t).asJavaObservable
    }))
  }

  /**
   * Given an Observable that emits Observables, creates a single Observable that
   * emits the items emitted by the most recently published of those Observables.
   *
   * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/switchDo.png">
   *
   * This operation is only available if `this` is of type `Observable[Observable[U]]` for some `U`,
   * otherwise you'll get a compilation error.
   *
   * @return an Observable that emits only the items emitted by the most recently published
   *         Observable
   *
   * @usecase def switch[U]: Observable[U]
   *   @inheritdoc
   */
  def switch[U](implicit evidence: Observable[T] <:< Observable[Observable[U]]): Observable[U] = {
    // The <:< evidence proves T is itself Observable[U]; unwrap layer by layer down to the
    // raw Java nested Observable, then delegate to RxJava's switchOnNext.
    val o2: Observable[Observable[U]] = this
    val o3: Observable[rx.Observable[_ <: U]] = o2.map(_.asJavaObservable)
    val o4: rx.Observable[_ <: rx.Observable[_ <: U]] = o3.asJavaObservable
    val o5 = rx.Observable.switchOnNext[U](o4)
    toScalaObservable[U](o5)
  }
  // Naming: We follow C# (switch), not Java (switchOnNext), because Java just had to avoid clash with keyword

  /**
   * Flattens two Observables into one Observable, without any transformation.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/merge.png">
   *
   * You can combine items emitted by two Observables so that they act like a single
   * Observable by using the `merge` method.
  *
   * @param that
   *            an Observable to be merged
   * @return an Observable that emits items from `this` and `that` until
   *            `this` or `that` emits `onError` or `onComplete`.
   */
  def merge[U >: T](that: Observable[U]): Observable[U] = {
    // Widen both sides to the common supertype U before handing them to RxJava's static merge.
    val thisJava: rx.Observable[_ <: U] = this.asJavaObservable
    val thatJava: rx.Observable[_ <: U] = that.asJavaObservable
    toScalaObservable[U](rx.Observable.merge(thisJava, thatJava))
  }

  /**
   * This behaves like [[rx.lang.scala.Observable.merge]] except that if any of the merged Observables
   * notify of an error via [[rx.lang.scala.Observer.onError onError]], `mergeDelayError` will
   * refrain from propagating that error notification until all of the merged Observables have
   * finished emitting items.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/mergeDelayError.png">
   *
   * Even if multiple merged Observables send `onError` notifications, `mergeDelayError` will only invoke the `onError` method of its
   * Observers once.
   *
   * This method allows an Observer to receive all successfully emitted items from all of the
   * source Observables without being interrupted by an error notification from one of them.
   *
   * @param that
   *            an Observable to be merged
   * @return an Observable that emits items that are the result of flattening the items emitted by
   *         `this` and `that`
   */
  def mergeDelayError[U >: T](that: Observable[U]): Observable[U] = {
    toScalaObservable[U](rx.Observable.mergeDelayError[U](this.asJavaObservable, that.asJavaObservable))
  }

  /**
   * Flattens the sequence of Observables emitted by `this` into one Observable, without any
   * transformation.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/merge.png">
   *
   * You can combine the items emitted by multiple Observables so that they act like a single
   * Observable by using this method.
   *
   * This operation is only available if `this` is of type `Observable[Observable[U]]` for some `U`,
   * otherwise you'll get a compilation error.
  *
   * @return an Observable that emits items that are the result of flattening the items emitted
   *         by the Observables emitted by `this`
   *
   * @usecase def flatten[U]: Observable[U]
   *   @inheritdoc
   */
  def flatten[U](implicit evidence: Observable[T] <:< Observable[Observable[U]]): Observable[U] = {
    // Same unwrap dance as `switch`: evidence proves T is Observable[U]; unwrap to the raw
    // nested Java Observable and delegate to RxJava's merge.
    val o2: Observable[Observable[U]] = this
    val o3: Observable[rx.Observable[_ <: U]] = o2.map(_.asJavaObservable)
    val o4: rx.Observable[_ <: rx.Observable[_ <: U]] = o3.asJavaObservable
    val o5 = rx.Observable.merge[U](o4)
    toScalaObservable[U](o5)
  }

  /**
   * Flattens an Observable that emits Observables into a single Observable that emits the items emitted by
   * those Observables, without any transformation, while limiting the maximum number of concurrent
   * subscriptions to these Observables.
   *
   * <img width="640" height="370" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/merge.oo.png">
   *
   * You can combine the items emitted by multiple Observables so that they appear as a single Observable, by
   * using the `flatten` method.
  *
   * @param maxConcurrent the maximum number of Observables that may be subscribed to concurrently
   * @return an Observable that emits items that are the result of flattening the Observables emitted by the `source` Observable
   * @throws IllegalArgumentException if `maxConcurrent` is less than or equal to 0
   */
  def flatten[U](maxConcurrent: Int)(implicit evidence: Observable[T] <:< Observable[Observable[U]]): Observable[U] = {
    // Identical to flatten above, but delegates to the merge overload that caps the number of
    // concurrently subscribed inner Observables.
    val o2: Observable[Observable[U]] = this
    val o3: Observable[rx.Observable[_ <: U]] = o2.map(_.asJavaObservable)
    val o4: rx.Observable[_ <: rx.Observable[_ <: U]] = o3.asJavaObservable
    val o5 = rx.Observable.merge[U](o4, maxConcurrent)
    toScalaObservable[U](o5)
  }

  /**
   * This behaves like `flatten` except that if any of the merged Observables
   * notify of an error via [[rx.lang.scala.Observer.onError onError]], this method will
   * refrain from propagating that error notification until all of the merged Observables have
   * finished emitting items.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/mergeDelayError.png">
   *
   * Even if multiple merged Observables send `onError` notifications, this method will only invoke the `onError` method of its
   * Observers once.
   *
   * This method allows an Observer to receive all successfully emitted items from all of the
   * source Observables without being interrupted by an error notification from one of them.
   *
   * This operation is only available if `this` is of type `Observable[Observable[U]]` for some `U`,
   * otherwise you'll get a compilation error.
  *
   * @return an Observable that emits items that are the result of flattening the items emitted by
   *         the Observables emitted by the this Observable
   *
   * @usecase def flattenDelayError[U]: Observable[U]
   *   @inheritdoc
   */
  def flattenDelayError[U](implicit evidence: Observable[T] <:< Observable[Observable[U]]): Observable[U] = {
    // Same unwrap pattern as `flatten`, delegating to mergeDelayError so errors are held back
    // until all inner Observables have completed.
    val o2: Observable[Observable[U]] = this
    val o3: Observable[rx.Observable[_ <: U]] = o2.map(_.asJavaObservable)
    val o4: rx.Observable[_ <: rx.Observable[_ <: U]] = o3.asJavaObservable
    val o5 = rx.Observable.mergeDelayError[U](o4)
    toScalaObservable[U](o5)
  }

  /**
   * Combines two observables, emitting a pair of the latest values of each of
   * the source observables each time an event is received from one of the source observables, where the
   * aggregation is defined by the given function.
   *
   * @param that
   *            The second source observable.
   * @return An Observable that combines the source Observables
   */
  def combineLatest[U](that: Observable[U]): Observable[(T, U)] = {
    // Pair-building combiner; delegates to the general combineLatest with tupling as f.
    val f: Func2[_ >: T, _ >: U, _ <: (T, U)] = (t: T, u: U) => (t, u)
    toScalaObservable[(T, U)](rx.Observable.combineLatest[T, U, (T, U)](this.asJavaObservable, that.asJavaObservable, f))
  }

  /**
   * Combines two observables, emitting some type `R` specified in the function f,
   * each time an event is received from one of the source observables, where the aggregation
   * is defined by the given function.
   *
   * @param that
   *            The second source observable.
   * @param f The function that is used combine the emissions of the two observables.
   * @return An Observable that combines the source Observables according to the function f.
   */
  def combineLatest[U,R](that: Observable[U], f: (T, U) => R): Observable[R] = {
    toScalaObservable[R](rx.Observable.combineLatest[T, U, R](this.asJavaObservable, that.asJavaObservable, f))
  }

  /**
   * Debounces by dropping all values that are followed by newer values before the timeout value expires. The timer resets on each `onNext` call.
  *
   * NOTE: If events keep firing faster than the timeout then no data will be emitted.
   *
   * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/throttleWithTimeout.png">
   *
   * $debounceVsThrottle
   *
   * @param timeout
   *            The time each value has to be 'the most recent' of the [[rx.lang.scala.Observable]] to ensure that it's not dropped.
   *
   * @return An [[rx.lang.scala.Observable]] which filters out values which are too quickly followed up with newer values.
   * @see `Observable.debounce`
   */
  def throttleWithTimeout(timeout: Duration): Observable[T] = {
    // Duration is split into (length, unit) because the Java API takes them separately.
    toScalaObservable[T](asJavaObservable.throttleWithTimeout(timeout.length, timeout.unit))
  }

  /**
   * Return an Observable that mirrors the source Observable, except that it drops items emitted by the source
   * Observable that are followed by another item within a computed debounce duration.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/debounce.f.png">
   *
   * @param debounceSelector function to retrieve a sequence that indicates the throttle duration for each item
   * @return an Observable that omits items emitted by the source Observable that are followed by another item
   *         within a computed debounce duration
   */
  def debounce(debounceSelector: T => Observable[Any]): Observable[T] = {
    // Explicit Func1 with an asInstanceOf cast: the selector's wrapper type is existential
    // (rx.Observable[_ <: Any]) while debounce expects the exact rx.Observable[Any].
    val fJava = new rx.functions.Func1[T, rx.Observable[Any]] {
      override def call(t: T) = debounceSelector(t).asJavaObservable.asInstanceOf[rx.Observable[Any]]
    }
    toScalaObservable[T](asJavaObservable.debounce[Any](fJava))
  }

  /**
   * Debounces by dropping all values that are followed by newer values before the timeout value expires. The timer resets on each `onNext` call.
   *
   * NOTE: If events keep firing faster than the timeout then no data will be emitted.
  *
   * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/debounce.png">
   *
   * $debounceVsThrottle
   *
   * @param timeout
   *            The time each value has to be 'the most recent' of the [[rx.lang.scala.Observable]] to ensure that it's not dropped.
   *
   * @return An [[rx.lang.scala.Observable]] which filters out values which are too quickly followed up with newer values.
   * @see `Observable.throttleWithTimeout`
   */
  def debounce(timeout: Duration): Observable[T] = {
    toScalaObservable[T](asJavaObservable.debounce(timeout.length, timeout.unit))
  }

  /**
   * Debounces by dropping all values that are followed by newer values before the timeout value expires. The timer resets on each `onNext` call.
   *
   * NOTE: If events keep firing faster than the timeout then no data will be emitted.
   *
   * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/debounce.png">
   *
   * $debounceVsThrottle
   *
   * @param timeout
   *            The time each value has to be 'the most recent' of the [[rx.lang.scala.Observable]] to ensure that it's not dropped.
   * @param scheduler
   *            The [[rx.lang.scala.Scheduler]] to use internally to manage the timers which handle timeout for each event.
   * @return Observable which performs the throttle operation.
   * @see `Observable.throttleWithTimeout`
   */
  def debounce(timeout: Duration, scheduler: Scheduler): Observable[T] = {
    // NOTE(review): the Scala Scheduler is passed where the Java API takes rx.Scheduler —
    // presumably an implicit conversion applies (other overloads call .asJavaScheduler); confirm.
    toScalaObservable[T](asJavaObservable.debounce(timeout.length, timeout.unit, scheduler))
  }

  /**
   * Debounces by dropping all values that are followed by newer values before the timeout value expires. The timer resets on each `onNext` call.
   *
   * NOTE: If events keep firing faster than the timeout then no data will be emitted.
   *
   * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/throttleWithTimeout.png">
   *
   * @param timeout
   *            The time each value has to be 'the most recent' of the [[rx.lang.scala.Observable]] to ensure that it's not dropped.
  * @param scheduler
   *            The [[rx.lang.scala.Scheduler]] to use internally to manage the timers which handle timeout for each event.
   * @return Observable which performs the throttle operation.
   * @see `Observable.debounce`
   */
  def throttleWithTimeout(timeout: Duration, scheduler: Scheduler): Observable[T] = {
    // NOTE(review): Scala Scheduler passed directly to the Java API — presumably converted
    // implicitly to rx.Scheduler; confirm against the wrapper's implicit conversions.
    toScalaObservable[T](asJavaObservable.throttleWithTimeout(timeout.length, timeout.unit, scheduler))
  }

  /**
   * Throttles by skipping value until `skipDuration` passes and then emits the next received value.
   *
   * This differs from `Observable.throttleLast` in that this only tracks passage of time whereas `Observable.throttleLast` ticks at scheduled intervals.
   *
   * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/throttleFirst.png">
   *
   * @param skipDuration
   *            Time to wait before sending another value after emitting last value.
   * @param scheduler
   *            The [[rx.lang.scala.Scheduler]] to use internally to manage the timers which handle timeout for each event.
   * @return Observable which performs the throttle operation.
   */
  def throttleFirst(skipDuration: Duration, scheduler: Scheduler): Observable[T] = {
    toScalaObservable[T](asJavaObservable.throttleFirst(skipDuration.length, skipDuration.unit, scheduler))
  }

  /**
   * Throttles by skipping value until `skipDuration` passes and then emits the next received value.
   *
   * This differs from `Observable.throttleLast` in that this only tracks passage of time whereas `Observable.throttleLast` ticks at scheduled intervals.
   *
   * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/throttleFirst.png">
   *
   * @param skipDuration
   *            Time to wait before sending another value after emitting last value.
   * @return Observable which performs the throttle operation.
  */
  def throttleFirst(skipDuration: Duration): Observable[T] = {
    toScalaObservable[T](asJavaObservable.throttleFirst(skipDuration.length, skipDuration.unit))
  }

  /**
   * Throttles by returning the last value of each interval defined by 'intervalDuration'.
   *
   * This differs from `Observable.throttleFirst` in that this ticks along at a scheduled interval whereas `Observable.throttleFirst` does not tick, it just tracks passage of time.
   *
   * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/throttleLast.png">
   *
   * @param intervalDuration
   *            Duration of windows within with the last value will be chosen.
   * @return Observable which performs the throttle operation.
   */
  def throttleLast(intervalDuration: Duration): Observable[T] = {
    toScalaObservable[T](asJavaObservable.throttleLast(intervalDuration.length, intervalDuration.unit))
  }

  /**
   * Throttles by returning the last value of each interval defined by 'intervalDuration'.
   *
   * This differs from `Observable.throttleFirst` in that this ticks along at a scheduled interval whereas `Observable.throttleFirst` does not tick, it just tracks passage of time.
   *
   * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/throttleLast.png">
   *
   * @param intervalDuration
   *            Duration of windows within with the last value will be chosen.
   * @param scheduler
   *            The [[rx.lang.scala.Scheduler]] to use internally to manage the timers.
   * @return Observable which performs the throttle operation.
   */
  def throttleLast(intervalDuration: Duration, scheduler: Scheduler): Observable[T] = {
    toScalaObservable[T](asJavaObservable.throttleLast(intervalDuration.length, intervalDuration.unit, scheduler))
  }

  /**
   * Applies a timeout policy for each item emitted by the Observable, using
   * the specified scheduler to run timeout timers. If the next item isn't
   * observed within the specified timeout duration starting from its
   * predecessor, observers are notified of a `TimeoutException`.
  * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/timeout.1.png">
   *
   * @param timeout maximum duration between items before a timeout occurs
   * @return the source Observable modified to notify observers of a
   *         `TimeoutException` in case of a timeout
   */
  def timeout(timeout: Duration): Observable[T] = {
    toScalaObservable[T](asJavaObservable.timeout(timeout.length, timeout.unit))
  }

  /**
   * Applies a timeout policy for each item emitted by the Observable, using
   * the specified scheduler to run timeout timers. If the next item isn't
   * observed within the specified timeout duration starting from its
   * predecessor, a specified fallback Observable produces future items and
   * notifications from that point on.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/timeout.2.png">
   *
   * @param timeout maximum duration between items before a timeout occurs
   * @param other fallback Observable to use in case of a timeout
   * @return the source Observable modified to switch to the fallback
   *         Observable in case of a timeout
   */
  def timeout[U >: T](timeout: Duration, other: Observable[U]): Observable[U] = {
    // Cast the source up to U (safe: U >: T) so both Observables share the element type
    // required by the invariant Java timeout signature.
    val otherJava: rx.Observable[_ <: U] = other.asJavaObservable
    val thisJava = this.asJavaObservable.asInstanceOf[rx.Observable[U]]
    toScalaObservable[U](thisJava.timeout(timeout.length, timeout.unit, otherJava))
  }

  /**
   * Applies a timeout policy for each item emitted by the Observable, using
   * the specified scheduler to run timeout timers. If the next item isn't
   * observed within the specified timeout duration starting from its
   * predecessor, the observer is notified of a `TimeoutException`.
  * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/timeout.1s.png">
   *
   * @param timeout maximum duration between items before a timeout occurs
   * @param scheduler Scheduler to run the timeout timers on
   * @return the source Observable modified to notify observers of a
   *         `TimeoutException` in case of a timeout
   */
  def timeout(timeout: Duration, scheduler: Scheduler): Observable[T] = {
    // Here the Scheduler is unwrapped explicitly via asJavaScheduler (unlike the debounce
    // overloads, which pass the Scala wrapper directly).
    toScalaObservable[T](asJavaObservable.timeout(timeout.length, timeout.unit, scheduler.asJavaScheduler))
  }

  /**
   * Applies a timeout policy for each item emitted by the Observable, using
   * the specified scheduler to run timeout timers. If the next item isn't
   * observed within the specified timeout duration starting from its
   * predecessor, a specified fallback Observable sequence produces future
   * items and notifications from that point on.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/timeout.2s.png">
   *
   * @param timeout maximum duration between items before a timeout occurs
   * @param other Observable to use as the fallback in case of a timeout
   * @param scheduler Scheduler to run the timeout timers on
   * @return the source Observable modified so that it will switch to the
   *         fallback Observable in case of a timeout
   */
  def timeout[U >: T](timeout: Duration, other: Observable[U], scheduler: Scheduler): Observable[U] = {
    // Same widening cast as the two-argument overload; see comment there.
    val otherJava: rx.Observable[_ <: U] = other.asJavaObservable
    val thisJava = this.asJavaObservable.asInstanceOf[rx.Observable[U]]
    toScalaObservable[U](thisJava.timeout(timeout.length, timeout.unit, otherJava, scheduler.asJavaScheduler))
  }

  /**
   * Returns an Observable that mirrors the source Observable, but emits a TimeoutException if an item emitted by
   * the source Observable doesn't arrive within a window of time after the emission of the
   * previous item, where that period of time is measured by an Observable that is a function
   * of the previous item.
  * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/timeout3.png">
   * </p>
   * Note: The arrival of the first source item is never timed out.
   *
   * @param timeoutSelector
   *            a function that returns an observable for each item emitted by the source
   *            Observable and that determines the timeout window for the subsequent item
   * @return an Observable that mirrors the source Observable, but emits a TimeoutException if a item emitted by
   *         the source Observable takes longer to arrive than the time window defined by the
   *         selector for the previously emitted item
   */
  def timeout[V](timeoutSelector: T => Observable[V]): Observable[T] = {
    // The cast bridges the existential wrapper type (rx.Observable[_ <: V]) to the exact
    // rx.Observable[V] that the Java timeout signature demands.
    toScalaObservable[T](asJavaObservable.timeout({ t: T => timeoutSelector(t).asJavaObservable.asInstanceOf[rx.Observable[V]] }))
  }

  /**
   * Returns an Observable that mirrors the source Observable, but that switches to a fallback
   * Observable if an item emitted by the source Observable doesn't arrive within a window of time
   * after the emission of the previous item, where that period of time is measured by an
   * Observable that is a function of the previous item.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/timeout4.png">
   * </p>
   * Note: The arrival of the first source item is never timed out.
  *
   * @param timeoutSelector
   *            a function that returns an observable for each item emitted by the source
   *            Observable and that determines the timeout window for the subsequent item
   * @param other
   *            the fallback Observable to switch to if the source Observable times out
   * @return an Observable that mirrors the source Observable, but switches to mirroring a
   *         fallback Observable if a item emitted by the source Observable takes longer to arrive
   *         than the time window defined by the selector for the previously emitted item
   */
  def timeout[V, O >: T](timeoutSelector: T => Observable[V], other: Observable[O]): Observable[O] = {
    // The source is widened to O (safe: O >: T), so the selector's argument arrives typed as O
    // and must be cast back down to T before the user's selector is applied. Safe because every
    // element actually originates from this Observable[T].
    val thisJava = this.asJavaObservable.asInstanceOf[rx.Observable[O]]
    toScalaObservable[O](thisJava.timeout(
      { t: O => timeoutSelector(t.asInstanceOf[T]).asJavaObservable.asInstanceOf[rx.Observable[V]] },
      other.asJavaObservable))
  }

  /**
   * Returns an Observable that mirrors the source Observable, but emits a TimeoutException
   * if either the first item emitted by the source Observable or any subsequent item
   * don't arrive within time windows defined by other Observables.
  * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/timeout5.png">
   * </p>
   * @param firstTimeoutSelector
   *            a function that returns an Observable that determines the timeout window for the
   *            first source item
   * @param timeoutSelector
   *            a function that returns an Observable for each item emitted by the source
   *            Observable and that determines the timeout window in which the subsequent source
   *            item must arrive in order to continue the sequence
   * @return an Observable that mirrors the source Observable, but emits a TimeoutException if either the first item or any subsequent item doesn't
   *         arrive within the time windows specified by the timeout selectors
   */
  def timeout[U, V](firstTimeoutSelector: () => Observable[U], timeoutSelector: T => Observable[V]): Observable[T] = {
    // Both selectors need the existential-to-exact cast; see timeout[V] above.
    toScalaObservable[T](asJavaObservable.timeout(
      { firstTimeoutSelector().asJavaObservable.asInstanceOf[rx.Observable[U]] },
      { t: T => timeoutSelector(t).asJavaObservable.asInstanceOf[rx.Observable[V]] }))
  }

  /**
   * Returns an Observable that mirrors the source Observable, but switches to a fallback
   * Observable if either the first item emitted by the source Observable or any subsequent item
   * don't arrive within time windows defined by other Observables.
  * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/timeout6.png">
   * </p>
   * @param firstTimeoutSelector
   *            a function that returns an Observable which determines the timeout window for the
   *            first source item
   * @param timeoutSelector
   *            a function that returns an Observable for each item emitted by the source
   *            Observable and that determines the timeout window in which the subsequent source
   *            item must arrive in order to continue the sequence
   * @param other
   *            the fallback Observable to switch to if the source Observable times out
   * @return an Observable that mirrors the source Observable, but switches to the `other` Observable if either the first item emitted by the source Observable or any
   *         subsequent item don't arrive within time windows defined by the timeout selectors
   */
  def timeout[U, V, O >: T](firstTimeoutSelector: () => Observable[U], timeoutSelector: T => Observable[V], other: Observable[O]): Observable[O] = {
    // Combines the widening-to-O cast (see timeout[V, O]) with the existential-to-exact casts
    // on both selector results.
    val thisJava = this.asJavaObservable.asInstanceOf[rx.Observable[O]]
    toScalaObservable[O](thisJava.timeout(
      { firstTimeoutSelector().asJavaObservable.asInstanceOf[rx.Observable[U]] },
      { t: O => timeoutSelector(t.asInstanceOf[T]).asJavaObservable.asInstanceOf[rx.Observable[V]] },
      other.asJavaObservable))
  }

  /**
   * Returns an Observable that sums up the elements of this Observable.
   *
   * This operation is only available if the elements of this Observable are numbers, otherwise
   * you will get a compilation error.
   *
   * @return an Observable emitting the sum of all the elements of the source Observable
   *         as its single item.
   *
   * @usecase def sum: Observable[T]
   *   @inheritdoc
   */
  def sum[U >: T](implicit num: Numeric[U]): Observable[U] = {
    // Implemented as a fold over the Numeric type class: zero + each element.
    foldLeft(num.zero)(num.plus)
  }

  /**
   * Returns an Observable that multiplies up the elements of this Observable.
   *
   * This operation is only available if the elements of this Observable are numbers, otherwise
   * you will get a compilation error.
  *
   * @return an Observable emitting the product of all the elements of the source Observable
   *         as its single item.
   *
   * @usecase def product: Observable[T]
   *   @inheritdoc
   */
  def product[U >: T](implicit num: Numeric[U]): Observable[U] = {
    // Dual of `sum`: fold with the Numeric identity (one) and multiplication.
    foldLeft(num.one)(num.times)
  }

  /**
   * Returns an Observable that emits only the very first item emitted by the source Observable, or
   * a default value if the source Observable is empty.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/firstOrDefault.png">
   *
   * @param default
   *            The default value to emit if the source Observable doesn't emit anything.
   *            This is a by-name parameter, so it is only evaluated if the source Observable doesn't emit anything.
   * @return an Observable that emits only the very first item from the source, or a default value
   *         if the source Observable completes without emitting any item.
   */
  def firstOrElse[U >: T](default: => U): Observable[U] = {
    // take(1) caps the source at one element; singleOrElse supplies the lazy default if empty.
    take(1).singleOrElse(default)
  }

  /**
   * Returns an Observable that emits only an `Option` with the very first item emitted by the source Observable,
   * or `None` if the source Observable is empty.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/firstOrDefault.png">
   *
   * @return an Observable that emits only an `Option` with the very first item from the source, or `None`
   *         if the source Observable completes without emitting any item.
   */
  def headOption: Observable[Option[T]] = {
    take(1).singleOption
  }

  /**
   * Returns an Observable that emits only the very first item emitted by the source Observable, or
   * a default value if the source Observable is empty.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/firstOrDefault.png">
   *
   * @param default
   *            The default value to emit if the source Observable doesn't emit anything.
   *            This is a by-name parameter, so it is only evaluated if the source Observable doesn't emit anything.
* @return an Observable that emits only the very first item from the source, or a default value * if the source Observable completes without emitting any item. */ def headOrElse[U >: T](default: => U): Observable[U] = firstOrElse(default) /** * Returns an Observable that emits only the very first item emitted by the source Observable, or raises an * `NoSuchElementException` if the source Observable is empty. * <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/first.png"> * * @return an Observable that emits only the very first item emitted by the source Observable, or raises an * `NoSuchElementException` if the source Observable is empty * @see <a href="https://github.com/Netflix/RxJava/wiki/Filtering-Observables#wiki-first">RxJava Wiki: first()</a> * @see "MSDN: Observable.firstAsync()" */ def first: Observable[T] = { toScalaObservable[T](asJavaObservable.first) } /** * Returns an Observable that emits only the very first item emitted by the source Observable, or raises an * `NoSuchElementException` if the source Observable is empty. * <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/first.png"> * * @return an Observable that emits only the very first item emitted by the source Observable, or raises an * `NoSuchElementException` if the source Observable is empty * @see <a href="https://github.com/Netflix/RxJava/wiki/Filtering-Observables#wiki-first">RxJava Wiki: first()</a> * @see "MSDN: Observable.firstAsync()" * @see [[Observable.first]] */ def head: Observable[T] = first /** * Returns an Observable that emits all items except the first one, or raises an `UnsupportedOperationException` * if the source Observable is empty. * * @return an Observable that emits all items except the first one, or raises an `UnsupportedOperationException` * if the source Observable is empty. 
*/ def tail: Observable[T] = { lift { (subscriber: Subscriber[T]) => { var isFirst = true Subscriber[T]( subscriber, (v: T) => if(isFirst) isFirst = false else subscriber.onNext(v), e => subscriber.onError(e), () => if(isFirst) subscriber.onError(new UnsupportedOperationException("tail of empty Observable")) else subscriber.onCompleted ) } } } /** * Returns an Observable that emits the last item emitted by the source Observable or notifies observers of * an `NoSuchElementException` if the source Observable is empty. * * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/last.png"> * * @return an Observable that emits the last item from the source Observable or notifies observers of an * error * @see <a href="https://github.com/Netflix/RxJava/wiki/Filtering-Observable-Operators#wiki-last">RxJava Wiki: last()</a> * @see "MSDN: Observable.lastAsync()" */ def last: Observable[T] = { toScalaObservable[T](asJavaObservable.last) } /** * Returns an Observable that emits only an `Option` with the last item emitted by the source Observable, * or `None` if the source Observable completes without emitting any items. * * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/lastOrDefault.png"> * * @return an Observable that emits only an `Option` with the last item emitted by the source Observable, * or `None` if the source Observable is empty */ def lastOption: Observable[Option[T]] = { takeRight(1).singleOption } /** * Returns an Observable that emits only the last item emitted by the source Observable, or a default item * if the source Observable completes without emitting any items. * * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/lastOrDefault.png"> * * @param default the default item to emit if the source Observable is empty. * This is a by-name parameter, so it is only evaluated if the source Observable doesn't emit anything. 
* @return an Observable that emits only the last item emitted by the source Observable, or a default item * if the source Observable is empty */ def lastOrElse[U >: T](default: => U): Observable[U] = { takeRight(1).singleOrElse(default) } /** * If the source Observable completes after emitting a single item, return an Observable that emits that * item. If the source Observable emits more than one item or no items, notify of an `IllegalArgumentException` * or `NoSuchElementException` respectively. * * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/single.png"> * * @return an Observable that emits the single item emitted by the source Observable * @throws IllegalArgumentException if the source emits more than one item * @throws NoSuchElementException if the source emits no items * @see <a href="https://github.com/Netflix/RxJava/wiki/Observable-Utility-Operators#wiki-single-and-singleordefault">RxJava Wiki: single()</a> * @see "MSDN: Observable.singleAsync()" */ def single: Observable[T] = { toScalaObservable[T](asJavaObservable.single) } /** * If the source Observable completes after emitting a single item, return an Observable that emits an `Option` * with that item; if the source Observable is empty, return an Observable that emits `None`. * If the source Observable emits more than one item, throw an `IllegalArgumentException`. 
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/singleOrDefault.png">
   *
   * @return an Observable that emits an `Option` with the single item emitted by the source
   *         Observable, or `None` if the source Observable is empty
   * @throws IllegalArgumentException if the source Observable emits more than one item
   */
  def singleOption: Observable[Option[T]] = {
    // Wrap every item in Some so that None can serve as the unambiguous "empty" default.
    val jObservableOption = map(Some(_)).asJavaObservable.asInstanceOf[rx.Observable[Option[T]]]
    toScalaObservable[Option[T]](jObservableOption.singleOrDefault(None))
  }

  /**
   * If the source Observable completes after emitting a single item, return an Observable that
   * emits that item; if the source Observable is empty, return an Observable that emits a default
   * item. If the source Observable emits more than one item, throw an `IllegalArgumentException`.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/singleOrDefault.png">
   *
   * @param default a default value to emit if the source Observable emits no item.
   *                This is a by-name parameter, so it is only evaluated if the source Observable doesn't emit anything.
   * @return an Observable that emits the single item emitted by the source Observable, or a
   *         default item if the source Observable is empty
   * @throws IllegalArgumentException if the source Observable emits more than one item
   */
  def singleOrElse[U >: T](default: => U): Observable[U] = {
    singleOption.map {
      case Some(element) => element
      case None => default
    }
  }

  /**
   * Returns an Observable that emits the items emitted by the source Observable or a specified
   * default item if the source Observable is empty.
   *
   * <img width="640" height="305" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/defaultIfEmpty.png">
   *
   * @param default the item to emit if the source Observable emits no items. This is a by-name
   *                parameter, so it is only evaluated if the source Observable doesn't emit anything.
   * @return an Observable that emits either the specified default item if the source Observable
   *         emits no items, or the items emitted by the source Observable
   */
  def orElse[U >: T](default: => U): Observable[U] = {
    // Same Some/None trick as singleOption: None marks the "source was empty" case.
    val jObservableOption = map(Some(_)).asJavaObservable.asInstanceOf[rx.Observable[Option[T]]]
    val o = toScalaObservable[Option[T]](jObservableOption.defaultIfEmpty(None))
    o map {
      case Some(element) => element
      case None => default
    }
  }

  /**
   * Returns an Observable that forwards all sequentially distinct items emitted from the source Observable.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/distinctUntilChanged.png">
   *
   * @return an Observable of sequentially distinct items
   */
  def distinctUntilChanged: Observable[T] = {
    toScalaObservable[T](asJavaObservable.distinctUntilChanged)
  }

  /**
   * Returns an Observable that forwards all items emitted from the source Observable that are
   * sequentially distinct according to a key selector function.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/distinctUntilChanged.key.png">
   *
   * @param keySelector
   *            a function that projects an emitted item to a key value which is used for deciding
   *            whether an item is sequentially distinct from another one or not
   * @return an Observable of sequentially distinct items
   */
  def distinctUntilChanged[U](keySelector: T => U): Observable[T] = {
    toScalaObservable[T](asJavaObservable.distinctUntilChanged[U](keySelector))
  }

  /**
   * Returns an Observable that forwards all distinct items emitted from the source Observable.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/distinct.png">
   *
   * @return an Observable of distinct items
   */
  def distinct: Observable[T] = {
    toScalaObservable[T](asJavaObservable.distinct())
  }

  /**
   * Returns an Observable that forwards all items emitted from the source Observable that are
   * distinct according to a key selector function.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/distinct.key.png">
   *
   * @param keySelector
   *            a function that projects an emitted item to a key value which is used for deciding
   *            whether an item is distinct from another one or not
   * @return an Observable of distinct items
   */
  def distinct[U](keySelector: T => U): Observable[T] = {
    toScalaObservable[T](asJavaObservable.distinct[U](keySelector))
  }

  /**
   * Returns an Observable that counts the total number of elements in the source Observable.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/count.png">
   *
   * @return an Observable emitting the number of counted elements of the source Observable
   *         as its single item.
   */
  def length: Observable[Int] = {
    // count() yields java.lang.Integer; unbox to Scala Int for the public signature.
    toScalaObservable[Integer](asJavaObservable.count()).map(_.intValue())
  }

  /**
   * Returns an Observable that counts the total number of elements in the source Observable.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/count.png">
   *
   * @return an Observable emitting the number of counted elements of the source Observable
   *         as its single item.
   */
  def size: Observable[Int] = length

  /**
   * Retry subscription to origin Observable up to given retry count.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/retry.png">
   *
   * If [[rx.lang.scala.Observer.onError]] is invoked the source Observable will be re-subscribed
   * to as many times as defined by retryCount.
   *
   * Any [[rx.lang.scala.Observer.onNext]] calls received on each attempt will be emitted and
   * concatenated together.
   *
   * For example, if an Observable fails on first time but emits [1, 2] then succeeds the second
   * time and emits [1, 2, 3, 4, 5] then the complete output would be
   * [1, 2, 1, 2, 3, 4, 5, onCompleted].
   *
   * @param retryCount
   *            Number of retry attempts before failing.
   * @return Observable with retry logic.
   */
  def retry(retryCount: Int): Observable[T] = {
    toScalaObservable[T](asJavaObservable.retry(retryCount))
  }

  /**
   * Retry subscription to origin Observable whenever onError is called (infinite retry count).
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/retry.png">
   *
   * If [[rx.lang.scala.Observer.onError]] is invoked the source Observable will be re-subscribed to.
   *
   * Any [[rx.lang.scala.Observer.onNext]] calls received on each attempt will be emitted and
   * concatenated together.
   *
   * For example, if an Observable fails on first time but emits [1, 2] then succeeds the second
   * time and emits [1, 2, 3, 4, 5] then the complete output would be
   * [1, 2, 1, 2, 3, 4, 5, onCompleted].
   *
   * @return Observable with retry logic.
   */
  def retry: Observable[T] = {
    toScalaObservable[T](asJavaObservable.retry())
  }

  /**
   * Returns an Observable that mirrors the source Observable, resubscribing to it if it calls
   * `onError` and the predicate returns true for that specific exception and retry count.
   *
   * <img width="640" height="315" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/retry.png">
   *
   * @param predicate the predicate that determines if a resubscription may happen in case of a
   *                  specific exception and retry count
   * @return the source Observable modified with retry logic
   */
  def retry(predicate: (Int, Throwable) => Boolean): Observable[T] = {
    // Adapt the Scala predicate to the boxed-type Func2 the Java API expects.
    val f = new Func2[java.lang.Integer, Throwable, java.lang.Boolean] {
      def call(times: java.lang.Integer, e: Throwable): java.lang.Boolean = predicate(times, e)
    }
    toScalaObservable[T](asJavaObservable.retry(f))
  }

  /**
   * Returns an Observable that repeats the sequence of items emitted by the source Observable indefinitely.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/repeat.o.png">
   *
   * @return an Observable that emits the items emitted by the source Observable repeatedly and in sequence
   * @see <a href="https://github.com/Netflix/RxJava/wiki/Creating-Observables#wiki-repeat">RxJava Wiki: repeat()</a>
   * @see <a href="http://msdn.microsoft.com/en-us/library/hh229428.aspx">MSDN: Observable.Repeat</a>
   */
  def repeat: Observable[T] = {
    toScalaObservable[T](asJavaObservable.repeat())
  }

  /**
   * Returns an Observable that repeats the sequence of items emitted by the source Observable
   * indefinitely, on a particular Scheduler.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/repeat.os.png">
   *
   * @param scheduler the Scheduler to emit the items on
   * @return an Observable that emits the items emitted by the source Observable repeatedly and in sequence
   * @see <a href="https://github.com/Netflix/RxJava/wiki/Creating-Observables#wiki-repeat">RxJava Wiki: repeat()</a>
   * @see <a href="http://msdn.microsoft.com/en-us/library/hh229428.aspx">MSDN: Observable.Repeat</a>
   */
  def repeat(scheduler: Scheduler): Observable[T] = {
    toScalaObservable[T](asJavaObservable.repeat(scheduler))
  }

  /**
   * Returns an Observable that repeats the sequence of items emitted by the source Observable at
   * most `count` times.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/repeat.on.png">
   *
   * @param count the number of times the source Observable items are repeated,
   *              a count of 0 will yield an empty sequence
   * @return an Observable that repeats the sequence of items emitted by the source Observable at
   *         most `count` times
   * @throws IllegalArgumentException if `count` is less than zero
   * @see <a href="https://github.com/Netflix/RxJava/wiki/Creating-Observables#wiki-repeat">RxJava Wiki: repeat()</a>
   * @see <a href="http://msdn.microsoft.com/en-us/library/hh229428.aspx">MSDN: Observable.Repeat</a>
   */
  def repeat(count: Long): Observable[T] = {
    toScalaObservable[T](asJavaObservable.repeat(count))
  }

  /**
   * Returns an Observable that repeats the sequence of items emitted by the source Observable
   * at most `count` times, on a particular Scheduler.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/repeat.ons.png">
   *
   * @param count the number of times the source Observable items are repeated,
   *              a count of 0 will yield an empty sequence
   * @param scheduler the `Scheduler` to emit the items on
   * @return an Observable that repeats the sequence of items emitted by the source Observable at
   *         most `count` times on a particular Scheduler
   * @see <a href="https://github.com/Netflix/RxJava/wiki/Creating-Observables#wiki-repeat">RxJava Wiki: repeat()</a>
   * @see <a href="http://msdn.microsoft.com/en-us/library/hh229428.aspx">MSDN: Observable.Repeat</a>
   */
  def repeat(count: Long, scheduler: Scheduler): Observable[T] = {
    toScalaObservable[T](asJavaObservable.repeat(count, scheduler))
  }

  /**
   * Converts an Observable into a [[BlockingObservable]] (an Observable with blocking operators).
   *
   * @return a [[BlockingObservable]] version of this Observable
   * @see <a href="https://github.com/Netflix/RxJava/wiki/Blocking-Observable-Operators">Blocking Observable Operators</a>
   */
  @deprecated("Use `toBlocking` instead", "0.19")
  def toBlockingObservable: BlockingObservable[T] = {
    new BlockingObservable[T](this)
  }

  /**
   * Converts an Observable into a [[BlockingObservable]] (an Observable with blocking operators).
   *
   * @return a [[BlockingObservable]] version of this Observable
   * @see <a href="https://github.com/Netflix/RxJava/wiki/Blocking-Observable-Operators">Blocking Observable Operators</a>
   * @since 0.19
   */
  def toBlocking: BlockingObservable[T] = {
    new BlockingObservable[T](this)
  }

  /**
   * Perform work in parallel by sharding an `Observable[T]` on a
   * [[rx.lang.scala.schedulers.ComputationScheduler]] and return an `Observable[R]` with the output.
   *
   * @param f
   *            a function that applies Observable operators to `Observable[T]` in parallel and
   *            returns an `Observable[R]`
   * @return an Observable with the output of the function executed on a [[rx.lang.scala.Scheduler]]
   */
  def parallel[R](f: Observable[T] => Observable[R]): Observable[R] = {
    // Bridge the Scala transformation into the Func1 over Java Observables that rx expects.
    val fJava: Func1[rx.Observable[T], rx.Observable[R]] =
      (jo: rx.Observable[T]) => f(toScalaObservable[T](jo)).asJavaObservable.asInstanceOf[rx.Observable[R]]
    toScalaObservable(asJavaObservable.asInstanceOf[rx.Observable[T]].parallel[R](fJava))
  }

  /**
   * Perform work in parallel by sharding an `Observable[T]` on a [[rx.lang.scala.Scheduler]] and
   * return an `Observable[R]` with the output.
   *
   * @param f
   *            a function that applies Observable operators to `Observable[T]` in parallel and
   *            returns an `Observable[R]`
   * @param scheduler
   *            a [[rx.lang.scala.Scheduler]] to perform the work on.
   * @return an Observable with the output of the function executed on a [[rx.lang.scala.Scheduler]]
   */
  def parallel[R](f: Observable[T] => Observable[R], scheduler: Scheduler): Observable[R] = {
    val fJava: Func1[rx.Observable[T], rx.Observable[R]] =
      (jo: rx.Observable[T]) => f(toScalaObservable[T](jo)).asJavaObservable.asInstanceOf[rx.Observable[R]]
    toScalaObservable(asJavaObservable.asInstanceOf[rx.Observable[T]].parallel[R](fJava, scheduler))
  }

  /**
   * Converts an `Observable[Observable[T]]` into another `Observable[Observable[T]]` whose
   * emitted Observables emit the same items, but the number of such Observables is restricted
   * by `parallelObservables`.
   *
   * For example, if the original `Observable[Observable[T]]` emits 100 Observables and
   * `parallelObservables` is 8, the items emitted by the 100 original Observables will be
   * distributed among 8 Observables emitted by the resulting Observable.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/parallelMerge.png">
   *
   * This is a mechanism for efficiently processing `n` number of Observables on a smaller `m`
   * number of resources (typically CPU cores).
   *
   * @param parallelObservables the number of Observables to merge into
   * @return an Observable of Observables constrained in number by `parallelObservables`
   */
  def parallelMerge[U](parallelObservables: Int)(implicit evidence: Observable[T] <:< Observable[Observable[U]]): Observable[Observable[U]] = {
    // Step through the evidence-provided view and unwrap each inner Observable to its Java form.
    val o2: Observable[Observable[U]] = this
    val o3: Observable[rx.Observable[U]] = o2.map(_.asJavaObservable.asInstanceOf[rx.Observable[U]])
    val o4: rx.Observable[rx.Observable[U]] = o3.asJavaObservable.asInstanceOf[rx.Observable[rx.Observable[U]]]
    val o5: rx.Observable[rx.Observable[U]] = rx.Observable.parallelMerge[U](o4, parallelObservables)
    // Re-wrap both the outer and inner Observables back into their Scala forms.
    toScalaObservable(o5).map(toScalaObservable[U](_))
  }

  /**
   * Converts an `Observable[Observable[T]]` into another `Observable[Observable[T]]` whose
   * emitted Observables emit the same items, but the number of such Observables is restricted
   * by `parallelObservables`, and each runs on a defined Scheduler.
   *
   * For example, if the original `Observable[Observable[T]]` emits 100 Observables and
   * `parallelObservables` is 8, the items emitted by the 100 original Observables will be
   * distributed among 8 Observables emitted by the resulting Observable.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/parallelMerge.png">
   *
   * This is a mechanism for efficiently processing `n` number of Observables on a smaller `m`
   * number of resources (typically CPU cores).
   *
   * @param parallelObservables the number of Observables to merge into
   * @param scheduler the [[Scheduler]] to run each Observable on
   * @return an Observable of Observables constrained in number by `parallelObservables`
   */
  def parallelMerge[U](parallelObservables: Int, scheduler: Scheduler)(implicit evidence: Observable[T] <:< Observable[Observable[U]]): Observable[Observable[U]] = {
    val o2: Observable[Observable[U]] = this
    val o3: Observable[rx.Observable[U]] = o2.map(_.asJavaObservable.asInstanceOf[rx.Observable[U]])
    val o4: rx.Observable[rx.Observable[U]] = o3.asJavaObservable.asInstanceOf[rx.Observable[rx.Observable[U]]]
    val o5: rx.Observable[rx.Observable[U]] = rx.Observable.parallelMerge[U](o4, parallelObservables, scheduler)
    toScalaObservable(o5).map(toScalaObservable[U](_))
  }

  /** Tests whether a predicate holds for some of the elements of this `Observable`.
   *
   * @param p the predicate used to test elements.
   * @return an Observable emitting one single Boolean, which is `true` if the given predicate `p`
   *         holds for some of the elements of this Observable, and `false` otherwise.
   */
  def exists(p: T => Boolean): Observable[Boolean] = {
    // exists() yields java.lang.Boolean; unbox to Scala Boolean for the public signature.
    toScalaObservable[java.lang.Boolean](asJavaObservable.exists(p)).map(_.booleanValue())
  }

  /** Tests whether this `Observable` emits no elements.
   *
   * @return an Observable emitting one single Boolean, which is `true` if this `Observable`
   *         emits no elements, and `false` otherwise.
   */
  def isEmpty: Observable[Boolean] = {
    toScalaObservable[java.lang.Boolean](asJavaObservable.isEmpty()).map(_.booleanValue())
  }

  /** Enables `for`-comprehension guards (`if` clauses) on Observables by deferring the filter. */
  def withFilter(p: T => Boolean): WithFilter[T] = {
    new WithFilter[T](p, asJavaObservable)
  }

  /**
   * Returns an Observable that applies the given function to each item emitted by an
   * Observable.
   *
   * @param observer the observer
   * @return an Observable with the side-effecting behavior applied.
*/ def doOnEach(observer: Observer[T]): Observable[T] = { toScalaObservable[T](asJavaObservable.doOnEach(observer.asJavaObserver)) } /** * Invokes an action when the source Observable calls <code>onNext</code>. * * @param onNext the action to invoke when the source Observable calls <code>onNext</code> * @return the source Observable with the side-effecting behavior applied */ def doOnNext(onNext: T => Unit): Observable[T] = { toScalaObservable[T](asJavaObservable.doOnNext(onNext)) } /** * Invokes an action if the source Observable calls `onError`. * * @param onError the action to invoke if the source Observable calls * `onError` * @return the source Observable with the side-effecting behavior applied */ def doOnError(onError: Throwable => Unit): Observable[T] = { toScalaObservable[T](asJavaObservable.doOnError(onError)) } /** * Invokes an action when the source Observable calls `onCompleted`. * * @param onCompleted the action to invoke when the source Observable calls * `onCompleted` * @return the source Observable with the side-effecting behavior applied */ def doOnCompleted(onCompleted: => Unit): Observable[T] = { toScalaObservable[T](asJavaObservable.doOnCompleted(() => onCompleted)) } /** * Returns an Observable that applies the given function to each item emitted by an * Observable. * * @param onNext this function will be called whenever the Observable emits an item * * @return an Observable with the side-effecting behavior applied. */ def doOnEach(onNext: T => Unit): Observable[T] = { toScalaObservable[T](asJavaObservable.doOnNext(onNext)) } /** * Returns an Observable that applies the given function to each item emitted by an * Observable. * * @param onNext this function will be called whenever the Observable emits an item * @param onError this function will be called if an error occurs * * @return an Observable with the side-effecting behavior applied. 
*/ def doOnEach(onNext: T => Unit, onError: Throwable => Unit): Observable[T] = { toScalaObservable[T](asJavaObservable.doOnEach(Observer(onNext, onError, ()=>{}))) } /** * Returns an Observable that applies the given function to each item emitted by an * Observable. * * @param onNext this function will be called whenever the Observable emits an item * @param onError this function will be called if an error occurs * @param onCompleted the action to invoke when the source Observable calls * * @return an Observable with the side-effecting behavior applied. */ def doOnEach(onNext: T => Unit, onError: Throwable => Unit, onCompleted: () => Unit): Observable[T] = { toScalaObservable[T](asJavaObservable.doOnEach(Observer(onNext, onError,onCompleted))) } /** * Modifies an Observable so that it invokes an action when it calls `onCompleted` or `onError`. * <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/doOnTerminate.png"> * <p> * This differs from `finallyDo` in that this happens **before** `onCompleted/onError` are emitted. * * @param onTerminate the action to invoke when the source Observable calls `onCompleted` or `onError` * @return the source Observable with the side-effecting behavior applied * @see <a href="https://github.com/Netflix/RxJava/wiki/Observable-Utility-Operators#wiki-doonterminate">RxJava Wiki: doOnTerminate()</a> * @see <a href="http://msdn.microsoft.com/en-us/library/hh229804.aspx">MSDN: Observable.Do</a> */ def doOnTerminate(onTerminate: => Unit): Observable[T] = { toScalaObservable[T](asJavaObservable.doOnTerminate(() => onTerminate)) } /** * Given two Observables, mirror the one that first emits an item. * * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/amb.png"> * * @param that * an Observable competing to react first * @return an Observable that emits the same sequence of items as whichever of `this` or `that` first emitted an item. 
*/ def amb[U >: T](that: Observable[U]): Observable[U] = { val thisJava: rx.Observable[_ <: U] = this.asJavaObservable val thatJava: rx.Observable[_ <: U] = that.asJavaObservable toScalaObservable[U](rx.Observable.amb(thisJava, thatJava)) } /** * Returns an Observable that emits the items emitted by the source Observable shifted forward in time by a * specified delay. Error notifications from the source Observable are not delayed. * * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/delay.png"> * * @param delay the delay to shift the source by * @return the source Observable shifted in time by the specified delay */ def delay(delay: Duration): Observable[T] = { toScalaObservable[T](asJavaObservable.delay(delay.length, delay.unit)) } /** * Returns an Observable that emits the items emitted by the source Observable shifted forward in time by a * specified delay. Error notifications from the source Observable are not delayed. * * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/delay.s.png"> * * @param delay the delay to shift the source by * @param scheduler the Scheduler to use for delaying * @return the source Observable shifted in time by the specified delay */ def delay(delay: Duration, scheduler: Scheduler): Observable[T] = { toScalaObservable[T](asJavaObservable.delay(delay.length, delay.unit, scheduler)) } /** * Returns an Observable that delays the emissions of the source Observable via another Observable on a * per-item basis. * <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/delay.o.png"> * <p> * Note: the resulting Observable will immediately propagate any `onError` notification * from the source Observable. 
* * @param itemDelay a function that returns an Observable for each item emitted by the source Observable, which is * then used to delay the emission of that item by the resulting Observable until the Observable * returned from `itemDelay` emits an item * @return an Observable that delays the emissions of the source Observable via another Observable on a per-item basis */ def delay(itemDelay: T => Observable[Any]): Observable[T] = { val itemDelayJava = new Func1[T, rx.Observable[Any]] { override def call(t: T): rx.Observable[Any] = itemDelay(t).asJavaObservable.asInstanceOf[rx.Observable[Any]] } toScalaObservable[T](asJavaObservable.delay[Any](itemDelayJava)) } /** * Returns an Observable that delays the subscription to and emissions from the souce Observable via another * Observable on a per-item basis. * <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/delay.oo.png"> * <p> * Note: the resulting Observable will immediately propagate any `onError` notification * from the source Observable. 
* * @param subscriptionDelay a function that returns an Observable that triggers the subscription to the source Observable * once it emits any item * @param itemDelay a function that returns an Observable for each item emitted by the source Observable, which is * then used to delay the emission of that item by the resulting Observable until the Observable * returned from `itemDelay` emits an item * @return an Observable that delays the subscription and emissions of the source Observable via another * Observable on a per-item basis */ def delay(subscriptionDelay: () => Observable[Any], itemDelay: T => Observable[Any]): Observable[T] = { val subscriptionDelayJava = new Func0[rx.Observable[Any]] { override def call(): rx.Observable[Any] = subscriptionDelay().asJavaObservable.asInstanceOf[rx.Observable[Any]] } val itemDelayJava = new Func1[T, rx.Observable[Any]] { override def call(t: T): rx.Observable[Any] = itemDelay(t).asJavaObservable.asInstanceOf[rx.Observable[Any]] } toScalaObservable[T](asJavaObservable.delay[Any, Any](subscriptionDelayJava, itemDelayJava)) } /** * Return an Observable that delays the subscription to the source Observable by a given amount of time. * * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/delaySubscription.png"> * * @param delay the time to delay the subscription * @return an Observable that delays the subscription to the source Observable by the given amount */ def delaySubscription(delay: Duration): Observable[T] = { toScalaObservable[T](asJavaObservable.delaySubscription(delay.length, delay.unit)) } /** * Return an Observable that delays the subscription to the source Observable by a given amount of time, * both waiting and subscribing on a given Scheduler. 
* * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/delaySubscription.s.png"> * * @param delay the time to delay the subscription * @param scheduler the Scheduler on which the waiting and subscription will happen * @return an Observable that delays the subscription to the source Observable by a given * amount, waiting and subscribing on the given Scheduler */ def delaySubscription(delay: Duration, scheduler: Scheduler): Observable[T] = { toScalaObservable[T](asJavaObservable.delaySubscription(delay.length, delay.unit, scheduler)) } /** * Returns an Observable that emits the single item at a specified index in a sequence of emissions from a * source Observbable. * * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/elementAt.png"> * * @param index * the zero-based index of the item to retrieve * @return an Observable that emits a single item: the item at the specified position in the sequence of * those emitted by the source Observable * @throws IndexOutOfBoundsException * if index is greater than or equal to the number of items emitted by the source * Observable, or index is less than 0 */ def elementAt(index: Int): Observable[T] = { toScalaObservable[T](asJavaObservable.elementAt(index)) } /** * Returns an Observable that emits the item found at a specified index in a sequence of emissions from a * source Observable, or a default item if that index is out of range. 
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/elementAtOrDefault.png">
   *
   * @param index
   *            the zero-based index of the item to retrieve
   * @param default
   *            the default item
   * @return an Observable that emits the item at the specified position in the sequence emitted by the source
   *         Observable, or the default item if that index is outside the bounds of the source sequence
   * @throws IndexOutOfBoundsException
   *             if {@code index} is less than 0
   */
  def elementAtOrDefault[U >: T](index: Int, default: U): Observable[U] = {
    // Widen to U (safe because U >: T) so the Java operator can emit the default of type U.
    val thisJava = asJavaObservable.asInstanceOf[rx.Observable[U]]
    toScalaObservable[U](thisJava.elementAtOrDefault(index, default))
  }

  /**
   * Return an Observable that emits a single Map containing all items emitted by the source Observable,
   * mapped by the keys returned by a specified {@code keySelector} function.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/toMap.png">
   * <p>
   * If more than one source item maps to the same key, the Map will contain the latest of those items.
   *
   * @param keySelector
   *            the function that extracts the key from a source item to be used in the Map
   * @return an Observable that emits a single item: a Map containing the mapped items from the source
   *         Observable
   */
  def toMap[K] (keySelector: T => K): Observable[Map[K, T]]= {
    val thisJava = asJavaObservable.asInstanceOf[rx.Observable[T]]
    val o: rx.Observable[util.Map[K, T]] = thisJava.toMap[K](keySelector)
    // Convert the java.util.Map produced by RxJava into an immutable Scala Map.
    toScalaObservable[util.Map[K,T]](o).map(m => m.toMap)
  }

  /**
   * Return an Observable that emits a single Map containing values corresponding to items emitted by the
   * source Observable, mapped by the keys returned by a specified {@code keySelector} function.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/toMap.png">
   * <p>
   * If more than one source item maps to the same key, the Map will contain a single entry that
   * corresponds to the latest of those items.
   *
   * @param keySelector
   *            the function that extracts the key from a source item to be used in the Map
   * @param valueSelector
   *            the function that extracts the value from a source item to be used in the Map
   * @return an Observable that emits a single item: a HashMap containing the mapped items from the source
   *         Observable
   */
  def toMap[K, V] (keySelector: T => K, valueSelector: T => V) : Observable[Map[K, V]] = {
    val thisJava = asJavaObservable.asInstanceOf[rx.Observable[T]]
    val o: rx.Observable[util.Map[K, V]] = thisJava.toMap[K, V](keySelector, valueSelector)
    // Convert the java.util.Map produced by RxJava into an immutable Scala Map.
    toScalaObservable[util.Map[K, V]](o).map(m => m.toMap)
  }

  /**
   * Return an Observable that emits a single Map, returned by a specified {@code mapFactory} function, that
   * contains keys and values extracted from the items emitted by the source Observable.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/toMap.png">
   *
   * @param keySelector
   *            the function that extracts the key from a source item to be used in the Map
   * @param valueSelector
   *            the function that extracts the value from the source items to be used as value in the Map
   * @param mapFactory
   *            the function that returns a Map instance to be used
   * @return an Observable that emits a single item: a Map that contains the mapped items emitted by the
   *         source Observable
   */
  def toMap[K, V] (keySelector: T => K, valueSelector: T => V, mapFactory: () => Map[K, V]): Observable[Map[K, V]] = {
    val thisJava = asJavaObservable.asInstanceOf[rx.Observable[T]]
    val o: rx.Observable[util.Map[K, V]] = thisJava.toMap[K, V](keySelector, valueSelector)
    // NOTE(review): the collected entries are merged into the map created by `mapFactory`; any entries the
    // factory pre-populates are overwritten on key collision by entries derived from the source Observable.
    toScalaObservable[util.Map[K, V]](o).map(m => mapFactory() ++ m.toMap)
  }

  /**
   * Returns an Observable that emits a Boolean value that indicates whether `this` and `that` Observable sequences are the
   * same by comparing the items emitted by each Observable pairwise.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/sequenceEqual.png">
   *
   * Note: this method uses `==` to compare elements. It's a bit different from RxJava which uses `Object.equals`.
   *
   * @param that the Observable to compare
   * @return an Observable that emits a `Boolean` value that indicates whether the two sequences are the same
   */
  def sequenceEqual[U >: T](that: Observable[U]): Observable[Boolean] = {
    sequenceEqual(that, (_1: U, _2: U) => _1 == _2)
  }

  /**
   * Returns an Observable that emits a Boolean value that indicates whether `this` and `that` Observable sequences are the
   * same by comparing the items emitted by each Observable pairwise based on the results of a specified `equality` function.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/sequenceEqual.png">
   *
   * @param that the Observable to compare
   * @param equality a function used to compare items emitted by each Observable
   * @return an Observable that emits a `Boolean` value that indicates whether the two sequences are the same based on the `equality` function.
   */
  def sequenceEqual[U >: T](that: Observable[U], equality: (U, U) => Boolean): Observable[Boolean] = {
    val thisJava: rx.Observable[_ <: U] = this.asJavaObservable
    val thatJava: rx.Observable[_ <: U] = that.asJavaObservable
    // Implicitly converted to an RxJava Func2 returning java.lang.Boolean for the Java operator.
    val equalityJava: Func2[_ >: U, _ >: U, java.lang.Boolean] = equality
    toScalaObservable[java.lang.Boolean](rx.Observable.sequenceEqual[U](thisJava, thatJava, equalityJava)).map(_.booleanValue)
  }

  /**
   * Returns an Observable that emits records of the time interval between consecutive items emitted by the
   * source Observable.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/timeInterval.png">
   *
   * @return an Observable that emits time interval information items
   */
  def timeInterval: Observable[(Duration, T)] = {
    toScalaObservable(asJavaObservable.timeInterval())
      .map(inv => (Duration(inv.getIntervalInMilliseconds, MILLISECONDS), inv.getValue))
  }

  /**
   * Returns an Observable that emits records of the time interval between consecutive items emitted by the
   * source Observable, where this interval is computed on a specified Scheduler.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/timeInterval.s.png">
   *
   * @param scheduler the [[Scheduler]] used to compute time intervals
   * @return an Observable that emits time interval information items
   */
  def timeInterval(scheduler: Scheduler): Observable[(Duration, T)] = {
    toScalaObservable(asJavaObservable.timeInterval(scheduler.asJavaScheduler))
      .map(inv => (Duration(inv.getIntervalInMilliseconds, MILLISECONDS), inv.getValue))
  }

  /**
   * Lift a function to the current Observable and return a new Observable that when subscribed to will pass
   * the values of the current Observable through the function.
   * <p>
   * In other words, this allows chaining Observers together on an Observable for acting on the values within
   * the Observable.
   * {{{
   * observable.map(...).filter(...).take(5).lift(new ObserverA()).lift(new ObserverB(...)).subscribe()
   * }}}
   *
   * @param operator
   * @return an Observable that emits values that are the result of applying the bind function to the values
   *         of the current Observable
   */
  def lift[R](operator: Subscriber[R] => Subscriber[T]): Observable[R] = {
    toScalaObservable(asJavaObservable.lift(toJavaOperator[T, R](operator)))
  }

  /**
   * Converts the source `Observable[T]` into an `Observable[Observable[T]]` that emits the source Observable as its single emission.
   *
   * <img width="640" height="350" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/nest.png">
   *
   * @return an Observable that emits a single item: the source Observable
   */
  def nest: Observable[Observable[T]] = {
    toScalaObservable(asJavaObservable.nest).map(toScalaObservable[T](_))
  }

  /**
   * Subscribes to the [[Observable]] and receives notifications for each element.
   *
   * Alias to `subscribe(T => Unit)`.
   *
   * @param onNext function to execute for each item.
   * @throws IllegalArgumentException if `onNext` is null
   * @since 0.19
   */
  def foreach(onNext: T => Unit): Unit = {
    asJavaObservable.subscribe(onNext)
  }

  /**
   * Subscribes to the [[Observable]] and receives notifications for each element and error events.
   *
   * Alias to `subscribe(T => Unit, Throwable => Unit)`.
   *
   * @param onNext function to execute for each item.
   * @param onError function to execute when an error is emitted.
   * @throws IllegalArgumentException if `onNext` is null, or if `onError` is null
   * @since 0.19
   */
  def foreach(onNext: T => Unit, onError: Throwable => Unit): Unit = {
    asJavaObservable.subscribe(onNext, onError)
  }

  /**
   * Subscribes to the [[Observable]] and receives notifications for each element and the terminal events.
   *
   * Alias to `subscribe(T => Unit, Throwable => Unit, () => Unit)`.
   *
   * @param onNext function to execute for each item.
   * @param onError function to execute when an error is emitted.
   * @param onComplete function to execute when completion is signalled.
   * @throws IllegalArgumentException if `onNext` is null, or if `onError` is null, or if `onComplete` is null
   * @since 0.19
   */
  def foreach(onNext: T => Unit, onError: Throwable => Unit, onComplete: () => Unit): Unit = {
    asJavaObservable.subscribe(onNext, onError, onComplete)
  }

  /**
   * Pivots a sequence of `(K1, Observable[(K2, Observable[U])])`s emitted by an `Observable` so as to swap the group
   * and the set on which their items are grouped.
   * <p>
   * <img width="640" height="580" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/pivot.png">
   *
   * For example an `Observable` such as `this = Observable[(String, Observable[(Boolean, Observable[Integer])])`:
   * <ul>
   * <li>o1.odd: 1, 3, 5, 7, 9 on Thread 1</li>
   * <li>o1.even: 2, 4, 6, 8, 10 on Thread 1</li>
   * <li>o2.odd: 11, 13, 15, 17, 19 on Thread 2</li>
   * <li>o2.even: 12, 14, 16, 18, 20 on Thread 2</li>
   * </ul>
   * is pivoted to become `this = Observable[(Boolean, Observable[(String, Observable[Integer])])`:
   *
   * <ul>
   * <li>odd.o1: 1, 3, 5, 7, 9 on Thread 1</li>
   * <li>odd.o2: 11, 13, 15, 17, 19 on Thread 2</li>
   * <li>even.o1: 2, 4, 6, 8, 10 on Thread 1</li>
   * <li>even.o2: 12, 14, 16, 18, 20 on Thread 2</li>
   * </ul>
   * <p>
   * <img width="640" height="1140" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/pivot.ex.png">
   * <p>
   * <em>Note:</em> A `(K, Observable[_])` will cache the items it is to emit until such time as it
   * is subscribed to. For this reason, in order to avoid memory leaks, you should not simply ignore those
   * `(K, Observable[_])`s that do not concern you. Instead, you can signal to them that they may
   * discard their buffers by applying an operator like `take(0)` to them.
   *
   * @return an `Observable`containing a stream of nested `(K1, Observable[(K2, Observable[U])])`s with swapped
   *         inner-outer keys.
   */
  def pivot[U, K1, K2](implicit evidence: Observable[T] <:< Observable[(K1, Observable[(K2, Observable[U])])]): Observable[(K2, Observable[(K1, Observable[U])])] = {
    import rx.observables.{GroupedObservable => JGroupedObservable}
    // Step 1: convert each Scala (key, Observable) pair into the nested JGroupedObservable
    // shape that rx.Observable.pivot expects.
    val f1 = new Func1[(K1, Observable[(K2, Observable[U])]), JGroupedObservable[K1, JGroupedObservable[K2, U]]]() {
      override def call(t1: (K1, Observable[(K2, Observable[U])])): JGroupedObservable[K1, JGroupedObservable[K2, U]] = {
        val jo = t1._2.asJavaObservable.asInstanceOf[rx.Observable[(K2, Observable[U])]].map[JGroupedObservable[K2, U]](new Func1[(K2, Observable[U]), JGroupedObservable[K2, U]]() {
          override def call(t2: (K2, Observable[U])): JGroupedObservable[K2, U] = {
            JGroupedObservable.from(t2._1, t2._2.asJavaObservable.asInstanceOf[rx.Observable[U]])
          }
        })
        JGroupedObservable.from(t1._1, jo)
      }
    }
    val o1: Observable[(K1, Observable[(K2, Observable[U])])] = this
    // Step 2: run the Java pivot, then map the resulting JGroupedObservables back into Scala tuples.
    val o2 = toScalaObservable[JGroupedObservable[K2, JGroupedObservable[K1, U]]](rx.Observable.pivot(o1.asJavaObservable.map(f1)))
    o2.map {
      (jgo1: JGroupedObservable[K2, JGroupedObservable[K1, U]]) => {
        val jo = jgo1.map[(K1, Observable[U])](new Func1[JGroupedObservable[K1, U], (K1, Observable[U])]() {
          override def call(jgo2: JGroupedObservable[K1, U]): (K1, Observable[U]) = (jgo2.getKey, toScalaObservable[U](jgo2))
        })
        (jgo1.getKey, toScalaObservable[(K1, Observable[U])](jo))
      }
    }
  }

  /**
   * Returns an Observable that counts the total number of items emitted by the source Observable and emits this count as a 64-bit Long.
   *
   * <img width="640" height="310" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/longCount.png">
   *
   * @return an Observable that emits a single item: the number of items emitted by the source Observable as a 64-bit Long item
   */
  def longCount: Observable[Long] = {
    // Unbox java.lang.Long to a Scala Long.
    toScalaObservable[java.lang.Long](asJavaObservable.longCount()).map(_.longValue())
  }

  /**
   * Returns an Observable that emits a single `Map` that contains an `Seq` of items emitted by the
   * source Observable keyed by a specified `keySelector` function.
   *
   * <img width="640" height="305" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/toMultiMap.png">
   *
   * @param keySelector the function that extracts the key from the source items to be used as key in the HashMap
   * @return an Observable that emits a single item: a `Map` that contains an `Seq` of items mapped from
   *         the source Observable
   */
  def toMultimap[K](keySelector: T => K): Observable[scala.collection.Map[K, Seq[T]]] = {
    toMultimap(keySelector, k => k)
  }

  /**
   * Returns an Observable that emits a single `Map` that contains an `Seq` of values extracted by a
   * specified `valueSelector` function from items emitted by the source Observable, keyed by a
   * specified `keySelector` function.
   *
   * <img width="640" height="305" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/toMultiMap.png">
   *
   * @param keySelector the function that extracts a key from the source items to be used as key in the HashMap
   * @param valueSelector the function that extracts a value from the source items to be used as value in the HashMap
   * @return an Observable that emits a single item: a `Map` that contains an `Seq` of items mapped from
   *         the source Observable
   */
  def toMultimap[K, V](keySelector: T => K, valueSelector: T => V): Observable[scala.collection.Map[K, Seq[V]]] = {
    // Delegate to the mapFactory overload with a fresh mutable Map as the default container.
    toMultimap(keySelector, valueSelector, () => mutable.Map[K, mutable.Buffer[V]]())
  }

  /**
   * Returns an Observable that emits a single `mutable.Map[K, mutable.Buffer[V]]`, returned by a specified `mapFactory` function, that
   * contains values, extracted by a specified `valueSelector` function from items emitted by the source Observable and
   * keyed by the `keySelector` function. `mutable.Map[K, B]` is the same instance created by `mapFactory`.
   *
   * <img width="640" height="305" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/toMultiMap.png">
   *
   * @param keySelector the function that extracts a key from the source items to be used as the key in the Map
   * @param valueSelector the function that extracts a value from the source items to be used as the value in the Map
   * @param mapFactory the function that returns a `mutable.Map[K, mutable.Buffer[V]]` instance to be used
   * @return an Observable that emits a single item: a `mutable.Map[K, mutable.Buffer[V]]` that contains items mapped
   *         from the source Observable
   */
  def toMultimap[K, V, M <: mutable.Map[K, mutable.Buffer[V]]](keySelector: T => K, valueSelector: T => V, mapFactory: () => M): Observable[M] = {
    // Delegate to the bufferFactory overload with a fresh mutable Buffer per key as the default.
    toMultimap[K, V, mutable.Buffer[V], M](keySelector, valueSelector, mapFactory, k => mutable.Buffer[V]())
  }

  /**
   * Returns an Observable that emits a single `mutable.Map[K, B]`, returned by a specified `mapFactory` function, that
   * contains values extracted by a specified `valueSelector` function from items emitted by the source Observable, and
   * keyed by the `keySelector` function. `mutable.Map[K, B]` is the same instance created by `mapFactory`.
   *
   * <img width="640" height="305" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/toMultiMap.png">
   *
   * @param keySelector the function that extracts a key from the source items to be used as the key in the Map
   * @param valueSelector the function that extracts a value from the source items to be used as the value in the Map
   * @param mapFactory the function that returns a Map instance to be used
   * @param bufferFactory the function that returns a `mutable.Buffer[V]` instance for a particular key to be used in the Map
   * @return an Observable that emits a single item: a `mutable.Map[K, B]` that contains mapped items from the source Observable.
   */
  def toMultimap[K, V, B <: mutable.Buffer[V], M <: mutable.Map[K, B]](keySelector: T => K, valueSelector: T => V, mapFactory: () => M, bufferFactory: K => B): Observable[M] = {
    // It's complicated to convert `mutable.Map[K, mutable.Buffer[V]]` to `java.util.Map[K, java.util.Collection[V]]`,
    // so RxScala implements `toMultimap` directly.
    // Choosing `mutable.Buffer/Map` is because `append/update` is necessary to implement an efficient `toMultimap`.
    lift {
      (subscriber: Subscriber[M]) => {
        // One map instance per subscription; filled as items arrive, emitted once on completion.
        val map = mapFactory()
        Subscriber[T](
          subscriber,
          (t: T) => {
            val key = keySelector(t)
            val values = map.get(key) match {
              case Some(v) => v
              case None => bufferFactory(key)
            }
            values += valueSelector(t)
            // `: Unit` ascription discards the result of `+=` so the lambda's type is T => Unit.
            map += key -> values: Unit
          },
          e => subscriber.onError(e),
          () => {
            subscriber.onNext(map)
            subscriber.onCompleted()
          }
        )
      }
    }
  }

  /**
   * Returns an Observable that emits a single item, a collection composed of all the items emitted by
   * the source Observable.
   *
   * Be careful not to use this operator on Observables that emit infinite or very large numbers
   * of items, as you do not have the option to unsubscribe.
   *
   * @tparam Col the collection type to build.
   * @return an Observable that emits a single item, a collection containing all of the items emitted by
   *         the source Observable.
   */
  def to[Col[_]](implicit cbf: CanBuildFrom[Nothing, T, Col[T @uncheckedVariance]]): Observable[Col[T @uncheckedVariance]] = {
    lift {
      (subscriber: Subscriber[Col[T]]) => {
        // One builder per subscription; the built collection is emitted once on completion.
        val b = cbf()
        Subscriber[T](
          subscriber,
          (t: T) => {
            // `: Unit` ascription discards the builder returned by `+=` so the lambda's type is T => Unit.
            b += t: Unit
          },
          e => subscriber.onError(e),
          () => {
            subscriber.onNext(b.result)
            subscriber.onCompleted()
          }
        )
      }
    }
  }

  /**
   * Returns an Observable that emits a single item, a `Traversable` composed of all the items emitted by
   * the source Observable.
   *
   * Be careful not to use this operator on Observables that emit infinite or very large numbers
   * of items, as you do not have the option to unsubscribe.
   *
   * @return an Observable that emits a single item, a `Traversable` containing all of the items emitted by
   *         the source Observable.
   */
  def toTraversable: Observable[Traversable[T]] = to[Traversable]

  /**
   * Returns an Observable that emits a single item, a `List` composed of all the items emitted by
   * the source Observable.
   *
   * Be careful not to use this operator on Observables that emit infinite or very large numbers
   * of items, as you do not have the option to unsubscribe.
   *
   * @return an Observable that emits a single item, a `List` containing all of the items emitted by
   *         the source Observable.
   */
  def toList: Observable[List[T]] = to[List]

  /**
   * Returns an Observable that emits a single item, an `Iterable` composed of all the items emitted by
   * the source Observable.
   *
   * Be careful not to use this operator on Observables that emit infinite or very large numbers
   * of items, as you do not have the option to unsubscribe.
   *
   * @return an Observable that emits a single item, an `Iterable` containing all of the items emitted by
   *         the source Observable.
   */
  def toIterable: Observable[Iterable[T]] = to[Iterable]

  /**
   * Returns an Observable that emits a single item, an `Iterator` composed of all the items emitted by
   * the source Observable.
   *
   * Be careful not to use this operator on Observables that emit infinite or very large numbers
   * of items, as you do not have the option to unsubscribe.
   *
   * @return an Observable that emits a single item, an `Iterator` containing all of the items emitted by
   *         the source Observable.
   */
  // Builds the full Iterable first, then exposes its iterator.
  def toIterator: Observable[Iterator[T]] = toIterable.map(_.iterator)

  /**
   * Returns an Observable that emits a single item, a `Stream` composed of all the items emitted by
   * the source Observable.
   *
   * Be careful not to use this operator on Observables that emit infinite or very large numbers
   * of items, as you do not have the option to unsubscribe.
   *
   * @return an Observable that emits a single item, a `Stream` containing all of the items emitted by
   *         the source Observable.
   */
  def toStream: Observable[Stream[T]] = to[Stream]

  /**
   * Returns an Observable that emits a single item, an `IndexedSeq` composed of all the items emitted by
   * the source Observable.
   *
   * Be careful not to use this operator on Observables that emit infinite or very large numbers
   * of items, as you do not have the option to unsubscribe.
   *
   * @return an Observable that emits a single item, an `IndexedSeq` containing all of the items emitted by
   *         the source Observable.
   */
  def toIndexedSeq: Observable[immutable.IndexedSeq[T]] = to[immutable.IndexedSeq]

  /**
   * Returns an Observable that emits a single item, a `Vector` composed of all the items emitted by
   * the source Observable.
   *
   * Be careful not to use this operator on Observables that emit infinite or very large numbers
   * of items, as you do not have the option to unsubscribe.
   *
   * @return an Observable that emits a single item, a `Vector` containing all of the items emitted by
   *         the source Observable.
   */
  def toVector: Observable[Vector[T]] = to[Vector]

  /**
   * Returns an Observable that emits a single item, a `Buffer` composed of all the items emitted by
   * the source Observable.
   *
   * Be careful not to use this operator on Observables that emit infinite or very large numbers
   * of items, as you do not have the option to unsubscribe.
   *
   * @return an Observable that emits a single item, a `Buffer` containing all of the items emitted by
   *         the source Observable.
   */
  def toBuffer[U >: T]: Observable[mutable.Buffer[U]] = { // use U >: T because Buffer is invariant
    val us: Observable[U] = this
    us.to[ArrayBuffer]
  }

  /**
   * Returns an Observable that emits a single item, a `Set` composed of all the items emitted by
   * the source Observable.
   *
   * Be careful not to use this operator on Observables that emit infinite or very large numbers
   * of items, as you do not have the option to unsubscribe.
   *
   * @return an Observable that emits a single item, a `Set` containing all of the items emitted by
   *         the source Observable.
   */
  def toSet[U >: T]: Observable[immutable.Set[U]] = { // use U >: T because Set is invariant
    val us: Observable[U] = this
    us.to[immutable.Set]
  }

  /**
   * Returns an Observable that emits a single item, an `Array` composed of all the items emitted by
   * the source Observable.
   *
   * Be careful not to use this operator on Observables that emit infinite or very large numbers
   * of items, as you do not have the option to unsubscribe.
   *
   * @return an Observable that emits a single item, an `Array` containing all of the items emitted by
   *         the source Observable.
   */
  def toArray[U >: T : ClassTag]: Observable[Array[U]] = // use U >: T because Array is invariant
    toBuffer[U].map(_.toArray)
}

/**
 * Provides various ways to construct new Observables.
 */
object Observable {
  import scala.collection.JavaConverters._
  import scala.collection.immutable.Range
  import scala.concurrent.duration.Duration
  import scala.concurrent.{Future, ExecutionContext}
  import scala.util.{Success, Failure}
  import ImplicitFunctionConversions._
  import JavaConversions._
  import rx.lang.scala.subjects.AsyncSubject

  // Wraps a Java Observable of java.util.Lists as a Scala Observable of Seqs.
  private[scala] def jObsOfListToScObsOfSeq[T](jObs: rx.Observable[_ <: java.util.List[T]]): Observable[Seq[T]] = {
    val oScala1: Observable[java.util.List[T]] = new Observable[java.util.List[T]]{ val asJavaObservable = jObs }
    oScala1.map((lJava: java.util.List[T]) => lJava.asScala)
  }

  // Wraps a Java Observable of Java Observables as a Scala Observable of Scala Observables
  // (the inner conversion happens via the implicit JavaConversions in scope).
  private[scala] def jObsOfJObsToScObsOfScObs[T](jObs: rx.Observable[_ <: rx.Observable[_ <: T]]): Observable[Observable[T]] = {
    val oScala1: Observable[rx.Observable[_ <: T]] = new Observable[rx.Observable[_ <: T]]{ val asJavaObservable = jObs }
    oScala1.map((oJava: rx.Observable[_ <: T]) => oJava)
  }

  /**
   * Creates an Observable that will execute the given function when an [[rx.lang.scala.Observer]] subscribes to it.
   *
   * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/create.png">
   *
   * Write the function you pass to `create` so that it behaves as an Observable: It
   * should invoke the Observer's [[rx.lang.scala.Observer.onNext onNext]], [[rx.lang.scala.Observer.onError onError]], and [[rx.lang.scala.Observer.onCompleted onCompleted]] methods
   * appropriately.
   *
   * See <a href="http://go.microsoft.com/fwlink/?LinkID=205219">Rx Design Guidelines (PDF)</a>
   * for detailed information.
   *
   *
   * @tparam T
   *            the type of the items that this Observable emits.
   * @param func
   *            a function that accepts an `Observer[T]`, invokes its `onNext`, `onError`, and `onCompleted` methods
   *            as appropriate, and returns a [[rx.lang.scala.Subscription]] to allow the Observer to
   *            cancel the subscription.
   * @return
   *         an Observable that, when an [[rx.lang.scala.Observer]] subscribes to it, will execute the given function.
   */
  def create[T](func: Observer[T] => Subscription): Observable[T] = {
    Observable(
      (subscriber: Subscriber[T]) => {
        val s = func(subscriber)
        // Attach the returned Subscription to the Subscriber so it is released on unsubscribe
        // (skipped when func returned null or the subscriber itself).
        if (s != null && s != subscriber) {
          subscriber.add(s)
        }
      }
    )
  }

  /*
  Note: It's dangerous to have two overloads where one takes an `Observer[T] => Subscription`
  function and the other takes a `Subscriber[T] => Unit` function, because expressions like
  `o => Subscription{}` have both of these types.
  So we call the old create method "create", and the new create method "apply".
  Try it out yourself here:
  def foo[T]: Unit = {
    val fMeant: Observer[T] => Subscription = o => Subscription{}
    val fWrong: Subscriber[T] => Unit = o => Subscription{}
  }
  */

  /**
   * Returns an Observable that will execute the specified function when someone subscribes to it.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/create.png">
   *
   * Write the function you pass so that it behaves as an Observable: It should invoke the
   * Subscriber's `onNext`, `onError`, and `onCompleted` methods appropriately.
   *
   * You can `add` custom [[Subscription]]s to [[Subscriber]]. These [[Subscription]]s will be called
   * <ul>
   *   <li>when someone calls `unsubscribe`.</li>
   *   <li>after `onCompleted` or `onError`.</li>
   * </ul>
   *
   * See <a href="http://go.microsoft.com/fwlink/?LinkID=205219">Rx Design Guidelines (PDF)</a> for detailed
   * information.
   *
   * See `<a href="https://github.com/Netflix/RxJava/blob/master/language-adaptors/rxjava-scala/src/examples/scala/rx/lang/scala/examples/RxScalaDemo.scala">RxScalaDemo</a>.createExampleGood`
   * and `<a href="https://github.com/Netflix/RxJava/blob/master/language-adaptors/rxjava-scala/src/examples/scala/rx/lang/scala/examples/RxScalaDemo.scala">RxScalaDemo</a>.createExampleGood2`.
   *
   * @tparam T
   *            the type of the items that this Observable emits
   * @param f
   *            a function that accepts a `Subscriber[T]`, and invokes its `onNext`,
   *            `onError`, and `onCompleted` methods as appropriate
   * @return an Observable that, when someone subscribes to it, will execute the specified
   *         function
   */
  def apply[T](f: Subscriber[T] => Unit): Observable[T] = {
    toScalaObservable(rx.Observable.create(f))
  }

  /**
   * Returns an Observable that invokes an [[rx.lang.scala.Observer]]'s [[rx.lang.scala.Observer.onError onError]]
   * method when the Observer subscribes to it.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/error.png">
   *
   * @param exception
   *            the particular error to report
   * @tparam T
   *            the type of the items (ostensibly) emitted by the Observable
   * @return an Observable that invokes the [[rx.lang.scala.Observer]]'s [[rx.lang.scala.Observer.onError onError]]
   *         method when the Observer subscribes to it
   */
  def error[T](exception: Throwable): Observable[T] = {
    toScalaObservable[T](rx.Observable.error(exception))
  }

  /**
   * Returns an Observable that invokes an `Observer`'s `onError` method on the
   * specified Scheduler.
   *
   * <img width="640" height="190" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/error.s.png">
   *
   * @param exception the particular Throwable to pass to `onError`
   * @param scheduler the Scheduler on which to call `onError`
   * @tparam T the type of the items (ostensibly) emitted by the Observable
   * @return an Observable that invokes the `Observer`'s `onError` method, on the specified Scheduler
   */
  def error[T](exception: Throwable, scheduler: Scheduler): Observable[T] = {
    toScalaObservable[T](rx.Observable.error(exception, scheduler))
  }

  /**
   * Returns an Observable that emits no data to the [[rx.lang.scala.Observer]] and
   * immediately invokes its [[rx.lang.scala.Observer#onCompleted onCompleted]] method.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/empty.png">
   *
   * @return an Observable that returns no data to the [[rx.lang.scala.Observer]] and
   *         immediately invokes the [[rx.lang.scala.Observer]]r's
   *         [[rx.lang.scala.Observer#onCompleted onCompleted]] method
   * @see <a href="https://github.com/Netflix/RxJava/wiki/Creating-Observables#empty-error-and-never">RxJava Wiki: empty()</a>
   * @see <a href="http://msdn.microsoft.com/en-us/library/hh229066.aspx">MSDN: Observable.Empty Method (IScheduler)</a>
   */
  def empty: Observable[Nothing] = {
    toScalaObservable(rx.Observable.empty[Nothing]())
  }

  /**
   * Returns an Observable that emits no data to the [[rx.lang.scala.Observer]] and
   * immediately invokes its [[rx.lang.scala.Observer#onCompleted onCompleted]] method
   * with the specified scheduler.
   * <p>
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/empty.s.png">
   *
   * @param scheduler the scheduler to call the [[rx.lang.scala.Observer#onCompleted onCompleted]] method
   * @return an Observable that returns no data to the [[rx.lang.scala.Observer]] and
   *         immediately invokes the [[rx.lang.scala.Observer]]r's
   *         [[rx.lang.scala.Observer#onCompleted onCompleted]] method with the
   *         specified scheduler
   * @see <a href="https://github.com/Netflix/RxJava/wiki/Creating-Observables#empty-error-and-never">RxJava Wiki: empty()</a>
   * @see <a href="http://msdn.microsoft.com/en-us/library/hh229066.aspx">MSDN: Observable.Empty Method (IScheduler)</a>
   */
  def empty(scheduler: Scheduler): Observable[Nothing] = {
    toScalaObservable(rx.Observable.empty[Nothing](scalaSchedulerToJavaScheduler(scheduler)))
  }

  /**
   * Converts a sequence of values into an Observable.
   *
   * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/from.png">
   *
   * Implementation note: the entire sequence will be immediately emitted each time an [[rx.lang.scala.Observer]] subscribes.
   * Since this occurs before the [[rx.lang.scala.Subscription]] is returned,
   * it is not possible to unsubscribe from the sequence before it completes.
   *
   * @param items
   *            the source sequence
   * @tparam T
   *            the type of items in the sequence, and the type of items to be emitted by the
   *            resulting Observable
   * @return an Observable that emits each item in the source sequence
   */
  def items[T](items: T*): Observable[T] = {
    toScalaObservable[T](rx.Observable.from(items.toIterable.asJava))
  }

  /** Returns an Observable emitting the value produced by the Future as its single item.
    * If the future fails, the Observable will fail as well.
    *
    * @param f Future whose value ends up in the resulting Observable
    * @return an Observable completed after producing the value of the future, or with an exception
    */
  def from[T](f: Future[T])(implicit execContext: ExecutionContext): Observable[T] = {
    // An AsyncSubject caches the single result, so subscribers arriving after the
    // future completes still receive it.
    val s = AsyncSubject[T]()
    f.onComplete {
      case Failure(e) =>
        s.onError(e)
      case Success(c) =>
        s.onNext(c)
        s.onCompleted()
    }
    s
  }

  /**
   * Converts an `Iterable` into an Observable.
   *
   * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/from.png">
   *
   * Note: the entire iterable sequence is immediately emitted each time an
   * Observer subscribes. Since this occurs before the
   * `Subscription` is returned, it is not possible to unsubscribe from
   * the sequence before it completes.
   *
   * @param iterable the source `Iterable` sequence
   * @tparam T the type of items in the `Iterable` sequence and the
   *           type of items to be emitted by the resulting Observable
   * @return an Observable that emits each item in the source `Iterable`
   *         sequence
   */
  def from[T](iterable: Iterable[T]): Observable[T] = {
    toScalaObservable(rx.Observable.from(iterable.asJava))
  }

  /**
   *
   * @param iterable the source `Iterable` sequence
   * @param scheduler the scheduler to use
   * @tparam T the type of items in the `Iterable` sequence and the
   *           type of items to be emitted by the resulting Observable
   * @return an Observable that emits each item in the source `Iterable`
   *         sequence
   */
  def from[T](iterable: Iterable[T], scheduler: Scheduler): Observable[T] = {
    toScalaObservable(rx.Observable.from(iterable.asJava, scheduler.asJavaScheduler))
  }

  /**
   * Returns an Observable that calls an Observable factory to create its Observable for each
   * new Observer that subscribes. That is, for each subscriber, the actual Observable is determined
   * by the factory function.
* * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/defer.png"> * * The defer operator allows you to defer or delay emitting items from an Observable until such * time as an Observer subscribes to the Observable. This allows an [[rx.lang.scala.Observer]] to easily * obtain updates or a refreshed version of the sequence. * * @param observable * the Observable factory function to invoke for each [[rx.lang.scala.Observer]] that * subscribes to the resulting Observable * @tparam T * the type of the items emitted by the Observable * @return an Observable whose [[rx.lang.scala.Observer]]s trigger an invocation of the given Observable * factory function */ def defer[T](observable: => Observable[T]): Observable[T] = { toScalaObservable[T](rx.Observable.defer[T](() => observable.asJavaObservable)) } /** * Returns an Observable that never sends any items or notifications to an [[rx.lang.scala.Observer]]. * * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/never.png"> * * This Observable is useful primarily for testing purposes. * * @return an Observable that never sends any items or notifications to an [[rx.lang.scala.Observer]] */ def never: Observable[Nothing] = { toScalaObservable[Nothing](rx.Observable.never()) } /** * Given 3 observables, returns an observable that emits Tuples of 3 elements each. * The first emitted Tuple will contain the first element of each source observable, * the second Tuple the second element of each source observable, and so on. * * @return an Observable that emits the zipped Observables */ def zip[A, B, C](obA: Observable[A], obB: Observable[B], obC: Observable[C]): Observable[(A, B, C)] = { toScalaObservable[(A, B, C)](rx.Observable.zip[A, B, C, (A, B, C)](obA.asJavaObservable, obB.asJavaObservable, obC.asJavaObservable, (a: A, b: B, c: C) => (a, b, c))) } /** * Given 4 observables, returns an observable that emits Tuples of 4 elements each. 
* The first emitted Tuple will contain the first element of each source observable, * the second Tuple the second element of each source observable, and so on. * * @return an Observable that emits the zipped Observables */ def zip[A, B, C, D](obA: Observable[A], obB: Observable[B], obC: Observable[C], obD: Observable[D]): Observable[(A, B, C, D)] = { toScalaObservable[(A, B, C, D)](rx.Observable.zip[A, B, C, D, (A, B, C, D)](obA.asJavaObservable, obB.asJavaObservable, obC.asJavaObservable, obD.asJavaObservable, (a: A, b: B, c: C, d: D) => (a, b, c, d))) } /** * Given an Observable emitting `N` source observables, returns an observable that * emits Seqs of `N` elements each. * The first emitted Seq will contain the first element of each source observable, * the second Seq the second element of each source observable, and so on. * * Note that the returned Observable will only start emitting items once the given * `Observable[Observable[T]]` has completed, because otherwise it cannot know `N`. * * @param observables * An Observable emitting N source Observables * @return an Observable that emits the zipped Seqs */ def zip[T](observables: Observable[Observable[T]]): Observable[Seq[T]] = { val f: FuncN[Seq[T]] = (args: Seq[java.lang.Object]) => { val asSeq: Seq[Object] = args.toSeq asSeq.asInstanceOf[Seq[T]] } val list = observables.map(_.asJavaObservable).asJavaObservable val o = rx.Observable.zip(list, f) toScalaObservable[Seq[T]](o) } /** * Emits `0`, `1`, `2`, `...` with a delay of `duration` between consecutive numbers. * * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/interval.png"> * * @param duration * duration between two consecutive numbers * @return An Observable that emits a number each time interval. 
*/ def interval(duration: Duration): Observable[Long] = { toScalaObservable[java.lang.Long](rx.Observable.interval(duration.length, duration.unit)).map(_.longValue()) } /** * Emits `0`, `1`, `2`, `...` with a delay of `duration` between consecutive numbers. * * <img width="640" src="https://github.com/Netflix/RxJava/wiki/images/rx-operators/interval.png"> * * @param period * duration between two consecutive numbers * @param scheduler * the scheduler to use * @return An Observable that emits a number each time interval. */ def interval(period: Duration, scheduler: Scheduler): Observable[Long] = { toScalaObservable[java.lang.Long](rx.Observable.interval(period.length, period.unit, scheduler)).map(_.longValue()) } /** * Return an Observable that emits a 0L after the {@code initialDelay} and ever increasing * numbers after each {@code period} of time thereafter, on a specified Scheduler. * <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/timer.ps.png"> * * @param initialDelay * the initial delay time to wait before emitting the first value of 0L * @param period * the period of time between emissions of the subsequent numbers * @return an Observable that emits a 0L after the { @code initialDelay} and ever increasing * numbers after each { @code period} of time thereafter, while running on the given { @code scheduler} */ def timer(initialDelay: Duration, period: Duration): Observable[Long] = { toScalaObservable[java.lang.Long](rx.Observable.timer(initialDelay.toNanos, period.toNanos, duration.NANOSECONDS)).map(_.longValue()) } /** * Return an Observable that emits a 0L after the {@code initialDelay} and ever increasing * numbers after each {@code period} of time thereafter, on a specified Scheduler. 
* <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/timer.ps.png"> * * @param initialDelay * the initial delay time to wait before emitting the first value of 0L * @param period * the period of time between emissions of the subsequent numbers * @param scheduler * the scheduler on which the waiting happens and items are emitted * @return an Observable that emits a 0L after the { @code initialDelay} and ever increasing * numbers after each { @code period} of time thereafter, while running on the given { @code scheduler} */ def timer(initialDelay: Duration, period: Duration, scheduler: Scheduler): Observable[Long] = { toScalaObservable[java.lang.Long](rx.Observable.timer(initialDelay.toNanos, period.toNanos, duration.NANOSECONDS, scheduler)).map(_.longValue()) } /** * Returns an Observable that emits `0L` after a specified delay, and then completes. * * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/timer.png"> * * @param delay the initial delay before emitting a single `0L` * @return Observable that emits `0L` after a specified delay, and then completes */ def timer(delay: Duration): Observable[Long] = { toScalaObservable[java.lang.Long](rx.Observable.timer(delay.length, delay.unit)).map(_.longValue()) } /** * Returns an Observable that emits `0L` after a specified delay, on a specified Scheduler, and then * completes. 
* * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/timer.s.png"> * * @param delay the initial delay before emitting a single `0L` * @param scheduler the Scheduler to use for scheduling the item * @return Observable that emits `0L` after a specified delay, on a specified Scheduler, and then completes */ def timer(delay: Duration, scheduler: Scheduler): Observable[Long] = { toScalaObservable[java.lang.Long](rx.Observable.timer(delay.length, delay.unit, scheduler)).map(_.longValue()) } /** * Constructs an Observable that creates a dependent resource object. * <p> * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/using.png"> * * @param resourceFactory the factory function to create a resource object that depends on the Observable * @param observableFactory the factory function to obtain an Observable * @return the Observable whose lifetime controls the lifetime of the dependent resource object */ def using[T, Resource <: Subscription](resourceFactory: () => Resource, observableFactory: Resource => Observable[T]): Observable[T] = { class ResourceSubscription(val resource: Resource) extends rx.Subscription { def unsubscribe = resource.unsubscribe def isUnsubscribed: Boolean = resource.isUnsubscribed } toScalaObservable(rx.Observable.using[T, ResourceSubscription]( () => new ResourceSubscription(resourceFactory()), (s: ResourceSubscription) => observableFactory(s.resource).asJavaObservable )) } /** * Mirror the one Observable in an Iterable of several Observables that first emits an item. 
* * <img width="640" src="https://raw.github.com/wiki/Netflix/RxJava/images/rx-operators/amb.png"> * * @param sources an Iterable of Observable sources competing to react first * @return an Observable that emits the same sequence of items as whichever of the source Observables * first emitted an item */ def amb[T](sources: Observable[T]*): Observable[T] = { toScalaObservable[T](rx.Observable.amb[T](sources.map(_.asJavaObservable).asJava)) } /** * Combines a list of source Observables by emitting an item that aggregates the latest values of each of * the source Observables each time an item is received from any of the source Observables, where this * aggregation is defined by a specified function. * * @tparam T the common base type of source values * @tparam R the result type * @param sources the list of source Observables * @param combineFunction the aggregation function used to combine the items emitted by the source Observables * @return an Observable that emits items that are the result of combining the items emitted by the source * Observables by means of the given aggregation function */ def combineLatest[T, R](sources: Seq[Observable[T]], combineFunction: Seq[T] => R): Observable[R] = { val jSources = new java.util.ArrayList[rx.Observable[_ <: T]](sources.map(_.asJavaObservable).asJava) val jCombineFunction = new rx.functions.FuncN[R] { override def call(args: java.lang.Object*): R = combineFunction(args.map(_.asInstanceOf[T])) } toScalaObservable[R](rx.Observable.combineLatest[T, R](jSources, jCombineFunction)) } }
srvaroa/RxJava
language-adaptors/rxjava-scala/src/main/scala/rx/lang/scala/Observable.scala
Scala
apache-2.0
231,784
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.expressions import java.util.Locale import org.apache.spark.sql.catalyst.analysis.{TypeCheckResult, UnresolvedException} import org.apache.spark.sql.catalyst.analysis.TypeCheckResult.{TypeCheckFailure, TypeCheckSuccess} import org.apache.spark.sql.catalyst.dsl.expressions._ import org.apache.spark.sql.catalyst.expressions.aggregate.{AggregateFunction, DeclarativeAggregate, NoOp} import org.apache.spark.sql.catalyst.trees.{BinaryLike, LeafLike, TernaryLike, UnaryLike} import org.apache.spark.sql.catalyst.trees.TreePattern.{TreePattern, UNRESOLVED_WINDOW_EXPRESSION, WINDOW_EXPRESSION} import org.apache.spark.sql.errors.QueryExecutionErrors import org.apache.spark.sql.types._ /** * The trait of the Window Specification (specified in the OVER clause or WINDOW clause) for * Window Functions. */ sealed trait WindowSpec /** * The specification for a window function. * * @param partitionSpec It defines the way that input rows are partitioned. * @param orderSpec It defines the ordering of rows in a partition. * @param frameSpecification It defines the window frame in a partition. 
 */
case class WindowSpecDefinition(
    partitionSpec: Seq[Expression],
    orderSpec: Seq[SortOrder],
    frameSpecification: WindowFrame) extends Expression with WindowSpec with Unevaluable {

  // Children are laid out as partitionSpec ++ orderSpec :+ frameSpecification; the
  // slicing in withNewChildrenInternal below relies on exactly this layout.
  override def children: Seq[Expression] = partitionSpec ++ orderSpec :+ frameSpecification

  override protected def withNewChildrenInternal(
      newChildren: IndexedSeq[Expression]): WindowSpecDefinition =
    copy(
      partitionSpec = newChildren.take(partitionSpec.size),
      orderSpec = newChildren.drop(partitionSpec.size).dropRight(1).asInstanceOf[Seq[SortOrder]],
      frameSpecification = newChildren.last.asInstanceOf[WindowFrame])

  // Resolved only once the frame has been concretized to a SpecifiedWindowFrame; an
  // UnspecifiedFrame is expected to be rewritten by the analyzer before this point.
  override lazy val resolved: Boolean =
    childrenResolved && checkInputDataTypes().isSuccess &&
      frameSpecification.isInstanceOf[SpecifiedWindowFrame]

  override def nullable: Boolean = true

  // A window spec has no value of its own; asking for its data type is a bug.
  override def dataType: DataType = throw QueryExecutionErrors.dataTypeOperationUnsupportedError

  /**
   * Validates the frame against the ordering: RANGE frames need an ORDER BY clause, and
   * value-bounded RANGE frames need a single ordering expression whose type is compatible
   * with the boundary type.
   */
  override def checkInputDataTypes(): TypeCheckResult = {
    frameSpecification match {
      case UnspecifiedFrame =>
        TypeCheckFailure(
          "Cannot use an UnspecifiedFrame. This should have been converted during analysis. " +
            "Please file a bug report.")
      case f: SpecifiedWindowFrame if f.frameType == RangeFrame && !f.isUnbounded &&
          orderSpec.isEmpty =>
        TypeCheckFailure(
          "A range window frame cannot be used in an unordered window specification.")
      case f: SpecifiedWindowFrame if f.frameType == RangeFrame && f.isValueBound &&
          orderSpec.size > 1 =>
        TypeCheckFailure(
          s"A range window frame with value boundaries cannot be used in a window specification " +
            s"with multiple order by expressions: ${orderSpec.mkString(",")}")
      case f: SpecifiedWindowFrame if f.frameType == RangeFrame && f.isValueBound &&
          !isValidFrameType(f.valueBoundary.head.dataType) =>
        TypeCheckFailure(
          s"The data type '${orderSpec.head.dataType.catalogString}' used in the order " +
            "specification does not match the data type " +
            s"'${f.valueBoundary.head.dataType.catalogString}' which is used in the range frame.")
      case _ => TypeCheckSuccess
    }
  }

  override def sql: String = {
    // Wrapping in Seq(...) lets an empty clause drop out entirely via filter(_.nonEmpty).
    def toSql(exprs: Seq[Expression], prefix: String): Seq[String] = {
      Seq(exprs).filter(_.nonEmpty).map(_.map(_.sql).mkString(prefix, ", ", ""))
    }

    val elements =
      toSql(partitionSpec, "PARTITION BY ") ++
        toSql(orderSpec, "ORDER BY ") ++
        Seq(frameSpecification.sql)
    elements.mkString("(", " ", ")")
  }

  // Pairs of (ORDER BY type, boundary type) accepted for value-bounded RANGE frames;
  // anything else requires the two types to be identical.
  private def isValidFrameType(ft: DataType): Boolean = (orderSpec.head.dataType, ft) match {
    case (DateType, IntegerType) => true
    case (DateType, _: YearMonthIntervalType) => true
    case (TimestampType | TimestampNTZType, CalendarIntervalType) => true
    case (TimestampType | TimestampNTZType, _: YearMonthIntervalType) => true
    case (TimestampType | TimestampNTZType, _: DayTimeIntervalType) => true
    case (a, b) => a == b
  }
}

/**
 * A Window specification reference that refers to the [[WindowSpecDefinition]] defined
 * under the name `name`.
 */
case class WindowSpecReference(name: String) extends WindowSpec

/**
 * The trait used to represent the type of a Window Frame.
 */
sealed trait FrameType {
  // The data type boundary expressions must conform to for this frame type.
  def inputType: AbstractDataType
  // The SQL keyword for this frame type ("ROWS" or "RANGE").
  def sql: String
}

/**
 * RowFrame treats rows in a partition individually. Values used in a row frame are considered
 * to be physical offsets.
 * For example, `ROW BETWEEN 1 PRECEDING AND 1 FOLLOWING` represents a 3-row frame,
 * from the row that precedes the current row to the row that follows the current row.
 */
case object RowFrame extends FrameType {
  override def inputType: AbstractDataType = IntegerType
  override def sql: String = "ROWS"
}

/**
 * RangeFrame treats rows in a partition as groups of peers. All rows having the same `ORDER BY`
 * ordering are considered as peers. Values used in a range frame are considered to be logical
 * offsets.
 * For example, assuming the value of the current row's `ORDER BY` expression `expr` is `v`,
 * `RANGE BETWEEN 1 PRECEDING AND 1 FOLLOWING` represents a frame containing rows whose values
 * `expr` are in the range of [v-1, v+1].
 *
 * If `ORDER BY` clause is not defined, all rows in the partition are considered as peers
 * of the current row.
 */
case object RangeFrame extends FrameType {
  override def inputType: AbstractDataType = TypeCollection.NumericAndInterval
  override def sql: String = "RANGE"
}

/**
 * The trait used to represent special boundaries used in a window frame.
 */
sealed trait SpecialFrameBoundary extends LeafExpression with Unevaluable {
  // Special boundaries carry no value; NullType serves as a placeholder data type.
  override def dataType: DataType = NullType
  override def nullable: Boolean = false
}

/** UNBOUNDED PRECEDING boundary. */
case object UnboundedPreceding extends SpecialFrameBoundary {
  override def sql: String = "UNBOUNDED PRECEDING"
}

/** UNBOUNDED FOLLOWING boundary. */
case object UnboundedFollowing extends SpecialFrameBoundary {
  override def sql: String = "UNBOUNDED FOLLOWING"
}

/** CURRENT ROW boundary. */
case object CurrentRow extends SpecialFrameBoundary {
  override def sql: String = "CURRENT ROW"
}

/**
 * Represents a window frame.
*/ sealed trait WindowFrame extends Expression with Unevaluable { override def dataType: DataType = throw QueryExecutionErrors.dataTypeOperationUnsupportedError override def nullable: Boolean = false } /** Used as a placeholder when a frame specification is not defined. */ case object UnspecifiedFrame extends WindowFrame with LeafLike[Expression] /** * A specified Window Frame. The val lower/upper can be either a foldable [[Expression]] or a * [[SpecialFrameBoundary]]. */ case class SpecifiedWindowFrame( frameType: FrameType, lower: Expression, upper: Expression) extends WindowFrame with BinaryLike[Expression] { override def left: Expression = lower override def right: Expression = upper lazy val valueBoundary: Seq[Expression] = children.filterNot(_.isInstanceOf[SpecialFrameBoundary]) override def checkInputDataTypes(): TypeCheckResult = { // Check lower value. val lowerCheck = checkBoundary(lower, "lower") if (lowerCheck.isFailure) { return lowerCheck } // Check upper value. val upperCheck = checkBoundary(upper, "upper") if (upperCheck.isFailure) { return upperCheck } // Check combination (of expressions). 
(lower, upper) match { case (l: Expression, u: Expression) if !isValidFrameBoundary(l, u) => TypeCheckFailure(s"Window frame upper bound '$upper' does not follow the lower bound " + s"'$lower'.") case (l: SpecialFrameBoundary, _) => TypeCheckSuccess case (_, u: SpecialFrameBoundary) => TypeCheckSuccess case (l: Expression, u: Expression) if l.dataType != u.dataType => TypeCheckFailure( s"Window frame bounds '$lower' and '$upper' do no not have the same data type: " + s"'${l.dataType.catalogString}' <> '${u.dataType.catalogString}'") case (l: Expression, u: Expression) if isGreaterThan(l, u) => TypeCheckFailure( "The lower bound of a window frame must be less than or equal to the upper bound") case _ => TypeCheckSuccess } } override def sql: String = { val lowerSql = boundarySql(lower) val upperSql = boundarySql(upper) s"${frameType.sql} BETWEEN $lowerSql AND $upperSql" } def isUnbounded: Boolean = lower == UnboundedPreceding && upper == UnboundedFollowing def isValueBound: Boolean = valueBoundary.nonEmpty def isOffset: Boolean = (lower, upper) match { case (l: Expression, u: Expression) => frameType == RowFrame && l == u case _ => false } private def boundarySql(expr: Expression): String = expr match { case e: SpecialFrameBoundary => e.sql case UnaryMinus(n, _) => n.sql + " PRECEDING" case e: Expression => e.sql + " FOLLOWING" } // Check whether the left boundary value is greater than the right boundary value. It's required // that the both expressions have the same data type. // Since CalendarIntervalType is not comparable, we only compare expressions that are AtomicType. 
private def isGreaterThan(l: Expression, r: Expression): Boolean = l.dataType match { case _: AtomicType => GreaterThan(l, r).eval().asInstanceOf[Boolean] case _ => false } private def checkBoundary(b: Expression, location: String): TypeCheckResult = b match { case _: SpecialFrameBoundary => TypeCheckSuccess case e: Expression if !e.foldable => TypeCheckFailure(s"Window frame $location bound '$e' is not a literal.") case e: Expression if !frameType.inputType.acceptsType(e.dataType) => TypeCheckFailure( s"The data type of the $location bound '${e.dataType.catalogString}' does not match " + s"the expected data type '${frameType.inputType.simpleString}'.") case _ => TypeCheckSuccess } private def isValidFrameBoundary(l: Expression, u: Expression): Boolean = { (l, u) match { case (UnboundedFollowing, _) => false case (_, UnboundedPreceding) => false case _ => true } } override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): SpecifiedWindowFrame = copy(lower = newLeft, upper = newRight) } case class UnresolvedWindowExpression( child: Expression, windowSpec: WindowSpecReference) extends UnaryExpression with Unevaluable { override def dataType: DataType = throw new UnresolvedException("dataType") override def nullable: Boolean = throw new UnresolvedException("nullable") override lazy val resolved = false override protected def withNewChildInternal(newChild: Expression): UnresolvedWindowExpression = copy(child = newChild) override val nodePatterns: Seq[TreePattern] = Seq(UNRESOLVED_WINDOW_EXPRESSION) } case class WindowExpression( windowFunction: Expression, windowSpec: WindowSpecDefinition) extends Expression with Unevaluable with BinaryLike[Expression] { override def left: Expression = windowFunction override def right: Expression = windowSpec override def dataType: DataType = windowFunction.dataType override def nullable: Boolean = windowFunction.nullable override def toString: String = s"$windowFunction $windowSpec" override def sql: 
String = windowFunction.sql + " OVER " + windowSpec.sql override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): WindowExpression = copy(windowFunction = newLeft, windowSpec = newRight.asInstanceOf[WindowSpecDefinition]) override val nodePatterns: Seq[TreePattern] = Seq(WINDOW_EXPRESSION) } /** * A window function is a function that can only be evaluated in the context of a window operator. */ trait WindowFunction extends Expression { /** Frame in which the window operator must be executed. */ def frame: WindowFrame = UnspecifiedFrame } /** * Case objects that describe whether a window function is a SQL window function or a Python * user-defined window function. */ sealed trait WindowFunctionType object WindowFunctionType { case object SQL extends WindowFunctionType case object Python extends WindowFunctionType def functionType(windowExpression: NamedExpression): WindowFunctionType = { val t = windowExpression.collectFirst { case _: WindowFunction | _: AggregateFunction => SQL case udf: PythonUDF if PythonUDF.isWindowPandasUDF(udf) => Python } // Normally a window expression would either have a SQL window function, a SQL // aggregate function or a python window UDF. However, sometimes the optimizer will replace // the window function if the value of the window function can be predetermined. // For example, for query: // // select count(NULL) over () from values 1.0, 2.0, 3.0 T(a) // // The window function will be replaced by expression literal(0) // To handle this case, if a window expression doesn't have a regular window function, we // consider its type to be SQL as literal(0) is also a SQL expression. t.getOrElse(SQL) } } trait OffsetWindowFunction extends WindowFunction { /** * Input expression to evaluate against a row which a number of rows below or above (depending on * the value and sign of the offset) the starting row (current row if isRelative=true, or the * first row of the window frame otherwise). 
*/ val input: Expression /** * (Foldable) expression that contains the number of rows between the current row and the row * where the input expression is evaluated. If `offset` is a positive integer, it means that * the direction of the `offset` is from front to back. If it is a negative integer, the direction * of the `offset` is from back to front. If it is zero, it means that the offset is ignored and * use current row. */ val offset: Expression /** * Default result value for the function when the `offset`th row does not exist. */ val default: Expression /** * An optional specification that indicates the offset window function should skip null values in * the determination of which row to use. */ val ignoreNulls: Boolean /** * A fake window frame which is used to hold the offset information. It's used as a key to group * by offset window functions in `WindowExecBase.windowFrameExpressionFactoryPairs`, as offset * window functions with the same offset and same window frame can be evaluated together. */ lazy val fakeFrame = SpecifiedWindowFrame(RowFrame, offset, offset) } /** * A frameless offset window function is a window function that cannot specify window frame and * returns the value of the input column offset by a number of rows according to the current row * within the partition. For instance: a FrameLessOffsetWindowFunction for value x with offset -2, * will get the value of x 2 rows back from the current row in the partition. */ sealed abstract class FrameLessOffsetWindowFunction extends OffsetWindowFunction with Unevaluable with ImplicitCastInputTypes { /* * The result of an OffsetWindowFunction is dependent on the frame in which the * OffsetWindowFunction is executed, the input expression and the default expression. Even when * both the input and the default expression are foldable, the result is still not foldable due to * the frame. 
* * Note, the value of foldable is set to false in the trait Unevaluable * * override def foldable: Boolean = false */ override def nullable: Boolean = default == null || default.nullable || input.nullable override lazy val frame: WindowFrame = fakeFrame override def checkInputDataTypes(): TypeCheckResult = { val check = super.checkInputDataTypes() if (check.isFailure) { check } else if (!offset.foldable) { TypeCheckFailure(s"Offset expression '$offset' must be a literal.") } else { TypeCheckSuccess } } override def dataType: DataType = input.dataType override def inputTypes: Seq[AbstractDataType] = Seq(AnyDataType, IntegerType, TypeCollection(input.dataType, NullType)) override def toString: String = s"$prettyName($input, $offset, $default)" } /** * The Lead function returns the value of `input` at the `offset`th row after the current row in * the window. Offsets start at 0, which is the current row. The offset must be constant * integer value. The default offset is 1. When the value of `input` is null at the `offset`th row, * null is returned. If there is no such offset row, the `default` expression is evaluated. */ // scalastyle:off line.size.limit line.contains.tab @ExpressionDescription( usage = """ _FUNC_(input[, offset[, default]]) - Returns the value of `input` at the `offset`th row after the current row in the window. The default value of `offset` is 1 and the default value of `default` is null. If the value of `input` at the `offset`th row is null, null is returned. If there is no such an offset row (e.g., when the offset is 1, the last row of the window does not have any subsequent row), `default` is returned. """, arguments = """ Arguments: * input - a string expression to evaluate `offset` rows after the current row. * offset - an int expression which is rows to jump ahead in the partition. * default - a string expression which is to use when the offset is larger than the window. The default value is null. 
""", examples = """ Examples: > SELECT a, b, _FUNC_(b) OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b); A1 1 1 A1 1 2 A1 2 NULL A2 3 NULL """, since = "2.0.0", group = "window_funcs") // scalastyle:on line.size.limit line.contains.tab case class Lead( input: Expression, offset: Expression, default: Expression, ignoreNulls: Boolean) extends FrameLessOffsetWindowFunction with TernaryLike[Expression] { def this(input: Expression, offset: Expression, default: Expression) = this(input, offset, default, false) def this(input: Expression, offset: Expression) = this(input, offset, Literal(null)) def this(input: Expression) = this(input, Literal(1)) def this() = this(Literal(null)) override def first: Expression = input override def second: Expression = offset override def third: Expression = default override protected def withNewChildrenInternal( newFirst: Expression, newSecond: Expression, newThird: Expression): Lead = copy(input = newFirst, offset = newSecond, default = newThird) } /** * The Lag function returns the value of `input` at the `offset`th row before the current row in * the window. Offsets start at 0, which is the current row. The offset must be constant * integer value. The default offset is 1. When the value of `input` is null at the `offset`th row, * null is returned. If there is no such offset row, the `default` expression is evaluated. */ // scalastyle:off line.size.limit line.contains.tab @ExpressionDescription( usage = """ _FUNC_(input[, offset[, default]]) - Returns the value of `input` at the `offset`th row before the current row in the window. The default value of `offset` is 1 and the default value of `default` is null. If the value of `input` at the `offset`th row is null, null is returned. If there is no such offset row (e.g., when the offset is 1, the first row of the window does not have any previous row), `default` is returned. 
""", arguments = """ Arguments: * input - a string expression to evaluate `offset` rows before the current row. * offset - an int expression which is rows to jump back in the partition. * default - a string expression which is to use when the offset row does not exist. """, examples = """ Examples: > SELECT a, b, _FUNC_(b) OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b); A1 1 NULL A1 1 1 A1 2 1 A2 3 NULL """, since = "2.0.0", group = "window_funcs") // scalastyle:on line.size.limit line.contains.tab case class Lag( input: Expression, inputOffset: Expression, default: Expression, ignoreNulls: Boolean) extends FrameLessOffsetWindowFunction with TernaryLike[Expression] { def this(input: Expression, inputOffset: Expression, default: Expression) = this(input, inputOffset, default, false) def this(input: Expression, inputOffset: Expression) = this(input, inputOffset, Literal(null)) def this(input: Expression) = this(input, Literal(1)) def this() = this(Literal(null)) override val offset: Expression = UnaryMinus(inputOffset) match { case e: Expression if e.foldable => Literal.create(e.eval(EmptyRow), e.dataType) case o => o } override def first: Expression = input override def second: Expression = inputOffset override def third: Expression = default override protected def withNewChildrenInternal( newFirst: Expression, newSecond: Expression, newThird: Expression): Lag = copy(input = newFirst, inputOffset = newSecond, default = newThird) } abstract class AggregateWindowFunction extends DeclarativeAggregate with WindowFunction { self: Product => override val frame: WindowFrame = SpecifiedWindowFrame(RowFrame, UnboundedPreceding, CurrentRow) override def dataType: DataType = IntegerType override def nullable: Boolean = true override lazy val mergeExpressions = throw QueryExecutionErrors.mergeUnsupportedByWindowFunctionError } abstract class RowNumberLike extends AggregateWindowFunction { protected val zero = Literal(0) 
protected val one = Literal(1) protected val rowNumber = AttributeReference("rowNumber", IntegerType, nullable = false)() override val aggBufferAttributes: Seq[AttributeReference] = rowNumber :: Nil override val initialValues: Seq[Expression] = zero :: Nil override val updateExpressions: Seq[Expression] = rowNumber + one :: Nil override def nullable: Boolean = false } /** * A [[SizeBasedWindowFunction]] needs the size of the current window for its calculation. */ trait SizeBasedWindowFunction extends AggregateWindowFunction { // It's made a val so that the attribute created on driver side is serialized to executor side. // Otherwise, if it's defined as a function, when it's called on executor side, it actually // returns the singleton value instantiated on executor side, which has different expression ID // from the one created on driver side. val n: AttributeReference = SizeBasedWindowFunction.n } object SizeBasedWindowFunction { val n = AttributeReference("window__partition__size", IntegerType, nullable = false)() } /** * The RowNumber function computes a unique, sequential number to each row, starting with one, * according to the ordering of rows within the window partition. * * This documentation has been based upon similar documentation for the Hive and Presto projects. */ // scalastyle:off line.size.limit line.contains.tab @ExpressionDescription( usage = """ _FUNC_() - Assigns a unique, sequential number to each row, starting with one, according to the ordering of rows within the window partition. 
""", examples = """ Examples: > SELECT a, b, _FUNC_() OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b); A1 1 1 A1 1 2 A1 2 3 A2 3 1 """, since = "2.0.0", group = "window_funcs") // scalastyle:on line.size.limit line.contains.tab case class RowNumber() extends RowNumberLike with LeafLike[Expression] { override val evaluateExpression = rowNumber override def prettyName: String = "row_number" } /** * The CumeDist function computes the position of a value relative to all values in the partition. * The result is the number of rows preceding or equal to the current row in the ordering of the * partition divided by the total number of rows in the window partition. Any tie values in the * ordering will evaluate to the same position. * * This documentation has been based upon similar documentation for the Hive and Presto projects. */ // scalastyle:off line.size.limit line.contains.tab @ExpressionDescription( usage = """ _FUNC_() - Computes the position of a value relative to all values in the partition. """, examples = """ Examples: > SELECT a, b, _FUNC_() OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b); A1 1 0.6666666666666666 A1 1 0.6666666666666666 A1 2 1.0 A2 3 1.0 """, since = "2.0.0", group = "window_funcs") // scalastyle:on line.size.limit line.contains.tab case class CumeDist() extends RowNumberLike with SizeBasedWindowFunction with LeafLike[Expression] { override def dataType: DataType = DoubleType // The frame for CUME_DIST is Range based instead of Row based, because CUME_DIST must // return the same value for equal values in the partition. 
override val frame = SpecifiedWindowFrame(RangeFrame, UnboundedPreceding, CurrentRow) override val evaluateExpression = rowNumber.cast(DoubleType) / n.cast(DoubleType) override def prettyName: String = "cume_dist" } // scalastyle:off line.size.limit line.contains.tab @ExpressionDescription( usage = """ _FUNC_(input[, offset]) - Returns the value of `input` at the row that is the `offset`th row from beginning of the window frame. Offset starts at 1. If ignoreNulls=true, we will skip nulls when finding the `offset`th row. Otherwise, every row counts for the `offset`. If there is no such an `offset`th row (e.g., when the offset is 10, size of the window frame is less than 10), null is returned. """, examples = """ Examples: > SELECT a, b, _FUNC_(b, 2) OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b); A1 1 1 A1 1 1 A1 2 1 A2 3 NULL """, arguments = """ Arguments: * input - the target column or expression that the function operates on. * offset - a positive int literal to indicate the offset in the window frame. It starts with 1. * ignoreNulls - an optional specification that indicates the NthValue should skip null values in the determination of which row to use. 
""", since = "3.1.0", group = "window_funcs") // scalastyle:on line.size.limit line.contains.tab case class NthValue(input: Expression, offset: Expression, ignoreNulls: Boolean) extends AggregateWindowFunction with OffsetWindowFunction with ImplicitCastInputTypes with BinaryLike[Expression] { def this(child: Expression, offset: Expression) = this(child, offset, false) override lazy val default = Literal.create(null, input.dataType) override def left: Expression = input override def right: Expression = offset override val frame: WindowFrame = UnspecifiedFrame override def dataType: DataType = input.dataType override def inputTypes: Seq[AbstractDataType] = Seq(AnyDataType, IntegerType) override def checkInputDataTypes(): TypeCheckResult = { val check = super.checkInputDataTypes() if (check.isFailure) { check } else if (!offset.foldable) { TypeCheckFailure(s"Offset expression '$offset' must be a literal.") } else if (offsetVal <= 0) { TypeCheckFailure( s"The 'offset' argument of nth_value must be greater than zero but it is $offsetVal.") } else { TypeCheckSuccess } } private lazy val offsetVal = offset.eval().asInstanceOf[Int].toLong private lazy val result = AttributeReference("result", input.dataType)() private lazy val count = AttributeReference("count", LongType)() override lazy val aggBufferAttributes: Seq[AttributeReference] = result :: count :: Nil override lazy val initialValues: Seq[Literal] = Seq( /* result = */ default, /* count = */ Literal(1L) ) override lazy val updateExpressions: Seq[Expression] = { if (ignoreNulls) { Seq( /* result = */ If(count === offsetVal && input.isNotNull, input, result), /* count = */ If(input.isNull, count, count + 1L) ) } else { Seq( /* result = */ If(count === offsetVal, input, result), /* count = */ count + 1L ) } } override lazy val evaluateExpression: AttributeReference = result override def prettyName: String = "nth_value" override def sql: String = s"$prettyName(${input.sql}, ${offset.sql})${if (ignoreNulls) " ignore 
nulls" else ""}" override protected def withNewChildrenInternal( newLeft: Expression, newRight: Expression): NthValue = copy(input = newLeft, offset = newRight) } /** * The NTile function divides the rows for each window partition into `n` buckets ranging from 1 to * at most `n`. Bucket values will differ by at most 1. If the number of rows in the partition does * not divide evenly into the number of buckets, then the remainder values are distributed one per * bucket, starting with the first bucket. * * The NTile function is particularly useful for the calculation of tertiles, quartiles, deciles and * other common summary statistics * * The function calculates two variables during initialization: The size of a regular bucket, and * the number of buckets that will have one extra row added to it (when the rows do not evenly fit * into the number of buckets); both variables are based on the size of the current partition. * During the calculation process the function keeps track of the current row number, the current * bucket number, and the row number at which the bucket will change (bucketThreshold). When the * current row number reaches bucket threshold, the bucket value is increased by one and the * threshold is increased by the bucket size (plus one extra if the current bucket is padded). * * This documentation has been based upon similar documentation for the Hive and Presto projects. */ // scalastyle:off line.size.limit line.contains.tab @ExpressionDescription( usage = """ _FUNC_(n) - Divides the rows for each window partition into `n` buckets ranging from 1 to at most `n`. """, arguments = """ Arguments: * buckets - an int expression which is number of buckets to divide the rows in. Default value is 1. 
""", examples = """ Examples: > SELECT a, b, _FUNC_(2) OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b); A1 1 1 A1 1 1 A1 2 2 A2 3 1 """, since = "2.0.0", group = "window_funcs") // scalastyle:on line.size.limit line.contains.tab case class NTile(buckets: Expression) extends RowNumberLike with SizeBasedWindowFunction with UnaryLike[Expression] { def this() = this(Literal(1)) override def child: Expression = buckets // Validate buckets. Note that this could be relaxed, the bucket value only needs to constant // for each partition. override def checkInputDataTypes(): TypeCheckResult = { if (!buckets.foldable) { return TypeCheckFailure(s"Buckets expression must be foldable, but got $buckets") } if (buckets.dataType != IntegerType) { return TypeCheckFailure(s"Buckets expression must be integer type, but got $buckets") } val i = buckets.eval().asInstanceOf[Int] if (i > 0) { TypeCheckSuccess } else { TypeCheckFailure(s"Buckets expression must be positive, but got: $i") } } private val bucket = AttributeReference("bucket", IntegerType, nullable = false)() private val bucketThreshold = AttributeReference("bucketThreshold", IntegerType, nullable = false)() private val bucketSize = AttributeReference("bucketSize", IntegerType, nullable = false)() private val bucketsWithPadding = AttributeReference("bucketsWithPadding", IntegerType, nullable = false)() private def bucketOverflow(e: Expression) = If(rowNumber >= bucketThreshold, e, zero) override val aggBufferAttributes = Seq( rowNumber, bucket, bucketThreshold, bucketSize, bucketsWithPadding ) override val initialValues = Seq( zero, zero, zero, (n.cast(DecimalType.IntDecimal) / buckets.cast(DecimalType.IntDecimal)).cast(IntegerType), (n % buckets).cast(IntegerType) ) override val updateExpressions = Seq( rowNumber + one, bucket + bucketOverflow(one), bucketThreshold + bucketOverflow(bucketSize + If(bucket < bucketsWithPadding, one, zero)), NoOp, NoOp ) override val 
evaluateExpression = bucket override protected def withNewChildInternal( newChild: Expression): NTile = copy(buckets = newChild) } /** * A RankLike function is a WindowFunction that changes its value based on a change in the value of * the order of the window in which is processed. For instance, when the value of `input` changes * in a window ordered by `input` the rank function also changes. The size of the change of the * rank function is (typically) not dependent on the size of the change in `input`. * * This documentation has been based upon similar documentation for the Hive and Presto projects. */ abstract class RankLike extends AggregateWindowFunction { /** Store the values of the window 'order' expressions. */ protected val orderAttrs = children.map { expr => AttributeReference(expr.sql, expr.dataType)() } /** Predicate that detects if the order attributes have changed. */ protected val orderEquals = children.zip(orderAttrs) .map(EqualNullSafe.tupled) .reduceOption(And) .getOrElse(Literal(true)) protected val orderInit = children.map(e => Literal.create(null, e.dataType)) protected val rank = AttributeReference("rank", IntegerType, nullable = false)() protected val rowNumber = AttributeReference("rowNumber", IntegerType, nullable = false)() protected val zero = Literal(0) protected val one = Literal(1) protected val increaseRowNumber = rowNumber + one /** * Different RankLike implementations use different source expressions to update their rank value. * Rank for instance uses the number of rows seen, whereas DenseRank uses the number of changes. */ protected def rankSource: Expression = rowNumber /** Increase the rank when the current rank == 0 or when the one of order attributes changes. 
*/ protected val increaseRank = If(orderEquals && rank =!= zero, rank, rankSource) override val aggBufferAttributes: Seq[AttributeReference] = rank +: rowNumber +: orderAttrs override val initialValues = zero +: one +: orderInit override val updateExpressions = increaseRank +: increaseRowNumber +: children override val evaluateExpression: Expression = rank override def nullable: Boolean = false override def sql: String = s"${prettyName.toUpperCase(Locale.ROOT)}()" def withOrder(order: Seq[Expression]): RankLike } /** * The Rank function computes the rank of a value in a group of values. The result is one plus the * number of rows preceding or equal to the current row in the ordering of the partition. The values * will produce gaps in the sequence. * * This documentation has been based upon similar documentation for the Hive and Presto projects. */ // scalastyle:off line.size.limit line.contains.tab @ExpressionDescription( usage = """ _FUNC_() - Computes the rank of a value in a group of values. The result is one plus the number of rows preceding or equal to the current row in the ordering of the partition. The values will produce gaps in the sequence. """, arguments = """ Arguments: * children - this is to base the rank on; a change in the value of one the children will trigger a change in rank. This is an internal parameter and will be assigned by the Analyser. 
""", examples = """ Examples: > SELECT a, b, _FUNC_(b) OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b); A1 1 1 A1 1 1 A1 2 3 A2 3 1 """, since = "2.0.0", group = "window_funcs") // scalastyle:on line.size.limit line.contains.tab case class Rank(children: Seq[Expression]) extends RankLike { def this() = this(Nil) override def withOrder(order: Seq[Expression]): Rank = Rank(order) override protected def withNewChildrenInternal(newChildren: IndexedSeq[Expression]): Rank = copy(children = newChildren) } /** * The DenseRank function computes the rank of a value in a group of values. The result is one plus * the previously assigned rank value. Unlike [[Rank]], [[DenseRank]] will not produce gaps in the * ranking sequence. * * This documentation has been based upon similar documentation for the Hive and Presto projects. */ // scalastyle:off line.size.limit line.contains.tab @ExpressionDescription( usage = """ _FUNC_() - Computes the rank of a value in a group of values. The result is one plus the previously assigned rank value. Unlike the function rank, dense_rank will not produce gaps in the ranking sequence. """, arguments = """ Arguments: * children - this is to base the rank on; a change in the value of one the children will trigger a change in rank. This is an internal parameter and will be assigned by the Analyser. 
""", examples = """ Examples: > SELECT a, b, _FUNC_(b) OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b); A1 1 1 A1 1 1 A1 2 2 A2 3 1 """, since = "2.0.0", group = "window_funcs") // scalastyle:on line.size.limit line.contains.tab case class DenseRank(children: Seq[Expression]) extends RankLike { def this() = this(Nil) override def withOrder(order: Seq[Expression]): DenseRank = DenseRank(order) override protected def rankSource = rank + one override val updateExpressions = increaseRank +: children override val aggBufferAttributes = rank +: orderAttrs override val initialValues = zero +: orderInit override def prettyName: String = "dense_rank" override protected def withNewChildrenInternal(newChildren: IndexedSeq[Expression]): DenseRank = copy(children = newChildren) } /** * The PercentRank function computes the percentage ranking of a value in a group of values. The * result the rank of the minus one divided by the total number of rows in the partition minus one: * (r - 1) / (n - 1). If a partition only contains one row, the function will return 0. * * The PercentRank function is similar to the CumeDist function, but it uses rank values instead of * row counts in the its numerator. * * This documentation has been based upon similar documentation for the Hive and Presto projects. */ // scalastyle:off line.size.limit line.contains.tab @ExpressionDescription( usage = """ _FUNC_() - Computes the percentage ranking of a value in a group of values. """, arguments = """ Arguments: * children - this is to base the rank on; a change in the value of one the children will trigger a change in rank. This is an internal parameter and will be assigned by the Analyser. 
""", examples = """ Examples: > SELECT a, b, _FUNC_(b) OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b); A1 1 0.0 A1 1 0.0 A1 2 1.0 A2 3 0.0 """, since = "2.0.0", group = "window_funcs") // scalastyle:on line.size.limit line.contains.tab case class PercentRank(children: Seq[Expression]) extends RankLike with SizeBasedWindowFunction { def this() = this(Nil) override def withOrder(order: Seq[Expression]): PercentRank = PercentRank(order) override def dataType: DataType = DoubleType override val evaluateExpression = If(n > one, (rank - one).cast(DoubleType) / (n - one).cast(DoubleType), 0.0d) override def prettyName: String = "percent_rank" override protected def withNewChildrenInternal(newChildren: IndexedSeq[Expression]): PercentRank = copy(children = newChildren) }
mahak/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/windowExpressions.scala
Scala
apache-2.0
41,110
/*
 * Extended.scala
 * Extended values, which could also be Star (unspecified).
 *
 * Created By: Avi Pfeffer (apfeffer@cra.com)
 * Creation Date: Dec 27, 2013
 *
 * Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
 * See http://www.cra.com or email figaro@cra.com for information.
 *
 * See http://www.github.com/p2t2/figaro for a copy of the software license.
 */

package com.cra.figaro.algorithm.lazyfactored

/**
 * An extended value: either an ordinary (regular) value of type `T`, or the
 * special placeholder [[Star]] standing for a not-yet-computed result.
 */
sealed abstract class Extended[T] {
  /** Whether this is a regular (fully known) value rather than Star. */
  def isRegular: Boolean

  /**
   * The underlying regular value.
   *
   * @throws IllegalArgumentException if this is [[Star]].
   */
  def value: T
}

/** An ordinary, fully known value. */
case class Regular[T](value: T) extends Extended[T] {
  override def isRegular: Boolean = true
}

/**
 * The unknown result of an unexpanded computation.
 *
 * When computing a lower bound to probabilities, Star is assumed to eventually
 * evaluate to something other than what is needed for the query. It is a case
 * class so that two Star values over the same type compare equal.
 */
case class Star[T]() extends Extended[T] {
  override def isRegular: Boolean = false

  // Star has no regular value; asking for one is a programming error.
  override def value: T = throw new IllegalArgumentException("Attempt to get value of Star")

  override def toString: String = "*"
}
jyuhuan/figaro
Figaro/src/main/scala/com/cra/figaro/algorithm/lazyfactored/Extended.scala
Scala
bsd-3-clause
1,519
/*
 * Copyright 2017 Sumo Logic
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package ws.epigraph.java.service.projections.req

/**
 * Deprecated. Use `Fragment` instead
 *
 * A chunk of generated code together with the set of imports it requires.
 *
 * @author <a href="mailto:konstantin.sobolev@gmail.com">Konstantin Sobolev</a>
 */
sealed case class CodeChunk(code: String, imports: Set[String]) {

  /**
   * Concatenates two chunks: code bodies are joined (inserting just enough
   * separator to leave a blank line between non-empty bodies) and imports
   * are unioned.
   */
  def +(other: CodeChunk): CodeChunk = {
    val mergedCode =
      if (code.isEmpty) other.code
      else if (other.code.isEmpty) code
      else {
        // Pad only as much as needed so the two bodies end up separated
        // by exactly one blank line.
        val separator =
          if (code.endsWith("\\n\\n")) ""
          else if (code.endsWith("\\n")) "\\n"
          else "\\n\\n"
        code + separator + other.code
      }
    CodeChunk(mergedCode, imports ++ other.imports)
  }
}

object CodeChunk {
  /** Neutral element for `+`: no code, no imports. */
  val empty = CodeChunk("", Set())

  /** Convenience constructor for a chunk with no imports. */
  def apply(code: String): CodeChunk = CodeChunk(code, Set())
}
SumoLogic/epigraph
java/codegen/src/main/scala/ws/epigraph/java/service/projections/req/CodeChunk.scala
Scala
apache-2.0
1,284
package mesosphere.marathon.tasks

import scala.util.Random

import org.apache.mesos.Protos.Offer
import mesosphere.mesos.protos
import mesosphere.mesos.protos.{ RangesResource, Resource }
import mesosphere.marathon.state.AppDefinition

import scala.collection.JavaConverters._

/**
 * Utility class for checking if the ports resource in an offer matches the requirements of an app.
 *
 * Evaluation is eager: the offer's "ports" resource, its ranges and its role are
 * extracted once at construction time.
 */
class PortsMatcher(app: AppDefinition, offer: Offer) {

  // The offer's "ports" resource, if present.
  val portsResource = offer.getResourcesList.asScala
    .find(_.getName == Resource.PORTS)

  // All port ranges offered, or Nil when the offer carries no ports resource.
  val offeredPortRanges = portsResource
    .map(_.getRanges.getRangeList.asScala)
    .getOrElse(Nil)

  // Role of the ports resource; falls back to the default role "*".
  val role = portsResource.map(_.getRole).getOrElse("*")

  /**
   * The port ranges to assign, or None when the offer cannot satisfy the app.
   *
   * Apps without ports always match (empty assignment). With `requirePorts`
   * only the exact requested ports are acceptable; otherwise a random range
   * from the offer is used as a fallback.
   */
  def portRanges: Option[RangesResource] = {
    if (app.ports.isEmpty) {
      Some(RangesResource(Resource.PORTS, Nil))
    }
    else if (app.requirePorts) {
      appPortRanges
    }
    else {
      appPortRanges.orElse(randomPortRanges)
    }
  }

  /** True when the offer satisfies the app's port requirements. */
  def matches: Boolean = {
    portRanges.isDefined
  }

  /** The concrete ports that would be assigned; empty when there is no match. */
  def ports: Seq[Long] = {
    portRanges.map(_.ranges.flatMap(_.asScala())).getOrElse(Nil)
  }

  /**
   * Tries to satisfy the app's exact requested ports from the offered ranges.
   *
   * NOTE(review): assumes `app.ports` is non-empty here — `head`/`last` would
   * throw otherwise. This holds because the only caller, `portRanges`, handles
   * the empty case first; keep that invariant if adding callers.
   */
  private def appPortRanges: Option[RangesResource] = {
    val sortedPorts = app.ports.sorted
    val firstPort = sortedPorts.head
    val lastPort = sortedPorts.last

    // Monotonically increasing ports: the requested ports form one contiguous
    // run (assumes no duplicates — TODO confirm), so a single offered range
    // covering [firstPort, lastPort] suffices.
    if (firstPort + sortedPorts.size - 1 == lastPort &&
      offeredPortRanges.exists(r => r.getBegin <= firstPort && r.getEnd >= lastPort)) {
      Some(RangesResource(Resource.PORTS, Seq(protos.Range(firstPort.longValue, lastPort.longValue)), role))
    }
    // Otherwise each requested port must be covered by some offered range;
    // each port becomes its own single-element range.
    else if (app.ports.forall(p => offeredPortRanges.exists(r => r.getBegin <= p && r.getEnd >= p))) {
      val portRanges = app.ports.map(p => protos.Range(p.longValue, p.longValue))
      Some(RangesResource(Resource.PORTS, portRanges, role))
    }
    else {
      None
    }
  }

  /**
   * Fallback when the exact requested ports are unavailable: picks a random
   * contiguous block of the right size from the first offered range large
   * enough to hold it. Non-deterministic by design (uses [[Random]]).
   */
  private def randomPortRanges: Option[RangesResource] = {
    for (range <- offeredPortRanges) {
      // TODO use multiple ranges if one is not enough
      if (range.getEnd - range.getBegin + 1 >= app.ports.length) {
        // maxOffset is the number of valid start positions within this range.
        val maxOffset = (range.getEnd - range.getBegin - app.ports.length + 2).toInt
        val firstPort = range.getBegin.toInt + Random.nextInt(maxOffset)
        val rangeProto = protos.Range(firstPort, firstPort + app.ports.length - 1)
        // Early exit with the first suitable range.
        return Some(
          RangesResource(Resource.PORTS, Seq(rangeProto), role)
        )
      }
    }
    None
  }
}
tnachen/marathon
src/main/scala/mesosphere/marathon/tasks/PortsMatcher.scala
Scala
apache-2.0
2,440
package io.eels.yarn

object YarnUtils {

  /**
   * Returns the filesystem path of the jar that `klass` was loaded from.
   *
   * The class is located via its classpath resource URL; for classes loaded
   * from a jar this has the form `jar:file:/path/myjar.jar!/com/package/MyClass.class`,
   * and the part before the `!` (minus the `jar:file:` scheme) is the jar path.
   *
   * Fails via `assert` if the class resource cannot be found, and via
   * `sys.error` if the class was not loaded from a jar (e.g. a classes directory).
   */
  def jarForClass(klass: Class[_]): String = {
    val resourcePath = "/" + klass.getName.replace('.', '/') + ".class"
    val uri = klass.getResource(resourcePath)
    assert(uri != null, s"Class $klass not found in resource path")
    val uriString = uri.toString
    if (uriString.startsWith("jar:file:")) {
      // Strip the scheme and keep everything before the in-jar separator.
      uriString.stripPrefix("jar:file:").split('!').head
    } else {
      sys.error(s"Class is not located in a jar [$uri]")
    }
  }
}
eel-lib/eel
eel-yarn/src/main/scala/io/eels/yarn/YarnUtils.scala
Scala
mit
546
package com.sksamuel.scapegoat.inspections

import com.sksamuel.scapegoat.{Levels, Inspection, Reporter}

import scala.reflect.runtime._
import scala.reflect.runtime.universe._

/**
 * Warns on `==` comparisons where both operands are floating point
 * (`scala.Float` / `scala.Double`), since exact equality on floating point
 * values is almost always a bug.
 *
 * @author Stephen Samuel
 */
class ComparingFloatingPointTypes extends Inspection {

  override def traverser(reporter: Reporter) = new universe.Traverser {
    override def traverse(tree: scala.reflect.runtime.universe.Tree): Unit = {
      tree match {
        // `a == b` is encoded as Apply(Select(a, $eq$eq), List(b))
        case Apply(Select(left, TermName("$eq$eq")), List(right)) =>
          val leftType = Option(left.tpe).map(_.typeSymbol).map(_.fullName).orNull
          // Bug fix: previously computed from `left.tpe` (copy-paste error),
          // so the right-hand operand's type was never actually inspected and
          // the warning could fire on e.g. `aDouble == anInt`.
          val rightType = Option(right.tpe).map(_.typeSymbol).map(_.fullName).orNull
          val leftFloating = leftType == "scala.Double" || leftType == "scala.Float"
          val rightFloating = rightType == "scala.Double" || rightType == "scala.Float"
          if (leftFloating && rightFloating)
            reporter.warn("Floating type comparison", tree, level = Levels.Error)
        case _ =>
          // Keep walking the rest of the tree.
          super.traverse(tree)
      }
    }
  }
}
RichardBradley/scapegoat
src/main/scala/com/sksamuel/scapegoat/inspections/ComparingFloatingPointTypes.scala
Scala
apache-2.0
1,029
package uk.gov.gds.ier.service

import uk.gov.gds.ier.validation.constraints.NationalityConstraints
import uk.gov.gds.ier.validation.{ErrorMessages, FormKeys}
import uk.gov.gds.ier.model.PartialNationality
import uk.gov.gds.ier.model.IsoNationality
import uk.gov.gds.ier.validation.constants.NationalityConstants._

/**
 * Resolves user-entered country names against the known ISO country table.
 */
class IsoCountryService extends NationalityConstraints with FormKeys with ErrorMessages {

  /** True when `country` is a recognised country name. */
  def isValidCountry(country: String): Boolean =
    countryNameToCodes.contains(country)

  /**
   * Converts the nationalities in a [[PartialNationality]] (both the
   * checkbox selections and the free-text "other" countries) to their ISO
   * codes. Unrecognised names are silently dropped.
   */
  def transformToIsoCode(nationality: PartialNationality): IsoNationality = {
    val isoCodes =
      for {
        country <- nationality.isoCheckedNationalities ++ nationality.otherCountries
        isoCountry <- countryNameToCodes.get(country)
      } yield isoCountry.isoCode
    IsoNationality(countryIsos = isoCodes, nationality.noNationalityReason)
  }

  /**
   * The distinct voting franchises granted by the supplied nationalities.
   * Unrecognised country names contribute nothing.
   */
  def getFranchises(nationality: PartialNationality): List[Franchise] = {
    val allFranchises =
      for {
        country <- nationality.isoCheckedNationalities ++ nationality.otherCountries
        isoCountry <- countryNameToCodes.get(country)
        franchise <- isoCountry.franchise
      } yield franchise
    allFranchises.distinct
  }
}
alphagov/ier-frontend
app/uk/gov/gds/ier/service/IsoCountryService.scala
Scala
mit
1,243
/**
 * Copyright 2011 Alberto Franco
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.hube

import java.net.InetAddress

/**
 * Entry point of the application. All start from this object.
 */
object Server {
  def main(args: Array[String]): Unit = {
    // -- Create the server, bind to localhost port 1234 -----------------------
    // Fixed: the server reference was a `var` although it is never reassigned;
    // also dropped the non-idiomatic trailing semicolons and procedure syntax.
    val server: HttpSimpleServer = new HttpSimpleServer(1234, InetAddress.getByName("127.0.0.1"))

    // -- Add some handlers and start the server -------------------------------
    server.addHandler("/", new ResponseHandler("/"))
    server.addHandler("/hube", new ResponseHandler("/hube"))
    server.start
  }
}
pmsg863/xmgps
vertx3test/src/main/java/com/xmgps/yfzx/hwb/scala/simpleweb/Server.scala
Scala
apache-2.0
1,170
package dsl.reactive.examples

import dsl.reactive._
import dsl.reactive.auxiliary._
import dsl.reactive.optimizations._
import virtualization.lms.common._

/**
 * Benchmark/demo for constant folding in the staged reactive DSL: three
 * constant source signals feed three identical derived signals, which are
 * summed. With the optimizing codegen the whole graph should fold away.
 *
 * NOTE(review): `f` is staged code — `ISignal { ... }` builds an LMS IR graph
 * rather than evaluating eagerly, so statement order here determines the
 * generated program. `expensive` is a DSL-provided operation; its semantics
 * are not visible in this file.
 */
trait ConstantFoldingSimple extends ReactiveDSL {
  def f(x : Rep[Unit]) = {
    printTime()
    // `43l` uses the lowercase long suffix (kept as-is; easily misread as 431).
    val v1 = ISignal { 43l }
    val v2 = ISignal { 43l }
    val v3 = ISignal { 43l }
    val s1 = ISignal { expensive(v1.get) + expensive(v2.get) + expensive(v3.get) }
    val s2 = ISignal { expensive(v1.get) + expensive(v2.get) + expensive(v3.get) }
    val s3 = ISignal { expensive(v1.get) + expensive(v2.get) + expensive(v3.get) }
    val r = ISignal { s1.get + s2.get + s3.get }
    println(r.get)
    printTime()
  }
}

/** Smaller variant of [[ConstantFoldingSimple]]: two sources, two derived signals. */
trait ConstantFoldingEvenSimpler extends ReactiveDSL {
  def f(x : Rep[Unit]) = {
    printTime()
    val v1 = ISignal { 42l }
    val v2 = ISignal { 43l }
    val s1 = ISignal { expensive(v1.get) + expensive(v2.get) }
    val s2 = ISignal { expensive(v1.get) + expensive(v2.get) }
    val r = ISignal { s1.get + s2.get}
    println(r.get)
    printTime()
  }
}

/**
 * Compiles and runs the example twice — once with the optimizing backend
 * (`ReactiveDSLExpOpt`/`ReactiveDSLGenOpt`) and once with the plain backend —
 * and prints the generated source of each so the effect of the optimization
 * can be compared by eye.
 *
 * NOTE(review): uses the `App` trait for a non-trivial entry point; the
 * delayed-initialization semantics are a known pitfall but left unchanged here.
 */
object Main2 extends App {
  // Optimized backend: constant folding enabled in the code generator.
  val optimized = new ConstantFoldingEvenSimpler with ReactiveDSLExpOpt with CompileScala { self =>
    override val codegen = new ReactiveDSLGenOpt { val IR: self.type = self }
  }

  // Baseline backend for comparison.
  val normal = new ConstantFoldingEvenSimpler with ReactiveDSLExp with CompileScala { self =>
    override val codegen = new ReactiveDSLGen { val IR: self.type = self }
  }

  optimized.compile(optimized.f).apply()
  optimized.codegen.emitSource(optimized.f, "F", new java.io.PrintWriter(System.out))

  normal.compile(normal.f).apply()
  normal.codegen.emitSource(normal.f, "F", new java.io.PrintWriter(System.out))
}
markus1189/OptiReactive
src/main/scala/dsl/reactive/examples/ConstantFolding.scala
Scala
gpl-3.0
1,720
package us.feliscat.text.normalizer.ja

import us.feliscat.text.StringOption

/**
 * A single character carried in its Japanese-normalized form.
 *
 * The input character is normalized once, eagerly, at construction time via
 * [[JapaneseNormalizer]], and the normalized character is returned by [[toChar]].
 *
 * Fixed: the original declared the constructor parameter as `private var` and
 * mutated it as a side effect in the class body; the normalized result is now
 * held in an immutable private field (same observable behavior, same interface).
 *
 * @param character the raw character to normalize
 * @author K.Sakamoto
 * Created on 15/10/28
 */
class JapaneseNormalizedCharacter(character: Char) {

  // NOTE(review): `.get.head` assumes JapaneseNormalizer.normalize never
  // returns an empty result for a single-character input — confirm against
  // the normalizer's contract; it would throw here otherwise (as it also
  // would in the original code).
  private val normalized: Char =
    JapaneseNormalizer.normalize(StringOption(character.toString)).get.head

  /** The normalized character. */
  def toChar: Char = normalized
}
ktr-skmt/FelisCatusZero-multilingual
libraries/src/main/scala/us/feliscat/text/normalizer/ja/JapaneseNormalizedCharacter.scala
Scala
apache-2.0
334
/*
 * Copyright (C) Lightbend Inc. <https://www.lightbend.com>
 */

package com.lightbend.lagom.scaladsl.api

import akka.NotUsed
import com.lightbend.lagom.internal.api.Execution
import com.lightbend.lagom.scaladsl.api.transport.RequestHeader
import com.lightbend.lagom.scaladsl.api.transport.ResponseHeader

import scala.concurrent.Future

/**
 * A service call with a request entity of type `Request` and a response entity
 * of type `Response`.
 *
 * Either entity may be `NotUsed` when the call carries no entity, or an Akka
 * Streams `Source` when the endpoint is streamed; any other entity is "strict"
 * (fully parsed into memory, e.g. from JSON).
 */
trait ServiceCall[Request, Response] {

  /**
   * Invoke the service call.
   *
   * @param request the request entity
   * @return a future of the response entity
   */
  def invoke(request: Request): Future[Response]

  /**
   * Invoke the service call with a unit request message.
   *
   * Only available when `Request` is `NotUsed`, as witnessed by the implicit
   * evidence parameter.
   */
  def invoke()(implicit evidence: NotUsed =:= Request): Future[Response] =
    invoke(NotUsed)

  /**
   * Transform the request header before the call is made.
   *
   * Client implementations override this to apply `handler` (custom headers
   * etc.) before the descriptor's own header transformers. Server
   * implementations are invoked through this to receive the request header;
   * they should normally use `ServerServiceCall` rather than overriding this
   * directly.
   *
   * @param handler transformation applied to the request header
   * @return a service call that uses the given handler
   */
  def handleRequestHeader(handler: RequestHeader => RequestHeader): ServiceCall[Request, Response] =
    // Default: ignore the handler. Client implementations override this to
    // actually thread the transformed header into the outgoing request.
    this

  /**
   * Transform the response using a function of the response header and the
   * response entity.
   *
   * For clients this runs after the descriptor's header transformers and gives
   * access to status code and headers. Servers use it (via `ServerServiceCall`)
   * to supply a response header where the transport supports one.
   *
   * @param handler combines the response header and response entity into `T`
   * @return a service call producing the handler's result
   */
  def handleResponseHeader[T](handler: (ResponseHeader, Response) => T): ServiceCall[Request, T] =
    // Default: no real header is available, so pass ResponseHeader.Ok.
    // Client implementations override this to supply the transport's header.
    // The trampoline EC runs the map synchronously on the completing thread.
    ServiceCall { request =>
      val futureResponse = invoke(request)
      futureResponse.map(response => handler(ResponseHeader.Ok, response))(Execution.trampoline)
    }

  /**
   * Convenience for `handleResponseHeader((_, _))`: pairs the response header
   * with the response entity.
   */
  def withResponseHeader: ServiceCall[Request, (ResponseHeader, Response)] =
    handleResponseHeader((_, _))
}

object ServiceCall {

  /** Create a service call from the function that handles it. */
  def apply[Request, Response](call: Request => Future[Response]): ServiceCall[Request, Response] =
    new ServiceCall[Request, Response] {
      override def invoke(request: Request): Future[Response] = call(request)
    }
}
lagom/lagom
service/scaladsl/api/src/main/scala/com/lightbend/lagom/scaladsl/api/ServiceCall.scala
Scala
apache-2.0
4,686
package diffson package jsonpatch import jsonpointer._ import cats.implicits._ import org.scalatest._ import org.scalatest.flatspec.AnyFlatSpec import scala.util.Try import scala.language.implicitConversions import org.scalatest.matchers.should.Matchers abstract class TestJsonPatch[Json](implicit Json: Jsony[Json]) extends AnyFlatSpec with Matchers with TestProtocol[Json] { // add "applying an 'add' operation" should "add the field to the object if it does not exist" in { val op = Add[Json](parsePointer("/lbl"), 17) op[Try](parseJson("{}")).get should be(parseJson("{ \\"lbl\\": 17 } ")) } it should "add a value with an empty string as the key" in { val op = Add[Json](parsePointer("/foo/"), 17) op[Try](parseJson("{ \\"foo\\": {} }")).get should be(parseJson("{ \\"foo\\": {\\"\\": 17 } }")) } it should "replace the value if the pointer is the root" in { val op = Add[Json](parsePointer(""), 17) op[Try](parseJson("[1, 2, 3, 4]")).get should be(17: Json) } it should "replace the field value if it does exist" in { val op = Add[Json](parsePointer("/lbl"), 17) op[Try](parseJson("{ \\"lbl\\": true }")).get should be(parseJson("{ \\"lbl\\": 17 } ")) } it should "add an element to the array at the given index" in { val op1 = Add[Json](parsePointer("/1"), 17) op1[Try](parseJson("[1, 2, 3]")).get should be(parseJson("[1, 17, 2, 3]")) val op2 = Add[Json](parsePointer("/0"), 17) op2[Try](parseJson("[1, 2, 3]")).get should be(parseJson("[17, 1, 2, 3]")) } it should "add an element at the end of the array if the last element is '-'" in { val op = Add[Json](parsePointer("/-"), 17) op[Try](parseJson("[1, 2, 3]")).get should be(parseJson("[1, 2, 3, 17]")) } it should "create a nested field if needed" in { val op = Add[Json](parsePointer("/lbl/lbl"), 17) op[Try](parseJson("{ \\"lbl\\": {} }")).get should be(parseJson("{ \\"lbl\\": { \\"lbl\\": 17 } }")) } it should "throw an error if some element is missing in the middle of the path" in { a[PatchException] should be thrownBy { val op = 
Add[Json](parsePointer("/lbl/lbl"), 17) op[Try](parseJson("{}")).get } } it should "throw an error if adding an element out of the array boundaries" in { a[PatchException] should be thrownBy { val op = Add[Json](parsePointer("/178"), 17) op[Try](parseJson("[1, 2]")).get } } // remove "removing a label of an object" should "result in the object being amputated from this label" in { val op = Remove[Json](parsePointer("/lbl")) op[Try](parseJson("{ \\"lbl\\": 17, \\"toto\\": true }")).get should be(parseJson("{ \\"toto\\": true }")) } "removing an element of an array" should "result in the array being amputated from this element" in { val op = Remove[Json](parsePointer("/2")) op[Try](parseJson("[1, 2, 3, 4, 5]")).get should be(parseJson("[1, 2, 4, 5]")) } "removing the '-' element of an array" should "result in an error being thrown" in { a[PatchException] should be thrownBy { val op = Remove[Json](parsePointer("/-")) op[Try](parseJson("[1, 2, 3, 4]")).get } } "removing an element out of the array boundaries" should "result in an error being thrown" in { a[PatchException] should be thrownBy { val op = Remove[Json](parsePointer("/20")) op[Try](parseJson("[1, 2, 3, 4]")).get } } "removing an unknown label from an object" should "result in an error being thrown" in { a[PatchException] should be thrownBy { val op = Remove[Json](parsePointer("/toto")) op[Try](parseJson("{}")).get } } "removing the root" should "result in an error being thrown" in { a[PatchException] should be thrownBy { val op = Remove[Json](parsePointer("/")) op[Try](parseJson("{}")).get } } // replace "replacing an element in an object" should "result in this element being replaced" in { val op = Replace[Json](parsePointer("/lbl/lbl"), 17) op[Try](parseJson("""{"lbl": {"lbl": true, "gruik": 1}, "toto": 3}""")).get should be(parseJson("""{"lbl": {"lbl": 17, "gruik": 1}, "toto": 3}""")) } "replacing an element in an array" should "result in this element being replaced" in { val op = 
Replace[Json](parsePointer("/3"), 17) op[Try](parseJson("[true, false, true, true, true]")).get should be(parseJson("[true, false, true, 17,true]")) } "replacing the root" should "result in the value being completely replaced" in { val op = Replace[Json](parsePointer(""), 17) op[Try](parseJson("[1, 2, 3]")).get should be(17: Json) } "replacing a non-existing element in an object" should "result in an error being thrown" in { a[PatchException] should be thrownBy { val op = Replace[Json](parsePointer("/1/lbl"), 17) op[Try](parseJson("[1, {}, true]")).get } } "replacing the '-' element of an array" should "result in an error being thrown" in { a[PatchException] should be thrownBy { val op = Replace[Json](parsePointer("/-"), 17) op[Try](parseJson("[1, 2, 3, 4]")).get } } "replacing an element out of the array boundaries" should "result in an error being thrown" in { a[PatchException] should be thrownBy { val op = Replace[Json](parsePointer("/20"), 17) op[Try](parseJson("[1, 2, 3, 4]")).get } a[PatchException] should be thrownBy { val op = Replace[Json](parsePointer("/array/3/sub1"), 17) op[Try](parseJson("{\\"array\\":[\\"bar1\\",\\"bar2\\",{\\"sub1\\":\\"bar3\\"}]}")).get } } // move "moving a value from an object to an array" should "result in the value being added to the array and removed from the object" in { val op = Move(parsePointer("/0/lbl"), parsePointer("/1/1")) op[Try](parseJson("[{ \\"lbl\\": 17, \\"toto\\": true }, [1, 2], \\"plop\\"]")).get should be( parseJson("[{ \\"toto\\": true }, [1, 17, 2], \\"plop\\"]")) } "moving a value in a sub element" should "result in an error being thrown" in { a[PatchException] should be thrownBy { val op = Move(parsePointer("/0"), parsePointer("/0/toto")) op[Try](parseJson("0")).get } } "moving the root" should "result in an error being thrown" in { a[PatchException] should be thrownBy { val op = Move(parsePointer(""), parsePointer("/toto")) op[Try](parseJson("0")).get } } // copy "copying an element in an object" should 
"result in this element being copied in the expected path" in { val op = Copy[Json](parsePointer("/root/a"), parsePointer("/root/c")) op.apply[Try](parseJson("""{"root": {"a": 1, "b": "B"}}""")).get shouldBe parseJson("""{"root": {"a": 1, "b": "B", "c": 1}}""") } // test "testing an existing element of an object" should "succeed and not modify the original object" in { val op = Test[Json](parsePointer("/a/b/3"), 6) val initial = """{"a": {"b": [0,2,4,6] } }""" op.apply[Try](parseJson(initial)).get shouldBe parseJson(initial) } "testing an existing element with a non-expected value" should "result in an error being thrown" in { a[PatchException] should be thrownBy { val op = Test[Json](parsePointer("/a"), 2) op.apply[Try](parseJson("""{ "a": 1 }""")).get } } "testing a non-existing element in an object" should "result in an error being thrown" in { a[PatchException] should be thrownBy { val op = Test[Json](parsePointer("/b"), 1) op.apply[Try](parseJson("""{ "a": 1 }""")).get } } }
gnieh/diffson
testkit/shared/src/main/scala/diffson/TestJsonPatch.scala
Scala
apache-2.0
7,510
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.utils.json import com.fasterxml.jackson.databind.{ObjectMapper, JsonMappingException} import org.junit.Test import org.junit.Assert._ import kafka.utils.Json class JsonValueTest { private val json = """ |{ | "boolean": false, | "int": 1234, | "long": 3000000000, | "double": 16.244355, | "string": "string", | "number_as_string": "123", | "array": [4.0, 11.1, 44.5], | "object": { | "a": true, | "b": false | }, | "null": null |} """.stripMargin private def parse(s: String): JsonValue = Json.parseFull(s).getOrElse(sys.error("Failed to parse json: " + s)) private def assertTo[T: DecodeJson](expected: T, jsonValue: JsonObject => JsonValue): Unit = { val parsed = jsonValue(parse(json).asJsonObject) assertEquals(Right(expected), parsed.toEither[T]) assertEquals(expected, parsed.to[T]) } private def assertToFails[T: DecodeJson](jsonValue: JsonObject => JsonValue): Unit = { val parsed = jsonValue(parse(json).asJsonObject) assertTrue(parsed.toEither[T].isLeft) assertThrow[JsonMappingException](parsed.to[T]) } def assertThrow[E <: Throwable : Manifest](body: => Unit): Unit = { import scala.util.control.Exception._ val klass = manifest[E].runtimeClass catchingPromiscuously(klass).opt(body).foreach 
{ _ => fail("Expected `" + klass + "` to be thrown, but no exception was thrown") } } @Test def testAsJsonObject(): Unit = { val parsed = parse(json).asJsonObject val obj = parsed("object") assertEquals(obj, obj.asJsonObject) assertThrow[JsonMappingException](parsed("array").asJsonObject) } @Test def testAsJsonObjectOption(): Unit = { val parsed = parse(json).asJsonObject assertTrue(parsed("object").asJsonObjectOption.isDefined) assertEquals(None, parsed("array").asJsonObjectOption) } @Test def testAsJsonArray(): Unit = { val parsed = parse(json).asJsonObject val array = parsed("array") assertEquals(array, array.asJsonArray) assertThrow[JsonMappingException](parsed("object").asJsonArray) } @Test def testAsJsonArrayOption(): Unit = { val parsed = parse(json).asJsonObject assertTrue(parsed("array").asJsonArrayOption.isDefined) assertEquals(None, parsed("object").asJsonArrayOption) } @Test def testJsonObjectGet(): Unit = { val parsed = parse(json).asJsonObject assertEquals(Some(parse("""{"a":true,"b":false}""")), parsed.get("object")) assertEquals(None, parsed.get("aaaaa")) } @Test def testJsonObjectApply(): Unit = { val parsed = parse(json).asJsonObject assertEquals(parse("""{"a":true,"b":false}"""), parsed("object")) assertThrow[JsonMappingException](parsed("aaaaaaaa")) } @Test def testJsonObjectIterator(): Unit = { assertEquals( Vector("a" -> parse("true"), "b" -> parse("false")), parse(json).asJsonObject("object").asJsonObject.iterator.toVector ) } @Test def testJsonArrayIterator(): Unit = { assertEquals(Vector("4.0", "11.1", "44.5").map(parse), parse(json).asJsonObject("array").asJsonArray.iterator.toVector) } @Test def testJsonValueEquals(): Unit = { assertEquals(parse(json), parse(json)) assertEquals(parse("""{"blue": true, "red": false}"""), parse("""{"red": false, "blue": true}""")) assertNotEquals(parse("""{"blue": true, "red": true}"""), parse("""{"red": false, "blue": true}""")) assertEquals(parse("""[1, 2, 3]"""), parse("""[1, 2, 3]""")) 
assertNotEquals(parse("""[1, 2, 3]"""), parse("""[2, 1, 3]""")) assertEquals(parse("1344"), parse("1344")) assertNotEquals(parse("1344"), parse("144")) } @Test def testJsonValueHashCode(): Unit = { assertEquals(new ObjectMapper().readTree(json).hashCode, parse(json).hashCode) } @Test def testJsonValueToString(): Unit = { val js = """{"boolean":false,"int":1234,"array":[4.0,11.1,44.5],"object":{"a":true,"b":false}}""" assertEquals(js, parse(js).toString) } @Test def testDecodeBoolean(): Unit = { assertTo[Boolean](false, _("boolean")) assertToFails[Boolean](_("int")) } @Test def testDecodeString(): Unit = { assertTo[String]("string", _("string")) assertTo[String]("123", _("number_as_string")) assertToFails[String](_("int")) assertToFails[String](_("array")) } @Test def testDecodeInt(): Unit = { assertTo[Int](1234, _("int")) assertToFails[Int](_("long")) } @Test def testDecodeLong(): Unit = { assertTo[Long](3000000000L, _("long")) assertTo[Long](1234, _("int")) assertToFails[Long](_("string")) } @Test def testDecodeDouble(): Unit = { assertTo[Double](16.244355, _("double")) assertTo[Double](1234.0, _("int")) assertTo[Double](3000000000L, _("long")) assertToFails[Double](_("string")) } @Test def testDecodeSeq(): Unit = { assertTo[Seq[Double]](Seq(4.0, 11.1, 44.5), _("array")) assertToFails[Seq[Double]](_("string")) assertToFails[Seq[Double]](_("object")) assertToFails[Seq[String]](_("array")) } @Test def testDecodeMap(): Unit = { assertTo[Map[String, Boolean]](Map("a" -> true, "b" -> false), _("object")) assertToFails[Map[String, Int]](_("object")) assertToFails[Map[String, String]](_("object")) assertToFails[Map[String, Double]](_("array")) } @Test def testDecodeOption(): Unit = { assertTo[Option[Int]](None, _("null")) assertTo[Option[Int]](Some(1234), _("int")) assertToFails[Option[String]](_("int")) } }
mihbor/kafka
core/src/test/scala/unit/kafka/utils/json/JsonValueTest.scala
Scala
apache-2.0
6,373
/* * GNU GENERAL PUBLIC LICENSE * Version 2, June 1991 * * Copyright (C) 1989, 1991 Free Software Foundation, Inc., <http://fsf.org/> * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * Everyone is permitted to copy and distribute verbatim copies * of this license document, but changing it is not allowed. * * Preamble * * The licenses for most software are designed to take away your * freedom to share and change it. By contrast, the GNU General Public * License is intended to guarantee your freedom to share and change free * software--to make sure the software is free for all its users. This * General Public License applies to most of the Free Software * Foundation's software and to any other program whose authors commit to * using it. (Some other Free Software Foundation software is covered by * the GNU Lesser General Public License instead.) You can apply it to * your programs, too. * * When we speak of free software, we are referring to freedom, not * price. Our General Public Licenses are designed to make sure that you * have the freedom to distribute copies of free software (and charge for * this service if you wish), that you receive source code or can get it * if you want it, that you can change the software or use pieces of it * in new free programs; and that you know you can do these things. * * To protect your rights, we need to make restrictions that forbid * anyone to deny you these rights or to ask you to surrender the rights. * These restrictions translate to certain responsibilities for you if you * distribute copies of the software, or if you modify it. * * For example, if you distribute copies of such a program, whether * gratis or for a fee, you must give the recipients all the rights that * you have. You must make sure that they, too, receive or can get the * source code. And you must show them these terms so they know their * rights. 
* * We protect your rights with two steps: (1) copyright the software, and * (2) offer you this license which gives you legal permission to copy, * distribute and/or modify the software. * * Also, for each author's protection and ours, we want to make certain * that everyone understands that there is no warranty for this free * software. If the software is modified by someone else and passed on, we * want its recipients to know that what they have is not the original, so * that any problems introduced by others will not reflect on the original * authors' reputations. * * Finally, any free program is threatened constantly by software * patents. We wish to avoid the danger that redistributors of a free * program will individually obtain patent licenses, in effect making the * program proprietary. To prevent this, we have made it clear that any * patent must be licensed for everyone's free use or not licensed at all. * * The precise terms and conditions for copying, distribution and * modification follow. * * GNU GENERAL PUBLIC LICENSE * TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION * * 0. This License applies to any program or other work which contains * a notice placed by the copyright holder saying it may be distributed * under the terms of this General Public License. The "Program", below, * refers to any such program or work, and a "work based on the Program" * means either the Program or any derivative work under copyright law: * that is to say, a work containing the Program or a portion of it, * either verbatim or with modifications and/or translated into another * language. (Hereinafter, translation is included without limitation in * the term "modification".) Each licensee is addressed as "you". * * Activities other than copying, distribution and modification are not * covered by this License; they are outside its scope. 
The act of * running the Program is not restricted, and the output from the Program * is covered only if its contents constitute a work based on the * Program (independent of having been made by running the Program). * Whether that is true depends on what the Program does. * * 1. You may copy and distribute verbatim copies of the Program's * source code as you receive it, in any medium, provided that you * conspicuously and appropriately publish on each copy an appropriate * copyright notice and disclaimer of warranty; keep intact all the * notices that refer to this License and to the absence of any warranty; * and give any other recipients of the Program a copy of this License * along with the Program. * * You may charge a fee for the physical act of transferring a copy, and * you may at your option offer warranty protection in exchange for a fee. * * 2. You may modify your copy or copies of the Program or any portion * of it, thus forming a work based on the Program, and copy and * distribute such modifications or work under the terms of Section 1 * above, provided that you also meet all of these conditions: * * a) You must cause the modified files to carry prominent notices * stating that you changed the files and the date of any change. * * b) You must cause any work that you distribute or publish, that in * whole or in part contains or is derived from the Program or any * part thereof, to be licensed as a whole at no charge to all third * parties under the terms of this License. * * c) If the modified program normally reads commands interactively * when run, you must cause it, when started running for such * interactive use in the most ordinary way, to print or display an * announcement including an appropriate copyright notice and a * notice that there is no warranty (or else, saying that you provide * a warranty) and that users may redistribute the program under * these conditions, and telling the user how to view a copy of this * License. 
(Exception: if the Program itself is interactive but * does not normally print such an announcement, your work based on * the Program is not required to print an announcement.) * * These requirements apply to the modified work as a whole. If * identifiable sections of that work are not derived from the Program, * and can be reasonably considered independent and separate works in * themselves, then this License, and its terms, do not apply to those * sections when you distribute them as separate works. But when you * distribute the same sections as part of a whole which is a work based * on the Program, the distribution of the whole must be on the terms of * this License, whose permissions for other licensees extend to the * entire whole, and thus to each and every part regardless of who wrote it. * * Thus, it is not the intent of this section to claim rights or contest * your rights to work written entirely by you; rather, the intent is to * exercise the right to control the distribution of derivative or * collective works based on the Program. * * In addition, mere aggregation of another work not based on the Program * with the Program (or with a work based on the Program) on a volume of * a storage or distribution medium does not bring the other work under * the scope of this License. * * 3. 
You may copy and distribute the Program (or a work based on it, * under Section 2) in object code or executable form under the terms of * Sections 1 and 2 above provided that you also do one of the following: * * a) Accompany it with the complete corresponding machine-readable * source code, which must be distributed under the terms of Sections * 1 and 2 above on a medium customarily used for software interchange; or, * * b) Accompany it with a written offer, valid for at least three * years, to give any third party, for a charge no more than your * cost of physically performing source distribution, a complete * machine-readable copy of the corresponding source code, to be * distributed under the terms of Sections 1 and 2 above on a medium * customarily used for software interchange; or, * * c) Accompany it with the information you received as to the offer * to distribute corresponding source code. (This alternative is * allowed only for noncommercial distribution and only if you * received the program in object code or executable form with such * an offer, in accord with Subsection b above.) * * The source code for a work means the preferred form of the work for * making modifications to it. For an executable work, complete source * code means all the source code for all modules it contains, plus any * associated interface definition files, plus the scripts used to * control compilation and installation of the executable. However, as a * special exception, the source code distributed need not include * anything that is normally distributed (in either source or binary * form) with the major components (compiler, kernel, and so on) of the * operating system on which the executable runs, unless that component * itself accompanies the executable. 
* * If distribution of executable or object code is made by offering * access to copy from a designated place, then offering equivalent * access to copy the source code from the same place counts as * distribution of the source code, even though third parties are not * compelled to copy the source along with the object code. * * 4. You may not copy, modify, sublicense, or distribute the Program * except as expressly provided under this License. Any attempt * otherwise to copy, modify, sublicense or distribute the Program is * void, and will automatically terminate your rights under this License. * However, parties who have received copies, or rights, from you under * this License will not have their licenses terminated so long as such * parties remain in full compliance. * * 5. You are not required to accept this License, since you have not * signed it. However, nothing else grants you permission to modify or * distribute the Program or its derivative works. These actions are * prohibited by law if you do not accept this License. Therefore, by * modifying or distributing the Program (or any work based on the * Program), you indicate your acceptance of this License to do so, and * all its terms and conditions for copying, distributing or modifying * the Program or works based on it. * * 6. Each time you redistribute the Program (or any work based on the * Program), the recipient automatically receives a license from the * original licensor to copy, distribute or modify the Program subject to * these terms and conditions. You may not impose any further * restrictions on the recipients' exercise of the rights granted herein. * You are not responsible for enforcing compliance by third parties to * this License. * * 7. 
If, as a consequence of a court judgment or allegation of patent * infringement or for any other reason (not limited to patent issues), * conditions are imposed on you (whether by court order, agreement or * otherwise) that contradict the conditions of this License, they do not * excuse you from the conditions of this License. If you cannot * distribute so as to satisfy simultaneously your obligations under this * License and any other pertinent obligations, then as a consequence you * may not distribute the Program at all. For example, if a patent * license would not permit royalty-free redistribution of the Program by * all those who receive copies directly or indirectly through you, then * the only way you could satisfy both it and this License would be to * refrain entirely from distribution of the Program. * * If any portion of this section is held invalid or unenforceable under * any particular circumstance, the balance of the section is intended to * apply and the section as a whole is intended to apply in other * circumstances. * * It is not the purpose of this section to induce you to infringe any * patents or other property right claims or to contest validity of any * such claims; this section has the sole purpose of protecting the * integrity of the free software distribution system, which is * implemented by public license practices. Many people have made * generous contributions to the wide range of software distributed * through that system in reliance on consistent application of that * system; it is up to the author/donor to decide if he or she is willing * to distribute software through any other system and a licensee cannot * impose that choice. * * This section is intended to make thoroughly clear what is believed to * be a consequence of the rest of this License. * * 8. 
If the distribution and/or use of the Program is restricted in * certain countries either by patents or by copyrighted interfaces, the * original copyright holder who places the Program under this License * may add an explicit geographical distribution limitation excluding * those countries, so that distribution is permitted only in or among * countries not thus excluded. In such case, this License incorporates * the limitation as if written in the body of this License. * * 9. The Free Software Foundation may publish revised and/or new versions * of the General Public License from time to time. Such new versions will * be similar in spirit to the present version, but may differ in detail to * address new problems or concerns. * * Each version is given a distinguishing version number. If the Program * specifies a version number of this License which applies to it and "any * later version", you have the option of following the terms and conditions * either of that version or of any later version published by the Free * Software Foundation. If the Program does not specify a version number of * this License, you may choose any version ever published by the Free Software * Foundation. * * 10. If you wish to incorporate parts of the Program into other free * programs whose distribution conditions are different, write to the author * to ask for permission. For software which is copyrighted by the Free * Software Foundation, write to the Free Software Foundation; we sometimes * make exceptions for this. Our decision will be guided by the two goals * of preserving the free status of all derivatives of our free software and * of promoting the sharing and reuse of software generally. * * NO WARRANTY * * 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY * FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN * OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES * PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED * OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS * TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE * PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, * REPAIR OR CORRECTION. * * 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING * WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR * REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, * INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING * OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED * TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY * YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER * PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * END OF TERMS AND CONDITIONS * * How to Apply These Terms to Your New Programs * * If you develop a new program, and you want it to be of the greatest * possible use to the public, the best way to achieve this is to make it * free software which everyone can redistribute and change under these terms. * * To do so, attach the following notices to the program. It is safest * to attach them to the start of each source file to most effectively * convey the exclusion of warranty; and each file should have at least * the "copyright" line and a pointer to where the full notice is found. 
* * {description} * Copyright (C) {year} {fullname} * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * Also add information on how to contact you by electronic and paper mail. * * If the program is interactive, make it output a short notice like this * when it starts in an interactive mode: * * Gnomovision version 69, Copyright (C) year name of author * Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. * This is free software, and you are welcome to redistribute it * under certain conditions; type `show c' for details. * * The hypothetical commands `show w' and `show c' should show the appropriate * parts of the General Public License. Of course, the commands you use may * be called something other than `show w' and `show c'; they could even be * mouse-clicks or menu items--whatever suits your program. * * You should also get your employer (if you work as a programmer) or your * school, if any, to sign a "copyright disclaimer" for the program, if * necessary. Here is a sample; alter the names: * * Yoyodyne, Inc., hereby disclaims all copyright interest in the program * `Gnomovision' (which makes passes at compilers) written by James Hacker. 
* * {signature of Ty Coon}, 1 April 1989 * Ty Coon, President of Vice * * This General Public License does not permit incorporating your program into * proprietary programs. If your program is a subroutine library, you may * consider it more useful to permit linking proprietary applications with the * library. If this is what you want to do, use the GNU Lesser General * Public License instead of this License. */ package logic.generator.schedulerater.rater import models.persistence.lecture.Lecture import models.persistence.scheduletree.TimeSlot /** * @author fabian * on 05.05.14. */ trait Rater { def rate(timeSlots:List[TimeSlot]):(Int, Set[Lecture]) }
P1tt187/fhs-schedule-generator
app/logic/generator/schedulerater/rater/Rater.scala
Scala
gpl-2.0
19,255
package com.arcusys.valamis.slide.service.export

import java.io.{ByteArrayInputStream, File, InputStream}
import java.util.regex.Pattern

import com.arcusys.valamis.content.service.{PlainTextService, QuestionService}
import com.arcusys.valamis.file.service.FileService
import com.arcusys.valamis.content.model._
import com.arcusys.valamis.slide.model.{SlideEntityType, _}
import com.arcusys.valamis.util.FileSystemUtil
import com.arcusys.valamis.util.serialization.JsonHelper._

import scala.util.matching.Regex

/** One exported question: its type discriminator, JSON body and optional answers JSON. */
case class QuestionResponse(tpe: Int, json: String, answersJson: Option[String])

/** One exported plain-text item serialized as JSON. */
case class PlainTextResponse(json: String)

/** Top-level envelope of a slide-set export package. */
case class ExportFormat(version: Option[String],
                        questions: List[QuestionResponse],
                        plaintexts: List[PlainTextResponse],
                        slideSet: SlideSetModel)

/** Serialization/deserialization of questions and plain texts for export packages.
  *
  * The `...Last` variants read the legacy `com.arcusys.valamis.questionbank.model`
  * representation and convert it to the current content model.
  */
object QuestionExternalFormat {

  /** Serializes a question together with its answers into a [[QuestionResponse]]. */
  def exportQuestion(question: Question, answers: Seq[Answer]): QuestionResponse =
    QuestionResponse(question.questionType.id, question.toJson, Option(answers.toJson))

  /** Serializes a plain-text item into a [[PlainTextResponse]]. */
  def exportPlainText(pt: PlainText): PlainTextResponse =
    PlainTextResponse(pt.toJson)

  /** Deserializes a plain text from the current export format. */
  def importPlainText(ptResponse: PlainTextResponse): PlainText =
    fromJson[PlainText](ptResponse.json)

  /** Deserializes a plain text from the legacy question-bank format. */
  def importPlainTextLast(questionResponse: QuestionResponse): PlainText = {
    val imported =
      fromJson[com.arcusys.valamis.questionbank.model.PlainText](questionResponse.json)
    PlainText(
      Some(imported.id),
      imported.categoryID.map(_.toLong),
      imported.title,
      imported.text,
      imported.courseID.getOrElse(0).toLong)
  }

  /** Deserializes a question (and its answers) from the current export format.
    * The concrete question/answer classes are selected by the `tpe` discriminator;
    * only the type codes listed below are handled.
    */
  def importQuestion(questionResponse: QuestionResponse): (Question, Seq[Answer]) =
    questionResponse.tpe match {
      case 0 =>
        (fromJson[ChoiceQuestion](questionResponse.json),
          fromJson[Seq[AnswerText]](questionResponse.answersJson.get))
      case 1 =>
        (fromJson[TextQuestion](questionResponse.json),
          fromJson[Seq[AnswerText]](questionResponse.answersJson.get))
      case 2 =>
        (fromJson[NumericQuestion](questionResponse.json),
          fromJson[Seq[AnswerRange]](questionResponse.answersJson.get))
      case 3 =>
        (fromJson[PositioningQuestion](questionResponse.json),
          fromJson[Seq[AnswerText]](questionResponse.answersJson.get))
      case 4 =>
        (fromJson[MatchingQuestion](questionResponse.json),
          fromJson[Seq[AnswerKeyValue]](questionResponse.answersJson.get))
      case 5 =>
        // Essay questions carry no answers.
        (fromJson[EssayQuestion](questionResponse.json), Seq())
      case 7 =>
        (fromJson[CategorizationQuestion](questionResponse.json),
          fromJson[Seq[AnswerKeyValue]](questionResponse.answersJson.get))
    }

  /** Deserializes a question from the legacy question-bank format, converting
    * the old model (Int ids, answers embedded in the question) into the current
    * content model. The same `tpe` codes as in [[importQuestion]] are handled.
    */
  def importQuestionLast(questionResponse: QuestionResponse): (Question, Seq[Answer]) =
    questionResponse.tpe match {
      case 0 =>
        val old = fromJson[
          com.arcusys.valamis.questionbank.model.ChoiceQuestion](questionResponse.json)
        val question = ChoiceQuestion(
          Some(old.id.toLong),
          old.categoryID.map(_.toLong),
          old.title,
          old.text,
          old.explanationText,
          old.rightAnswerText,
          old.wrongAnswerText,
          old.forceCorrectCount,
          old.courseID.getOrElse(0).toLong)
        val answers = old.answers.map(a =>
          AnswerText(
            Some(a.id.toLong),
            a.questionId.map(_.toLong),
            old.courseID.getOrElse(0).toLong,
            a.text,
            a.isCorrect,
            0,
            a.score))
        (question, answers)

      case 1 =>
        val old = fromJson[
          com.arcusys.valamis.questionbank.model.TextQuestion](questionResponse.json)
        val question = TextQuestion(
          Some(old.id.toLong),
          old.categoryID.map(_.toLong),
          old.title,
          old.text,
          old.explanationText,
          old.rightAnswerText,
          old.wrongAnswerText,
          old.isCaseSensitive,
          old.courseID.getOrElse(0).toLong)
        // Legacy answers carry no correctness flag: an answer counts as correct
        // when its text contains the question's rightAnswerText.
        val answers = old.answers.map(a =>
          AnswerText(
            Some(a.id.toLong),
            a.questionId.map(_.toLong),
            old.courseID.getOrElse(0).toLong,
            a.text,
            a.text.contains(old.rightAnswerText),
            0,
            a.score))
        (question, answers)

      case 2 =>
        val old = fromJson[
          com.arcusys.valamis.questionbank.model.NumericQuestion](questionResponse.json)
        val question = NumericQuestion(
          Some(old.id.toLong),
          old.categoryID.map(_.toLong),
          old.title,
          old.text,
          old.explanationText,
          old.rightAnswerText,
          old.wrongAnswerText,
          old.courseID.getOrElse(0).toLong)
        val answers = old.answers.map(a =>
          AnswerRange(
            Some(a.id.toLong),
            a.questionId.map(_.toLong),
            old.courseID.getOrElse(0).toLong,
            a.notLessThan.toDouble,
            a.notGreaterThan.toDouble,
            a.score))
        (question, answers)

      case 3 =>
        val old = fromJson[
          com.arcusys.valamis.questionbank.model.PositioningQuestion](questionResponse.json)
        val question = PositioningQuestion(
          Some(old.id.toLong),
          old.categoryID.map(_.toLong),
          old.title,
          old.text,
          old.explanationText,
          old.rightAnswerText,
          old.wrongAnswerText,
          old.forceCorrectCount,
          old.courseID.getOrElse(0).toLong)
        val answers = old.answers.map(a =>
          AnswerText(
            Some(a.id.toLong),
            a.questionId.map(_.toLong),
            old.courseID.getOrElse(0).toLong,
            a.text,
            a.text.contains(old.rightAnswerText),
            0,
            a.score))
        (question, answers)

      case 4 =>
        val old = fromJson[
          com.arcusys.valamis.questionbank.model.MatchingQuestion](questionResponse.json)
        val question = MatchingQuestion(
          Some(old.id.toLong),
          old.categoryID.map(_.toLong),
          old.title,
          old.text,
          old.explanationText,
          old.rightAnswerText,
          old.wrongAnswerText,
          old.courseID.getOrElse(0).toLong)
        val answers = old.answers.map(a =>
          AnswerKeyValue(
            Some(a.id.toLong),
            a.questionId.map(_.toLong),
            old.courseID.getOrElse(0).toLong,
            a.text,
            a.keyText,
            a.score))
        (question, answers)

      case 5 =>
        val old = fromJson[
          com.arcusys.valamis.questionbank.model.EssayQuestion](questionResponse.json)
        val question = EssayQuestion(
          Some(old.id.toLong),
          old.categoryID.map(_.toLong),
          old.title,
          old.text,
          old.explanationText,
          old.courseID.getOrElse(0).toLong)
        (question, Seq())

      case 7 =>
        val old = fromJson[
          com.arcusys.valamis.questionbank.model.CategorizationQuestion](questionResponse.json)
        val question = CategorizationQuestion(
          Some(old.id.toLong),
          old.categoryID.map(_.toLong),
          old.title,
          old.text,
          old.explanationText,
          old.rightAnswerText,
          old.wrongAnswerText,
          old.courseID.getOrElse(0).toLong)
        val answers = old.answers.map(a =>
          AnswerKeyValue(
            Some(a.id.toLong),
            a.questionId.map(_.toLong),
            old.courseID.getOrElse(0).toLong,
            a.text,
            a.answerCategoryText,
            a.score))
        (question, answers)
    }
}

/** Path conventions for slide-set assets stored in the file service. */
object SlideSetHelper {

  // Export format version written into new packages.
  val slidesVersion = Some("2.1")

  /** Returns the last space-separated token of `url`. */
  def getDisplayMode(url: String) = url.reverse.takeWhile(_ != ' ').reverse

  /** Folder prefix for an entity's files; the shape depends on the entity type
    * (and, for slide sets, on whether a format version is present).
    * Only the four model types matched below are expected here.
    */
  def filePathPrefix(any: Product, version: Option[String] = slidesVersion, id: Long = 0L) =
    any match {
      case slideSet: SlideSetModel =>
        version match {
          case Some(_) => s"slideset_logo_${slideSet.id.get}/"
          case _       => s"slide_logo${slideSet.id.get}/"
        }
      case slide: SlideModel => s"slide_${slide.id.get}/"
      case slideElement: SlideElementModel =>
        if (slideElement.slideEntityType == SlideEntityType.Pdf)
          s"slideData${slideElement.id.get}/"
        else
          s"slide_item_${slideElement.id.get}/"
      case slideTheme: SlideThemeModel => s"slide_theme_${slideTheme.id.get}/"
    }

  /** Full logo path of a slide set, if it has a logo. */
  def slideSetLogoPath(slide: SlideSetModel, version: Option[String] = slidesVersion) =
    slide.logo.map(logoPath(slide, _))

  /** Full path of a logo file under the "files/" root. */
  def logoPath(any: Product, logo: String, version: Option[String] = slidesVersion) =
    "files/" + filePathPrefix(any, version) + logo
}

/** Helpers shared by slide-set export implementations: collecting the questions,
  * plain texts and binary files referenced by a list of slides, and translating
  * portal URLs into (folder, file) pairs in the file service.
  */
trait SlideSetExportUtils {

  protected def questionService: QuestionService
  protected def plainTextService: PlainTextService
  protected def fileService: FileService

  /** All questions (with answers) referenced by Question-type slide elements. */
  protected def getRequiredQuestions(slides: List[SlideModel]): List[(Question, Seq[Answer])] =
    slides.flatMap { slide =>
      slide.slideElements
        .filter { _.slideEntityType == com.arcusys.valamis.slide.model.SlideEntityType.Question }
        .filter { _.content != "" }
        .map { element => questionService.getWithAnswers(element.content.toLong) }
    }

  /** All plain texts referenced by PlainText-type slide elements. */
  protected def getRequiredPlainTexts(slides: List[SlideModel]): List[PlainText] =
    slides.flatMap { slide =>
      slide.slideElements
        .filter { _.slideEntityType == com.arcusys.valamis.slide.model.SlideEntityType.PlainText }
        .filter { _.content != "" }
        .map { element => plainTextService.getById(element.content.toLong) }
    }

  /** Translates a local content path into an optional (folder, file) pair.
    * Empty paths and absolute http(s) URLs yield None; an unrecognized local
    * path shape raises UnsupportedOperationException.
    */
  protected def getFromPath(content: String, folderPrefix: String): Option[(String, String)] =
    if (content.isEmpty || content.contains("http://") || content.contains("https://"))
      None
    else {
      // Pick the regex matching this path's origin (bare file name, PDF
      // preview resource, or document-library URL with/without a groupId).
      val regex =
        if (!content.contains("/")) "(.+)".r
        else if (content.contains("/learn-portlet/preview-resources/pdf/")) ".+/(.+)/(.+)$".r
        else if (content.contains("/documents/")) {
          if (content.contains("groupId")) ".+/(.+)/.+/(.+)/(.+)\\\\?groupId=(.+).*".r
          else ".+/(.+)/.+/(.+)/\\\\?version=(.+).*entryId=(.+).*&ext=(.+)".r
        }
        else throw new UnsupportedOperationException(s"Unknown path to image: $content")
      getFileTuple(content, folderPrefix, regex)
    }

  /** Extracts the (folder, file) pair from `content` according to how many
    * groups `regex` captured; throws IllegalArgumentException on no match.
    */
  protected def getFileTuple(content: String,
                             folderPrefix: String,
                             regex: Regex): Option[(String, String)] =
    content match {
      case regex(fileName)                       => Some((folderPrefix, fileName))
      case regex(pdfFolderName, pdfFileName)     => Some((pdfFolderName, pdfFileName))
      case regex(courseId, fileName, uuid, groupId) => Some((uuid, fileName))
      case regex(courseId, fileName, fileVersion, entryId, fileExtension) =>
        Some((entryId, fileName))
      case _ =>
        throw new IllegalArgumentException("Content didn't match any of the regular expressions.")
    }

  /** Resolves the file backing a slide (background image) or a slide element
    * (content); only those two model types are expected here.
    */
  protected def composeFile(any: Product): Option[(String, InputStream)] = {
    val fileName: Option[String] = any match {
      case slide: SlideModel               => slide.bgImage
      case slideElement: SlideElementModel => Some(slideElement.content)
    }
    fileName.flatMap { name =>
      getFromPath(name.takeWhile(_ != ' '), SlideSetHelper.filePathPrefix(any))
        .map(getPathAndInputStream)
    }
  }

  /** Builds a storage path; versioned packages live under "resources",
    * unversioned ones under "images".
    */
  protected def getPath(folderName: String, fileName: String, version: Option[String] = None) = {
    val folderPrefix = version match {
      case Some(v) => "resources"
      case _       => "images"
    }
    s"$folderPrefix/$folderName/$fileName"
  }

  /** Maps a (folder, file) pair to its package path and content stream.
    * Folders shaped like the pre-2.x "slide_<n>_..." convention go under
    * "images", everything else under "resources".
    */
  protected def getPathAndInputStream(folderAndFileName: (String, String)) = {
    val (folderName, fileName) = folderAndFileName
    val fromOldVersion = Pattern.compile("slide_\\\\d+_.*").matcher(folderName).find
    val folderPrefix = if (fromOldVersion) "images" else "resources"
    s"$folderPrefix/${folderName.takeWhile(_ != '/')}/$fileName" ->
      new ByteArrayInputStream(fileService.getFileContent(folderName, fileName))
  }

  /** All resolvable files referenced by the given slides. */
  protected def getRequiredFiles(slides: List[SlideModel]) =
    getRequiredFileModels(slides).flatten

  // Collects the (possibly missing) file of each slide plus those of its
  // elements whose type is an exportable external-file type.
  private def getRequiredFileModels(slides: List[SlideModel]): List[Option[(String, InputStream)]] =
    slides.flatMap { slide =>
      val slideResource = composeFile(slide)
      val slideElementResources = slide.slideElements
        .filter(x => SlideEntityType.AvailableExternalFileTypes.contains(x.slideEntityType))
        .map(composeFile)
      slideResource :: slideElementResources
    }

  /** Stores the file at `path` under the entity's folder prefix and returns
    * the original `fileName` unchanged.
    */
  protected def addImageToFileService(any: Product,
                                      version: Option[String],
                                      fileName: String,
                                      path: String,
                                      id: Long = 0L): String = {
    val folder = SlideSetHelper.filePathPrefix(any, SlideSetHelper.slidesVersion, id)
    fileService.setFileContent(
      folder = folder,
      // Strip any leading directory components from the stored name.
      name = fileName.reverse.takeWhile(_ != '/').reverse,
      content = FileSystemUtil.getFileContent(new File(path)),
      deleteFolder = false)
    fileName
  }

  /** Keeps one entry per path, dropping duplicate streams for the same path. */
  protected def omitFileDuplicates(files: List[(String, InputStream)]): List[(String, InputStream)] =
    files.groupBy(_._1).map(_._2.head).toList
}
igor-borisov/valamis
valamis-slide/src/main/scala/com/arcusys/valamis/slide/service/export/SlideSetExportUtils.scala
Scala
gpl-3.0
13,917
package korolev.blazeServer

import java.net.InetAddress
import java.security.KeyStore
import javax.net.ssl.{KeyManagerFactory, SSLContext}

import org.http4s.blaze.util.BogusKeystore

import scala.concurrent.ExecutionContextExecutorService

/**
  * Configuration of the embedded blaze HTTP server.
  *
  * @param port                    TCP port to bind (default 8181)
  * @param host                    interface to bind; defaults to the loopback address
  * @param sslContext              standard Java SSL context; None disables TLS.
  *                                Use [[BlazeServerConfig.bogusSslContext]] for tests
  * @param bufferSize              I/O buffer size in bytes
  * @param doNotBlockCurrentThread when true, the server start call returns immediately
  * @author Aleksey Fomkin <aleksey.fomkin@gmail.com>
  */
case class BlazeServerConfig(
  port: Int = 8181,
  host: String = InetAddress.getLoopbackAddress.getHostAddress,
  sslContext: Option[SSLContext] = None,
  bufferSize: Int = 8 * 1024,
  doNotBlockCurrentThread: Boolean = false
)(
  // Trampoline
  implicit val executionContext: ExecutionContextExecutorService
)

object BlazeServerConfig {

  /** Default configuration; the implicit execution context is resolved from scope. */
  val default = BlazeServerConfig()

  /** Builds an SSLContext from the bundled bogus keystore.
    * The certificate is self-signed and NOT trustworthy — test use only.
    */
  def bogusSslContext: SSLContext = {
    val ksStream = BogusKeystore.asInputStream()
    assert(ksStream != null) // keystore must be present on the classpath
    val ks = KeyStore.getInstance("JKS")
    ks.load(ksStream, BogusKeystore.getKeyStorePassword)
    val kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm())
    kmf.init(ks, BogusKeystore.getCertificatePassword)
    // Use "TLS" instead of the legacy "SSL" algorithm name: an "SSL" context
    // may enable the insecure SSLv3 protocol, while "TLS" negotiates the best
    // TLS version both sides support (see JSSE standard algorithm names).
    val context = SSLContext.getInstance("TLS")
    context.init(kmf.getKeyManagers, null, null)
    context
  }
}
PhilAndrew/JumpMicro
JMSangriaGraphql/src/main/scala/korolev/blazeServer/BlazeServerConfig.scala
Scala
mit
1,258
package pl.arapso.scaffoldings.scala.custom.collections

/** Small demo of immutable Seq construction and the prepend/append operators. */
object SeqExample {

  def main(args: Array[String]): Unit = {
    sequenceExample
  }

  /** Prints three sequences: the base one, with 0 prepended, then with 8 and 9 appended. */
  def sequenceExample = {
    val base: Seq[Int] = Seq(1, 2, 3, 4, 5, 6, 7)
    println(base)

    // +: prepends an element, producing a new sequence.
    val withLeadingZero = 0 +: base
    println(withLeadingZero)

    // :+ appends; the operators chain left-to-right.
    val extended = withLeadingZero :+ 8 :+ 9
    println(extended)
  }
}
arapso-scaffoldings/scala
scala-tutor/custom/src/main/scala/pl/arapso/scaffoldings/scala/custom/collections/SeqExample.scala
Scala
apache-2.0
399
package test

/** Abstract class whose body contains a statement (a trivially-true `require`),
  * evaluated when a concrete subclass is constructed.
  */
abstract class Top {
  // Class-body precondition; always holds, runs on construction.
  require(1 > 0)

  /** No-op taking an Int and returning Unit. */
  def bar(x: Int): Unit = ()
}
epfl-lara/stainless
frontends/benchmarks/extraction/valid/ClassBody.scala
Scala
apache-2.0
84
package slick.jdbc import java.util.concurrent.Executors import org.reactivestreams.Subscriber import scala.concurrent.{ExecutionContext, Future} import java.util.Properties import java.sql.{Array => _, _} import javax.sql.DataSource import javax.naming.InitialContext import slick.dbio._ import slick.backend.{DatabasePublisher, DatabaseComponent, RelationalBackend} import slick.SlickException import slick.util.{LogUtil, GlobalConfig, SlickLogger, AsyncExecutor} import slick.util.ConfigExtensionMethods._ import org.slf4j.LoggerFactory import com.typesafe.config.{ConfigFactory, Config} /** A JDBC-based database back-end which can be used for <em>Plain SQL</em> queries * and with all [[slick.driver.JdbcProfile]]-based drivers. */ trait JdbcBackend extends RelationalBackend { type This = JdbcBackend type Database = DatabaseDef type Session = SessionDef type DatabaseFactory = DatabaseFactoryDef type Context = JdbcActionContext type StreamingContext = JdbcStreamingActionContext val Database = new DatabaseFactoryDef {} val backend: JdbcBackend = this def createDatabase(config: Config, path: String): Database = Database.forConfig(path, config) class DatabaseDef(val source: JdbcDataSource, val executor: AsyncExecutor) extends super.DatabaseDef { /** The DatabaseCapabilities, accessed through a Session and created by the * first Session that needs them. Access does not need to be synchronized * because, in the worst case, capabilities will be determined multiple * times by different concurrent sessions but the result should always be * the same. */ @volatile protected[JdbcBackend] var capabilities: DatabaseCapabilities = null def createSession(): Session = new BaseSession(this) /** Like `stream(StreamingAction)` but you can disable pre-buffering of the next row by setting * `bufferNext = false`. The ResultSet will not advance to the next row until you * `request()` more data. 
This allows you to process LOBs asynchronously by requesting only * one single element at a time after processing the current one, so that the proper * sequencing is preserved even though processing may happen on a different thread. */ final def stream[T](a: StreamingDBIO[_, T], bufferNext: Boolean): DatabasePublisher[T] = createPublisher(a, s => new JdbcStreamingActionContext(s, false, DatabaseDef.this, bufferNext)) override protected[this] def createDatabaseActionContext[T](_useSameThread: Boolean): Context = new JdbcActionContext { val useSameThread = _useSameThread } override protected[this] def createStreamingDatabaseActionContext[T](s: Subscriber[_ >: T], useSameThread: Boolean): StreamingContext = new JdbcStreamingActionContext(s, useSameThread, DatabaseDef.this, true) protected[this] def synchronousExecutionContext = executor.executionContext /** Run some code on the [[ioExecutionContext]]. */ final def io[T](thunk: => T): Future[T] = Future(thunk)(ioExecutionContext) /** The `ExecutionContext` which is used for performing blocking database I/O, similar to how * `run` or `stream` would run it. This can be used for calling back into blocking JDBC APIs * (e.g. for materializing a LOB or mutating a result set row) from asynchronous processors of * unbuffered streams. */ final def ioExecutionContext: ExecutionContext = executor.executionContext /** Free all resources allocated by Slick for this Database object. In particular, the * [[slick.util.AsyncExecutor]] with the thread pool for asynchronous execution is shut * down. If this object represents a connection pool managed directly by Slick, it is also * closed. */ def close: Unit = try executor.close() finally source.close() } trait DatabaseFactoryDef extends super.DatabaseFactoryDef { /** Create a Database based on a [[JdbcDataSource]]. */ def forSource(source: JdbcDataSource, executor: AsyncExecutor = AsyncExecutor.default()) = new DatabaseDef(source, executor) /** Create a Database based on a DataSource. 
*/ def forDataSource(ds: DataSource, executor: AsyncExecutor = AsyncExecutor.default()): DatabaseDef = forSource(new DataSourceJdbcDataSource(ds), executor) /** Create a Database based on the JNDI name of a DataSource. */ def forName(name: String, executor: AsyncExecutor = null) = new InitialContext().lookup(name) match { case ds: DataSource => forDataSource(ds, executor match { case null => AsyncExecutor.default(name) case e => e }) case x => throw new SlickException("Expected a DataSource for JNDI name "+name+", but got "+x) } /** Create a Database that uses the DriverManager to open new connections. */ def forURL(url:String, user:String = null, password:String = null, prop: Properties = null, driver:String = null, executor: AsyncExecutor = AsyncExecutor.default(), keepAliveConnection: Boolean = false): DatabaseDef = forSource(new DriverJdbcDataSource(url, user, password, prop, driverName = driver, keepAliveConnection = keepAliveConnection), executor) /** Create a Database that uses the DriverManager to open new connections. */ def forURL(url:String, prop: Map[String, String]): Database = { val p = new Properties if(prop ne null) for((k,v) <- prop) if(k.ne(null) && v.ne(null)) p.setProperty(k, v) forURL(url, prop = p, driver = null) } /** Create a Database that directly uses a Driver to open new connections. * This is needed to open a JDBC URL with a driver that was not loaded by the system ClassLoader. */ def forDriver(driver:Driver, url:String, user:String = null, password:String = null, prop: Properties = null, executor: AsyncExecutor = AsyncExecutor.default()): DatabaseDef = forSource(new DriverJdbcDataSource(url, user, password, prop, driver = driver), executor) /** Load a database configuration through [[https://github.com/typesafehub/config Typesafe Config]]. * * The main config key to set is `connectionPool`. It determines the connection pool * implementation to use. 
The default is `HikariCP` (for * [[https://github.com/brettwooldridge/HikariCP HikariCP]]). Use `disabled` to disable * connection pooling (using the DriverManager directly). A third-party connection pool * implementation can be selected by specifying the fully qualified name of an object * implementing [[JdbcDataSourceFactory]]. * * The pool is tuned for asynchronous execution by default. Apart from the connection * parameters you should only have to set `numThreads` and `queueSize` in most cases. In this * scenario there is contention over the thread pool (via its queue), not over the * connections, so you can have a rather large limit on the maximum number of connections * (based on what the database server can still handle, not what is most efficient). Slick * will use more connections than there are threads in the pool when sequencing non-database * actions inside a transaction. * * The following config keys are supported for HikariCP and direct connections: * <ul> * <li>`url` (String, required): JDBC URL</li> * <li>`driver` or `driverClassName` (String, optional): JDBC driver class to load</li> * <li>`user` (String, optional): User name</li> * <li>`password` (String, optional): Password</li> * <li>`isolation` (String, optional): Transaction isolation level for new connections. * Allowed values are: `NONE`, `READ_COMMITTED`, `READ_UNCOMMITTED`, `REPEATABLE_READ`, * `SERIALIZABLE`.</li> * <li>`catalog` (String, optional): Default catalog for new connections.</li> * <li>`readOnly` (Boolean, optional): Read Only flag for new connections.</li> * <li>`properties` (Map, optional): Properties to pass to the driver (or * to the DataSource when using HikariCP with a `dataSourceClass` * instead of a driver).</li> * <li>`numThreads` (Int, optional, default: 20): The number of concurrent threads in the * thread pool for asynchronous execution of database actions. 
See the * [[https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing HikariCP wiki]] * for more imformation about sizing the thread pool correctly. Note that for asynchronous * execution in Slick you should tune the thread pool size (this parameter) accordingly * instead of the maximum connection pool size.</li> * <li>`queueSize` (Int, optional, default: 1000): The size of the queue for database * actions which cannot be executed immediately when all threads are busy. Beyond this * limit new actions fail immediately. Set to 0 for no queue (direct hand-off) or to -1 * for an unlimited queue size (not recommended).</li> * </ul> * * The following additional keys are supported for HikariCP only: * <ul> * <li>`dataSourceClass` (String, optional): The name of the DataSource class provided by * the JDBC driver. This is preferred over using `driver`. Note that `url` is ignored when * this key is set (You have to use `properties` to configure the database * connection instead).</li> * <li>`maxConnections` (Int, optional, default: `numThreads` * 5): The maximum number of * connections in the pool.</li> * <li>`minConnections` (Int, optional, default: same as `numThreads`): The minimum number * of connections to keep in the pool.</li> * <li>`connectionTimeout` (Duration, optional, default: 1s): The maximum time to wait * before a call to getConnection is timed out. If this time is exceeded without a * connection becoming available, a SQLException will be thrown. 100ms is the minimum * value.</li> * <li>`idleTimeout` (Duration, optional, default: 10min): The maximum amount * of time that a connection is allowed to sit idle in the pool. A value of 0 means that * idle connections are never removed from the pool.</li> * <li>`maxLifetime` (Duration, optional, default: 30min): The maximum lifetime of a * connection in the pool. When an idle connection reaches this timeout, even if recently * used, it will be retired from the pool. 
A value of 0 indicates no maximum * lifetime.</li> * <li>`connectionInitSql` (String, optional): A SQL statement that will be * executed after every new connection creation before adding it to the pool. If this SQL * is not valid or throws an exception, it will be treated as a connection failure and the * standard retry logic will be followed.</li> * <li>`initializationFailFast` (Boolean, optional, default: false): Controls whether the * pool will "fail fast" if the pool cannot be seeded with initial connections * successfully. If connections cannot be created at pool startup time, a RuntimeException * will be thrown. This property has no effect if `minConnections` is 0.</li> * <li>`leakDetectionThreshold` (Duration, optional, default: 0): The amount of time that a * connection can be out of the pool before a message is logged indicating a possible * connection leak. A value of 0 means leak detection is disabled. Lowest acceptable value * for enabling leak detection is 10s.</li> * <li>`connectionTestQuery` (String, optional): A statement that will be executed just * before a connection is obtained from the pool to validate that the connection to the * database is still alive. It is database dependent and should be a query that takes very * little processing by the database (e.g. "VALUES 1"). When not set, the JDBC4 * `Connection.isValid()` method is used instead (which is usually preferable).</li> * <li>`registerMbeans` (Boolean, optional, default: false): Whether or not JMX Management * Beans ("MBeans") are registered.</li> * </ul> * * The following additional keys are supported for direct connections only: * <ul> * <li>`keepAliveConnection` (Boolean, optional, default: false): If this is set to true, * one extra connection will be opened as soon as the database is accessed for the first * time, and kept open until `close()` is called. This is useful for named in-memory * databases in test environments.</li> * </ul> * * Unknown keys are ignored. 
Invalid values or missing mandatory keys will trigger a * [[SlickException]]. * * @param path The path in the configuration file for the database configuration (e.g. `foo.bar` * would find a database URL at config key `foo.bar.url`) or an empty string for * the top level of the `Config` object. * @param config The `Config` object to read from. This defaults to the global app config * (e.g. in `application.conf` at the root of the class path) if not specified. * @param driver An optional JDBC driver to call directly. If this is set to a non-null value, * the `driver` key from the configuration is ignored. The default is to use the * standard lookup mechanism. The explicit driver may not be supported by all * connection pools (in particular, the default [[HikariCPJdbcDataSource]]). */ def forConfig(path: String, config: Config = ConfigFactory.load(), driver: Driver = null): Database = { val source = JdbcDataSource.forConfig(if(path.isEmpty) config else config.getConfig(path), driver, path) val executor = AsyncExecutor(path, config.getIntOr("numThreads", 20), config.getIntOr("queueSize", 1000)) forSource(source, executor) } } trait SessionDef extends super.SessionDef { self => def database: Database def conn: Connection def metaData: DatabaseMetaData def capabilities: DatabaseCapabilities def resultSetType: ResultSetType = ResultSetType.Auto def resultSetConcurrency: ResultSetConcurrency = ResultSetConcurrency.Auto def resultSetHoldability: ResultSetHoldability = ResultSetHoldability.Auto def decorateStatement[S <: Statement](statement: S): S = statement def fetchSize: Int = 0 final def prepareStatement(sql: String, defaultType: ResultSetType = ResultSetType.ForwardOnly, defaultConcurrency: ResultSetConcurrency = ResultSetConcurrency.ReadOnly, defaultHoldability: ResultSetHoldability = ResultSetHoldability.Default): PreparedStatement = { JdbcBackend.logStatement("Preparing statement", sql) val s = 
loggingPreparedStatement(decorateStatement(resultSetHoldability.withDefault(defaultHoldability) match { case ResultSetHoldability.Default => val rsType = resultSetType.withDefault(defaultType).intValue val rsConc = resultSetConcurrency.withDefault(defaultConcurrency).intValue if(rsType == ResultSet.TYPE_FORWARD_ONLY && rsConc == ResultSet.CONCUR_READ_ONLY) conn.prepareStatement(sql) else conn.prepareStatement(sql, rsType, rsConc) case h => conn.prepareStatement(sql, resultSetType.withDefault(defaultType).intValue, resultSetConcurrency.withDefault(defaultConcurrency).intValue, h.intValue) })) if(fetchSize != 0) s.setFetchSize(fetchSize) s } final def prepareInsertStatement(sql: String, columnNames: Array[String] = new Array[String](0)): PreparedStatement = { if(JdbcBackend.statementLogger.isDebugEnabled) JdbcBackend.logStatement("Preparing insert statement (returning: "+columnNames.mkString(",")+")", sql) val s = loggingPreparedStatement(decorateStatement(conn.prepareStatement(sql, columnNames))) if(fetchSize != 0) s.setFetchSize(fetchSize) s } final def prepareInsertStatement(sql: String, columnIndexes: Array[Int]): PreparedStatement = { if(JdbcBackend.statementLogger.isDebugEnabled) JdbcBackend.logStatement("Preparing insert statement (returning indexes: "+columnIndexes.mkString(",")+")", sql) val s = loggingPreparedStatement(decorateStatement(conn.prepareStatement(sql, columnIndexes))) if(fetchSize != 0) s.setFetchSize(fetchSize) s } final def createStatement(defaultType: ResultSetType = ResultSetType.ForwardOnly, defaultConcurrency: ResultSetConcurrency = ResultSetConcurrency.ReadOnly, defaultHoldability: ResultSetHoldability = ResultSetHoldability.Default): Statement = { val s = loggingStatement(decorateStatement(resultSetHoldability.withDefault(defaultHoldability) match { case ResultSetHoldability.Default => conn.createStatement(resultSetType.withDefault(defaultType).intValue, resultSetConcurrency.withDefault(defaultConcurrency).intValue) case h => 
conn.createStatement(resultSetType.withDefault(defaultType).intValue, resultSetConcurrency.withDefault(defaultConcurrency).intValue, h.intValue) })) if(fetchSize != 0) s.setFetchSize(fetchSize) s } /** A wrapper around the JDBC Connection's prepareStatement method, that automatically closes the statement. */ final def withPreparedStatement[T](sql: String, defaultType: ResultSetType = ResultSetType.ForwardOnly, defaultConcurrency: ResultSetConcurrency = ResultSetConcurrency.ReadOnly, defaultHoldability: ResultSetHoldability = ResultSetHoldability.Default)(f: (PreparedStatement => T)): T = { val st = prepareStatement(sql, defaultType, defaultConcurrency, defaultHoldability) try f(st) finally st.close() } /** A wrapper around the JDBC Connection's prepareInsertStatement method, that automatically closes the statement. */ final def withPreparedInsertStatement[T](sql: String, columnNames: Array[String] = new Array[String](0))(f: (PreparedStatement => T)): T = { val st = prepareInsertStatement(sql, columnNames) try f(st) finally st.close() } /** A wrapper around the JDBC Connection's prepareInsertStatement method, that automatically closes the statement. */ final def withPreparedInsertStatement[T](sql: String, columnIndexes: Array[Int])(f: (PreparedStatement => T)): T = { val st = prepareInsertStatement(sql, columnIndexes) try f(st) finally st.close() } /** A wrapper around the JDBC Connection's createStatement method, that automatically closes the statement. 
*/ final def withStatement[T](defaultType: ResultSetType = ResultSetType.ForwardOnly, defaultConcurrency: ResultSetConcurrency = ResultSetConcurrency.ReadOnly, defaultHoldability: ResultSetHoldability = ResultSetHoldability.Default)(f: (Statement => T)): T = { val st = createStatement(defaultType, defaultConcurrency, defaultHoldability) try f(st) finally st.close() } def close(): Unit /** * Call this method within a <em>withTransaction</em> call to roll back the current * transaction after <em>withTransaction</em> returns. */ def rollback(): Unit def force() { conn } /** * Run the supplied function within a transaction. If the function throws an Exception * or the session's rollback() method is called, the transaction is rolled back, * otherwise it is commited when the function returns. */ def withTransaction[T](f: => T): T /** * Create a new Slick Session wrapping the same JDBC connection, but using the given values as defaults for * resultSetType, resultSetConcurrency and resultSetHoldability. 
*/ @deprecated("Use the new Action-based API instead", "3.0") final def forParameters(rsType: ResultSetType = resultSetType, rsConcurrency: ResultSetConcurrency = resultSetConcurrency, rsHoldability: ResultSetHoldability = resultSetHoldability): Session = internalForParameters(rsType, rsConcurrency, rsHoldability, null, 0) private[slick] final def internalForParameters(rsType: ResultSetType, rsConcurrency: ResultSetConcurrency, rsHoldability: ResultSetHoldability, statementInit: Statement => Unit, _fetchSize: Int): Session = new Session { override def resultSetType = rsType override def resultSetConcurrency = rsConcurrency override def resultSetHoldability = rsHoldability override def fetchSize = _fetchSize override def decorateStatement[S <: Statement](statement: S): S = { if(statementInit ne null) statementInit(statement) statement } def database = self.database def conn = self.conn def metaData = self.metaData def capabilities = self.capabilities def close() = self.close() def rollback() = self.rollback() def withTransaction[T](f: => T) = self.withTransaction(f) private[slick] def startInTransaction: Unit = self.startInTransaction private[slick] def endInTransaction(f: => Unit): Unit = self.endInTransaction(f) } protected def loggingStatement(st: Statement): Statement = if(JdbcBackend.statementLogger.isDebugEnabled || JdbcBackend.benchmarkLogger.isDebugEnabled) new LoggingStatement(st) else st protected def loggingPreparedStatement(st: PreparedStatement): PreparedStatement = if(JdbcBackend.statementLogger.isDebugEnabled || JdbcBackend.benchmarkLogger.isDebugEnabled) new LoggingPreparedStatement(st) else st /** Start a `transactionally` block */ private[slick] def startInTransaction: Unit /** End a `transactionally` block, running the specified function first if it is the outermost one. 
*/ private[slick] def endInTransaction(f: => Unit): Unit } class BaseSession(val database: Database) extends SessionDef { protected var open = false protected var doRollback = false protected var inTransactionally = 0 def isOpen = open def isInTransaction = inTransactionally > 0 lazy val conn = { open = true; database.source.createConnection } lazy val metaData = conn.getMetaData() def capabilities = { val dc = database.capabilities if(dc ne null) dc else { val newDC = new DatabaseCapabilities(this) database.capabilities = newDC newDC } } def close() { if(open) conn.close() } def rollback() { if(conn.getAutoCommit) throw new SlickException("Cannot roll back session in auto-commit mode") doRollback = true } def withTransaction[T](f: => T): T = if(isInTransaction) f else { startInTransaction try { var done = false try { doRollback = false val res = f if(doRollback) conn.rollback() else conn.commit() done = true res } finally if(!done) conn.rollback() } finally endInTransaction() } private[slick] def startInTransaction: Unit = { if(!isInTransaction) conn.setAutoCommit(false) inTransactionally += 1 } private[slick] def endInTransaction(f: => Unit): Unit = { inTransactionally -= 1 if(!isInTransaction) try f finally conn.setAutoCommit(true) } def getTransactionality: (Int, Boolean) = (inTransactionally, conn.getAutoCommit) } /** * Describes capabilities of the database which can be determined from a * DatabaseMetaData object and then cached and reused for all sessions. 
*/ class DatabaseCapabilities(session: Session) { val supportsBatchUpdates = session.metaData.supportsBatchUpdates } trait JdbcActionContext extends BasicActionContext { private[JdbcBackend] var statementParameters: List[JdbcBackend.StatementParameters] = null def pushStatementParameters(p: JdbcBackend.StatementParameters): Unit = { val p2 = if((p.rsType eq null) || (p.rsConcurrency eq null) || (p.rsHoldability eq null) || (p.statementInit eq null)) { val curr = if(statementParameters eq null) JdbcBackend.defaultStatementParameters else statementParameters.head JdbcBackend.StatementParameters( if(p.rsType eq null) curr.rsType else p.rsType, if(p.rsConcurrency eq null) curr.rsConcurrency else p.rsConcurrency, if(p.rsHoldability eq null) curr.rsHoldability else p.rsHoldability, if(p.statementInit eq null) curr.statementInit else if(curr.statementInit eq null) p.statementInit else { s => curr.statementInit(s); p.statementInit(s) }, p.fetchSize ) } else p statementParameters = p2 :: (if(statementParameters eq null) Nil else statementParameters) } def popStatementParameters: Unit = { val p = statementParameters.tail if(p.isEmpty) statementParameters = null else statementParameters = p } /* TODO: Creating a new Session here for parameter overrides is not the most efficient solution but it provides compatibility with the old Session-based API. This should be changed once the old API has been removed. 
*/ override def session: Session = if(statementParameters eq null) super.session else { val p = statementParameters.head super.session.internalForParameters(p.rsType, p.rsConcurrency, p.rsHoldability, p.statementInit, p.fetchSize) } /** The current JDBC Connection */ def connection: Connection = session.conn } class JdbcStreamingActionContext(subscriber: Subscriber[_], useSameThread: Boolean, database: Database, val bufferNext: Boolean) extends BasicStreamingActionContext(subscriber, useSameThread, database) with JdbcActionContext } object JdbcBackend extends JdbcBackend { case class StatementParameters(rsType: ResultSetType, rsConcurrency: ResultSetConcurrency, rsHoldability: ResultSetHoldability, statementInit: Statement => Unit, fetchSize: Int) val defaultStatementParameters = StatementParameters(ResultSetType.Auto, ResultSetConcurrency.Auto, ResultSetHoldability.Auto, null, 0) protected[jdbc] lazy val statementLogger = new SlickLogger(LoggerFactory.getLogger(classOf[JdbcBackend].getName+".statement")) protected[jdbc] lazy val benchmarkLogger = new SlickLogger(LoggerFactory.getLogger(classOf[JdbcBackend].getName+".benchmark")) protected[jdbc] def logStatement(msg: String, stmt: String) = if(statementLogger.isDebugEnabled) { val s = if(GlobalConfig.sqlIndent) msg + ":\\n" + LogUtil.multilineBorder(stmt) else msg + ": " + stmt statementLogger.debug(s) } }
adamkozuch/slick
slick/src/main/scala/slick/jdbc/JdbcBackend.scala
Scala
bsd-2-clause
27,238
package uitest

import com.jayway.awaitility.scala.AwaitilitySupport
import com.softwaremill.bootzooka.common.Utils
import org.fest.assertions.Assertions._
import org.scalatest.BeforeAndAfterEach
import uitest.pages.RegistrationPage

/**
 * UI tests for the registration flow: a successful registration must report success and
 * trigger a confirmation email, while a password mismatch must be rejected with an error
 * and must not send any email.
 */
class RegisterUiSpec extends BaseUiSpec with AwaitilitySupport with BeforeAndAfterEach {
  // Randomized login per run so repeated executions don't collide on an already-registered user.
  val Login = Utils.randomString(5)
  val Email = Login + "@example.org"
  val Password = "test"
  final val EmailSubject = s"SoftwareMill Bootzooka - registration confirmation for user $Login"

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    // Clear mail captured by earlier tests so wasEmailSent reflects this test only.
    emailService.reset()
  }

  test("register new user and send an email") {
    //given
    val registrationPage = createPage(classOf[RegistrationPage])

    //when
    registrationPage.register(Login, Email, Password)

    //then
    assertThat(messagesPage.getInfoText) contains "User registered successfully"
    // Fixed: `assertThat(boolean)` alone performs no check — the expectation must be
    // asserted explicitly, mirroring the `.isFalse()` in the mismatch test below.
    assertThat(emailService.wasEmailSent(Email, EmailSubject)).isTrue()
  }

  test("register - fail due to not matching passwords") {
    //given
    val registrationPage = createPage(classOf[RegistrationPage])

    // when
    registrationPage.register(Login, Email, Password, Some(Password + "FooBarBaz"))

    //then
    assertThat(registrationPage.getPassErrorText) contains "Passwords don't match!"
    assertThat(emailService.wasEmailSent(Email, EmailSubject)).isFalse()
  }
}
umitunal/bootzooka
ui-tests/src/test/scala/uitest/RegisterUiSpec.scala
Scala
apache-2.0
1,381
/*
 * Copyright (c) 2013 David Soergel <dev@davidsoergel.com>
 * Licensed under the Apache License, Version 2.0
 * http://www.apache.org/licenses/LICENSE-2.0
 */

package worldmake.storage.casbah

import com.mongodb.casbah.Imports._
import com.typesafe.scalalogging.slf4j.Logging
import scala.collection.mutable
import com.mongodb.casbah.commons.Imports
import worldmake._
import edu.umass.cs.iesl.scalacommons.util.Hash
import scalax.file.Path
import org.joda.time.DateTime
import worldmake.storage.{FilesystemManagedFileStore, StorageContext, ProvenanceStore}

/**
 * MongoDB-backed [[worldmake.storage.StorageContext]]: provenance records live in the
 * given Mongo database, while file and log artifacts are delegated to the supplied
 * filesystem-backed stores.
 *
 * @param conn      live Mongo connection used for all collections
 * @param dbname    name of the Mongo database holding the "provenance" collection
 * @param fileStore filesystem store for managed artifact files
 * @param logStore  filesystem store for managed log files
 * @author <a href="mailto:dev@davidsoergel.com">David Soergel</a>
 */
class CasbahStorage(
                     conn: MongoConnection,
                     dbname: String,
                     override val fileStore:FilesystemManagedFileStore,
                     override val logStore:FilesystemManagedFileStore
                     ) extends StorageContext with Logging {

  // just make sure the initialization stuff loads
  // (referencing the object forces its one-time registration side effects to run)
  val s = SerializationHelpers

  //override val artifactStore: ArtifactStore = new CasbahArtifactStore(conn, dbname, "artifact")
  override val provenanceStore: ProvenanceStore = new CasbahProvenanceStore(conn, dbname, "provenance")
}

// Legacy artifact store kept for reference; superseded by the FilesystemManagedFileStore wiring above.
/*
class CasbahArtifactStore(conn: MongoConnection, dbname: String, collname: String ) extends ArtifactStore {
  import CasbahArtifactStore._
  val mongoColl = conn(dbname)(collname)
  override def put(artifact: Artifact[_]): Artifact[_] = {
    mongoColl.findOne(MongoDBObject("_id" -> artifact.artifactId)).map(artifactFromDb(_)).getOrElse({
      val result = artifactToDb(artifact).dbo
      mongoColl += result
      artifact
    })
  }
  override def get(id: String): Option[Artifact[_]] = {
    val r = mongoColl.findOne(MongoDBObject("_id" -> id))
    r.map(artifactFromDb(_))
  }
  def getContentHash(id: String) = get(id).map(_.contentHash)
}
*/
davidsoergel/worldmake
src/main/scala/worldmake/storage/casbah/CasbahStorage.scala
Scala
apache-2.0
1,989
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.deploy.rest

import java.io.{DataOutputStream, FileNotFoundException}
import java.net.{ConnectException, HttpURLConnection, SocketException, URL}
import java.nio.charset.StandardCharsets
import java.util.concurrent.TimeoutException
import javax.servlet.http.HttpServletResponse

import scala.collection.mutable
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.io.Source
import scala.util.control.NonFatal

import com.fasterxml.jackson.core.JsonProcessingException

import org.apache.spark.{SPARK_VERSION => sparkVersion, SparkConf, SparkException}
import org.apache.spark.deploy.SparkApplication
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils

/**
 * A client that submits applications to a [[RestSubmissionServer]].
 *
 * In protocol version v1, the REST URL takes the form http://[host:port]/v1/submissions/[action],
 * where [action] can be one of create, kill, or status. Each type of request is represented in
 * an HTTP message sent to the following prefixes:
 *   (1) submit - POST to /submissions/create
 *   (2) kill - POST /submissions/kill/[submissionId]
 *   (3) status - GET /submissions/status/[submissionId]
 *
 * In the case of (1), parameters are posted in the HTTP body in the form of JSON fields.
 * Otherwise, the URL fully specifies the intended action of the client.
 *
 * Since the protocol is expected to be stable across Spark versions, existing fields cannot be
 * added or removed, though new optional fields can be added. In the rare event that forward or
 * backward compatibility is broken, Spark must introduce a new protocol version (e.g. v2).
 *
 * The client and the server must communicate using the same version of the protocol. If there
 * is a mismatch, the server will respond with the highest protocol version it supports. A future
 * implementation of this client can use that information to retry using the version specified
 * by the server.
 */
private[spark] class RestSubmissionClient(master: String) extends Logging {
  import RestSubmissionClient._

  // Expand a standalone HA master URL (spark://h1:p1,h2:p2) into individual candidates;
  // any other master string is used as-is.
  private val masters: Array[String] = if (master.startsWith("spark://")) {
    Utils.parseStandaloneMasterUrls(master)
  } else {
    Array(master)
  }

  // Set of masters that lost contact with us, used to keep track of
  // whether there are masters still alive for us to communicate with
  private val lostMasters = new mutable.HashSet[String]

  /**
   * Submit an application specified by the parameters in the provided request.
   *
   * If the submission was successful, poll the status of the submission and report
   * it to the user. Otherwise, report the error message provided by the server.
   */
  def createSubmission(request: CreateSubmissionRequest): SubmitRestProtocolResponse = {
    logInfo(s"Submitting a request to launch an application in $master.")
    var handled: Boolean = false
    // Remains null if no master could be reached at all.
    var response: SubmitRestProtocolResponse = null
    // Stop at the first master that successfully handles the request.
    for (m <- masters if !handled) {
      validateMaster(m)
      val url = getSubmitUrl(m)
      try {
        response = postJson(url, request.toJson)
        response match {
          case s: CreateSubmissionResponse =>
            if (s.success) {
              reportSubmissionStatus(s)
              handleRestResponse(s)
              handled = true
            }
          case unexpected =>
            handleUnexpectedRestResponse(unexpected)
        }
      } catch {
        case e: SubmitRestConnectionException =>
          // Only give up once every known master has become unreachable.
          if (handleConnectionException(m)) {
            throw new SubmitRestConnectionException("Unable to connect to server", e)
          }
      }
    }
    response
  }

  /** Request that the server kill the specified submission. */
  def killSubmission(submissionId: String): SubmitRestProtocolResponse = {
    logInfo(s"Submitting a request to kill submission $submissionId in $master.")
    var handled: Boolean = false
    var response: SubmitRestProtocolResponse = null
    for (m <- masters if !handled) {
      validateMaster(m)
      val url = getKillUrl(m, submissionId)
      try {
        response = post(url)
        response match {
          case k: KillSubmissionResponse =>
            // A standby master answers with a "not the leader" message; keep trying others.
            if (!Utils.responseFromBackup(k.message)) {
              handleRestResponse(k)
              handled = true
            }
          case unexpected =>
            handleUnexpectedRestResponse(unexpected)
        }
      } catch {
        case e: SubmitRestConnectionException =>
          if (handleConnectionException(m)) {
            throw new SubmitRestConnectionException("Unable to connect to server", e)
          }
      }
    }
    response
  }

  /** Request the status of a submission from the server. */
  def requestSubmissionStatus(
      submissionId: String,
      quiet: Boolean = false): SubmitRestProtocolResponse = {
    logInfo(s"Submitting a request for the status of submission $submissionId in $master.")
    var handled: Boolean = false
    var response: SubmitRestProtocolResponse = null
    for (m <- masters if !handled) {
      validateMaster(m)
      val url = getStatusUrl(m, submissionId)
      try {
        response = get(url)
        response match {
          case s: SubmissionStatusResponse if s.success =>
            if (!quiet) {
              handleRestResponse(s)
            }
            handled = true
          case unexpected =>
            handleUnexpectedRestResponse(unexpected)
        }
      } catch {
        case e: SubmitRestConnectionException =>
          if (handleConnectionException(m)) {
            throw new SubmitRestConnectionException("Unable to connect to server", e)
          }
      }
    }
    response
  }

  /** Construct a message that captures the specified parameters for submitting an application. */
  def constructSubmitRequest(
      appResource: String,
      mainClass: String,
      appArgs: Array[String],
      sparkProperties: Map[String, String],
      environmentVariables: Map[String, String]): CreateSubmissionRequest = {
    val message = new CreateSubmissionRequest
    message.clientSparkVersion = sparkVersion
    message.appResource = appResource
    message.mainClass = mainClass
    message.appArgs = appArgs
    message.sparkProperties = sparkProperties
    message.environmentVariables = environmentVariables
    // Fail fast client-side if required fields are missing.
    message.validate()
    message
  }

  /** Send a GET request to the specified URL. */
  private def get(url: URL): SubmitRestProtocolResponse = {
    logDebug(s"Sending GET request to server at $url.")
    val conn = url.openConnection().asInstanceOf[HttpURLConnection]
    conn.setRequestMethod("GET")
    readResponse(conn)
  }

  /** Send a POST request to the specified URL. */
  private def post(url: URL): SubmitRestProtocolResponse = {
    logDebug(s"Sending POST request to server at $url.")
    val conn = url.openConnection().asInstanceOf[HttpURLConnection]
    conn.setRequestMethod("POST")
    readResponse(conn)
  }

  /** Send a POST request with the given JSON as the body to the specified URL. */
  private def postJson(url: URL, json: String): SubmitRestProtocolResponse = {
    logDebug(s"Sending POST request to server at $url:\\n$json")
    val conn = url.openConnection().asInstanceOf[HttpURLConnection]
    conn.setRequestMethod("POST")
    conn.setRequestProperty("Content-Type", "application/json")
    conn.setRequestProperty("charset", "utf-8")
    conn.setDoOutput(true)
    try {
      val out = new DataOutputStream(conn.getOutputStream)
      Utils.tryWithSafeFinally {
        out.write(json.getBytes(StandardCharsets.UTF_8))
      } {
        out.close()
      }
    } catch {
      case e: ConnectException =>
        throw new SubmitRestConnectionException("Connect Exception when connect to server", e)
    }
    readResponse(conn)
  }

  /**
   * Read the response from the server and return it as a validated [[SubmitRestProtocolResponse]].
   * If the response represents an error, report the embedded message to the user.
   * Exposed for testing.
   *
   * The read runs in a Future and is capped at a hard 10-second timeout below; a timeout is
   * surfaced as a [[SubmitRestConnectionException]].
   */
  private[rest] def readResponse(connection: HttpURLConnection): SubmitRestProtocolResponse = {
    import scala.concurrent.ExecutionContext.Implicits.global
    val responseFuture = Future {
      val responseCode = connection.getResponseCode

      if (responseCode != HttpServletResponse.SC_OK) {
        val errString = Some(Source.fromInputStream(connection.getErrorStream())
          .getLines().mkString("\\n"))
        // A non-JSON 500 means the server did not speak the REST protocol at all.
        if (responseCode == HttpServletResponse.SC_INTERNAL_SERVER_ERROR &&
          !connection.getContentType().contains("application/json")) {
          throw new SubmitRestProtocolException(s"Server responded with exception:\\n${errString}")
        }
        logError(s"Server responded with error:\\n${errString}")
        val error = new ErrorResponse
        if (responseCode == RestSubmissionServer.SC_UNKNOWN_PROTOCOL_VERSION) {
          error.highestProtocolVersion = RestSubmissionServer.PROTOCOL_VERSION
        }
        error.message = errString.get
        error
      } else {
        val dataStream = connection.getInputStream

        // If the server threw an exception while writing a response, it will not have a body
        if (dataStream == null) {
          throw new SubmitRestProtocolException("Server returned empty body")
        }
        val responseJson = Source.fromInputStream(dataStream).mkString
        logDebug(s"Response from the server:\\n$responseJson")
        val response = SubmitRestProtocolMessage.fromJson(responseJson)
        response.validate()
        response match {
          // If the response is an error, log the message
          case error: ErrorResponse =>
            logError(s"Server responded with error:\\n${error.message}")
            error
          // Otherwise, simply return the response
          case response: SubmitRestProtocolResponse => response
          case unexpected =>
            throw new SubmitRestProtocolException(
              s"Message received from server was not a response:\\n${unexpected.toJson}")
        }
      }
    }

    // scalastyle:off awaitresult
    try { Await.result(responseFuture, 10.seconds) } catch {
      // scalastyle:on awaitresult
      case unreachable @ (_: FileNotFoundException | _: SocketException) =>
        throw new SubmitRestConnectionException("Unable to connect to server", unreachable)
      case malformed @ (_: JsonProcessingException | _: SubmitRestProtocolException) =>
        throw new SubmitRestProtocolException("Malformed response received from server", malformed)
      case timeout: TimeoutException =>
        throw new SubmitRestConnectionException("No response from server", timeout)
      case NonFatal(t) =>
        throw new SparkException("Exception while waiting for response", t)
    }
  }

  /** Return the REST URL for creating a new submission. */
  private def getSubmitUrl(master: String): URL = {
    val baseUrl = getBaseUrl(master)
    new URL(s"$baseUrl/create")
  }

  /** Return the REST URL for killing an existing submission. */
  private def getKillUrl(master: String, submissionId: String): URL = {
    val baseUrl = getBaseUrl(master)
    new URL(s"$baseUrl/kill/$submissionId")
  }

  /** Return the REST URL for requesting the status of an existing submission. */
  private def getStatusUrl(master: String, submissionId: String): URL = {
    val baseUrl = getBaseUrl(master)
    new URL(s"$baseUrl/status/$submissionId")
  }

  /** Return the base URL for communicating with the server, including the protocol version. */
  private def getBaseUrl(master: String): String = {
    var masterUrl = master
    supportedMasterPrefixes.foreach { prefix =>
      if (master.startsWith(prefix)) {
        masterUrl = master.stripPrefix(prefix)
      }
    }
    masterUrl = masterUrl.stripSuffix("/")
    s"http://$masterUrl/$PROTOCOL_VERSION/submissions"
  }

  /** Throw an exception if this is not standalone mode. */
  private def validateMaster(master: String): Unit = {
    val valid = supportedMasterPrefixes.exists { prefix => master.startsWith(prefix) }
    if (!valid) {
      throw new IllegalArgumentException(
        "This REST client only supports master URLs that start with " +
          "one of the following: " + supportedMasterPrefixes.mkString(","))
    }
  }

  /** Report the status of a newly created submission. */
  private def reportSubmissionStatus(
      submitResponse: CreateSubmissionResponse): Unit = {
    if (submitResponse.success) {
      val submissionId = submitResponse.submissionId
      if (submissionId != null) {
        logInfo(s"Submission successfully created as $submissionId. Polling submission state...")
        pollSubmissionStatus(submissionId)
      } else {
        // should never happen
        logError("Application successfully submitted, but submission ID was not provided!")
      }
    } else {
      val failMessage = Option(submitResponse.message).map { ": " + _ }.getOrElse("")
      logError(s"Application submission failed$failMessage")
    }
  }

  /**
   * Poll the status of the specified submission and log it.
   * This retries up to a fixed number of times before giving up.
   */
  private def pollSubmissionStatus(submissionId: String): Unit = {
    (1 to REPORT_DRIVER_STATUS_MAX_TRIES).foreach { _ =>
      val response = requestSubmissionStatus(submissionId, quiet = true)
      val statusResponse = response match {
        case s: SubmissionStatusResponse => s
        case _ => return // unexpected type, let upstream caller handle it
      }
      if (statusResponse.success) {
        val driverState = Option(statusResponse.driverState)
        val workerId = Option(statusResponse.workerId)
        val workerHostPort = Option(statusResponse.workerHostPort)
        val exception = Option(statusResponse.message)
        // Log driver state, if present
        driverState match {
          case Some(state) => logInfo(s"State of driver $submissionId is now $state.")
          case _ => logError(s"State of driver $submissionId was not found!")
        }
        // Log worker node, if present
        (workerId, workerHostPort) match {
          case (Some(id), Some(hp)) => logInfo(s"Driver is running on worker $id at $hp.")
          case _ =>
        }
        // Log exception stack trace, if present
        exception.foreach { e => logError(e) }
        return
      }
      Thread.sleep(REPORT_DRIVER_STATUS_INTERVAL)
    }
    logError(s"Error: Master did not recognize driver $submissionId.")
  }

  /** Log the response sent by the server in the REST application submission protocol. */
  private def handleRestResponse(response: SubmitRestProtocolResponse): Unit = {
    logInfo(s"Server responded with ${response.messageType}:\\n${response.toJson}")
  }

  /** Log an appropriate error if the response sent by the server is not of the expected type. */
  private def handleUnexpectedRestResponse(unexpected: SubmitRestProtocolResponse): Unit = {
    logError(s"Error: Server responded with message of unexpected type ${unexpected.messageType}.")
  }

  /**
   * When a connection exception is caught, return true if all masters are lost.
   * Note that the heuristic used here does not take into account that masters
   * can recover during the lifetime of this client. This assumption should be
   * harmless because this client currently does not support retrying submission
   * on failure yet (SPARK-6443).
   */
  private def handleConnectionException(masterUrl: String): Boolean = {
    if (!lostMasters.contains(masterUrl)) {
      logWarning(s"Unable to connect to server ${masterUrl}.")
      lostMasters += masterUrl
    }
    lostMasters.size >= masters.length
  }
}

private[spark] object RestSubmissionClient {

  val supportedMasterPrefixes = Seq("spark://", "mesos://")

  // SPARK_HOME and SPARK_CONF_DIR are filtered out because they are usually wrong
  // on the remote machine (SPARK-12345) (SPARK-25934)
  private val EXCLUDED_SPARK_ENV_VARS = Set("SPARK_ENV_LOADED", "SPARK_HOME", "SPARK_CONF_DIR")
  private val REPORT_DRIVER_STATUS_INTERVAL = 1000
  private val REPORT_DRIVER_STATUS_MAX_TRIES = 10
  val PROTOCOL_VERSION = "v1"

  /**
   * Filter non-spark environment variables from any environment.
   */
  private[rest] def filterSystemEnvironment(env: Map[String, String]): Map[String, String] = {
    env.filterKeys { k =>
      (k.startsWith("SPARK_") && !EXCLUDED_SPARK_ENV_VARS.contains(k)) || k.startsWith("MESOS_")
    }.toMap
  }

  private[spark] def supportsRestClient(master: String): Boolean = {
    supportedMasterPrefixes.exists(master.startsWith)
  }
}

private[spark] class RestSubmissionClientApp extends SparkApplication {

  /** Submits a request to run the application and return the response. Visible for testing. */
  def run(
      appResource: String,
      mainClass: String,
      appArgs: Array[String],
      conf: SparkConf,
      env: Map[String, String] = Map()): SubmitRestProtocolResponse = {
    val master = conf.getOption("spark.master").getOrElse {
      throw new IllegalArgumentException("'spark.master' must be set.")
    }
    val sparkProperties = conf.getAll.toMap
    val client = new RestSubmissionClient(master)
    val submitRequest = client.constructSubmitRequest(
      appResource, mainClass, appArgs, sparkProperties, env)
    client.createSubmission(submitRequest)
  }

  override def start(args: Array[String], conf: SparkConf): Unit = {
    if (args.length < 2) {
      sys.error("Usage: RestSubmissionClient [app resource] [main class] [app args*]")
      sys.exit(1)
    }
    val appResource = args(0)
    val mainClass = args(1)
    val appArgs = args.slice(2, args.length)
    val env = RestSubmissionClient.filterSystemEnvironment(sys.env)
    run(appResource, mainClass, appArgs, conf, env)
  }
}
wangmiao1981/spark
core/src/main/scala/org/apache/spark/deploy/rest/RestSubmissionClient.scala
Scala
apache-2.0
18,560
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.execution.python

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

import org.apache.spark.api.python.PythonEvalType
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, LogicalPlan, Project}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.{FilterExec, ProjectExec, SparkPlan}

/**
 * Extracts all the Python UDFs in logical aggregate, which depends on aggregate expression or
 * grouping key, evaluate them after aggregate.
 */
object ExtractPythonUDFFromAggregate extends Rule[LogicalPlan] {

  /**
   * Returns whether the expression could only be evaluated within aggregate.
   */
  private def belongAggregate(e: Expression, agg: Aggregate): Boolean = {
    e.isInstanceOf[AggregateExpression] ||
      agg.groupingExpressions.exists(_.semanticEquals(e))
  }

  // True if `expr` contains a PythonUDF any of whose inputs is an aggregate expression or
  // grouping key, i.e. a UDF that cannot run until the aggregate has been computed.
  private def hasPythonUdfOverAggregate(expr: Expression, agg: Aggregate): Boolean = {
    expr.find {
      e => e.isInstanceOf[PythonUDF] && e.find(belongAggregate(_, agg)).isDefined
    }.isDefined
  }

  // Rewrite Aggregate(aggExprs) into Project(Aggregate(...)): aggregate-dependent
  // sub-expressions stay in the Aggregate (aliased if needed), and the Python UDFs that
  // consume them move up into the Project so they run after the aggregation.
  private def extract(agg: Aggregate): LogicalPlan = {
    val projList = new ArrayBuffer[NamedExpression]()
    val aggExpr = new ArrayBuffer[NamedExpression]()
    agg.aggregateExpressions.foreach { expr =>
      if (hasPythonUdfOverAggregate(expr, agg)) {
        // Python UDF can only be evaluated after aggregate
        val newE = expr transformDown {
          case e: Expression if belongAggregate(e, agg) =>
            val alias = e match {
              case a: NamedExpression => a
              case o => Alias(e, "agg")()
            }
            aggExpr += alias
            alias.toAttribute
        }
        projList += newE.asInstanceOf[NamedExpression]
      } else {
        aggExpr += expr
        projList += expr.toAttribute
      }
    }
    // There is no Python UDF over aggregate expression
    Project(projList, agg.copy(aggregateExpressions = aggExpr))
  }

  def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
    case agg: Aggregate if agg.aggregateExpressions.exists(hasPythonUdfOverAggregate(_, agg)) =>
      extract(agg)
  }
}

/**
 * Extracts PythonUDFs from operators, rewriting the query plan so that the UDF can be evaluated
 * alone in a batch.
 *
 * Only extracts the PythonUDFs that could be evaluated in Python (the single child is PythonUDFs
 * or all the children could be evaluated in JVM).
 *
 * This has the limitation that the input to the Python UDF is not allowed include attributes from
 * multiple child operators.
 */
object ExtractPythonUDFs extends Rule[SparkPlan] with PredicateHelper {

  private def hasPythonUDF(e: Expression): Boolean = {
    e.find(_.isInstanceOf[PythonUDF]).isDefined
  }

  // A UDF is evaluable in Python if its inputs are all JVM-evaluable, or if it is a chain
  // whose single child is itself an evaluable PythonUDF.
  private def canEvaluateInPython(e: PythonUDF): Boolean = {
    e.children match {
      // single PythonUDF child could be chained and evaluated in Python
      case Seq(u: PythonUDF) => canEvaluateInPython(u)
      // Python UDF can't be evaluated directly in JVM
      case children => !children.exists(hasPythonUDF)
    }
  }

  private def collectEvaluatableUDF(expr: Expression): Seq[PythonUDF] = expr match {
    case udf: PythonUDF if canEvaluateInPython(udf) => Seq(udf)
    case e => e.children.flatMap(collectEvaluatableUDF)
  }

  def apply(plan: SparkPlan): SparkPlan = plan transformUp {
    // FlatMapGroupsInPandas can be evaluated directly in python worker
    // Therefore we don't need to extract the UDFs
    case plan: FlatMapGroupsInPandasExec => plan
    case plan: SparkPlan => extract(plan)
  }

  /**
   * Extract all the PythonUDFs from the current operator and evaluate them before the operator.
   */
  private def extract(plan: SparkPlan): SparkPlan = {
    val udfs = plan.expressions.flatMap(collectEvaluatableUDF)
      // ignore the PythonUDF that come from second/third aggregate, which is not used
      .filter(udf => udf.references.subsetOf(plan.inputSet))
    if (udfs.isEmpty) {
      // If there aren't any, we are done.
      plan
    } else {
      // Prune each child's output down to what the plan actually needs before inserting
      // the Python evaluation operators.
      val inputsForPlan = plan.references ++ plan.outputSet
      val prunedChildren = plan.children.map { child =>
        val allNeededOutput = inputsForPlan.intersect(child.outputSet).toSeq
        if (allNeededOutput.length != child.output.length) {
          ProjectExec(allNeededOutput, child)
        } else {
          child
        }
      }
      val planWithNewChildren = plan.withNewChildren(prunedChildren)

      val attributeMap = mutable.HashMap[PythonUDF, Expression]()
      val splitFilter = trySplitFilter(planWithNewChildren)
      // Rewrite the child that has the input required for the UDF
      val newChildren = splitFilter.children.map { child =>
        // Pick the UDF we are going to evaluate
        val validUdfs = udfs.filter { udf =>
          // Check to make sure that the UDF can be evaluated with only the input of this child.
          udf.references.subsetOf(child.outputSet)
        }
        if (validUdfs.nonEmpty) {
          require(validUdfs.forall(udf =>
            udf.evalType == PythonEvalType.SQL_BATCHED_UDF ||
            udf.evalType == PythonEvalType.SQL_PANDAS_SCALAR_UDF
          ), "Can only extract scalar vectorized udf or sql batch udf")

          // NOTE(review): result attributes are indexed over `udfs` (all collected UDFs)
          // while only `validUdfs` are zipped into attributeMap below — verify this is
          // intentional when `validUdfs` is a strict subset of `udfs`.
          val resultAttrs = udfs.zipWithIndex.map { case (u, i) =>
            AttributeReference(s"pythonUDF$i", u.dataType)()
          }

          val evaluation = validUdfs.partition(
            _.evalType == PythonEvalType.SQL_PANDAS_SCALAR_UDF
          ) match {
            case (vectorizedUdfs, plainUdfs) if plainUdfs.isEmpty =>
              ArrowEvalPythonExec(vectorizedUdfs, child.output ++ resultAttrs, child)
            case (vectorizedUdfs, plainUdfs) if vectorizedUdfs.isEmpty =>
              BatchEvalPythonExec(plainUdfs, child.output ++ resultAttrs, child)
            case _ =>
              throw new IllegalArgumentException("Can not mix vectorized and non-vectorized UDFs")
          }

          attributeMap ++= validUdfs.zip(resultAttrs)
          evaluation
        } else {
          child
        }
      }
      // Other cases are disallowed as they are ambiguous or would require a cartesian
      // product.
      udfs.filterNot(attributeMap.contains).foreach { udf =>
        sys.error(s"Invalid PythonUDF $udf, requires attributes from more than one child.")
      }

      val rewritten = splitFilter.withNewChildren(newChildren).transformExpressions {
        case p: PythonUDF if attributeMap.contains(p) =>
          attributeMap(p)
      }

      // extract remaining python UDFs recursively
      val newPlan = extract(rewritten)
      if (newPlan.output != plan.output) {
        // Trim away the new UDF value if it was only used for filtering or something.
        ProjectExec(plan.output, newPlan)
      } else {
        newPlan
      }
    }
  }

  // Split the original FilterExec to two FilterExecs. Only push down the first few predicates
  // that are all deterministic.
  private def trySplitFilter(plan: SparkPlan): SparkPlan = {
    plan match {
      case filter: FilterExec =>
        val (candidates, containingNonDeterministic) =
          splitConjunctivePredicates(filter.condition).span(_.deterministic)
        val (pushDown, rest) = candidates.partition(!hasPythonUDF(_))
        if (pushDown.nonEmpty) {
          val newChild = FilterExec(pushDown.reduceLeft(And), filter.child)
          FilterExec((rest ++ containingNonDeterministic).reduceLeft(And), newChild)
        } else {
          filter
        }
      case o => o
    }
  }
}
ron8hu/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/python/ExtractPythonUDFs.scala
Scala
apache-2.0
8,475
package org.qirx.littlespec.sbt

import scala.concurrent.duration.Duration
import scala.concurrent.duration.FiniteDuration
import org.qirx.littlespec.fragments.Code
import org.qirx.littlespec.fragments.CompoundResult
import org.qirx.littlespec.fragments.Failure
import org.qirx.littlespec.fragments.Pending
import org.qirx.littlespec.fragments.Result
import org.qirx.littlespec.fragments.Success
import org.qirx.littlespec.fragments.Text
import org.qirx.littlespec.fragments.Title
import org.qirx.littlespec.fragments.UnexpectedFailure
import sbt.testing.Event
import sbt.testing.EventHandler
import sbt.testing.Logger
import sbt.testing.OptionalThrowable
import sbt.testing.Status
import sbt.testing.TaskDef

/**
 * Bridge between little-spec results and sbt's test interface: implementations
 * fire sbt [[sbt.testing.Event]]s and write human-readable output to the
 * supplied loggers.
 */
trait SbtReporter {
  def report(taskDef: TaskDef, eventHandler: EventHandler, loggers: Seq[Logger], results: Seq[Result]): Unit
}

/**
 * Default reporter: walks the (possibly nested) result tree, emitting one sbt
 * event per leaf result and logging an indented, color-coded line per result.
 *
 * NOTE(review): the `args` constructor parameter is accepted but never read in
 * this class — presumably reserved for runner options; confirm with callers.
 */
class DefaultSbtReporter(args: Array[String]) extends SbtReporter {

  def report(taskDef: TaskDef, eventHandler: EventHandler, loggers: Seq[Logger], results: Seq[Result]): Unit = {

    // Partially applied helpers: `event` fires an sbt event for this taskDef,
    // `logLevel` logs a message at a given indentation level.
    val event = eventFor(taskDef, eventHandler) _
    val logLevel = logStringFor(loggers) _

    // Recursive walk over the result tree; `level` drives indentation.
    def report(results: Seq[Result], level: Int): Unit = {
      // `log` adds extra alignment space (success lines carry an indicator),
      // `logError` does not — see the `extraSpace` flag in logStringFor.
      val log = logLevel(level, true)
      val logError = logLevel(level, false)

      results.foreach {
        case CompoundResult(Title(title), results) =>
          // Container node: log its title, then recurse one level deeper.
          // Top-level compounds get no indicator; nested ones get "-".
          val indicator = if (level == 0) noIndicator else compoundIndicator
          log(_.info, title, indicator)
          report(results, level + 1)
        case s @ Success(Title(title)) =>
          event(Status.Success, s.duration)
          log(_.info, title, successIndicator)
        case UnexpectedFailure(Title(title), throwable) =>
          // Unexpected (thrown) failure: log the title, a filtered stack
          // trace two levels deeper, and hand the throwable to sbt's tracer.
          val logExceptionLine = logLevel(level + 2, false)(_.error, _: String, noIndicator)
          event(Status.Error, Duration.Zero)
          logError(_.error, title, failureIndicator)
          logException(throwable, logExceptionLine)
          logFor(loggers)(_.trace, throwable)
        case Failure(Text(title), message, failure) =>
          // Assertion failure on a textual fragment: title plus source location.
          val location = getLocationOf(failure)
          event(Status.Failure, Duration.Zero)
          logError(_.error, title + location, failureIndicator)
          logLevel(level + 2, false)(_.error, message, noIndicator)
        case Failure(Code(example), message, failure) =>
          // Assertion failure inside an example block: also echo the example code.
          val location = getLocationOf(failure)
          event(Status.Failure, Duration.Zero)
          logError(_.error, "Example failed" + location, failureIndicator)
          logLevel(level + 1, false)(_.error, example, noIndicator)
          logLevel(level + 2, false)(_.error, message, noIndicator)
        case Pending(Title(title), message) =>
          event(Status.Pending, Duration.Zero)
          val coloredMessage = warningColor + message + resetColor
          log(_.warn, title + " - " + coloredMessage, pendingIndicator)
      }
    }

    report(results, 0)
    // Visually separate this spec's output from the next one.
    if (results.nonEmpty) logEmptyLine(loggers)
  }

  // ANSI color escape codes; stripped later for loggers without ANSI support.
  private val errorColor = "\u001b[31m"
  private val successColor = "\u001b[32m"
  private val warningColor = "\u001b[33m"
  private val resetColor = "\u001b[0m"

  // Per-result-type line prefix markers.
  private val noIndicator = None
  private val successIndicator = Some(successColor + "+" + resetColor)
  private val pendingIndicator = Some(warningColor + "o" + resetColor)
  private val failureIndicator = Some(errorColor + "X" + resetColor)
  private val compoundIndicator = Some("-")

  // Blank info line used as a separator between specs.
  private def logEmptyLine(loggers: Seq[Logger]) =
    logStringFor(loggers)(level = 0, extraSpace = false)(_.info, "", noIndicator)

  // Fires a single sbt Event carrying the given status and duration for this task.
  private def eventFor(taskDef: TaskDef, eventHandler: EventHandler)(actualStatus: Status, actualDuration: FiniteDuration) =
    eventHandler.handle(
      new Event {
        val duration = actualDuration.toMillis
        val fingerprint = taskDef.fingerprint
        val fullyQualifiedName = taskDef.fullyQualifiedName
        // assumes the TaskDef always carries at least one selector — TODO confirm
        val selector = taskDef.selectors.head
        val status: Status = actualStatus
        val throwable = new OptionalThrowable
      })

  // Renders `message` indented for `level`, prefixes the optional indicator on
  // the first line, keeps continuation lines aligned, then logs it. `extraSpace`
  // adds one alignment space for indicator-less lines.
  private def logStringFor(loggers: Seq[Logger])(level: Int, extraSpace: Boolean)(method: Logger => String => Unit, message: String, indicator: Option[String]) = {
    val (indicatorWithSeparator, indicatorIndentation) = indicator.map(_ + " " -> " ").getOrElse("" -> "")
    val levelIndentation = " " * level
    val compensation = if (extraSpace) " " else ""
    val levelMessage =
      message
        .split("(\r\n|\r|\n)")
        .mkString(
          start = levelIndentation + compensation + indicatorWithSeparator,
          sep = "\n" + levelIndentation + compensation + indicatorIndentation,
          end = "")
    logFor(loggers, stringColorRemover)(method, levelMessage)
  }

  // Sends `message` to every logger, stripping colors when ANSI is unsupported.
  private def logFor[T](loggers: Seq[Logger], colorRemover: T => T = identity[T] _)(method: Logger => T => Unit, message: T) =
    loggers.foreach { logger =>
      val cleanMessage =
        if (logger.ansiCodesSupported) message
        else colorRemover(message)
      method(logger)(cleanMessage)
    }

  // Removes ANSI color escape sequences (ESC [ n m) from a string.
  private def stringColorRemover(message: String) = {
    val colorPattern = raw"\u001b\[\d{1,2}m"
    message.replaceAll(colorPattern, "")
  }

  // Logs the throwable's message plus a de-duplicated, filtered stack trace,
  // recursing into causes.
  def logException(throwable: Throwable, log: String => Unit): Unit = {
    log(throwable.getClass.getSimpleName + ": " + throwable.getMessage)
    filteredStackTrace(throwable)
      .map { s => s"- ${s.getFileName}:${s.getLineNumber} (${classNameOf(s)})" }
      .distinct
      .foreach(log)
    Option(throwable.getCause).foreach { x =>
      log("== Caused by ==")
      logException(x, log)
    }
  }

  // Keeps only frames from user code (or little-spec's own *Spec tests),
  // dropping framework/stdlib frames matched by `pattern`.
  private def filteredStackTrace(throwable: Throwable) =
    throwable.getStackTrace.filter { s =>
      val className = classNameOf(s)
      isLittleSpecTest(className) || !(className matches pattern)
    }

  // " (File.scala:42)" for the first relevant frame, or "" when none survives filtering.
  private def getLocationOf(throwable: Throwable) =
    filteredStackTrace(throwable).headOption.map { s =>
      " (" + s.getFileName + ":" + s.getLineNumber + ")"
    }.getOrElse("")

  private def isLittleSpecTest(className: String) =
    className.startsWith("org.qirx.littlespec.") && className.endsWith("Spec")

  private val ignoredPackages = Seq("org.qirx.littlespec.", "scala.", "java.", "sbt.")

  // Regex matching classes in the ignored packages (excluding synthetic `$` suffixes).
  private val pattern =
    ignoredPackages
      .mkString("^(", "|", ")[^$]*")
      .replaceAll("\\.", "\\\\.")

  // Outer class name without synthetic `$`-separated suffixes.
  private def classNameOf(s: StackTraceElement) =
    s.getClassName.split("\\$").head
}
EECOLOR/little-spec
sbt/src/main/scala/org/qirx/littlespec/sbt/SbtReporter.scala
Scala
mit
6,391
package org.jetbrains.plugins.scala.lang.macros.expansion

import java.awt.event.MouseEvent
import java.util

import com.intellij.codeHighlighting.Pass
import com.intellij.codeInsight.daemon._
import com.intellij.icons.AllIcons
import com.intellij.navigation.GotoRelatedItem
import com.intellij.notification.NotificationGroup
import com.intellij.openapi.compiler.{CompileContext, CompileStatusNotification, CompilerManager}
import com.intellij.openapi.diagnostic.Logger
import com.intellij.openapi.editor.markup.GutterIconRenderer
import com.intellij.openapi.util.{Key, TextRange}
import com.intellij.openapi.wm.ToolWindowId
import com.intellij.psi.codeStyle.CodeStyleManager
import com.intellij.psi.{PsiElement, PsiElementVisitor, PsiManager, PsiWhiteSpace}
import com.intellij.util.Function
import javax.swing.Icon
import org.jetbrains.plugins.scala.ScalaBundle
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.base.ScAnnotation
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTypeDefinition
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.settings.ScalaProjectSettings
import org.jetbrains.plugins.scala.settings.ScalaProjectSettings.ScalaMetaMode
import org.jetbrains.plugins.scala.util.ScalaNotificationGroups.toolWindowGroup

import scala.collection.mutable.ArrayBuffer

/**
 * Base line-marker provider for scala.meta macro expansion: contributes gutter
 * icons that expand a macro annotation in place, undo a previous expansion, or
 * trigger recompilation when expansion data is not yet available.
 */
abstract class MacroExpansionLineMarkerProvider extends RelatedItemLineMarkerProvider {

  type Marker = RelatedItemLineMarkerInfo[_]
  type Markers = util.Collection[_ >: Marker]

  // Saved pre-expansion text of the annotated definition (and its companion, if
  // one was generated), used to restore the original source on undo.
  case class UndoExpansionData(original: String, companion: Option[String] = None)

  // Copyable user-data key under which UndoExpansionData is stashed on the PSI element.
  protected val EXPANDED_KEY = new Key[UndoExpansionData]("MACRO_EXPANDED_KEY")

  protected lazy val messageGroup: NotificationGroup = toolWindowGroup

  override def collectNavigationMarkers(element: PsiElement, result: Markers): Unit = {
    // Feature is gated by the project-level scala.meta setting.
    if (ScalaProjectSettings.getInstance(element.getProject).getScalaMetaMode == ScalaMetaMode.Disabled) return
    // Only anchor markers on identifier tokens.
    if (element.getNode == null || element.getNode.getElementType != ScalaTokenTypes.tIDENTIFIER) return
    getExpandMarker(element).foreach(result.add)
    getUndoMarker(element).foreach(result.add)
  }

  // Subclasses decide which elements get expand/undo markers.
  protected def getExpandMarker(element: PsiElement): Option[Marker]
  protected def getUndoMarker(element: PsiElement): Option[Marker]

  /**
   * Marker shown when the macro annotation's module has not been compiled yet:
   * clicking it triggers a module make and restarts highlighting when done.
   */
  protected def createNotCompiledLineMarker(element: PsiElement, annot: ScAnnotation): Marker = {
    import org.jetbrains.plugins.scala.project._
    // Smart pointer survives PSI changes caused by the compilation.
    val eltPointer = element.createSmartPointer
    val module = annot.constructorInvocation.reference.get.resolve().module
    if (module.isEmpty)
      Logger.getInstance(getClass).error(s"No bound module for annotation ${annot.getText}")
    createMarker(element, AllIcons.General.ContextHelp, ScalaBundle.message("scala.meta.recompile")) { elt =>
      CompilerManager.getInstance(elt.getProject).make(module.get,
        new CompileStatusNotification {
          override def finished(aborted: Boolean, errors: Int, warnings: Int, compileContext: CompileContext): Unit = {
            // Re-run the daemon so the expand marker can now appear.
            if (!compileContext.getProject.isDisposed)
              DaemonCodeAnalyzer.getInstance(eltPointer.getElement.getProject).restart(eltPointer.getElement.getContainingFile)
          }
        }
      )
    }
  }

  /**
   * Builds a gutter marker at the element's start offset with the given icon and
   * tooltip; `fun` runs on click. No related items are contributed.
   */
  protected def createMarker[T <: PsiElement, R](elem: T, icon: Icon, caption: String)(fun: T => R): Marker = {
    new RelatedItemLineMarkerInfo[PsiElement](elem,
      // Zero-width range at the element start — the marker is icon-only.
      new TextRange(elem.getTextRange.getStartOffset, elem.getTextRange.getStartOffset),
      icon,
      new Function[PsiElement, String] {
        override def fun(param: PsiElement): String = caption
      },
      new GutterIconNavigationHandler[PsiElement] {
        override def navigate(mouseEvent: MouseEvent, elt: PsiElement): Unit = fun(elt.asInstanceOf[T])
      },
      GutterIconRenderer.Alignment.RIGHT,
      () => util.Arrays.asList[GotoRelatedItem]()
    )
  }

  protected def createExpandMarker[T <: PsiElement, R](elem: T)(fun: T => R): Marker = {
    createMarker(elem, AllIcons.Actions.Expandall, ScalaBundle.message("scala.meta.expand"))(fun)
  }

  /**
   * Marker that reverts a previous expansion using the UndoExpansionData stored
   * on the element's parent, restoring the original definition and companion.
   */
  protected def createUndoMarker[T](element: PsiElement): Marker = {
    val parent = element.getParent

    def undoExpansion(original: String, companion: Option[String] = None): Unit = {
      val newPsi = ScalaPsiElementFactory
        .createBlockExpressionWithoutBracesFromText(original.trim)(PsiManager.getInstance(element.getProject))
      (parent, companion) match {
        case (td: ScTypeDefinition, Some(companionText)) =>
          // Restore the saved companion object text.
          val definition = ScalaPsiElementFactory.createTypeDefinitionWithContext(companionText.trim, parent.getContext, null)
          td.baseCompanion.foreach(_.replace(definition))
        case (td: ScTypeDefinition, None) =>
          // No saved companion: remove any generated one.
          td.baseCompanion.foreach(c => c.getParent.getNode.removeChild(c.getNode))
        case _ => None
      }
      parent.replace(newPsi)
    }

    // assumes EXPANDED_KEY data is always present on undo-marked elements;
    // a missing value would throw a MatchError here — TODO confirm
    val UndoExpansionData(original, savedCompanion) = parent.getCopyableUserData(EXPANDED_KEY)
    createMarker(element, AllIcons.Actions.Undo, ScalaBundle.message("undo.macro.expansion")) { _ =>
      inWriteCommandAction(undoExpansion(original, savedCompanion))(element.getProject)
    }
  }

  /**
   * Reformats expanded code and then deletes semicolons that are immediately
   * followed by a line break (they are redundant after reformatting).
   */
  protected def reformatCode(psi: PsiElement): PsiElement = {
    val res = CodeStyleManager.getInstance(psi.getProject).reformat(psi)
    val tobeDeleted = new ArrayBuffer[PsiElement]
    val v = new PsiElementVisitor {
      override def visitElement(element: PsiElement): Unit = {
        if (element.getNode.getElementType == ScalaTokenTypes.tSEMICOLON) {
          val file = element.getContainingFile
          val nextLeaf = file.findElementAt(element.getTextRange.getEndOffset)
          if (nextLeaf.isInstanceOf[PsiWhiteSpace] && nextLeaf.getText.contains("\n")) {
            // Collect first, delete after the walk — deleting during the visit
            // would invalidate the traversal.
            tobeDeleted += element
          }
        }
        element.acceptChildren(this)
      }
    }
    v.visitElement(res)
    tobeDeleted.foreach(_.delete())
    res
  }
}
JetBrains/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/macros/expansion/MacroExpansionLineMarkerProvider.scala
Scala
apache-2.0
6,072
package com.twitter.finagle.example.memcachedproxy

import com.twitter.finagle.memcached.protocol.text.Memcached
import com.twitter.finagle.memcached.protocol.{Command, Response}
import com.twitter.finagle.Service
import com.twitter.finagle.builder.{Server, ClientBuilder, ServerBuilder}
import java.net.{ConnectException, Socket, InetSocketAddress}

/**
 * Run a server on port 8080 that delegates all Memcached requests to a server
 * running on port 11211. This Proxy is protocol aware (it decodes the Memcached
 * protocol from the client and from the backend server); since the Proxy knows
 * message boundaries, it can easily multiplex requests (e.g., for cache
 * replication) or load-balance across replicas.
 */
object MemcachedProxy {

  def main(args: Array[String]): Unit = {
    // Fail fast (with a clear message) if the backend is not reachable.
    assertMemcachedRunning()

    // Protocol-aware client for the backing memcached instance on port 11211.
    val client: Service[Command, Response] =
      ClientBuilder()
        .codec(Memcached())
        .hosts(new InetSocketAddress(11211))
        .hostConnectionLimit(1)
        .build()

    // Pass every decoded Command straight through to the backend and return
    // its Response future unchanged.
    val proxyService = new Service[Command, Response] {
      def apply(request: Command) = client(request)
    }

    // Bind the proxy on port 8080; the server keeps the process alive.
    val server: Server = ServerBuilder()
      .codec(Memcached())
      .bindTo(new InetSocketAddress(8080))
      .name("memcachedproxy")
      .build(proxyService)
  }

  /**
   * Probes localhost:11211 and exits with status 1 when no memcached is
   * listening there.
   *
   * Fix: the probe socket is now closed after a successful connect — the
   * original leaked it (the Socket was opened and never closed).
   */
  private[this] def assertMemcachedRunning(): Unit = {
    try {
      val probe = new Socket("localhost", 11211)
      probe.close()
    } catch {
      case e: ConnectException =>
        println("Error: memcached must be running on port 11211")
        System.exit(1)
    }
  }
}
lysu/finagle
finagle-example/src/main/scala/com/twitter/finagle/example/memcachedproxy/MemcachedProxy.scala
Scala
apache-2.0
1,516
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark import java.util.{Map => JMap} import java.util.concurrent.ConcurrentHashMap import scala.collection.JavaConverters._ import scala.collection.mutable.LinkedHashSet import org.apache.avro.{Schema, SchemaNormalization} import org.apache.spark.internal.Logging import org.apache.spark.internal.config._ import org.apache.spark.internal.config.History._ import org.apache.spark.serializer.KryoSerializer import org.apache.spark.util.Utils /** * Configuration for a Spark application. Used to set various Spark parameters as key-value pairs. * * Most of the time, you would create a SparkConf object with `new SparkConf()`, which will load * values from any `spark.*` Java system properties set in your application as well. In this case, * parameters you set directly on the `SparkConf` object take priority over system properties. * * For unit tests, you can also call `new SparkConf(false)` to skip loading external settings and * get the same configuration no matter what the system properties are. * * All setter methods in this class support chaining. For example, you can write * `new SparkConf().setMaster("local").setAppName("My app")`. 
* * @param loadDefaults whether to also load values from Java system properties * * @note Once a SparkConf object is passed to Spark, it is cloned and can no longer be modified * by the user. Spark does not support modifying the configuration at runtime. */ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging with Serializable { import SparkConf._ /** Create a SparkConf that loads defaults from system properties and the classpath */ def this() = this(true) private val settings = new ConcurrentHashMap[String, String]() @transient private lazy val reader: ConfigReader = { val _reader = new ConfigReader(new SparkConfigProvider(settings)) _reader.bindEnv(new ConfigProvider { override def get(key: String): Option[String] = Option(getenv(key)) }) _reader } if (loadDefaults) { loadFromSystemProperties(false) } private[spark] def loadFromSystemProperties(silent: Boolean): SparkConf = { // Load any spark.* system properties for ((key, value) <- Utils.getSystemProperties if key.startsWith("spark.")) { set(key, value, silent) } this } /** Set a configuration variable. */ def set(key: String, value: String): SparkConf = { set(key, value, false) } private[spark] def set(key: String, value: String, silent: Boolean): SparkConf = { if (key == null) { throw new NullPointerException("null key") } if (value == null) { throw new NullPointerException("null value for " + key) } if (!silent) { logDeprecationWarning(key) } settings.put(key, value) this } private[spark] def set[T](entry: ConfigEntry[T], value: T): SparkConf = { set(entry.key, entry.stringConverter(value)) this } private[spark] def set[T](entry: OptionalConfigEntry[T], value: T): SparkConf = { set(entry.key, entry.rawStringConverter(value)) this } /** * The master URL to connect to, such as "local" to run locally with one thread, "local[4]" to * run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone cluster. 
*/ def setMaster(master: String): SparkConf = { set("spark.master", master) } /** Set a name for your application. Shown in the Spark web UI. */ def setAppName(name: String): SparkConf = { set("spark.app.name", name) } /** Set JAR files to distribute to the cluster. */ def setJars(jars: Seq[String]): SparkConf = { for (jar <- jars if (jar == null)) logWarning("null jar passed to SparkContext constructor") set("spark.jars", jars.filter(_ != null).mkString(",")) } /** Set JAR files to distribute to the cluster. (Java-friendly version.) */ def setJars(jars: Array[String]): SparkConf = { setJars(jars.toSeq) } /** * Set an environment variable to be used when launching executors for this application. * These variables are stored as properties of the form spark.executorEnv.VAR_NAME * (for example spark.executorEnv.PATH) but this method makes them easier to set. */ def setExecutorEnv(variable: String, value: String): SparkConf = { set("spark.executorEnv." + variable, value) } /** * Set multiple environment variables to be used when launching executors. * These variables are stored as properties of the form spark.executorEnv.VAR_NAME * (for example spark.executorEnv.PATH) but this method makes them easier to set. */ def setExecutorEnv(variables: Seq[(String, String)]): SparkConf = { for ((k, v) <- variables) { setExecutorEnv(k, v) } this } /** * Set multiple environment variables to be used when launching executors. * (Java-friendly version.) */ def setExecutorEnv(variables: Array[(String, String)]): SparkConf = { setExecutorEnv(variables.toSeq) } /** * Set the location where Spark is installed on worker nodes. 
*/ def setSparkHome(home: String): SparkConf = { set("spark.home", home) } /** Set multiple parameters together */ def setAll(settings: Traversable[(String, String)]): SparkConf = { settings.foreach { case (k, v) => set(k, v) } this } /** Set a parameter if it isn't already configured */ def setIfMissing(key: String, value: String): SparkConf = { if (settings.putIfAbsent(key, value) == null) { logDeprecationWarning(key) } this } private[spark] def setIfMissing[T](entry: ConfigEntry[T], value: T): SparkConf = { if (settings.putIfAbsent(entry.key, entry.stringConverter(value)) == null) { logDeprecationWarning(entry.key) } this } private[spark] def setIfMissing[T](entry: OptionalConfigEntry[T], value: T): SparkConf = { if (settings.putIfAbsent(entry.key, entry.rawStringConverter(value)) == null) { logDeprecationWarning(entry.key) } this } /** * Use Kryo serialization and register the given set of classes with Kryo. * If called multiple times, this will append the classes from all calls together. */ def registerKryoClasses(classes: Array[Class[_]]): SparkConf = { val allClassNames = new LinkedHashSet[String]() allClassNames ++= get("spark.kryo.classesToRegister", "").split(',').map(_.trim) .filter(!_.isEmpty) allClassNames ++= classes.map(_.getName) set("spark.kryo.classesToRegister", allClassNames.mkString(",")) set("spark.serializer", classOf[KryoSerializer].getName) this } private final val avroNamespace = "avro.schema." 
/** * Use Kryo serialization and register the given set of Avro schemas so that the generic * record serializer can decrease network IO */ def registerAvroSchemas(schemas: Schema*): SparkConf = { for (schema <- schemas) { set(avroNamespace + SchemaNormalization.parsingFingerprint64(schema), schema.toString) } this } /** Gets all the avro schemas in the configuration used in the generic Avro record serializer */ def getAvroSchema: Map[Long, String] = { getAll.filter { case (k, v) => k.startsWith(avroNamespace) } .map { case (k, v) => (k.substring(avroNamespace.length).toLong, v) } .toMap } /** Remove a parameter from the configuration */ def remove(key: String): SparkConf = { settings.remove(key) this } private[spark] def remove(entry: ConfigEntry[_]): SparkConf = { remove(entry.key) } /** Get a parameter; throws a NoSuchElementException if it's not set */ def get(key: String): String = { getOption(key).getOrElse(throw new NoSuchElementException(key)) } /** Get a parameter, falling back to a default if not set */ def get(key: String, defaultValue: String): String = { getOption(key).getOrElse(defaultValue) } /** * Retrieves the value of a pre-defined configuration entry. * * - This is an internal Spark API. * - The return type if defined by the configuration entry. * - This will throw an exception is the config is not optional and the value is not set. */ private[spark] def get[T](entry: ConfigEntry[T]): T = { entry.readFrom(reader) } /** * Get a time parameter as seconds; throws a NoSuchElementException if it's not set. If no * suffix is provided then seconds are assumed. * @throws java.util.NoSuchElementException If the time parameter is not set * @throws NumberFormatException If the value cannot be interpreted as seconds */ def getTimeAsSeconds(key: String): Long = catchIllegalValue(key) { Utils.timeStringAsSeconds(get(key)) } /** * Get a time parameter as seconds, falling back to a default if not set. If no * suffix is provided then seconds are assumed. 
* @throws NumberFormatException If the value cannot be interpreted as seconds */ def getTimeAsSeconds(key: String, defaultValue: String): Long = catchIllegalValue(key) { Utils.timeStringAsSeconds(get(key, defaultValue)) } /** * Get a time parameter as milliseconds; throws a NoSuchElementException if it's not set. If no * suffix is provided then milliseconds are assumed. * @throws java.util.NoSuchElementException If the time parameter is not set * @throws NumberFormatException If the value cannot be interpreted as milliseconds */ def getTimeAsMs(key: String): Long = catchIllegalValue(key) { Utils.timeStringAsMs(get(key)) } /** * Get a time parameter as milliseconds, falling back to a default if not set. If no * suffix is provided then milliseconds are assumed. * @throws NumberFormatException If the value cannot be interpreted as milliseconds */ def getTimeAsMs(key: String, defaultValue: String): Long = catchIllegalValue(key) { Utils.timeStringAsMs(get(key, defaultValue)) } /** * Get a size parameter as bytes; throws a NoSuchElementException if it's not set. If no * suffix is provided then bytes are assumed. * @throws java.util.NoSuchElementException If the size parameter is not set * @throws NumberFormatException If the value cannot be interpreted as bytes */ def getSizeAsBytes(key: String): Long = catchIllegalValue(key) { Utils.byteStringAsBytes(get(key)) } /** * Get a size parameter as bytes, falling back to a default if not set. If no * suffix is provided then bytes are assumed. * @throws NumberFormatException If the value cannot be interpreted as bytes */ def getSizeAsBytes(key: String, defaultValue: String): Long = catchIllegalValue(key) { Utils.byteStringAsBytes(get(key, defaultValue)) } /** * Get a size parameter as bytes, falling back to a default if not set. 
* @throws NumberFormatException If the value cannot be interpreted as bytes */ def getSizeAsBytes(key: String, defaultValue: Long): Long = catchIllegalValue(key) { Utils.byteStringAsBytes(get(key, defaultValue + "B")) } /** * Get a size parameter as Kibibytes; throws a NoSuchElementException if it's not set. If no * suffix is provided then Kibibytes are assumed. * @throws java.util.NoSuchElementException If the size parameter is not set * @throws NumberFormatException If the value cannot be interpreted as Kibibytes */ def getSizeAsKb(key: String): Long = catchIllegalValue(key) { Utils.byteStringAsKb(get(key)) } /** * Get a size parameter as Kibibytes, falling back to a default if not set. If no * suffix is provided then Kibibytes are assumed. * @throws NumberFormatException If the value cannot be interpreted as Kibibytes */ def getSizeAsKb(key: String, defaultValue: String): Long = catchIllegalValue(key) { Utils.byteStringAsKb(get(key, defaultValue)) } /** * Get a size parameter as Mebibytes; throws a NoSuchElementException if it's not set. If no * suffix is provided then Mebibytes are assumed. * @throws java.util.NoSuchElementException If the size parameter is not set * @throws NumberFormatException If the value cannot be interpreted as Mebibytes */ def getSizeAsMb(key: String): Long = catchIllegalValue(key) { Utils.byteStringAsMb(get(key)) } /** * Get a size parameter as Mebibytes, falling back to a default if not set. If no * suffix is provided then Mebibytes are assumed. * @throws NumberFormatException If the value cannot be interpreted as Mebibytes */ def getSizeAsMb(key: String, defaultValue: String): Long = catchIllegalValue(key) { Utils.byteStringAsMb(get(key, defaultValue)) } /** * Get a size parameter as Gibibytes; throws a NoSuchElementException if it's not set. If no * suffix is provided then Gibibytes are assumed. 
* @throws java.util.NoSuchElementException If the size parameter is not set * @throws NumberFormatException If the value cannot be interpreted as Gibibytes */ def getSizeAsGb(key: String): Long = catchIllegalValue(key) { Utils.byteStringAsGb(get(key)) } /** * Get a size parameter as Gibibytes, falling back to a default if not set. If no * suffix is provided then Gibibytes are assumed. * @throws NumberFormatException If the value cannot be interpreted as Gibibytes */ def getSizeAsGb(key: String, defaultValue: String): Long = catchIllegalValue(key) { Utils.byteStringAsGb(get(key, defaultValue)) } /** Get a parameter as an Option */ def getOption(key: String): Option[String] = { Option(settings.get(key)).orElse(getDeprecatedConfig(key, settings)) } /** Get an optional value, applying variable substitution. */ private[spark] def getWithSubstitution(key: String): Option[String] = { getOption(key).map(reader.substitute(_)) } /** Get all parameters as a list of pairs */ def getAll: Array[(String, String)] = { settings.entrySet().asScala.map(x => (x.getKey, x.getValue)).toArray } /** * Get all parameters that start with `prefix` */ def getAllWithPrefix(prefix: String): Array[(String, String)] = { getAll.filter { case (k, v) => k.startsWith(prefix) } .map { case (k, v) => (k.substring(prefix.length), v) } } /** * Get a parameter as an integer, falling back to a default if not set * @throws NumberFormatException If the value cannot be interpreted as an integer */ def getInt(key: String, defaultValue: Int): Int = catchIllegalValue(key) { getOption(key).map(_.toInt).getOrElse(defaultValue) } /** * Get a parameter as a long, falling back to a default if not set * @throws NumberFormatException If the value cannot be interpreted as a long */ def getLong(key: String, defaultValue: Long): Long = catchIllegalValue(key) { getOption(key).map(_.toLong).getOrElse(defaultValue) } /** * Get a parameter as a double, falling back to a default if not ste * @throws NumberFormatException If 
the value cannot be interpreted as a double */ def getDouble(key: String, defaultValue: Double): Double = catchIllegalValue(key) { getOption(key).map(_.toDouble).getOrElse(defaultValue) } /** * Get a parameter as a boolean, falling back to a default if not set * @throws IllegalArgumentException If the value cannot be interpreted as a boolean */ def getBoolean(key: String, defaultValue: Boolean): Boolean = catchIllegalValue(key) { getOption(key).map(_.toBoolean).getOrElse(defaultValue) } /** Get all executor environment variables set on this SparkConf */ def getExecutorEnv: Seq[(String, String)] = { getAllWithPrefix("spark.executorEnv.") } /** * Returns the Spark application id, valid in the Driver after TaskScheduler registration and * from the start in the Executor. */ def getAppId: String = get("spark.app.id") /** Does the configuration contain a given parameter? */ def contains(key: String): Boolean = { settings.containsKey(key) || configsWithAlternatives.get(key).toSeq.flatten.exists { alt => contains(alt.key) } } private[spark] def contains(entry: ConfigEntry[_]): Boolean = contains(entry.key) /** Copy this object */ override def clone: SparkConf = { val cloned = new SparkConf(false) settings.entrySet().asScala.foreach { e => cloned.set(e.getKey(), e.getValue(), true) } cloned } /** * By using this instead of System.getenv(), environment variables can be mocked * in unit tests. */ private[spark] def getenv(name: String): String = System.getenv(name) /** * Wrapper method for get() methods which require some specific value format. This catches * any [[NumberFormatException]] or [[IllegalArgumentException]] and re-raises it with the * incorrectly configured key in the exception message. */ private def catchIllegalValue[T](key: String)(getValue: => T): T = { try { getValue } catch { case e: NumberFormatException => // NumberFormatException doesn't have a constructor that takes a cause for some reason. 
throw new NumberFormatException(s"Illegal value for config key $key: ${e.getMessage}") .initCause(e) case e: IllegalArgumentException => throw new IllegalArgumentException(s"Illegal value for config key $key: ${e.getMessage}", e) } } /** * Checks for illegal or deprecated config settings. Throws an exception for the former. Not * idempotent - may mutate this conf object to convert deprecated settings to supported ones. */ private[spark] def validateSettings() { if (contains("spark.local.dir")) { val msg = "Note that spark.local.dir will be overridden by the value set by " + "the cluster manager (via SPARK_LOCAL_DIRS in mesos/standalone/kubernetes and LOCAL_DIRS" + " in YARN)." logWarning(msg) } val executorOptsKey = "spark.executor.extraJavaOptions" val executorClasspathKey = "spark.executor.extraClassPath" val driverOptsKey = "spark.driver.extraJavaOptions" val driverClassPathKey = "spark.driver.extraClassPath" val driverLibraryPathKey = "spark.driver.extraLibraryPath" val sparkExecutorInstances = "spark.executor.instances" // Used by Yarn in 1.1 and before sys.props.get("spark.driver.libraryPath").foreach { value => val warning = s""" |spark.driver.libraryPath was detected (set to '$value'). |This is deprecated in Spark 1.2+. | |Please instead use: $driverLibraryPathKey """.stripMargin logWarning(warning) } // Validate spark.executor.extraJavaOptions getOption(executorOptsKey).foreach { javaOpts => if (javaOpts.contains("-Dspark")) { val msg = s"$executorOptsKey is not allowed to set Spark options (was '$javaOpts'). " + "Set them directly on a SparkConf or in a properties file when using ./bin/spark-submit." throw new Exception(msg) } if (javaOpts.contains("-Xmx")) { val msg = s"$executorOptsKey is not allowed to specify max heap memory settings " + s"(was '$javaOpts'). Use spark.executor.memory instead." 
throw new Exception(msg) } } // Validate memory fractions val deprecatedMemoryKeys = Seq( "spark.storage.memoryFraction", "spark.shuffle.memoryFraction", "spark.shuffle.safetyFraction", "spark.storage.unrollFraction", "spark.storage.safetyFraction") val memoryKeys = Seq( "spark.memory.fraction", "spark.memory.storageFraction") ++ deprecatedMemoryKeys for (key <- memoryKeys) { val value = getDouble(key, 0.5) if (value > 1 || value < 0) { throw new IllegalArgumentException(s"$key should be between 0 and 1 (was '$value').") } } // Warn against deprecated memory fractions (unless legacy memory management mode is enabled) val legacyMemoryManagementKey = "spark.memory.useLegacyMode" val legacyMemoryManagement = getBoolean(legacyMemoryManagementKey, false) if (!legacyMemoryManagement) { val keyset = deprecatedMemoryKeys.toSet val detected = settings.keys().asScala.filter(keyset.contains) if (detected.nonEmpty) { logWarning("Detected deprecated memory fraction settings: " + detected.mkString("[", ", ", "]") + ". As of Spark 1.6, execution and storage " + "memory management are unified. All memory fractions used in the old model are " + "now deprecated and no longer read. If you wish to use the old memory management, " + s"you may explicitly enable `$legacyMemoryManagementKey` (not recommended).") } } if (contains("spark.master") && get("spark.master").startsWith("yarn-")) { val warning = s"spark.master ${get("spark.master")} is deprecated in Spark 2.0+, please " + "instead use \\"yarn\\" with specified deploy mode." get("spark.master") match { case "yarn-cluster" => logWarning(warning) set("spark.master", "yarn") set("spark.submit.deployMode", "cluster") case "yarn-client" => logWarning(warning) set("spark.master", "yarn") set("spark.submit.deployMode", "client") case _ => // Any other unexpected master will be checked when creating scheduler backend. 
} } if (contains("spark.submit.deployMode")) { get("spark.submit.deployMode") match { case "cluster" | "client" => case e => throw new SparkException("spark.submit.deployMode can only be \\"cluster\\" or " + "\\"client\\".") } } if (contains("spark.cores.max") && contains("spark.executor.cores")) { val totalCores = getInt("spark.cores.max", 1) val executorCores = getInt("spark.executor.cores", 1) val leftCores = totalCores % executorCores if (leftCores != 0) { logWarning(s"Total executor cores: ${totalCores} is not " + s"divisible by cores per executor: ${executorCores}, " + s"the left cores: ${leftCores} will not be allocated") } } if (contains("spark.executor.cores") && contains("spark.task.cpus")) { val executorCores = getInt("spark.executor.cores", 1) val taskCpus = getInt("spark.task.cpus", 1) if (executorCores < taskCpus) { throw new SparkException("spark.executor.cores must not be less than spark.task.cpus.") } } val encryptionEnabled = get(NETWORK_ENCRYPTION_ENABLED) || get(SASL_ENCRYPTION_ENABLED) require(!encryptionEnabled || get(NETWORK_AUTH_ENABLED), s"${NETWORK_AUTH_ENABLED.key} must be enabled when enabling encryption.") val executorTimeoutThresholdMs = getTimeAsSeconds("spark.network.timeout", "120s") * 1000 val executorHeartbeatIntervalMs = get(EXECUTOR_HEARTBEAT_INTERVAL) // If spark.executor.heartbeatInterval bigger than spark.network.timeout, // it will almost always cause ExecutorLostFailure. See SPARK-22754. require(executorTimeoutThresholdMs > executorHeartbeatIntervalMs, "The value of " + s"spark.network.timeout=${executorTimeoutThresholdMs}ms must be no less than the value of " + s"spark.executor.heartbeatInterval=${executorHeartbeatIntervalMs}ms.") } /** * Return a string listing all keys and values, one per line. This is useful to print the * configuration out for debugging. 
*/ def toDebugString: String = { getAll.sorted.map{case (k, v) => k + "=" + v}.mkString("\\n") } } private[spark] object SparkConf extends Logging { /** * Maps deprecated config keys to information about the deprecation. * * The extra information is logged as a warning when the config is present in the user's * configuration. */ private val deprecatedConfigs: Map[String, DeprecatedConfig] = { val configs = Seq( DeprecatedConfig("spark.cache.class", "0.8", "The spark.cache.class property is no longer being used! Specify storage levels using " + "the RDD.persist() method instead."), DeprecatedConfig("spark.yarn.user.classpath.first", "1.3", "Please use spark.{driver,executor}.userClassPathFirst instead."), DeprecatedConfig("spark.kryoserializer.buffer.mb", "1.4", "Please use spark.kryoserializer.buffer instead. The default value for " + "spark.kryoserializer.buffer.mb was previously specified as '0.064'. Fractional values " + "are no longer accepted. To specify the equivalent now, one may use '64k'."), DeprecatedConfig("spark.rpc", "2.0", "Not used anymore."), DeprecatedConfig("spark.scheduler.executorTaskBlacklistTime", "2.1.0", "Please use the new blacklisting options, spark.blacklist.*"), DeprecatedConfig("spark.yarn.am.port", "2.0.0", "Not used anymore"), DeprecatedConfig("spark.executor.port", "2.0.0", "Not used anymore"), DeprecatedConfig("spark.shuffle.service.index.cache.entries", "2.3.0", "Not used anymore. Please use spark.shuffle.service.index.cache.size"), DeprecatedConfig("spark.yarn.credentials.file.retention.count", "2.4.0", "Not used anymore."), DeprecatedConfig("spark.yarn.credentials.file.retention.days", "2.4.0", "Not used anymore.") ) Map(configs.map { cfg => (cfg.key -> cfg) } : _*) } /** * Maps a current config key to alternate keys that were used in previous version of Spark. * * The alternates are used in the order defined in this map. If deprecated configs are * present in the user's configuration, a warning is logged. 
* * TODO: consolidate it with `ConfigBuilder.withAlternative`. */ private val configsWithAlternatives = Map[String, Seq[AlternateConfig]]( "spark.executor.userClassPathFirst" -> Seq( AlternateConfig("spark.files.userClassPathFirst", "1.3")), "spark.history.fs.update.interval" -> Seq( AlternateConfig("spark.history.fs.update.interval.seconds", "1.4"), AlternateConfig("spark.history.fs.updateInterval", "1.3"), AlternateConfig("spark.history.updateInterval", "1.3")), "spark.history.fs.cleaner.interval" -> Seq( AlternateConfig("spark.history.fs.cleaner.interval.seconds", "1.4")), MAX_LOG_AGE_S.key -> Seq( AlternateConfig("spark.history.fs.cleaner.maxAge.seconds", "1.4")), "spark.yarn.am.waitTime" -> Seq( AlternateConfig("spark.yarn.applicationMaster.waitTries", "1.3", // Translate old value to a duration, with 10s wait time per try. translation = s => s"${s.toLong * 10}s")), "spark.reducer.maxSizeInFlight" -> Seq( AlternateConfig("spark.reducer.maxMbInFlight", "1.4")), "spark.kryoserializer.buffer" -> Seq( AlternateConfig("spark.kryoserializer.buffer.mb", "1.4", translation = s => s"${(s.toDouble * 1000).toInt}k")), "spark.kryoserializer.buffer.max" -> Seq( AlternateConfig("spark.kryoserializer.buffer.max.mb", "1.4")), "spark.shuffle.file.buffer" -> Seq( AlternateConfig("spark.shuffle.file.buffer.kb", "1.4")), "spark.executor.logs.rolling.maxSize" -> Seq( AlternateConfig("spark.executor.logs.rolling.size.maxBytes", "1.4")), "spark.io.compression.snappy.blockSize" -> Seq( AlternateConfig("spark.io.compression.snappy.block.size", "1.4")), "spark.io.compression.lz4.blockSize" -> Seq( AlternateConfig("spark.io.compression.lz4.block.size", "1.4")), "spark.rpc.numRetries" -> Seq( AlternateConfig("spark.akka.num.retries", "1.4")), "spark.rpc.retry.wait" -> Seq( AlternateConfig("spark.akka.retry.wait", "1.4")), "spark.rpc.askTimeout" -> Seq( AlternateConfig("spark.akka.askTimeout", "1.4")), "spark.rpc.lookupTimeout" -> Seq( AlternateConfig("spark.akka.lookupTimeout", "1.4")), 
"spark.streaming.fileStream.minRememberDuration" -> Seq( AlternateConfig("spark.streaming.minRememberDuration", "1.5")), "spark.yarn.max.executor.failures" -> Seq( AlternateConfig("spark.yarn.max.worker.failures", "1.5")), MEMORY_OFFHEAP_ENABLED.key -> Seq( AlternateConfig("spark.unsafe.offHeap", "1.6")), "spark.rpc.message.maxSize" -> Seq( AlternateConfig("spark.akka.frameSize", "1.6")), "spark.yarn.jars" -> Seq( AlternateConfig("spark.yarn.jar", "2.0")), "spark.yarn.access.hadoopFileSystems" -> Seq( AlternateConfig("spark.yarn.access.namenodes", "2.2")), MAX_REMOTE_BLOCK_SIZE_FETCH_TO_MEM.key -> Seq( AlternateConfig("spark.reducer.maxReqSizeShuffleToMem", "2.3")), LISTENER_BUS_EVENT_QUEUE_CAPACITY.key -> Seq( AlternateConfig("spark.scheduler.listenerbus.eventqueue.size", "2.3")), DRIVER_MEMORY_OVERHEAD.key -> Seq( AlternateConfig("spark.yarn.driver.memoryOverhead", "2.3")), EXECUTOR_MEMORY_OVERHEAD.key -> Seq( AlternateConfig("spark.yarn.executor.memoryOverhead", "2.3")), KEYTAB.key -> Seq( AlternateConfig("spark.yarn.keytab", "3.0")), PRINCIPAL.key -> Seq( AlternateConfig("spark.yarn.principal", "3.0")), KERBEROS_RELOGIN_PERIOD.key -> Seq( AlternateConfig("spark.yarn.kerberos.relogin.period", "3.0")) ) /** * A view of `configsWithAlternatives` that makes it more efficient to look up deprecated * config keys. * * Maps the deprecated config name to a 2-tuple (new config name, alternate config info). */ private val allAlternatives: Map[String, (String, AlternateConfig)] = { configsWithAlternatives.keys.flatMap { key => configsWithAlternatives(key).map { cfg => (cfg.key -> (key -> cfg)) } }.toMap } /** * Return whether the given config should be passed to an executor on start-up. * * Certain authentication configs are required from the executor when it connects to * the scheduler, while the rest of the spark configs can be inherited from the driver later. 
*/ def isExecutorStartupConf(name: String): Boolean = { (name.startsWith("spark.auth") && name != SecurityManager.SPARK_AUTH_SECRET_CONF) || name.startsWith("spark.ssl") || name.startsWith("spark.rpc") || name.startsWith("spark.network") || isSparkPortConf(name) } /** * Return true if the given config matches either `spark.*.port` or `spark.port.*`. */ def isSparkPortConf(name: String): Boolean = { (name.startsWith("spark.") && name.endsWith(".port")) || name.startsWith("spark.port.") } /** * Looks for available deprecated keys for the given config option, and return the first * value available. */ def getDeprecatedConfig(key: String, conf: JMap[String, String]): Option[String] = { configsWithAlternatives.get(key).flatMap { alts => alts.collectFirst { case alt if conf.containsKey(alt.key) => val value = conf.get(alt.key) if (alt.translation != null) alt.translation(value) else value } } } /** * Logs a warning message if the given config key is deprecated. */ def logDeprecationWarning(key: String): Unit = { deprecatedConfigs.get(key).foreach { cfg => logWarning( s"The configuration key '$key' has been deprecated as of Spark ${cfg.version} and " + s"may be removed in the future. ${cfg.deprecationMessage}") return } allAlternatives.get(key).foreach { case (newKey, cfg) => logWarning( s"The configuration key '$key' has been deprecated as of Spark ${cfg.version} and " + s"may be removed in the future. Please use the new key '$newKey' instead.") return } if (key.startsWith("spark.akka") || key.startsWith("spark.ssl.akka")) { logWarning( s"The configuration key $key is not supported anymore " + s"because Spark doesn't use Akka since 2.0") } } /** * Holds information about keys that have been deprecated and do not have a replacement. * * @param key The deprecated key. * @param version Version of Spark where key was deprecated. * @param deprecationMessage Message to include in the deprecation warning. 
*/ private case class DeprecatedConfig( key: String, version: String, deprecationMessage: String) /** * Information about an alternate configuration key that has been deprecated. * * @param key The deprecated config key. * @param version The Spark version in which the key was deprecated. * @param translation A translation function for converting old config values into new ones. */ private case class AlternateConfig( key: String, version: String, translation: String => String = null) }
mdespriee/spark
core/src/main/scala/org/apache/spark/SparkConf.scala
Scala
apache-2.0
33,249
package com.github.hobbitvt.election

import java.util.concurrent.atomic.AtomicBoolean

import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration.{ Duration, _ }
import scala.concurrent.{ ExecutionContext, Future, Promise }
import scala.util.{ Failure, Success }

/**
 * Promoter starts leader election cycle.
 * When a leadership changes, you'll receive a notification about it via [[event]].
 *
 * @param electionDealer Dealer, provide you a leader election staff
 * @param instanceId Host which you want to promote as a leader
 * @param failRecoveryInterval How long you recover yourself from some errors
 */
class ElectionPromoter(
    val electionDealer: ElectionDealer,
    val instanceId: ElectionDealer.InstanceId,
    failRecoveryInterval: Duration
)(implicit ec: ExecutionContext) extends LazyLogging {
  import ElectionDealer.InstanceId

  // Last leader observed by this promoter; None while no leader is known.
  @volatile private var leader = Option.empty[InstanceId]
  // Set by close(); stops the acquire loop, recovery scheduling and notifications.
  @volatile private var closed = false
  // Guards against starting the promotion cycle more than once.
  private val started = new AtomicBoolean(false)
  // Completed after the first election round has produced a leader observation.
  // FIX: explicit () — `Promise[Unit]` relied on deprecated auto-application of
  // the empty argument list (warning in 2.13, error in Scala 3).
  private val done = Promise[Unit]()
  // Subscribers are notified each time the observed leader changes.
  val event = new Event[Option[InstanceId]]

  /**
   * Start promote someone as a leader.
   * Idempotent: only the first call actually starts the acquire loop.
   */
  def start(): Event[Option[InstanceId]] = {
    if (started.compareAndSet(false, true)) {
      logger.info(s"Start a promotion of $instanceId")
      acquire()
    }
    event
  }

  /** Completes once the first election round has finished. */
  def whenCompletelyStarted: Future[Unit] = {
    done.future
  }

  /**
   * Get current leader
   */
  def getLeaderAddress: Option[InstanceId] = leader

  /**
   * Check whether given address was promoted as a leader or not
   */
  def isLeader: Boolean = leader.contains(instanceId)

  /**
   * Check whether leader exists
   */
  def isLeaderExist: Boolean = leader.isDefined

  /**
   * Stop promotion.
   * Note: the final "no leader" notification is deliberately sent before
   * `closed` is set, so subscribers always observe the terminal None.
   */
  def close(): Future[Unit] = {
    keepLeader(None)
    closed = true
    electionDealer.release()
  }

  /**
   * Process which trying to acquire a leadership for you.
   * One round: try to acquire, resolve the actual leader, then watch until the
   * leadership is lost and loop again; any failure goes to scheduleRecovery.
   */
  private def acquire(): Unit = {
    if (!closed) {
      electionDealer.tryAcquire(instanceId).andThen({
        case Success(elected) =>
          // If we won, we are the leader; otherwise ask the dealer who is.
          val actualLeader = if (elected) {
            val leader = Some(instanceId)
            Future.successful(leader)
          } else {
            electionDealer.getLeader
          }
          actualLeader
            .flatMap(waitForNoLeader)
            .andThen({
              case Success(_) => acquire()
              case Failure(ex) => scheduleRecovery(ex)
            })
        case Failure(ex) => scheduleRecovery(ex)
      })
    }
  }

  /**
   * Process which waiting a state of no leader.
   * Records the known leader, marks startup complete, then polls the dealer
   * (30s wait per round) until no leader remains or the promoter is closed.
   */
  private def waitForNoLeader(
    lastKnownLeader: Option[InstanceId]
  ): Future[Unit] = {
    keepLeader(lastKnownLeader)
    // FIX: pass the unit value explicitly — `trySuccess()` relied on the
    // deprecated adaptation of an empty argument list to `(())`.
    done.trySuccess(())
    lastKnownLeader match {
      case Some(_) if !closed =>
        electionDealer.waitForLeader(30.seconds, lastKnownLeader)
          .flatMap(waitForNoLeader)
      case _ =>
        Future.successful(())
    }
  }

  /**
   * Keeps leader for getting current leader immediately.
   * Synchronized so notification order matches the order of leader changes.
   */
  private def keepLeader(leader: Option[InstanceId]): Unit = synchronized {
    if (!closed && this.leader != leader) {
      this.leader = leader
      event.notify(leader)
      logLeader(leader)
    }
  }

  /**
   * Schedule recovery process: drop the known leader and retry acquire()
   * after `failRecoveryInterval`.
   */
  private def scheduleRecovery(ex: Throwable): Unit = {
    keepLeader(None)
    logger.warn(s"Exception's been risen during promotion of [$instanceId] as a leader", ex)
    if (!closed) {
      SingleThreadedTimer.Default.schedule(failRecoveryInterval) {
        acquire()
      }
    }
  }

  /**
   * Logs a current leader in a pretty way.
   */
  private def logLeader(leader: Option[InstanceId]): Unit = {
    leader match {
      case Some(v) if v == instanceId => logger.info(s"$v becomes a leader, and it's me")
      case Some(v) => logger.info(s"$v becomes a leader")
      case _ => logger.info("No one is a leader")
    }
  }
}
hobbit-vt/leader-election
src/main/scala/com/github/hobbitvt/election/ElectionPromoter.scala
Scala
mit
3,895
// Regression test for nested try/catch/finally compilation and runtime
// behavior. Each testN exercises a different nesting shape; the exact
// try/catch structure is the point of the test — do not simplify.
object Test extends App {

  println(test1)
  println(test2)
  println(test3)
  println(test4)
  println(test5)
  // test6 always rethrows or returns through finally; the NPE path rethrows.
  try { println(test6) } catch { case _: Throwable => println("OK") }
  println(test7)
  // test8 rethrows the caught exception, so the catch here must fire.
  try { println(test8) } catch { case _: Throwable => println("OK") }
  println(test9)
  println(test10)
  println(test11)
  println(test12)

  // Simple try with a two-case catch; no exception thrown.
  def test1 = {
    var x = 1
    try {
      x = 2
    } catch {
      case _: NullPointerException => x = 3
      case _: Throwable => x = 4
    }
    x
  }

  // try nested inside the body of another try.
  def test2 = {
    var x = 1
    try {
      x = 2
      try { x = 21 } catch {
        case _: Throwable => x = 22
      }
      x = 23
    } catch {
      case _: NullPointerException => x = 3
      case _: Throwable => x = 4
    }
    x
  }

  // try whose body is itself a complete try/catch expression.
  def test3 = {
    var x = 1
    try {
      try{x = 2} catch { case _: Throwable => x = 4 }
    } catch {
      case _: NullPointerException => x = 3
      case _: Throwable => x = 4
    }
    x
  }

  // Two sequential (sibling) try/catch blocks in one method.
  def test4 = {
    var x = 1
    try {
      x = 2
    } catch {
      case _: NullPointerException => x = 3
      case _: Throwable => x = 4
    }
    try {
      x = 5
    } catch {
      case _: NullPointerException => x = 6
    }
    x
  }

  // try/catch nested inside the catch handlers themselves.
  def test5 = {
    var x = 1
    try {
      x = 2
    } catch {
      case _: NullPointerException =>
        try { x = 3 } catch { case f: Throwable => throw f }
      case _: Throwable =>
        x = 4; try { x = 41 } catch { case _: Exception => x = 42 }; x = 43
    }
    x
  }

  // catch that rethrows plus a `return` inside a handler, with a finally
  // that runs in every case. The NPE is rethrown past the finally.
  def test6: Int = {
    var x = 1
    try {
      x = 2
      (null: String).toString
    } catch {
      case e: NullPointerException =>
        throw e
      case _: Throwable =>
        x = 3
        return 1000
    } finally {
      x = 4
      println(x)
    }
    x
  }

  // try/catch nested inside a finally block.
  def test7 = {
    var x = 1
    try {
      x = 2
    } finally {
      try { x = 4 } catch { case _: Throwable => x = 5 }
    }
    x
  }

  // Handler that catches and immediately rethrows; x is never returned here.
  def test8 = {
    val x = 1
    try {
      throw new NullPointerException
    } catch {
      case e: Throwable => throw e
    }
    x
  }

  // Pattern match inside a try body.
  def test9 = {
    try { "" match {
      case s: String => 10
    }} catch {
      case _: Throwable => 20
    }
  }

  var x10 = 1

  // Catch case with a pattern guard referring to outer mutable state.
  def test10: Int = {
    try { 1 }
    catch { case e if (x10 == 1) => 1 }
  }

  // Unit-typed try/catch.
  def test11: Unit = {
    try { () }
    catch { case e: Throwable => () }
  }

  class E1 extends Exception
  class E2 extends Exception
  class E3 extends Exception

  // By-name argument thrown inside the try; dispatch on exception class.
  def test12_impl(op: => Int) = try {
    op
  } catch {
    case e: E1 => 2
    case e: E2 => 3
    case e: E3 => 4
  }
  def test12 =
    test12_impl(1) +
    test12_impl(throw new E1) +
    test12_impl(throw new E2) +
    test12_impl(throw new E3)
}
scala/scala
test/files/run/exceptions-nest.scala
Scala
apache-2.0
2,697
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.spark.sql

import scala.collection.Map

import org.apache.commons.logging.Log
import org.apache.commons.logging.LogFactory
import org.apache.spark.Partition
import org.apache.spark.SparkContext
import org.apache.spark.TaskContext
import org.apache.spark.sql.Row
import org.elasticsearch.hadoop.cfg.Settings
import org.elasticsearch.hadoop.rest.InitializationUtils
import org.elasticsearch.hadoop.rest.PartitionDefinition
import org.elasticsearch.spark.rdd.AbstractEsRDD
import org.elasticsearch.spark.rdd.AbstractEsRDDIterator
import org.elasticsearch.spark.rdd.EsPartition

// while we could have just wrapped the ScalaEsRDD and unpack the top-level data into a Row the issue is the underlying Maps are StructTypes
// and as such need to be mapped as Row resulting in either nested wrapping or using a ValueReader and which point wrapping becomes unyielding since the class signatures clash

// RDD of Spark SQL Rows read from Elasticsearch. One Spark partition maps to
// one ES partition (EsPartition); the heavy lifting happens in the iterator.
private[spark] class ScalaEsRowRDD(
  @transient sc: SparkContext,
  params: Map[String, String] = Map.empty,
  schema: SchemaUtils.Schema)
  extends AbstractEsRDD[Row](sc, params) {

  // Build a per-partition iterator over the underlying ES scroll for `split`.
  override def compute(split: Partition, context: TaskContext): ScalaEsRowRDDIterator = {
    new ScalaEsRowRDDIterator(context, split.asInstanceOf[EsPartition].esPartition, schema)
  }
}

// Iterator that deserializes ES documents into Spark SQL Rows according to
// the requested schema.
private[spark] class ScalaEsRowRDDIterator(
  context: TaskContext,
  partition: PartitionDefinition,
  schema: SchemaUtils.Schema)
  extends AbstractEsRDDIterator[Row](context, partition) {

  override def getLogger() = LogFactory.getLog(classOf[ScalaEsRowRDD])

  // Configure the value reader so documents come back as ScalaEsRow instances.
  override def initReader(settings: Settings, log: Log) = {
    InitializationUtils.setValueReaderIfNotSet(settings, classOf[ScalaRowValueReader], log)

    // parse the structure and save the order (requested by Spark) for each Row (root and nested)
    // since the data returned from Elastic is likely to not be in the same order
    SchemaUtils.setRowInfo(settings, schema.struct)
  }

  // `value` is a (id, document) pair from the reader; Spark only needs the
  // document part (index 1), which initReader ensured is a ScalaEsRow.
  override def createValue(value: Array[Object]): Row = {
    // drop the ID
    value(1).asInstanceOf[ScalaEsRow]
  }
}
xjrk58/elasticsearch-hadoop
spark/sql-20/src/main/scala/org/elasticsearch/spark/sql/ScalaEsRowRDD.scala
Scala
apache-2.0
2,854
package com.arcusys.valamis.web.servlet.admin

import javax.servlet.http.HttpServletRequest

import com.arcusys.learn.liferay.util.PortletName
import com.arcusys.valamis.lrssupport.lrsEndpoint.model.{AuthType, LrsEndpoint}
import com.arcusys.valamis.persistence.common.SlickDBInfo
import com.arcusys.valamis.slick.util.SlickDbTestBase
import com.arcusys.valamis.web.configuration.database.DatabaseInit
import com.arcusys.valamis.web.configuration.ioc.LrsSupportConfiguration
import com.arcusys.valamis.web.portlet.base.PermissionBase
import com.escalatesoft.subcut.inject.{BindingModule, NewBindingModule}
import org.scalatest.{BeforeAndAfter, FunSuiteLike}
import org.scalatra.test.scalatest._
import slick.driver.{JdbcDriver, JdbcProfile}
import slick.jdbc.JdbcBackend

/**
 * Servlet-level tests for AdminServlet's LRS settings endpoint, backed by an
 * in-memory Slick database (via SlickDbTestBase).
 */
class AdminServletTest extends ScalatraSuite with FunSuiteLike with BeforeAndAfter with SlickDbTestBase {

  implicit val companyId: Long = 1L

  // Adapts the test database/driver to the SlickDBInfo interface the app expects.
  def slickDbInfo: SlickDBInfo = new SlickDBInfo {
    override def databaseDef: JdbcBackend#DatabaseDef = db
    override def slickProfile: JdbcProfile = driver
    override def slickDriver: JdbcDriver = driver
  }

  // Servlet under test with security checks stubbed out.
  // FIX: the stubs previously returned the `Unit` *companion object* (`= Unit`),
  // relying on value discard; `()` is the actual unit value and is what was meant.
  val adminServlet = new AdminServlet(){
    override def checkCSRFToken: Unit = ()
    override def requirePortletPermission(permission: PermissionBase, portlets: PortletName*): Unit = ()
    override implicit val bindingModule: BindingModule = new NewBindingModule(fn = implicit module => {
      module <~ new LrsSupportConfiguration(slickDbInfo)
    })
    override def getCompanyId = 1L
  }

  addServlet(adminServlet, "/*")

  before {
    createDB()
    new DatabaseInit(slickDbInfo).init()
    //add settings for registered lrs
    adminServlet.endpointService.setEndpoint {
      LrsEndpoint("/valamis-lrs-portlet/xapi", AuthType.INTERNAL, "appId", "appSecret", None)
    }
  }
  after {
    dropDB()
  }

  test("switch to internal lrs") {
    // Start from an external (OAuth) endpoint, then switch back to internal.
    adminServlet.endpointService.setEndpoint {
      LrsEndpoint("/valamis-lrs-portlet/xapi", AuthType.OAUTH, "appId", "appSecret", None)
    }

    post("/administering/settings/lrs?isExternalLrs=false") {
      status shouldEqual 204

      val lrsSetting = adminServlet.endpointService.getEndpoint
      lrsSetting.isDefined shouldBe true
      lrsSetting.get.customHost shouldEqual None
      lrsSetting.get.auth shouldEqual AuthType.INTERNAL
    }
  }

  test("switch to internal lrs with custom host") {
    adminServlet.endpointService.setEndpoint {
      LrsEndpoint("/valamis-lrs-portlet/xapi", AuthType.OAUTH, "appId", "appSecret", None)
    }

    post("/administering/settings/lrs?isExternalLrs=false&internalLrsCustomHost=http://localhost:8080") {
      status shouldEqual 204

      val lrsSetting = adminServlet.endpointService.getEndpoint
      lrsSetting.isDefined shouldBe true
      lrsSetting.get.customHost shouldEqual Some("http://localhost:8080")
      lrsSetting.get.auth shouldEqual AuthType.INTERNAL
    }
  }
}
arcusys/Valamis
valamis-portlets/src/test/scala/com/arcusys/valamis/web/servlet/admin/AdminServletTest.scala
Scala
gpl-3.0
2,939
/**
 * Digi-Lib-Test - various test helpers for Digi components
 *
 * Copyright (c) 2013-2015 Alexey Aksenov ezh@ezh.msk.ru
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.digimead.lib.test

import org.mockito.Mockito
import org.scalatest.{ ConfigMap, Finders, Matchers, WordSpec }
import org.scalatest.exceptions.TestFailedException
import org.scalatest.mock.MockitoSugar
import org.slf4j.LoggerFactory
import scala.collection.JavaConversions.asScalaBuffer

// Exercises LoggingHelper's mockito-based log interception utilities.
class LoggingHelperSpec extends WordSpec with LoggingHelper with Matchers with MockitoSugar {
  val log = LoggerFactory.getLogger(getClass)

  "LoggingHelper" should {
    "interaction with mockito" in {
      // Capture log4j events emitted inside the first block and inspect them.
      withMockitoLogCaptor { log.debug("mockito test interception") } { logCaptor ⇒
        val enter = logCaptor.getAllValues().head
        enter.getLevel() should be(org.apache.log4j.Level.DEBUG)
        enter.getMessage.toString should be("mockito test interception")
      }
      // Matcher form: the partial function must accept the intercepted event.
      withMockitoLogMatcher { log.debug("mockito test interception", new Throwable()) } {
        case (org.apache.log4j.Level.DEBUG, "mockito test interception", Some(throwable)) ⇒ true
      }
      // Two events are logged but the matcher only covers the DEBUG one, so
      // verification (atLeastOnce) must fail — hence the expected exception.
      intercept[TestFailedException] {
        withMockitoLogMatcher {
          log.debug("mockito test interception", new Throwable())
          log.warn("mockito test interception", new Throwable())
        } {
          case (org.apache.log4j.Level.DEBUG, "mockito test interception", Some(throwable)) ⇒ true
        }(Mockito.atLeastOnce())
      }
    }
    "provides detailed description of throwable" in {
      // A two-deep cause chain should render exactly two "Caused by:" lines.
      val cause1 = new RuntimeException("cause1")
      val cause2 = new RuntimeException("cause2", cause1)
      val renderer = new LoggingHelper.DetailedThrowableRenderer
      val result = renderer.doRender(new RuntimeException("123", cause2))
      assert(result.filter(_.startsWith("Caused by: ")).size === 2)
    }
  }

  // Hook required by LoggingHelper to set up logging before the suite runs.
  override def beforeAll(configMap: ConfigMap) {
    adjustLoggingBeforeAll(configMap)
  }
}
ezh/digi-lib-test
src/test/scala/org/digimead/lib/test/LoggingHelperSpec.scala
Scala
apache-2.0
2,498
package edu.rice.habanero.benchmarks.pingpong

import edu.rice.habanero.actors.HabaneroSelector
import edu.rice.habanero.benchmarks.pingpong.PingPongConfig.{Message, PingMessage, StartMessage, StopMessage}
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}
import edu.rice.hj.Module0._
import edu.rice.hj.api.HjSuspendable

/**
 * Ping-pong message-passing benchmark implemented with Habanero selectors:
 * a Ping selector sends PingPongConfig.N pings to a Pong selector, which
 * replies to each, and both exit when the count is exhausted.
 *
 * @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
 */
object PingPongHabaneroSelectorBenchmark {

  def main(args: Array[String]) {
    BenchmarkRunner.runBenchmark(args, new PingPongHabaneroSelectorBenchmark)
  }

  private final class PingPongHabaneroSelectorBenchmark extends Benchmark {
    def initialize(args: Array[String]) {
      PingPongConfig.parseArgs(args)
    }

    def printArgInfo() {
      PingPongConfig.printArgs()
    }

    // One benchmark iteration: start both selectors inside a finish scope so
    // the iteration blocks until both have exited.
    def runIteration() {
      finish(new HjSuspendable {
        override def run() = {
          val pong = new PongSelector()
          val ping = new PingSelector(PingPongConfig.N, pong)
          ping.start()
          pong.start()
          // Kick off the exchange; mailbox 0 is the only mailbox used.
          ping.send(0, StartMessage.ONLY)
        }
      })
    }

    def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double) {
    }
  }

  // Sends `count` pings total, decrementing on Start and each subsequent Ping;
  // stops Pong and exits once all pongs have been received.
  private class PingSelector(count: Int, pong: HabaneroSelector[PingPongConfig.Message]) extends HabaneroSelector[Message](1) {

    private var pingsLeft: Int = count

    override def process(msg: PingPongConfig.Message) {
      msg match {
        case _: PingPongConfig.StartMessage =>
          pong.send(0, new PingPongConfig.SendPingMessage(this))
          pingsLeft = pingsLeft - 1
        case _: PingPongConfig.PingMessage =>
          pong.send(0, new PingPongConfig.SendPingMessage(this))
          pingsLeft = pingsLeft - 1
        case _: PingPongConfig.SendPongMessage =>
          // Self-message drives the next ping; when done, tell Pong to stop.
          if (pingsLeft > 0) {
            this.send(0, PingMessage.ONLY)
          } else {
            pong.send(0, StopMessage.ONLY)
            exit()
          }
        case message =>
          val ex = new IllegalArgumentException("Unsupported message: " + message)
          ex.printStackTrace(System.err)
      }
    }
  }

  // Replies to every incoming ping with a pong; exits on Stop.
  private class PongSelector extends HabaneroSelector[Message](1) {

    private var pongCount: Int = 0

    override def process(msg: PingPongConfig.Message) {
      msg match {
        case message: PingPongConfig.SendPingMessage =>
          val sender = message.sender.asInstanceOf[HabaneroSelector[PingPongConfig.Message]]
          sender.send(0, new PingPongConfig.SendPongMessage(this))
          pongCount = pongCount + 1
        case _: PingPongConfig.StopMessage =>
          exit()
        case message =>
          val ex = new IllegalArgumentException("Unsupported message: " + message)
          ex.printStackTrace(System.err)
      }
    }
  }
}
shamsmahmood/savina
src/main/scala/edu/rice/habanero/benchmarks/pingpong/PingPongHabaneroSelectorBenchmark.scala
Scala
gpl-2.0
2,792
/*
 * Copyright 2013 Maurício Linhares
 *
 * Maurício Linhares licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */

package com.github.mauricio.async.db.pool

import scala.concurrent.{ExecutionContext, Future, Promise}

/**
 * Common interface for asynchronous (non-blocking) object pools. Unlike a
 * synchronous pool, callers are never blocked while acquiring a resource, and
 * they **must** hand objects back explicitly — the pool has no way of knowing
 * when a borrowed object is done being used.
 *
 * @tparam T the type of object managed by the pool
 */
trait AsyncObjectPool[T] {

  /**
   * Asynchronously borrows an object from the pool. When the pool can neither
   * create a new object nor enqueue the request, the returned
   * [[scala.concurrent.Future]] fails with a
   * [[com.github.mauricio.async.db.pool.PoolExhaustedException]].
   *
   * @return future that will eventually return a usable pool object.
   */
  def take: Future[T]

  /**
   * Hands a borrowed object back so another client may use it. If the object
   * is invalid or cannot be reused, the returned [[scala.concurrent.Future]]
   * fails with the reason and the object is discarded from the pool.
   *
   * @param item the object being returned
   * @return future of this pool once the return has been processed
   */
  def giveBack(item: T): Future[AsyncObjectPool[T]]

  /**
   * Shuts this pool down. Any later call to **take** yields a
   * [[scala.concurrent.Future]] failed with a
   * [[com.github.mauricio.async.db.pool.PoolAlreadyTerminatedException]].
   *
   * @return future of this pool once closed
   */
  def close: Future[AsyncObjectPool[T]]

  /**
   * Borrows an object, runs a single computation with it, and returns it to
   * the pool when the computation completes — whether it succeeds or fails.
   *
   * @param f function that uses the borrowed object
   * @return the result of `f`, wrapped with take and giveBack
   */
  def use[A](
      f: (T) => Future[A]
  )(implicit executionContext: ExecutionContext): Future[A] =
    take.flatMap { pooled =>
      val completion = Promise[A]()
      try {
        // Return the object first, then surface f's outcome to the caller.
        f(pooled).onComplete { outcome =>
          giveBack(pooled).onComplete(_ => completion.complete(outcome))
        }
      } catch {
        // f itself may throw synchronously. The object is still returned
        // (the factory will discard it if invalid) and the caller sees the
        // original error thrown by f.
        case thrown: Throwable =>
          giveBack(pooled).onComplete(_ => completion.failure(thrown))
      }
      completion.future
    }
}
dripower/postgresql-async
db-async-common/src/main/scala/com/github/mauricio/async/db/pool/AsyncObjectPool.scala
Scala
apache-2.0
3,093
package io.getquill.monad

import language.experimental.macros
import com.twitter.util.Future
import io.getquill.context.Context
import com.twitter.util.Try

/**
 * IOMonad implementation backed by Twitter's `com.twitter.util.Future`
 * (used by the Finagle-based quill contexts). Each `runIO` overload expands,
 * via the `IOMonadMacro.runIO` macro, into a lazy `IO` description that is
 * only executed when passed to `performIO`.
 */
trait TwitterFutureIOMonad extends IOMonad {
  this: Context[_, _] =>

  // All IO results are materialized as Twitter Futures.
  type Result[T] = Future[T]

  def runIO[T](quoted: Quoted[T]): IO[RunQuerySingleResult[T], Effect.Read] = macro IOMonadMacro.runIO
  def runIO[T](quoted: Quoted[Query[T]]): IO[RunQueryResult[T], Effect.Read] = macro IOMonadMacro.runIO
  def runIO(quoted: Quoted[Action[_]]): IO[RunActionResult, Effect.Write] = macro IOMonadMacro.runIO
  def runIO[T](quoted: Quoted[ActionReturning[_, T]]): IO[RunActionReturningResult[T], Effect.Write] = macro IOMonadMacro.runIO
  def runIO(quoted: Quoted[BatchAction[Action[_]]]): IO[RunBatchActionResult, Effect.Write] = macro IOMonadMacro.runIO
  def runIO[T](quoted: Quoted[BatchAction[ActionReturning[_, T]]]): IO[RunBatchActionReturningResult[T], Effect.Write] = macro IOMonadMacro.runIO

  // Leaf IO node wrapping a deferred computation; the thunk is only invoked
  // when `performIO` interprets the IO tree.
  case class Run[T, E <: Effect](f: () => Result[T]) extends IO[T, E]

  /**
   * Interprets an `IO` description into a Twitter `Future`.
   *
   * @param io            the IO tree to execute
   * @param transactional when true, signals transactional execution; note that
   *                      this flag is only propagated through `Transactional`
   *                      nodes here — presumably concrete contexts override
   *                      `performIO` to actually open a transaction (confirm
   *                      against the implementing context).
   */
  def performIO[T](io: IO[T, _], transactional: Boolean = false): Result[T] =
    io match {
      // Lift a scala.util.Try into an already-completed Twitter Future.
      case FromTry(t) => Future.const(Try.fromScala(t))
      case Run(f)     => f()
      // Future.collect runs the sub-IOs concurrently; results are rebuilt into
      // the original collection type via the provided builder factory `cbf`.
      case Sequence(in, _, cbf) =>
        Future.collect(in.map(performIO(_)).toSeq)
          .map(r => cbf().++=(r).result)
      // liftToTry + asScala bridges the Twitter Try into the scala.util.Try
      // expected by the continuation `fA`, so failures are observable by it.
      case TransformWith(a, fA) =>
        performIO(a)
          .liftToTry.map(_.asScala)
          .flatMap(v => performIO(fA(v)))
      case Transactional(io) =>
        performIO(io, transactional = true)
    }
}
mentegy/quill
quill-finagle-mysql/src/main/scala/io/getquill/monad/TwitterFutureIOMonad.scala
Scala
apache-2.0
1,555
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/

package com.normation.rudder.repository

import com.normation.eventlog.EventActor
import com.normation.eventlog.ModificationId
import com.normation.rudder.domain.archives.RuleArchiveId
import com.normation.rudder.domain.policies._
import net.liftweb.common._

/**
 * Read-only repository of configuration rules.
 */
trait RoRuleRepository {

  /**
   * Try to find the rule with the given ID.
   * Empty: no rule with such ID exists.
   * Full(rule): found the rule (rule.id == ruleId).
   * Failure: an error happened.
   */
  def get(ruleId:RuleId) : Box[Rule]

  /**
   * Return all rules.
   * To get only applied ones, you can post-filter the seq
   * with the method RuleTargetService#isApplied.
   *
   * NOTE(review): the parameter name "includeSytem" is a typo for
   * "includeSystem", but renaming it would break callers using named
   * arguments, so it is kept as-is. When false (default), system rules
   * are excluded.
   */
  def getAll(includeSytem:Boolean = false) : Box[Seq[Rule]]

}

/**
 * Write operations on the rule repository.
 */
trait WoRuleRepository {
  /**
   * Save the given rule.
   * If a rule with the same ID is already present in the
   * system, raise an error.
   * If the rule is not in the system, add it.
   *
   * Returns the saved Rule (as an AddRuleDiff).
   *
   * NOTE: only save here, deploy is done in the DeploymentService.
   *
   * NOTE: some parameters may be forced to a value different from the
   * one provided (typically, serial will be set to 0 whatever its value
   * is). It is the responsibility of the user to check the
   * provided resulting rule if he cares about those fields.
   */
  def create(rule:Rule, modId: ModificationId, actor:EventActor, reason:Option[String]) : Box[AddRuleDiff]

  /**
   * Update the rule with the given ID with the given
   * parameters.
   *
   * If the rule is not in the repos, the method fails.
   * If the rule is a system one, the method fails.
   * NOTE: the serial is *never* updated with that method.
   */
  def update(rule:Rule, modId: ModificationId, actor:EventActor, reason:Option[String]) : Box[Option[ModifyRuleDiff]]

  /**
   * Update the system rule with the given ID with the given
   * parameters. Unlike update(), this accepts system rules.
   *
   * NOTE: the serial is *never* updated with that method.
   */
  def updateSystem(rule:Rule, modId: ModificationId, actor:EventActor, reason:Option[String]) : Box[Option[ModifyRuleDiff]]

  /**
   * Increment the serial of the rule with the given ID by one.
   * Return the new serial value.
   * The method fails if no rule has such ID.
   */
  def incrementSerial(id:RuleId) : Box[Int]

  /**
   * Delete the rule with the given ID.
   * If no rule with such ID exists, it is an error
   * (it's the caller site responsibility to decide if it's
   * an error or not).
   * A system rule can not be deleted.
   */
  def delete(id:RuleId, modId: ModificationId, actor:EventActor, reason:Option[String]) : Box[DeleteRuleDiff]

  /**
   * A (dangerous) method that replaces all existing rules
   * by the list given in parameter.
   * If it succeeds, returns an identifier of the place where
   * the old rules are stored - it is the
   * responsibility of the caller to delete them.
   *
   * Most of the time, we don't want to change system rules.
   * So when "includeSystem" is false (default), swapRules
   * implementations have to take care to ignore any system rule (both in
   * newRules and in the archive).
   *
   * Note: really special care has to be taken with serial IDs:
   * - for rules which exist in both the imported and existing referential,
   *   the serial ID MUST be updated (+1)
   * - for all other imported rules, the serial MUST be set to 0
   */
  def swapRules(newRules:Seq[Rule], includeSystem:Boolean = false) : Box[RuleArchiveId]

  /**
   * Delete a set of rules previously archived by swapRules.
   */
  def deleteSavedRuleArchiveId(saveId:RuleArchiveId) : Box[Unit]

}
Kegeruneku/rudder
rudder-core/src/main/scala/com/normation/rudder/repository/RuleRepository.scala
Scala
agpl-3.0
5,418
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package wvlet.airframe

import org.scalatest._
import wvlet.log.LogFormatter.SourceCodeLogFormatter
import wvlet.log.{LogSupport, Logger}

import scala.language.implicitConversions

/**
 * Base trait for Airframe test suites: bundles WordSpec-style ScalaTest with
 * logging support and configures the logger (source-code locations, periodic
 * log-level file scanning) for the duration of each suite run.
 */
trait AirframeSpec
    extends WordSpec
    with Matchers
    with GivenWhenThen
    with BeforeAndAfter
    with BeforeAndAfterAll
    with LogSupport {

  // Tag for the test currently being worked on (run selectively with -n working)
  protected val working = Tag("working")
  // Tag for known-failing tests
  protected val failing = Tag("failing")

  /**
   * True when running under Travis CI (TRAVIS env var set to "true").
   * Uses a case-insensitive comparison instead of String.toBoolean so a
   * malformed env value cannot throw IllegalArgumentException during a test.
   */
  protected def inCI: Boolean = {
    sys.env.get("TRAVIS").exists(_.equalsIgnoreCase("true"))
  }

  /** Lets a plain string be used where a ScalaTest Tag is expected.
    * Explicit return type added: public implicits should always declare one. */
  implicit def toTag(s: String): Tag = Tag(s)

  /**
   * Wraps the suite run to install the source-code log formatter and the
   * scheduled log-level scan, tearing the scan down once the run completes.
   */
  override def run(testName: Option[String], args: Args): Status = {
    // Add source code location to the debug logs
    Logger.setDefaultFormatter(SourceCodeLogFormatter)
    // Periodically scan log level file while the suite is running
    Logger.scheduleLogLevelScan
    val s = super.run(testName, args)
    Logger.stopScheduledLogLevelScan
    s
  }

  // Temporarily removed for Scala 2.13.0-M4, which doesn't have parallel collection yet.
  // https://github.com/scala/scala-parallel-collections/issues/41
  //
  //  private[airframe] object CompatParColls {
  //    val Converters = {
  //      import Compat._
  //
  //      {
  //        import scala.collection.parallel._
  //
  //        CollectionConverters
  //      }
  //    }
  //
  //    object Compat {
  //      object CollectionConverters
  //    }
  //  }
}
wvlet/airframe
airframe-scalatest/shared/src/main/scala/wvlet/airframe/AirframeSpec.scala
Scala
apache-2.0
1,974
package com.cave.metrics.data.postgresql import java.sql.{SQLException, Timestamp} import java.util.UUID import com.cave.metrics.data._ import com.cave.metrics.data.postgresql.Tables._ import org.joda.time.DateTime import org.postgresql.util.PSQLException import scala.concurrent._ import scala.slick.driver.PostgresDriver import scala.slick.driver.PostgresDriver.simple._ import scala.slick.lifted.TableQuery import scala.util.{Success, Try} class PostgresDataManagerImpl(awsConfig: AwsConfig) extends DatabaseConnection(awsConfig) with DataManager { lazy val organizationsTable = TableQuery[OrganizationsTable] lazy val tokensTable = TableQuery[TokensTable] lazy val alertsTable = TableQuery[AlertsTable] lazy val teamsTable = TableQuery[TeamsTable] lazy val queriesTable = TableQuery[QueriesTable] lazy val alert2queriesTable = TableQuery[AlertQueriesTable] lazy val usersTable = TableQuery[UsersTable] lazy val confirmationTokens = TableQuery[ConfirmationTokensTable] lazy val organizationUsersTable = TableQuery[OrganizationUsersTable] lazy val teamUsersTable = TableQuery[TeamUsersTable] lazy val sessionTokensTable = TableQuery[SessionTokensTable] lazy val statusTable = TableQuery[StatusTable] val DEFAULT_MAX_PAGINATION_LIMIT = 5000 private[postgresql] val orgSerializer = OrganizationSerializer private[postgresql] val alertSerializer = AlertSerializer private[postgresql] val teamSerializer = TeamSerializer private[postgresql] val statusSerializer = StatusSerializer /** * Delete a token from an existing team * * @param tokenId identifier of the token to delete * @return true if deleted, false if not found, or error */ override def deleteToken(tokenId: String): Try[Boolean] = { Try { val (uuid, timestamp) = createUuidAndTimestamp db.withTransaction { implicit session => tokensTable.filter(t => t.deletedAt.isEmpty && t.id === tokenId.toLong).map(t => (t.deletedByGuid, t.deletedAt)).update(Some(uuid), Some(timestamp)) == 1 } } } /** * Retrieve alert with given ID * * @param 
alertId the identifier of the alert * @return the alert configuration, if found; None if not found; Failure if error */ override def getAlert(alertId: String): Try[Option[Alert]] = { Try { db.withTransaction { implicit session => alertsTable.sortBy(_.createdAt).filter(a => a.deletedAt.isEmpty && a.id === alertId.toLong).list match { case List() => None case List(a) => Some(alertSerializer.fromPostgresRecord(a)) } } } recover { case e => sys.error(s"Unable to fetch an alert from DB ${e.getMessage}") } } /** * * @param organization the organization to update * @param organizationPatch the new data for update * @return the updated organization */ override def updateOrganization(organization: Organization, organizationPatch: OrganizationPatch): Try[Option[Organization]] = { Try { val (uuid, timestamp) = createUuidAndTimestamp db.withTransaction { implicit session => val email = organizationPatch.email.getOrElse(organization.email) val notificationUrl = organizationPatch.notificationUrl.getOrElse(organization.notificationUrl) organizationsTable.filter(o => o.deletedAt.isEmpty && o.id === organization.id.get.toLong). map(a => (a.email, a.notificationUrl, a.updatedAt, a.updatedByGuid)). 
update(email, notificationUrl, timestamp, uuid) match { case 1 => Some(Organization(organization.id, organization.name, email, notificationUrl, organization.tokens)) case 0 => None case _ => sys.error(s"Unable to update Organization with id ${organization.id.get}") } } } recover { case e => sys.error(s"Unable to update an organization ${e.getMessage}") } } /** * * @param organization the organization to update * @param cluster the new cluster for this organization * @return the updated organization */ override def updateOrganizationCluster(organization: Organization, cluster: Option[String]): Try[Option[Organization]] = { Try { val (uuid, timestamp) = createUuidAndTimestamp db.withTransaction { implicit session => organizationsTable.filter(o => o.deletedAt.isEmpty && o.id === organization.id.get.toLong). map(a => (a.cluster, a.updatedAt, a.updatedByGuid)). update(cluster, timestamp, uuid) match { case 1 => Some(Organization(organization.id, organization.name, organization.email, organization.notificationUrl, organization.tokens, cluster)) case 0 => None case _ => sys.error(s"Unable to update Organization with id ${organization.id.get}") } } } recover { case e => sys.error(s"Unable to update an organization ${e.getMessage}") } } /** * * @param alert the alert configuration to be updated * @param alertPatch the new alert configuration * @return the updated alert */ override def updateAlert(alert: Alert, alertPatch: AlertPatch): Try[Option[Alert]] = { Try { val (uuid, timestamp) = createUuidAndTimestamp val description = alertPatch.description.getOrElse(alert.description) val enabled = alertPatch.enabled.getOrElse(alert.enabled) val period = alertPatch.period.getOrElse(alert.period) val handbookUrl = alertPatch.handbookUrl.orElse(alert.handbookUrl) val routingMap = alertPatch.routing.orElse(alert.routing) val routing = Alert.routingAsStr(routingMap) db.withTransaction { implicit session => val alertToUpdate = alertsTable.filter(a => a.deletedAt.isEmpty && a.id === 
alert.id.get.toLong).map(a => (a.description, a.status, a.period, a.handbookUrl, a.routing, a.updatedAt, a.updatedByGuid)) alertToUpdate.update(description, Some(enabled), period, handbookUrl, routing, timestamp, uuid) match { case 1 => Some(Alert(alert.id, description, enabled, period, alert.condition, handbookUrl, routingMap)) case 0 => None case _ => sys.error(s"Unable to update Alert with id=" + alert.id.getOrElse("ALERT_ID_UNDEFINED")) } } } recover { case e => sys.error(s"Unable to update an alert: ${e.getMessage}") } } /** * Add a token to an existing organization * * @param organization the organization to modify * @param token the token to add * @return the new token object */ override def addOrganizationToken(organization: Organization, token: Token): Try[Token] = { addToken(organization, None, token) } private def addToken(organization: Organization, teamId: Option[Long], token: Token): Try[Token] = { val (uuid, timestamp) = createUuidAndTimestamp Try { db.withTransaction { implicit session => val newTokenId = (tokensTable returning tokensTable.map(_.id)) += TokensRow(1, organization.id.get.toLong, teamId, token.description, token.value, uuid, timestamp, uuid, timestamp, None, None) Token(Some(newTokenId.toString), token.description, token.value, new DateTime(timestamp)) } } recover { case e => sys.error(s"Unable to add a new token to organization: ${e.getMessage}") } } /** * Add a token to an existing team * * @param organization the organization to modify * @param team the team to modify * @param token the token to add * @return the new token object */ override def addTeamToken(organization: Organization, team: Team, token: Token): Try[Token] = { addToken(organization, Some(team.id.get.toLong), token) } /** * Add an alert configuration for the given organization * * @param organization the organization for this alert * @param alert the alert configuration * @return the created alert */ override def createOrganizationAlert(organization: Organization, 
alert: Alert, queries: Set[String]): Try[Option[Alert]] = { createAlert(organization, alert, None, queries) } private def createAlert(organization: Organization, alert: Alert, teamId: Option[Long], queries: Set[String]): Try[Option[Alert]] = { Try { val (uuid, timestamp) = createUuidAndTimestamp val orgId: Long = organization.id.get.toLong db.withTransaction { implicit session => val alertId = (alertsTable returning alertsTable.map(_.id)) += AlertsRow(1, orgId, teamId, alert.description, Some(alert.enabled), alert.condition, alert.period, alert.handbookUrl, alert.routingStr, uuid, timestamp, uuid, timestamp, None, None) queries.filter(queryName => { queriesTable.filter(_.name === queryName).list.isEmpty }).map((queriesTable returning queriesTable.map(_.id)) += QueriesRow(1, _, uuid, timestamp, uuid, timestamp, None, None)) queries.foreach(queryName => { queriesTable.filter(_.name === queryName).list.map(query => { alert2queriesTable += AlertQueriesRow(1, alertId, query.id, uuid, timestamp, uuid, timestamp, None, None) }) }) if (alertId >= 0) { Some(Alert(Some(alertId.toString), alert.description, alert.enabled, alert.period, alert.condition, alert.handbookUrl, alert.routing)) } else None } } recover { case e => sys.error(s"Unable to create an alert for organization: ${e.getMessage}") } } private def createUuidAndTimestamp: (UUID, Timestamp) = { val uuid = UUID.randomUUID() // FIXME: where to get uuid from? 
val timestamp = new Timestamp(System.currentTimeMillis()) (uuid, timestamp) } /** * Find all teams for specified organization * * @param organization the organization to lookup * @return the Team object, if found */ override def getTeams(organization: Organization): Try[Seq[Team]] = { Try { db.withTransaction { implicit session => val result = for { token <- tokensTable.sortBy(_.createdAt).filter(_.deletedAt.isEmpty) team <- token.teamsFk.filter(_.deletedAt.isEmpty) o <- team.organizationsFk.filter(org => org.deletedAt.isEmpty && org.id === organization.id.get.toLong) } yield (token, team) result.list.groupBy(_._2.id).map(team => teamSerializer.fromPostgresRecord(team._2).get).toList } } recover { case e => sys.error(s"Unable to fetch Teams from DB ${e.getMessage}") } } /** * Delete team with given name for specified organization * * @param organization the organization to lookup * @param teamName a team name to delete * @return true if deleted, false if it doesn't exist */ override def deleteTeam(organization: Organization, teamName: String): Try[Boolean] = { Try { getTeam(organization, teamName) match { case Success(Some(team)) => deleteTeamAlerts(team) db.withTransaction { implicit session => val (uuid, timestamp) = createUuidAndTimestamp teamsTable.filter(t => t.deletedAt.isEmpty && t.id === team.id.get.toLong).map(t => (t.deletedByGuid, t.deletedAt)).update(Some(uuid), Some(timestamp)) == 1 } case Success(None) => false case _ => sys.error(s"Unable to find a team with name=$teamName") } } } private def deleteTeamAlerts(team: Team) = { val (uuid, timestamp) = createUuidAndTimestamp db.withTransaction { implicit session => alertsTable.filter(a => a.deletedAt.isEmpty && a.id === team.id.get.toLong).map(t => (t.deletedByGuid, t.deletedAt)).update(Some(uuid), Some(timestamp)) } } /** * Find team with given name for specified organization * * @param organization the organization to lookup * @param teamName a team name to find * @return the Team object, if found */ 
override def getTeam(organization: Organization, teamName: String): Try[Option[Team]] = { Try { db.withTransaction { implicit session => val result = for { token <- tokensTable.sortBy(_.createdAt).filter(_.deletedAt.isEmpty) team <- token.teamsFk.filter(teamRow => teamRow.deletedAt.isEmpty && teamRow.name === teamName) o <- team.organizationsFk.filter(orgRow => orgRow.deletedAt.isEmpty && orgRow.id === organization.id.get.toLong) } yield (token, team) teamSerializer.fromPostgresRecord(result.list) } } recover { case e => sys.error(s"Unable to fetch a team from DB: ${e.getMessage}") } } /** * Update cluster for existing team in given organization * * @param organization the organization for this team * @param team the team for which we update the cluster * @param cluster the new cluster * @return the updated team */ override def updateTeamCluster(organization: Organization, team: Team, cluster: Option[String]): Try[Option[Team]] = { Try { val (uuid, timestamp) = createUuidAndTimestamp db.withTransaction { implicit session => teamsTable.filter(o => o.deletedAt.isEmpty && o.id === team.id.get.toLong && o.organizationId === organization.id.get.toLong). map(a => (a.cluster, a.updatedAt, a.updatedByGuid)). 
update(cluster, timestamp, uuid) match { case 1 => Some(Team(team.id, team.name, team.tokens, cluster)) case 0 => None case _ => sys.error(s"Unable to update Team with id ${team.id.get}") } } } recover { case e => sys.error(s"Unable to update a team: ${e.getMessage}") } } /** * Delete alert with given ID for given organization and team * * @param alertId the identifier of the alert * @return true, if found; None if not found; Failure if error */ override def deleteAlert(alertId: String): Try[Boolean] = { Try { db.withTransaction { implicit session => val toSoftDelete = alertsTable.filter(a => a.deletedAt.isEmpty && a.id === alertId.toLong).map(a => (a.deletedByGuid, a.deletedAt)) val (uuid, timestamp) = createUuidAndTimestamp toSoftDelete.update(Some(uuid), Some(timestamp)) match { case 1 => true case 0 => false case _ => sys.error(s"Unable to delete Alert with id $alertId") } } } recover { case e => false } } /** * Delete an organization from the table with the given name. * * @param name the name of the organization to delete * @return true if successful, false if not found */ override def deleteOrganization(name: String): Try[Boolean] = { Try { db.withTransaction { implicit session => val result = for { t <- teamsTable.filter(_.deletedAt.isEmpty) o <- t.organizationsFk.filter(o => o.deletedAt.isEmpty && o.name === name) } yield o if (result.list.isEmpty) { val (uuid, timestamp) = createUuidAndTimestamp organizationsTable.filter(_.name === name).map(o => (o.deletedByGuid, o.deletedAt)).update(Some(uuid), Some(timestamp)) > 0 } else sys.error(s"Unable to delete Organization $name. 
Delete the organization teams first.") } } recover { case e => sys.error(s"Unable to fetch an organization from DB ${e.getMessage}") } } /** * Add an alert configuration for the given organization and team * * @param organization the organization for this alert * @param team the team for this alert * @param alert the alert configuration * @return the created alert */ override def createTeamAlert(organization: Organization, team: Team, alert: Alert, queries: Set[String]): Try[Option[Alert]] = { createAlert(organization, alert, Some(team.id.get.toLong), queries) } /** * Retrieve configured alerts for given organization and team * * @param organization the organization whose alerts to fetch * @param team the team whose alerts to fetch * @param limit an optional limit for number of items to fetch * @param offset offset for pagination * @return list of alerts, and (optional) continuation key */ override def getTeamAlerts(organization: Organization, team: Team, limit: Int, offset: Int): Try[List[Alert]] = { def queryAlerts: Query[Tables.AlertsTable, Tables.AlertsRow, Seq] = { alertsTable.filter(a => a.deletedAt.isEmpty && a.organizationId === organization.id.get.toLong && a.teamId === team.id.get.toLong).sortBy(_.id) } getAlerts(limit, offset, queryAlerts) } /** * Create team with given data for specified organization * * @param organization the organization to lookup * @param team a team object to store * @return the resulting Team object */ override def createTeam(organization: Organization, team: Team): Try[Option[Team]] = { Try { val (uuid, timestamp) = createUuidAndTimestamp val orgId: Long = organization.id.get.toLong db.withTransaction { implicit session => val newTeamId = (teamsTable returning teamsTable.map(_.id)) += TeamsRow(1, orgId, team.name, None, uuid, timestamp, uuid, timestamp, None, None) val tokens = team.tokens.map(_.map { token => val tokenId = (tokensTable returning tokensTable.map(_.id)) += TokensRow(1, orgId, Some(newTeamId), token.description, 
token.value, uuid, timestamp, uuid, timestamp, None, None) Token(Some(tokenId.toString), token.description, token.value, DateTime.now()) }) Some(Team(Some(newTeamId.toString), team.name, tokens)) } } recover { case e: SQLException => None case e: Exception => sys.error(s"Error while saving 'team' to DB ${e.getMessage}") } } /** * Retrieve configured alerts for given organization * * @param organization the organization whose alerts to fetch * @param limit an optional limit for number of items to fetch * @param offset offset for pagination * @return list of alerts, and (optional) continuation key */ override def getOrganizationAlerts(organization: Organization, limit: Int, offset: Int): Try[List[Alert]] = { def queryAlerts: Query[Tables.AlertsTable, Tables.AlertsRow, Seq] = { alertsTable.filter(a => a.deletedAt.isEmpty && a.organizationId === organization.id.get.toLong && a.teamId.isEmpty).sortBy(_.id) } getAlerts(limit, offset, queryAlerts) } private def getAlerts(limitRaw: Int, offset: Int, queryAlerts: PostgresDriver.simple.Query[Tables.AlertsTable, Tables.AlertsRow, Seq]): Try[List[Alert]] = { Try { val limit = Math.min(limitRaw, DEFAULT_MAX_PAGINATION_LIMIT) db.withTransaction { implicit session => queryAlerts.drop(offset).take(limit).list.map(r => alertSerializer.fromPostgresRecord(r)).toList } } recover { case e => sys.error(s"Unable to fetch Alerts from DB: ${e.getMessage}") } } /** * Create a new organization. * * Conditional on an organization not already existing with this name. 
* * @param user the organization admin * @param organization the organization to create * @return the organization that was created */ override def createOrganization(user: User, organization: Organization): Try[Option[Organization]] = { Try { val (uuid, timestamp) = createUuidAndTimestamp db.withTransaction { implicit session => val newOrganizationId = (organizationsTable returning organizationsTable.map(_.id)) += OrganizationsRow(1, organization.name, organization.email, organization.notificationUrl, None, uuid, timestamp, uuid, timestamp, None, None) organizationUsersTable += OrganizationUsersRow(1, newOrganizationId, user.id.get, Role.Admin.value) val tokens = organization.tokens.map { list => list.map { token => val tokenId = (tokensTable returning tokensTable.map(_.id)) += TokensRow(1, newOrganizationId, None, token.description, token.value, uuid, timestamp, uuid, timestamp, None, None) Token(Some(tokenId.toString), token.description, token.value, DateTime.now()) } } Some(Organization(Some(newOrganizationId.toString), organization.name, organization.email, organization.notificationUrl, tokens)) } } recover { case e: SQLException => None case e => sys.error(s"Unable to create an organization in DB $e") } } /** * Check if the dataManager is healthy * * @return true if healthy, false otherwise */ override def isHealthy: Boolean = // TODO: implement me true /** * Fetch an organization from the table with the given name. 
* * @param name the name of the organization to find * @return the organization object */ override def getOrganization(name: String): Try[Option[Organization]] = { Try { db.withTransaction { implicit session => val result = for { t <- tokensTable.sortBy(_.createdAt).filter(t => t.deletedAt.isEmpty && t.teamId.isEmpty) o <- t.organizationsFk.filter(o => o.deletedAt.isEmpty && o.name === name) } yield (t, o) orgSerializer.fromPostgresRecord(result.list) } } recover { case e: PSQLException => sys.error(s"Unable to find an organization in DB ${e.getServerErrorMessage.getMessage}") case e => sys.error(s"Unable to find an organization in DB ${e.getMessage}") } } /** * * @param query continuous query name * @return true if CQ exists */ override def queryDoesNotExist(query: String): Boolean = { db.withTransaction { implicit session => queriesTable.filter(_.name === query).list.isEmpty } } override def createUser(user: User) (implicit ec: ExecutionContext): Future[Option[User]] = future { try { db.withTransaction { implicit session => val id = (usersTable returning usersTable.map(_.id)) += UsersRow(1, user.firstName, user.lastName, user.email, user.password, user.salt) Some(User(Some(id), user.firstName, user.lastName, user.email, user.password, user.salt)) } } catch { case e: Throwable => None } } override def updateUser(user: User, first: Option[String], last: Option[String], passwordInfo: Option[PasswordInfo]) (implicit ec: ExecutionContext): Future[Option[User]] = future { try { db.withTransaction { implicit session => val query = usersTable.filter(_.id === user.id) query.list match { case List() => sys.error(s"Unable to find user with ID ${user.id}.") case List(u) => val newFirstName = first.getOrElse(u.firstName) val newLastName = last.getOrElse(u.lastName) val newHash = passwordInfo.map(_.hash) getOrElse user.password val newSalt = passwordInfo.map(_.salt) getOrElse user.salt query.map(u => (u.firstName, u.lastName, u.password, u.salt)).update(newFirstName, 
newLastName, newHash, newSalt) match { case 1 => Some(User(Some(u.id), newFirstName, newLastName, u.email, newHash, newSalt)) case 0 => None case _ => None } case l => None } } } catch { case e: Throwable => None } } override def deleteUser(id: Long) (implicit ec: ExecutionContext): Future[Unit] = future { db.withTransaction { implicit session => val query = usersTable.filter(_.id === id) query.list match { case List() => sys.error(s"Unable to find user with ID $id.") case List(u) => organizationUsersTable.filter(_.userId === u.id).delete teamUsersTable.filter(_.userId === u.id).delete query.delete case l => sys.error(s"Expected to find only one user with ID $id, but found $l") } } } override def getUser(id: Long) (implicit ec: ExecutionContext): Future[Option[User]] = future { db.withTransaction { implicit session => usersTable.filter(_.id === id).list match { case List() => None case List(user) => Some(User(Some(user.id), user.firstName, user.lastName, user.email, user.password, user.salt)) case l => sys.error(s"Expected to find only one user with ID $id, but found $l") } } } override def getUserByEmail(email: String) (implicit ec: ExecutionContext): Future[Option[User]] = future { db.withTransaction { implicit session => usersTable.filter(_.email === email).list match { case List() => None case List(user) => Some(User(Some(user.id), user.firstName, user.lastName, user.email, user.password, user.salt)) case l => sys.error(s"Expected to find only one user with email $email, but found $l") } } } override def findUser(query: String) (implicit ec: ExecutionContext): Future[List[User]] = future { db.withTransaction { implicit session => val q = s"%$query%".toLowerCase usersTable.filter(u => u.firstName.toLowerCase.like(q) || u.lastName.toLowerCase.like(q) || u.email.toLowerCase.like(q)).list map { case user => User(Some(user.id), user.firstName, user.lastName, user.email, user.password, user.salt) } } } override def createConfirmationToken(confirmationToken: 
ConfirmationToken) (implicit ec: ExecutionContext): Future[Option[ConfirmationToken]] = future { try { db.withTransaction { implicit session => val id = (confirmationTokens returning confirmationTokens.map(_.id)) += ConfirmationTokensRow(1, confirmationToken.uuid, confirmationToken.email, new Timestamp(confirmationToken.creationTime.getMillis), new Timestamp(confirmationToken.expirationTime.getMillis), confirmationToken.isSignUp) Some(ConfirmationToken(Some(id), confirmationToken.uuid, confirmationToken.email, confirmationToken.creationTime, confirmationToken.expirationTime, confirmationToken.isSignUp)) } } catch { case e: SQLException => None case e: Throwable => sys.error(s"Unable to create a user in DB $e") } } override def deleteExpiredConfirmationTokens(expirationTime: DateTime) (implicit ec: ExecutionContext): Future[Unit] = future { try { val now = new Timestamp(expirationTime.getMillis) db.withTransaction { implicit session => confirmationTokens.filter(_.expirationTime < now).delete } } catch { case e: Throwable => sys.error(s"Unable to delete expired tokens: ${e.getMessage}") } } override def deleteConfirmationToken(uuid: String) (implicit ec: ExecutionContext): Future[Option[ConfirmationToken]] = future { try { db.withTransaction { implicit session => val tokenResults = confirmationTokens.filter(_.uuid === uuid) tokenResults.list match { case List() => None case List(t) => tokenResults.delete Some(ConfirmationToken(Some(t.id), t.uuid, t.email, new DateTime(t.creationTime.getTime), new DateTime(t.expirationTime.getTime), t.isSignUp)) case l => sys.error(s"Should have found only one token with uuid $uuid, but found $l") } } } catch { case e: Throwable => sys.error(s"Unable to delete token with uuid $uuid: ${e.getMessage}") } } override def getConfirmationTokenByUUID(uuid: String) (implicit ec: ExecutionContext): Future[Option[ConfirmationToken]] = future { try { db.withTransaction { implicit session => confirmationTokens.filter(t => t.uuid === uuid).list 
match { case List() => None case List(token) => Some( ConfirmationToken(Some(token.id), token.uuid, token.email, new DateTime(token.creationTime.getTime), new DateTime(token.expirationTime.getTime), token.isSignUp)) case l => sys.error(s"Should have found only one token with uuid $uuid, but found $l") } } } catch { case e: Throwable => sys.error(s"Unable to get token for uuid $uuid, ${e.getMessage}") } } override def getOrganizationsForUser(user: User) (implicit ec: ExecutionContext): Future[List[(String, Role)]] = user.id match { case None => Future.failed(new RuntimeException(s"User with ID ${user.id} does not exist.")) case Some(id) => future { try { db.withTransaction { implicit session => val organizations = for { orgUser <- organizationUsersTable.filter(_.userId === id) org <- orgUser.organizationsFk.filter(_.deletedAt.isEmpty) } yield (org.name, orgUser.role) val orgList = organizations.list map { case (name, role) => name -> Role(role) } val teams = for { teamUser <- teamUsersTable.filter(_.userId === id) team <- teamUser.teamsFk.filter(_.deletedAt.isEmpty) org <- team.organizationsFk.filter(_.deletedAt.isEmpty) } yield (org.name) val teamsList = teams.list.filterNot(orgList.toMap.contains(_)).toSet.toList orgList union teamsList.map { name => name -> Role.Team } } } catch { case e: Throwable => sys.error(s"Unable to fetch organizations for user $user: ${e.getMessage}") } } } override def getTeamsForUser(org: Organization, user: User) (implicit ec: ExecutionContext): Future[List[(String, Role)]] = (org.id, user.id) match { case (Some(orgId), Some(userId)) => future { try { db.withTransaction { implicit session => val teams = for { teamUser <- teamUsersTable.filter(_.userId === userId) team <- teamUser.teamsFk.filter(t => t.organizationId === orgId.toLong && t.deletedAt.isEmpty) } yield (team.name, teamUser.role) teams.list map { case (teamName, role) => teamName -> Role(role) } } } catch { case e: Throwable => sys.error(s"Unable to fetch organizations for 
user $user: ${e.getMessage}") } } case _ => Future.failed(new RuntimeException(s"Both organization ${org.id} and user ${user.id} must exist.")) } override def getUsersForOrganization(organization: Organization) (implicit ec: ExecutionContext): Future[List[(User, Role)]] = organization.id match { case None => Future.failed(new RuntimeException(s"Organization with id ${organization.id} does not exist.")) case Some(id) => future { try { db.withTransaction { implicit session => val users = for { orgUser <- organizationUsersTable.filter(_.organizationId === id.toLong) user <- orgUser.usersFk } yield (user.id, user.firstName, user.lastName, user.email, user.password, user.salt, orgUser.role) users.list map { case (userId, first, last, email, password, salt, role) => (User(Some(userId), first, last, email, password, salt), Role(role)) } } } catch { case e: Throwable => sys.error(s"Unable to fetch users for organization ${organization.name}.") } } } override def getUsersForTeam(team: Team) (implicit ec: ExecutionContext): Future[List[(User, Role)]] = team.id match { case None => Future.failed(new RuntimeException(s"Team with id ${team.id} does not exist.")) case Some(id) => future { try { db.withTransaction { implicit session => val users = for { teamUser <- teamUsersTable.filter(_.teamId === id.toLong) user <- teamUser.usersFk } yield (user.id, user.firstName, user.lastName, user.email, user.password, user.salt, teamUser.role) users.list map { case (userId, first, last, email, password, salt, role) => (User(Some(userId), first, last, email, password, salt), Role(role)) } } } } } override def addUserToOrganization(user: User, organization: Organization, role: Role) (implicit ec: ExecutionContext): Future[Boolean] = (user.id, organization.id) match { case (Some(userId), Some(orgId)) => future { try { db.withTransaction { implicit session => if (organizationUsersTable.filter(r => r.userId === userId && r.organizationId === orgId.toLong).list.isEmpty) { organizationUsersTable 
+= OrganizationUsersRow(1, orgId.toLong, userId, role.value) true } else { false } } } catch { case e: Throwable => log.warn(s"Error while adding user $userId to organization $orgId: ${e.getMessage}") sys.error(s"Unable to add user $userId to organization $orgId.") } } case _ => Future.failed(new RuntimeException(s"Cannot add user to organization unless both exist.")) } override def addUserToTeam(user: User, team: Team, role: Role) (implicit ec: ExecutionContext): Future[Boolean] = (user.id, team.id) match { case (Some(userId), Some(teamId)) => future { try { db.withTransaction { implicit session => if (teamUsersTable.filter(r => r.userId === userId && r.teamId === teamId.toLong).list.isEmpty) { teamUsersTable += TeamUsersRow(1, teamId.toLong, userId, role.value) true } else false } } catch { case e: Throwable => sys.error(s"Unable to add user $userId to team $teamId.") } } case _ => Future.failed(new RuntimeException(s"Cannot add user to team unless both exist.")) } override def deleteUserFromOrganization(user: User, organization: Organization) (implicit ec: ExecutionContext): Future[Unit] = (user.id, organization.id) match { case (Some(userId), Some(orgId)) => future { try { db.withTransaction { implicit session => organizationUsersTable.filter(f => f.userId === userId && f.organizationId === orgId.toLong).delete } } catch { case e: Throwable => sys.error(s"Unable to delete user $userId from organization $orgId.") } } case _ => Future.failed(new RuntimeException(s"Cannot add user to organization unless both exist.")) } override def deleteUserFromTeam(user: User, team: Team) (implicit ec: ExecutionContext): Future[Unit] = (user.id, team.id) match { case (Some(userId), Some(teamId)) => future { try { db.withTransaction { implicit session => teamUsersTable.filter(f => f.userId === userId && f.teamId === teamId.toLong).delete } } catch { case e: Throwable => sys.error(s"Unable to delete user $userId from team $teamId.") } } case _ => Future.failed(new 
RuntimeException(s"Cannot add user to team unless both exist.")) } override def changeOrganizationRole(user: User, organization: Organization, role: Role) (implicit ec: ExecutionContext): Future[Unit] = (user.id, organization.id) match { case (Some(userId), Some(orgId)) => future { try { db.withTransaction { implicit session => organizationUsersTable.filter(ou => ou.organizationId === orgId.toLong && ou.userId === userId).map(_.role).update(role.value) match { case n if n !=1 => sys.error(s"Failed to update role of user $userId in organization $orgId.") case _ => } } } catch { case e: Throwable => sys.error(s"Unable to modify role of user $userId in organization $orgId.") } } case _ => Future.failed(new RuntimeException(s"Cannot modify role of user in organization unless both exist.")) } override def changeTeamRole(user: User, team: Team, role: Role) (implicit ec: ExecutionContext): Future[Unit] = (user.id, team.id) match { case (Some(userId), Some(teamId)) => future { try { db.withTransaction { implicit session => teamUsersTable.filter(tu => tu.teamId === teamId.toLong && tu.userId === userId).map(_.role).update(role.value) match { case n if n !=1 => sys.error(s"Failed to update role of user $userId in team $teamId.") case _ => } } } catch { case e: Throwable => sys.error(s"Unable to add user $userId to team $teamId.") } } case _ => Future.failed(new RuntimeException(s"Cannot add user to team unless both exist.")) } override def createSessionToken(userId: Long, creationTime: DateTime, expirationTime: DateTime) (implicit ec: ExecutionContext): Future[SessionToken] = future { db.withTransaction { implicit session => val token = UUID.randomUUID().toString val tokenId = (sessionTokensTable returning sessionTokensTable.map(_.id)) += SessionTokensRow(1, userId, token, new Timestamp(creationTime.getMillis), new Timestamp(expirationTime.getMillis)) SessionToken(Some(tokenId), userId, token, creationTime, expirationTime) } } override def findUserByToken(tokenString: 
String, expirationTime: DateTime) (implicit ec: ExecutionContext): Future[Option[User]] = future { db.withTransaction { implicit session => val now = new Timestamp(expirationTime.getMillis) val later = new Timestamp(new DateTime(now).plusHours(1).getMillis) val query = for { token <- sessionTokensTable.filter(t => t.token === tokenString && t.expirationTime > now) user <- token.usersFk } yield (token.id, user.id, user.firstName, user.lastName, user.email, user.password, user.salt) val users = query.list map { case (tokenId, id, first, last, email, password, salt) => sessionTokensTable.filter(t => t.id === tokenId).map(_.expirationTime).update(later) User(Some(id), first, last, email, password, salt) } users.headOption } } override def deleteSessionToken(id: Long)(implicit ec: ExecutionContext): Future[Unit] = future { db.withTransaction { implicit session => sessionTokensTable.filter(_.id === id).delete } } override def deleteExpiredSessionTokens(expirationTime: DateTime)(implicit ec: ExecutionContext): Future[Unit] = future { db.withTransaction { implicit session => val now = new Timestamp(expirationTime.getMillis) db.withTransaction { implicit session => sessionTokensTable.filter(_.expirationTime < now).delete } } } override def getStatus()(implicit ec: ExecutionContext): Future[CaveStatus] = future { db.withTransaction { implicit session => statusSerializer.fromPostgresRecord(statusTable.list) } } }
gilt/cave
core/src/main/scala/com/cave/metrics/data/postgresql/PostgresDataManagerImpl.scala
Scala
mit
39,816
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ package scaps.api case class Module(organization: String, name: String, revision: String) { def moduleId = s"$organization:$name:$revision" def isSnapshot = revision.endsWith("SNAPSHOT") } object Module { val Unknown = Module("unknown", "unknown", "0.1.0-SNAPSHOT") }
scala-search/scaps
api/shared/src/main/scala/scaps/api/Module.scala
Scala
mpl-2.0
482
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.ml.param import org.apache.spark.SparkFunSuite class ParamsSuite extends SparkFunSuite { test("json encode/decode") { val dummy = new Params { override def copy(extra: ParamMap): Params = defaultCopy(extra) override val uid: String = "dummy" } { // BooleanParam val param = new BooleanParam(dummy, "name", "doc") for (value <- Seq(true, false)) { val json = param.jsonEncode(value) assert(param.jsonDecode(json) === value) } } { // IntParam val param = new IntParam(dummy, "name", "doc") for (value <- Seq(Int.MinValue, -1, 0, 1, Int.MaxValue)) { val json = param.jsonEncode(value) assert(param.jsonDecode(json) === value) } } { // LongParam val param = new LongParam(dummy, "name", "doc") for (value <- Seq(Long.MinValue, -1L, 0L, 1L, Long.MaxValue)) { val json = param.jsonEncode(value) assert(param.jsonDecode(json) === value) } } { // FloatParam val param = new FloatParam(dummy, "name", "doc") for (value <- Seq(Float.NaN, Float.NegativeInfinity, Float.MinValue, -1.0f, -0.5f, 0.0f, Float.MinPositiveValue, 0.5f, 1.0f, Float.MaxValue, Float.PositiveInfinity)) { val json = param.jsonEncode(value) val decoded = param.jsonDecode(json) if (value.isNaN) { assert(decoded.isNaN) } else { 
assert(decoded === value) } } } { // DoubleParam val param = new DoubleParam(dummy, "name", "doc") for (value <- Seq(Double.NaN, Double.NegativeInfinity, Double.MinValue, -1.0, -0.5, 0.0, Double.MinPositiveValue, 0.5, 1.0, Double.MaxValue, Double.PositiveInfinity)) { val json = param.jsonEncode(value) val decoded = param.jsonDecode(json) if (value.isNaN) { assert(decoded.isNaN) } else { assert(decoded === value) } } } { // StringParam val param = new Param[String](dummy, "name", "doc") // Currently we do not support null. for (value <- Seq("", "1", "abc", "quote\\"", "newline\\n")) { val json = param.jsonEncode(value) assert(param.jsonDecode(json) === value) } } { // IntArrayParam val param = new IntArrayParam(dummy, "name", "doc") val values: Seq[Array[Int]] = Seq( Array(), Array(1), Array(Int.MinValue, 0, Int.MaxValue)) for (value <- values) { val json = param.jsonEncode(value) assert(param.jsonDecode(json) === value) } } { // DoubleArrayParam val param = new DoubleArrayParam(dummy, "name", "doc") val values: Seq[Array[Double]] = Seq( Array(), Array(1.0), Array(Double.NaN, Double.NegativeInfinity, Double.MinValue, -1.0, 0.0, Double.MinPositiveValue, 1.0, Double.MaxValue, Double.PositiveInfinity)) for (value <- values) { val json = param.jsonEncode(value) val decoded = param.jsonDecode(json) assert(decoded.length === value.length) decoded.zip(value).foreach { case (actual, expected) => if (expected.isNaN) { assert(actual.isNaN) } else { assert(actual === expected) } } } } { // StringArrayParam val param = new StringArrayParam(dummy, "name", "doc") val values: Seq[Array[String]] = Seq( Array(), Array(""), Array("", "1", "abc", "quote\\"", "newline\\n")) for (value <- values) { val json = param.jsonEncode(value) assert(param.jsonDecode(json) === value) } } } test("param") { val solver = new TestParams() val uid = solver.uid import solver.{maxIter, inputCol} assert(maxIter.name === "maxIter") assert(maxIter.doc === "maximum number of iterations (>= 0)") 
assert(maxIter.parent === uid) assert(maxIter.toString === s"${uid}__maxIter") assert(!maxIter.isValid(-1)) assert(maxIter.isValid(0)) assert(maxIter.isValid(1)) solver.setMaxIter(5) assert(solver.explainParam(maxIter) === "maxIter: maximum number of iterations (>= 0) (default: 10, current: 5)") assert(inputCol.toString === s"${uid}__inputCol") intercept[java.util.NoSuchElementException] { solver.getOrDefault(solver.handleInvalid) } intercept[IllegalArgumentException] { solver.setMaxIter(-1) } } test("param pair") { val solver = new TestParams() import solver.maxIter val pair0 = maxIter -> 5 val pair1 = maxIter.w(5) val pair2 = ParamPair(maxIter, 5) for (pair <- Seq(pair0, pair1, pair2)) { assert(pair.param.eq(maxIter)) assert(pair.value === 5) } intercept[IllegalArgumentException] { val pair = maxIter -> -1 } } test("param map") { val solver = new TestParams() import solver.{maxIter, inputCol} val map0 = ParamMap.empty assert(!map0.contains(maxIter)) map0.put(maxIter, 10) assert(map0.contains(maxIter)) assert(map0(maxIter) === 10) intercept[IllegalArgumentException] { map0.put(maxIter, -1) } assert(!map0.contains(inputCol)) intercept[NoSuchElementException] { map0(inputCol) } map0.put(inputCol -> "input") assert(map0.contains(inputCol)) assert(map0(inputCol) === "input") val map1 = map0.copy val map2 = ParamMap(maxIter -> 10, inputCol -> "input") val map3 = new ParamMap() .put(maxIter, 10) .put(inputCol, "input") val map4 = ParamMap.empty ++ map0 val map5 = ParamMap.empty map5 ++= map0 for (m <- Seq(map1, map2, map3, map4, map5)) { assert(m.contains(maxIter)) assert(m(maxIter) === 10) assert(m.contains(inputCol)) assert(m(inputCol) === "input") } } test("params") { val solver = new TestParams() import solver.{handleInvalid, maxIter, inputCol} val params = solver.params assert(params.length === 3) assert(params(0).eq(handleInvalid), "params must be ordered by name") assert(params(1).eq(inputCol), "params must be ordered by name") assert(params(2).eq(maxIter)) 
assert(!solver.isSet(maxIter)) assert(solver.isDefined(maxIter)) assert(solver.getMaxIter === 10) solver.setMaxIter(100) assert(solver.isSet(maxIter)) assert(solver.getMaxIter === 100) assert(!solver.isSet(inputCol)) assert(!solver.isDefined(inputCol)) intercept[NoSuchElementException](solver.getInputCol) assert(solver.explainParam(maxIter) === "maxIter: maximum number of iterations (>= 0) (default: 10, current: 100)") assert(solver.explainParams() === Seq(handleInvalid, inputCol, maxIter).map(solver.explainParam).mkString("\\n")) assert(solver.getParam("inputCol").eq(inputCol)) assert(solver.getParam("maxIter").eq(maxIter)) assert(solver.hasParam("inputCol")) assert(!solver.hasParam("abc")) intercept[NoSuchElementException] { solver.getParam("abc") } intercept[IllegalArgumentException] { solver.validateParams() } solver.copy(ParamMap(inputCol -> "input")).validateParams() solver.setInputCol("input") assert(solver.isSet(inputCol)) assert(solver.isDefined(inputCol)) assert(solver.getInputCol === "input") solver.validateParams() intercept[IllegalArgumentException] { ParamMap(maxIter -> -10) } intercept[IllegalArgumentException] { solver.setMaxIter(-10) } solver.clearMaxIter() assert(!solver.isSet(maxIter)) // Re-set and clear maxIter using the generic clear API solver.setMaxIter(10) solver.clear(maxIter) assert(!solver.isSet(maxIter)) val copied = solver.copy(ParamMap(solver.maxIter -> 50)) assert(copied.uid === solver.uid) assert(copied.getInputCol === solver.getInputCol) assert(copied.getMaxIter === 50) } test("ParamValidate") { val alwaysTrue = ParamValidators.alwaysTrue[Int] assert(alwaysTrue(1)) val gt1Int = ParamValidators.gt[Int](1) assert(!gt1Int(1) && gt1Int(2)) val gt1Double = ParamValidators.gt[Double](1) assert(!gt1Double(1.0) && gt1Double(1.1)) val gtEq1Int = ParamValidators.gtEq[Int](1) assert(!gtEq1Int(0) && gtEq1Int(1)) val gtEq1Double = ParamValidators.gtEq[Double](1) assert(!gtEq1Double(0.9) && gtEq1Double(1.0)) val lt1Int = 
ParamValidators.lt[Int](1) assert(lt1Int(0) && !lt1Int(1)) val lt1Double = ParamValidators.lt[Double](1) assert(lt1Double(0.9) && !lt1Double(1.0)) val ltEq1Int = ParamValidators.ltEq[Int](1) assert(ltEq1Int(1) && !ltEq1Int(2)) val ltEq1Double = ParamValidators.ltEq[Double](1) assert(ltEq1Double(1.0) && !ltEq1Double(1.1)) val inRange02IntInclusive = ParamValidators.inRange[Int](0, 2) assert(inRange02IntInclusive(0) && inRange02IntInclusive(1) && inRange02IntInclusive(2) && !inRange02IntInclusive(-1) && !inRange02IntInclusive(3)) val inRange02IntExclusive = ParamValidators.inRange[Int](0, 2, lowerInclusive = false, upperInclusive = false) assert(!inRange02IntExclusive(0) && inRange02IntExclusive(1) && !inRange02IntExclusive(2)) val inRange02DoubleInclusive = ParamValidators.inRange[Double](0, 2) assert(inRange02DoubleInclusive(0) && inRange02DoubleInclusive(1) && inRange02DoubleInclusive(2) && !inRange02DoubleInclusive(-0.1) && !inRange02DoubleInclusive(2.1)) val inRange02DoubleExclusive = ParamValidators.inRange[Double](0, 2, lowerInclusive = false, upperInclusive = false) assert(!inRange02DoubleExclusive(0) && inRange02DoubleExclusive(1) && !inRange02DoubleExclusive(2)) val inArray = ParamValidators.inArray[Int](Array(1, 2)) assert(inArray(1) && inArray(2) && !inArray(0)) val arrayLengthGt = ParamValidators.arrayLengthGt[Int](2.0) assert(arrayLengthGt(Array(0, 1, 2)) && !arrayLengthGt(Array(0, 1))) } test("Params.copyValues") { val t = new TestParams() val t2 = t.copy(ParamMap.empty) assert(!t2.isSet(t2.maxIter)) val t3 = t.copy(ParamMap(t.maxIter -> 20)) assert(t3.isSet(t3.maxIter)) } } object ParamsSuite extends SparkFunSuite { /** * Checks common requirements for [[Params.params]]: * - params are ordered by names * - param parent has the same UID as the object's UID * - param name is the same as the param method name * - obj.copy should return the same type as the obj */ def checkParams(obj: Params): Unit = { val clazz = obj.getClass val params = obj.params val 
paramNames = params.map(_.name) require(paramNames === paramNames.sorted, "params must be ordered by names") params.foreach { p => assert(p.parent === obj.uid) assert(obj.getParam(p.name) === p) // TODO: Check that setters return self, which needs special handling for generic types. } val copyMethod = clazz.getMethod("copy", classOf[ParamMap]) val copyReturnType = copyMethod.getReturnType require(copyReturnType === obj.getClass, s"${clazz.getName}.copy should return ${clazz.getName} instead of ${copyReturnType.getName}.") } }
pronix/spark
mllib/src/test/scala/org/apache/spark/ml/param/ParamsSuite.scala
Scala
apache-2.0
12,035
package mesosphere.marathon package core.task.update.impl import java.time.Clock import javax.inject.Inject import akka.event.EventStream import com.google.inject.name.Names import com.typesafe.scalalogging.StrictLogging import mesosphere.marathon.core.condition.Condition import mesosphere.marathon.core.event.UnknownInstanceTerminated import mesosphere.marathon.core.instance.Instance import mesosphere.marathon.core.instance.update.InstanceUpdateOperation import mesosphere.marathon.core.task.termination.{ KillReason, KillService } import mesosphere.marathon.core.task.tracker.{ InstanceTracker, TaskStateOpProcessor } import mesosphere.marathon.core.task.update.TaskStatusUpdateProcessor import mesosphere.marathon.core.task.{ Task, TaskCondition } import mesosphere.marathon.metrics.{ Metrics, ServiceMetric, Timer } import org.apache.mesos.{ Protos => MesosProtos } import scala.concurrent.Future /** * Executes the given TaskStatusUpdateSteps for every update. */ class TaskStatusUpdateProcessorImpl @Inject() ( clock: Clock, instanceTracker: InstanceTracker, stateOpProcessor: TaskStateOpProcessor, driverHolder: MarathonSchedulerDriverHolder, killService: KillService, eventStream: EventStream) extends TaskStatusUpdateProcessor with StrictLogging { import mesosphere.marathon.core.async.ExecutionContexts.global private[this] val publishTimer: Timer = Metrics.timer(ServiceMetric, getClass, "publishFuture") private[this] val killUnknownTaskTimer: Timer = Metrics.timer(ServiceMetric, getClass, "killUnknownTask") logger.info("Started status update processor") override def publish(status: MesosProtos.TaskStatus): Future[Unit] = publishTimer { logger.debug(s"Received status update\n${status}") import TaskStatusUpdateProcessorImpl._ // TODO: should be Timestamp.fromTaskStatus(status), but this breaks unit tests as there are invalid stubs val now = clock.now() val taskId = Task.Id(status.getTaskId) val taskCondition = TaskCondition(status) 
instanceTracker.instance(taskId.instanceId).flatMap { case Some(instance) => // TODO(PODS): we might as well pass the taskCondition here val op = InstanceUpdateOperation.MesosUpdate(instance, status, now) stateOpProcessor.process(op).flatMap(_ => acknowledge(status)) case None if terminalUnknown(taskCondition) => logger.warn(s"Received terminal status update for unknown ${taskId}") eventStream.publish(UnknownInstanceTerminated(taskId.instanceId, taskId.runSpecId, taskCondition)) acknowledge(status) case None if killWhenUnknown(taskCondition) => killUnknownTaskTimer { logger.warn(s"Kill unknown ${taskId}") killService.killUnknownTask(taskId, KillReason.Unknown) acknowledge(status) } case maybeTask: Option[Instance] => val taskStr = taskKnownOrNotStr(maybeTask) logger.info(s"Ignoring ${status.getState} update for $taskStr $taskId") acknowledge(status) } } private[this] def acknowledge(status: MesosProtos.TaskStatus): Future[Unit] = { driverHolder.driver.foreach{ driver => logger.info(s"Acknowledge status update for task ${status.getTaskId.getValue}: ${status.getState} (${status.getMessage})") driver.acknowledgeStatusUpdate(status) } Future.successful(()) } } object TaskStatusUpdateProcessorImpl { lazy val name = Names.named(getClass.getSimpleName) /** Matches all states that are considered terminal for an unknown task */ def terminalUnknown(condition: Condition): Boolean = condition match { case t: Condition.Terminal => true case Condition.Unreachable => true case _ => false } // TODO(PODS): align this with similar extractors/functions private[this] val ignoreWhenUnknown = Set[Condition]( Condition.Killed, Condition.Killing, Condition.Error, Condition.Failed, Condition.Finished, Condition.Unreachable, Condition.Gone, Condition.Dropped, Condition.Unknown ) // It doesn't make sense to kill an unknown task if it is in a terminal or killing state // We'd only get another update for the same task private def killWhenUnknown(condition: Condition): Boolean = { 
!ignoreWhenUnknown.contains(condition) } private def taskKnownOrNotStr(maybeTask: Option[Instance]): String = if (maybeTask.isDefined) "known" else "unknown" }
Caerostris/marathon
src/main/scala/mesosphere/marathon/core/task/update/impl/TaskStatusUpdateProcessorImpl.scala
Scala
apache-2.0
4,438
package com.clemble.query import reactivemongo.api.{BSONSerializationPack} import reactivemongo.bson.{BSONDocumentReader, BSONInteger, BSONDocument} import reactivemongo.play.json.ImplicitBSONHandlers._ import com.clemble.query.model._ import play.api.libs.iteratee.Enumerator import play.api.libs.json.{JsObject} import reactivemongo.api.QueryOpts import reactivemongo.api.collections.GenericQueryBuilder import reactivemongo.api.collections.bson.BSONCollection import reactivemongo.bson._ import reactivemongo.play.iteratees.cursorProducer import scala.concurrent.{Future, ExecutionContext} class MongoBSONQueryFactory[T]( collection: BSONCollection, queryTranslator: QueryTranslator[BSONDocument, BSONDocument] = new MongoBSONQueryTranslator )(implicit format: BSONDocumentReader[T]) extends QueryFactory[T] { override def create(exp: Expression): QueryBuilder[T] = { new MongoBSONQueryBuilder[T](collection.find(queryTranslator.translate(exp))) } } private case class MongoBSONQueryBuilder[T]( var queryBuilder: GenericQueryBuilder[BSONSerializationPack.type] )(implicit val format: BSONDocumentReader[T]) extends QueryBuilder[T] { private var pagination: PaginationParams = PaginationParams.empty override def pagination(paginationParams: PaginationParams): QueryBuilder[T] = { this.pagination = paginationParams this } override def projection(projection: List[Projection]): QueryBuilder[T] = { if (projection.isEmpty) return this val projectionFields = projection.map({ case Include(field) => field -> BSONInteger(1) case Exclude(field) => field -> BSONInteger(0) }) val projectionQuery = BSONDocument(projectionFields) queryBuilder = queryBuilder.projection(projectionQuery) this } override def sorted(sorts: List[SortOrder]): QueryBuilder[T] = { val sortFields = sorts.map({ case Ascending(field) => field -> BSONInteger(1) case Descending(field) => field -> BSONInteger(-1) }) val sortQuery = BSONDocument(sortFields) queryBuilder = queryBuilder.sort(sortQuery) this } override def 
findOne()(implicit ec: ExecutionContext): Future[Option[T]] = { queryBuilder.one[T] } override def findOneWithProjection()(implicit ec: ExecutionContext): Future[Option[JsObject]] = { queryBuilder.one[JsObject].map(_.map(_ - "_id")) } override def find()(implicit ec: ExecutionContext): Enumerator[T] = { queryBuilder.options(QueryOpts(skipN = pagination.offset())).cursor[T]().enumerator(maxDocs = pagination.pageSize) } override def findWithProjection()(implicit ec: ExecutionContext): Enumerator[JsObject] = { queryBuilder.options(QueryOpts(skipN = pagination.offset())).cursor[JsObject]().enumerator(maxDocs = pagination.pageSize).map(_ - "_id") } }
clemble/scala-query-dsl
src/main/scala/com/clemble/query/MongoBSONQueryFactory.scala
Scala
apache-2.0
2,980
package akka.persistence.couchbase import org.apache.commons.codec.binary.Base64 /** * Wraps message bytes to easily support serialization. * * @param bytes of the message. */ case class Message(bytes: Array[Byte]) object Message { def deserialize(s: String): Message = apply(Base64.decodeBase64(s)) def serialize(message: Message): String = Base64.encodeBase64String(message.bytes) }
Product-Foundry/akka-persistence-couchbase
src/main/scala/akka/persistence/couchbase/Message.scala
Scala
apache-2.0
402
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs // License: http://www.gnu.org/licenses/gpl-3.0.en.html // Copyright (c) 2011-2015 ScalaMock Contributors (https://github.com/paulbutcher/ScalaMock/graphs/contributors) // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package org.scalamock.scalatest import org.scalamock.MockFactoryBase import org.scalatest.exceptions.{ StackDepthException, TestFailedException } import org.scalatest.{ Failed, Outcome, TestSuite, SuiteMixin } trait AbstractMockFactory extends TestSuite with SuiteMixin with MockFactoryBase { type ExpectationException = TestFailedException abstract override def withFixture(test: NoArgTest): Outcome = { if (autoVerify) { withExpectations { val outcome = super.withFixture(test) outcome match { case Failed(throwable) => // MockFactoryBase does not know how to handle ScalaTest Outcome. 
// Throw error that caused test failure to prevent hiding it by // "unsatisfied expectation" exception (see issue #72) throw throwable case _ => outcome } } } else { super.withFixture(test) } } protected def newExpectationException(message: String, methodName: Option[Symbol]) = new TestFailedException({ (e: StackDepthException) => Some(message) }, None, failedCodeStackDepthFn(methodName)) protected var autoVerify = true }
espinhogr/ensime-server
testutil/src/main/scala/org/scalamock/scalatest/AbstractMockFactory.scala
Scala
gpl-3.0
2,476
package pl.touk.nussknacker.engine.management.streaming import pl.touk.nussknacker.engine.build.{ScenarioBuilder, GraphBuilder} import pl.touk.nussknacker.engine.graph.EspProcess import pl.touk.nussknacker.engine.graph.node.SubsequentNode import pl.touk.nussknacker.engine.spel object SampleProcess { import spel.Implicits._ def prepareProcess(id: String, parallelism: Option[Int] = None) : EspProcess = { val baseProcessBuilder = ScenarioBuilder.streaming(id) parallelism.map(baseProcessBuilder.parallelism).getOrElse(baseProcessBuilder) .source("startProcess", "kafka-transaction") .filter("nightFilter", "true", endWithMessage("endNight", "Odrzucenie noc")) .emptySink("endSend", "sendSms", "value" -> "'message'") } def kafkaProcess(id: String, topic: String) : EspProcess = { ScenarioBuilder .streaming(id) .source("startProcess", "real-kafka", "topic" -> s"'$topic'") .emptySink("end", "kafka-string", "topic" -> s"'output-$id'", "value" -> "#input") } private def endWithMessage(idSuffix: String, message: String): SubsequentNode = { GraphBuilder .buildVariable("message" + idSuffix, "output", "message" -> s"'$message'") .emptySink("end" + idSuffix, "monitor") } }
TouK/nussknacker
engine/flink/management/src/it/scala/pl/touk/nussknacker/engine/management/streaming/SampleProcess.scala
Scala
apache-2.0
1,258
package shopScala.promises

import java.lang.Thread.sleep

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future, Promise}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success, Try}

/**
 * Demonstrates completing a Promise synchronously: the (slow, blocking)
 * computation runs on the constructing thread, so the returned Future is
 * already completed when handed to the caller.
 */
object PromiseSync extends App {

  // Timestamp helper so the console output shows when each step ran.
  def now() = new java.util.Date()

  // Simulates a slow blocking computation (5 s).
  def hello = {
    sleep(5000)
    "hello"
  }

  /**
   * Builds the Future by completing a Promise on the current thread.
   * Note: `hello` blocks here, inside construction — nothing is asynchronous.
   */
  def helloSync: Future[String] = {
    println(now() + ": constructing the future ...")
    val p = Promise[String]()
    p.complete(Try {
      hello
    })
    println(now() + ": returning the future ...")
    p.future
  }

  val f: Future[String] = helloSync

  // Fix: the original registered an onComplete callback and then let main()
  // fall off the end. The global ExecutionContext uses daemon threads, so the
  // JVM could exit before the callback printed anything. Folding the reporting
  // into a Future gives us a handle we can wait on.
  val reported: Future[Unit] = f.transform { result =>
    result match {
      case Success(value) => println(now() + ": result = " + value)
      case Failure(e)     => println(now() + ": exception = " + e); e.printStackTrace()
    }
    Success(())
  }

  // Blocking is acceptable at the very edge of a demo app.
  Await.ready(reported, Duration.Inf)
}
hermannhueck/reactive-mongo-access
src/main/scala/shopScala/promises/PromiseSync.scala
Scala
apache-2.0
756
package crochet

import util.DynamicVariable
import javax.servlet.http.{HttpSession, HttpServletResponse, HttpServletRequest}

import scala.util.control.NonFatal

/**
 * This trait provides the basic dynamic environment for a web API
 *
 * @author Xavier Llora
 * @date Jan 10, 2010 at 2:38:45 PM
 *
 */
protected trait CrochetDynamicEnvironment {

  val version = "0.1.6vcli"

  //
  // Dynamic variables available to the method
  //
  protected val pathVal         = new DynamicVariable[String](null)
  protected val requestVal      = new DynamicVariable[HttpServletRequest](null)
  protected val responseVal     = new DynamicVariable[HttpServletResponse](null)
  protected val sessionVal      = new DynamicVariable[Option[HttpSession]](null)
  protected val headerVal       = new DynamicVariable[Map[String,String]](null)
  protected val paramVal        = new DynamicVariable[Map[String,String]](null)
  protected val paramMapVal     = new DynamicVariable[Map[String,Array[String]]](null)
  protected val elementsVal     = new DynamicVariable[List[String]](null)
  protected val messageVal      = new DynamicVariable[Any](null)
  protected val errorVal        = new DynamicVariable[Any](null)
  protected val errorSummaryVal = new DynamicVariable[String](null)

  //
  // Exposed names of the dynamic variables (read the current binding)
  //
  def path         = pathVal.value
  def request      = requestVal.value
  def response     = responseVal.value
  def session      = sessionVal.value
  def header       = headerVal.value
  def params       = paramVal.value
  def paramsMap    = paramMapVal.value
  def elements     = elementsVal.value
  def message      = messageVal.value
  def error        = errorVal.value
  def errorSummary = errorSummaryVal.value

  // Auxiliar methods

  /** Returns the request's session, or None if absent or if looking it up fails. */
  protected def extractSession(request: HttpServletRequest): Option[HttpSession] = {
    try {
      val s = request.getSession
      if (s == null) None else Some(s)
    } catch {
      // Fix: the original `case _ => None` swallowed every Throwable, including
      // fatal ones (OutOfMemoryError, InterruptedException). Only catch NonFatal.
      case NonFatal(_) => None
    }
  }
}
xllora/Crochet
src/main/scala/CrochetDynamicEnvironment.scala
Scala
bsd-3-clause
1,982
package io.scalajs.nodejs.os

import scala.scalajs.js

/**
  * User Information Object
  * Scala.js value class carrying OS user information — presumably the shape
  * returned by Node's `os.userInfo()` (verify against callers).
  * @param uid      numeric user id
  * @param gid      numeric group id
  * @param username login name
  * @param homedir  home directory path
  * @param shell    login shell path
  * @example {{{ {"uid":501,"gid":20,"username":"ldaniels","homedir":"/Users/ldaniels","shell":"/bin/bash"} }}}
  */
class UserInfoObject(val uid: Int, val gid: Int, val username: String, val homedir: String, val shell: String)
  extends js.Object
scalajs-io/nodejs
app/common/src/main/scala/io/scalajs/nodejs/os/UserInfoObject.scala
Scala
apache-2.0
339
/**
 * Copyright (c) 2002-2014, OnPoint Digital, Inc. All rights reserved
 *
 * THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * @author Alex Westphal 09/Jun/2014
 * @version 09/Jun/2014
 */
package timez.syntax.time

import java.time.ZonedDateTime
import java.time.temporal.TemporalField

/** Short accessor aliases over [[java.time.ZonedDateTime]]'s getters. */
trait ZonedDateTimeOps extends Ops[ZonedDateTime] {

  /** Value of the given temporal field (delegates to `ZonedDateTime#get`). */
  def apply(field: TemporalField): Int = self.get(field)

  def dayOfMonth: Int = self.getDayOfMonth
  def dayOfWeek = self.getDayOfWeek
  def dayOfYear: Int = self.getDayOfYear

  // Fix: the original accessors were misspelled (`dayofWeek`, `dayofYear`).
  // Correctly-cased versions are added above; the misspelled ones are kept
  // (deprecated) so existing callers keep compiling.
  @deprecated("use dayOfWeek", "0.1")
  def dayofWeek = self.getDayOfWeek
  @deprecated("use dayOfYear", "0.1")
  def dayofYear: Int = self.getDayOfYear

  def hour: Int = self.getHour
  def minute: Int = self.getMinute
  def month = self.getMonth
  def offset = self.getOffset
  def second: Int = self.getSecond
  def year: Int = self.getYear
  def zone = self.getZone
}

/** Mixin providing the implicit enrichment of ZonedDateTime. */
trait ZonedDateTimeSyntax {
  implicit def ToZonedDateTimeOps(dateTime: ZonedDateTime): ZonedDateTimeOps = new ZonedDateTimeOps {
    override def self = dateTime
  }
}
alexwestphal/timez
src/main/scala/timez/syntax/time/ZonedDateTimeSyntax.scala
Scala
bsd-3-clause
1,292
package services

import java.security.MessageDigest

/**
 * Salted MD5 hashing of email addresses.
 *
 * NOTE(review): MD5 is cryptographically broken; acceptable only for
 * non-security identifiers/deduplication, not for anything adversarial.
 *
 * Created by unoedx on 17/05/16.
 */
object Hash {

  // A fresh digest per call: MessageDigest instances are stateful and not
  // thread-safe, so sharing a single instance would corrupt concurrent hashes.
  private def md = MessageDigest.getInstance("MD5");

  private val HASH_SALT = "sad9834jkfad89gvv3qy1@#%agad4wvaguy8ab5sr46sr8eg435fb4s6e8r3g54btr68h3fd5g4hs8th6sd5fh4683hdh445yhw54w"

  /** Salted double-MD5 of the given email, as a lowercase hex string. */
  def hashEmail(email: String): String = md5(HASH_SALT + md5(email))

  // Fix: the original used `s.getBytes` (platform-default charset), which
  // yields different hashes for non-ASCII input across systems; UTF-8 is now
  // explicit. Hex encoding uses the standard library instead of commons-codec
  // (same lowercase-hex output as Hex.encodeHexString).
  private def md5(s: String): String =
    md.digest(s.getBytes("UTF-8")).map("%02x".format(_)).mkString
}
waveinch/ses-transactional
app/services/Hash.scala
Scala
apache-2.0
487
package io.dmitryivanov.tcpfrontman

import io.netty.channel._

/**
 * Backend-side handler of the TCP proxy: every message read from the backend
 * channel is flushed to the frontend (inbound) channel, with back-pressure via
 * explicit read() calls.
 */
class TcpProxyBackendHandler(inboundChannel: Channel) extends ChannelInboundHandlerAdapter {

  import TcpProxyFrontendHandler._

  // Kick off reading from the backend as soon as the connection is established.
  override def channelActive(ctx: ChannelHandlerContext): Unit = ctx.read()

  override def channelRead(ctx: ChannelHandlerContext, msg: scala.Any): Unit = {
    // Forward the message to the frontend; request the next read only once the
    // write succeeded, otherwise tear the connection down.
    val onWriteComplete = new ChannelFutureListener() {
      override def operationComplete(future: ChannelFuture): Unit =
        if (future.isSuccess) ctx.channel().read()
        else future.channel().close()
    }
    inboundChannel.writeAndFlush(msg).addListener(onWriteComplete)
  }

  // Backend went away: flush what's pending to the frontend, then close it.
  override def channelInactive(ctx: ChannelHandlerContext): Unit =
    closeOnFlush(inboundChannel)

  override def exceptionCaught(ctx: ChannelHandlerContext, cause: Throwable): Unit = {
    cause.printStackTrace()
    closeOnFlush(ctx.channel())
  }
}
ajantis/tcp-frontman
src/main/scala/io/dmitryivanov/tcpfrontman/TcpProxyBackendHandler.scala
Scala
apache-2.0
889
package com.crobox.clickhouse.dsl

/**
 * Source of a SELECT's FROM clause: either a nested (sub)query or a concrete table.
 */
sealed trait FromQuery extends Query with OperationalQuery {
  // A FROM node contributes itself as the `from` part of the internal query tree.
  override val internalQuery: InternalQuery = InternalQuery(from = Some(this))
  // Optional alias for the source (e.g. `FROM x AS alias`).
  val alias: Option[String]
  // Whether the FINAL modifier is applied to this source.
  val finalized: Boolean
}

/** FROM (subquery) — wraps another operational query as the source. */
sealed case class InnerFromQuery(innerQuery: OperationalQuery, alias: Option[String] = None)
    extends FromQuery {

  /** Queries can never have 'final' clause: Illegal FINAL */
  override val finalized = false
}

/** FROM table — a concrete table source, optionally aliased and/or FINAL. */
sealed case class TableFromQuery[T <: Table](table: T, alias: Option[String] = None, finalized: Boolean = false)
    extends FromQuery
crobox/clickhouse-scala-client
dsl/src/main/scala/com.crobox.clickhouse/dsl/FromQuery.scala
Scala
lgpl-3.0
578
/*
 * Copyright 2015 LG CNS.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package scouter.server.netio.service.net;

import java.net.ServerSocket
import java.net.Socket
import java.util.concurrent.ExecutorService

import scouter.server.Configure
import scouter.server.Logger
import scouter.server.util.ThreadScala
import scouter.util.FileUtil
import scouter.util.ThreadUtil

/**
 * TCP service endpoint: accepts client connections in a daemon thread and
 * hands each accepted socket to a pooled ServiceWorker.
 */
object TcpServer {
    val conf = Configure.getInstance();

    val threadPool = ThreadUtil.createExecutor("ServiceServer", 30, 1000, 10000, true);

    ThreadScala.startDaemon("scouter.server.netio.service.net.TcpServer") {
        Logger.println("\ttcp_port=" + conf.tcp_port);
        // Fix: these two log lines used "\tcp_..." — the backslash consumed the
        // leading 't', so the option names were printed as "cp_agent_so_timeout"
        // and "cp_client_so_timeout".
        Logger.println("\ttcp_agent_so_timeout=" + conf.tcp_agent_so_timeout);
        Logger.println("\ttcp_client_so_timeout=" + conf.tcp_client_so_timeout);
        var server: ServerSocket = null;
        try {
            server = new ServerSocket(conf.tcp_port);
            while (true) {
                val client = server.accept();
                // TODO: needs careful testing
                client.setSoTimeout(conf.tcp_client_so_timeout);
                client.setReuseAddress(true);
                try {
                    threadPool.execute(new ServiceWorker(client));
                } catch {
                    case e: Throwable =>
                        e.printStackTrace();
                        // Don't leak the accepted socket when the worker cannot be
                        // scheduled (e.g. rejected execution from a saturated pool).
                        try { client.close() } catch { case _: Throwable => }
                }
            }
        } catch {
            // Keep the broad catch: this is the top of a daemon thread and the
            // failure must be logged before the thread dies.
            case e: Throwable => Logger.println("S167", 1, "tcp port=" + conf.tcp_port, e);
        } finally {
            FileUtil.close(server);
        }
    }
}
jw0201/scouter
scouter.server/src/scouter/server/netio/service/net/TcpServer.scala
Scala
apache-2.0
2,079
// sbt 0.7-style plugin definition: declares the plugin dependencies for this build.
class Plugins(info: sbt.ProjectInfo) extends sbt.PluginDefinition(info) {
  // Maven repository hosting Coda Hale's sbt plugins.
  val codasRepo = "codahale.com" at "http://repo.codahale.com/"
  // rsync deployment plugin.
  val rsync = "com.codahale" % "rsync-sbt" % "0.1.1"
}
jamesgolick/sbt-ruby
project/plugins/Plugins.scala
Scala
mit
193
package xyz.sethy.minigame.tasks

import org.bukkit.entity.Player
import org.bukkit.{Bukkit, ChatColor, GameMode, Location}
import org.bukkit.scheduler.BukkitRunnable

/**
 * Pre-game task: while fewer than 20 players are online, runs a 10-second
 * countdown that broadcasts the remaining time and keeps players in survival
 * mode at the lobby spawn.
 *
 * Created by Seth on 29/04/2017.
 */
class PreGameTask extends BukkitRunnable {
  override def run(): Unit = {
    val online = Bukkit.getOnlinePlayers.size()
    if(online < 20) {
      val countdown = new BukkitRunnable {
        // Fix: the original declared `val i = 10`, so `i` never changed and the
        // `i != 10` broadcast below could never fire. The counter must be mutable
        // and decremented on each tick.
        var i = 10
        override def run(): Unit = {
          if(i != 10) {
            Bukkit.broadcastMessage(ChatColor.translateAlternateColorCodes('&', "&7[Game] &eThe game is starting in &a" + i + "&e seconds."))
          }
          val it = Bukkit.getOnlinePlayers.iterator()
          while(it.hasNext) {
            val player = it.next()
            player.setGameMode(GameMode.SURVIVAL)
            player.teleport(new Location(Bukkit.getWorld("world"), 0, 0, 0))
          }
          i -= 1
          if(i < 0) cancel() // stop ticking once the countdown has completed
        }
      }
      // NOTE(review): this runnable is created but never scheduled — it needs
      // something like countdown.runTaskTimer(plugin, 0L, 20L), which requires
      // the owning Plugin instance this class doesn't have. Confirm where
      // scheduling is supposed to happen.
      countdown
    }
  }
}
McAllisterSoftware/Minigame
src/main/scala/xyz/sethy/minigame/tasks/PreGameTask.scala
Scala
agpl-3.0
982
package sodium

/**
 * Handle on a registered listener; calling unlisten() deregisters it.
 */
abstract class Listener { outer =>

  def unlisten()

  /**
   * Combine listeners into one where a single unlisten() invocation will unlisten
   * both the inputs.
   */
  final def append(two: Listener): Listener =
    new Listener {
      // Unlistening the combined handle unlistens this listener first, then `two`.
      override def unlisten() {
        outer.unlisten()
        two.unlisten()
      }
    }
}
kevintvh/sodium
scala/src/main/scala/sodium/Listener.scala
Scala
bsd-3-clause
365
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.connector import scala.collection.JavaConverters._ import org.apache.spark.{SparkConf, SparkException} import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.connector.catalog.CatalogV2Util.withDefaultOwnership import org.apache.spark.sql.connector.catalog.Table import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types._ trait AlterTableTests extends SharedSparkSession { override def sparkConf: SparkConf = super.sparkConf .setAppName("test") .set("spark.sql.parquet.columnarReaderBatchSize", "4096") .set("spark.sql.sources.useV1SourceList", "avro") .set("spark.sql.extensions", "com.intel.oap.ColumnarPlugin") .set("spark.sql.execution.arrow.maxRecordsPerBatch", "4096") //.set("spark.shuffle.manager", "org.apache.spark.shuffle.sort.ColumnarShuffleManager") .set("spark.memory.offHeap.enabled", "true") .set("spark.memory.offHeap.size", "50m") .set("spark.sql.join.preferSortMergeJoin", "false") .set("spark.sql.columnar.codegen.hashAggregate", "false") .set("spark.oap.sql.columnar.wholestagecodegen", "false") .set("spark.sql.columnar.window", "false") .set("spark.unsafe.exceptionOnMemoryLeak", "false") 
//.set("spark.sql.columnar.tmp_dir", "/codegen/nativesql/") .set("spark.sql.columnar.sort.broadcastJoin", "true") .set("spark.oap.sql.columnar.preferColumnar", "true") protected def getTableMetadata(tableName: String): Table protected val catalogAndNamespace: String protected val v2Format: String private def fullTableName(tableName: String): String = { if (catalogAndNamespace.isEmpty) { s"default.$tableName" } else { s"${catalogAndNamespace}table_name" } } test("AlterTable: table does not exist") { val t2 = s"${catalogAndNamespace}fake_table" withTable(t2) { sql(s"CREATE TABLE $t2 (id int) USING $v2Format") val exc = intercept[AnalysisException] { sql(s"ALTER TABLE ${catalogAndNamespace}table_name DROP COLUMN id") } assert(exc.getMessage.contains(s"${catalogAndNamespace}table_name")) assert(exc.getMessage.contains("Table not found")) } } test("AlterTable: change rejected by implementation") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") val exc = intercept[SparkException] { sql(s"ALTER TABLE $t DROP COLUMN id") } assert(exc.getMessage.contains("Unsupported table change")) assert(exc.getMessage.contains("Cannot drop all fields")) // from the implementation val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType().add("id", IntegerType)) } } test("AlterTable: add top-level column") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") sql(s"ALTER TABLE $t ADD COLUMN data string") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType().add("id", IntegerType).add("data", StringType)) } } test("AlterTable: add column with NOT NULL") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") sql(s"ALTER TABLE $t ADD COLUMN data 
string NOT NULL") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === StructType(Seq( StructField("id", IntegerType), StructField("data", StringType, nullable = false)))) } } test("AlterTable: add column with comment") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") sql(s"ALTER TABLE $t ADD COLUMN data string COMMENT 'doc'") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === StructType(Seq( StructField("id", IntegerType), StructField("data", StringType).withComment("doc")))) } } test("AlterTable: add column with interval type") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, point struct<x: double, y: double>) USING $v2Format") val e1 = intercept[AnalysisException](sql(s"ALTER TABLE $t ADD COLUMN data interval")) assert(e1.getMessage.contains("Cannot use interval type in the table schema.")) val e2 = intercept[AnalysisException](sql(s"ALTER TABLE $t ADD COLUMN point.z interval")) assert(e2.getMessage.contains("Cannot use interval type in the table schema.")) } } test("AlterTable: add column with position") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (point struct<x: int>) USING $v2Format") sql(s"ALTER TABLE $t ADD COLUMN a string FIRST") val tableName = fullTableName(t) assert(getTableMetadata(tableName).schema == new StructType() .add("a", StringType) .add("point", new StructType().add("x", IntegerType))) sql(s"ALTER TABLE $t ADD COLUMN b string AFTER point") assert(getTableMetadata(tableName).schema == new StructType() .add("a", StringType) .add("point", new StructType().add("x", IntegerType)) .add("b", StringType)) val e1 = intercept[AnalysisException]( sql(s"ALTER TABLE $t ADD COLUMN c string AFTER non_exist")) assert(e1.getMessage().contains("Couldn't find the 
reference column")) sql(s"ALTER TABLE $t ADD COLUMN point.y int FIRST") assert(getTableMetadata(tableName).schema == new StructType() .add("a", StringType) .add("point", new StructType() .add("y", IntegerType) .add("x", IntegerType)) .add("b", StringType)) sql(s"ALTER TABLE $t ADD COLUMN point.z int AFTER x") assert(getTableMetadata(tableName).schema == new StructType() .add("a", StringType) .add("point", new StructType() .add("y", IntegerType) .add("x", IntegerType) .add("z", IntegerType)) .add("b", StringType)) val e2 = intercept[AnalysisException]( sql(s"ALTER TABLE $t ADD COLUMN point.x2 int AFTER non_exist")) assert(e2.getMessage().contains("Couldn't find the reference column")) } } test("SPARK-30814: add column with position referencing new columns being added") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (a string, b int, point struct<x: double, y: double>) USING $v2Format") sql(s"ALTER TABLE $t ADD COLUMNS (x int AFTER a, y int AFTER x, z int AFTER y)") val tableName = fullTableName(t) assert(getTableMetadata(tableName).schema === new StructType() .add("a", StringType) .add("x", IntegerType) .add("y", IntegerType) .add("z", IntegerType) .add("b", IntegerType) .add("point", new StructType() .add("x", DoubleType) .add("y", DoubleType))) sql(s"ALTER TABLE $t ADD COLUMNS (point.z double AFTER x, point.zz double AFTER z)") assert(getTableMetadata(tableName).schema === new StructType() .add("a", StringType) .add("x", IntegerType) .add("y", IntegerType) .add("z", IntegerType) .add("b", IntegerType) .add("point", new StructType() .add("x", DoubleType) .add("z", DoubleType) .add("zz", DoubleType) .add("y", DoubleType))) // The new column being referenced should come before being referenced. 
val e = intercept[AnalysisException]( sql(s"ALTER TABLE $t ADD COLUMNS (yy int AFTER xx, xx int)")) assert(e.getMessage().contains("Couldn't find the reference column for AFTER xx at root")) } } test("AlterTable: add multiple columns") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") sql(s"ALTER TABLE $t ADD COLUMNS data string COMMENT 'doc', ts timestamp") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === StructType(Seq( StructField("id", IntegerType), StructField("data", StringType).withComment("doc"), StructField("ts", TimestampType)))) } } test("AlterTable: add nested column") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, point struct<x: double, y: double>) USING $v2Format") sql(s"ALTER TABLE $t ADD COLUMN point.z double") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("point", StructType(Seq( StructField("x", DoubleType), StructField("y", DoubleType), StructField("z", DoubleType))))) } } test("AlterTable: add nested column to map key") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, points map<struct<x: double, y: double>, bigint>) " + s"USING $v2Format") sql(s"ALTER TABLE $t ADD COLUMN points.key.z double") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("points", MapType(StructType(Seq( StructField("x", DoubleType), StructField("y", DoubleType), StructField("z", DoubleType))), LongType))) } } test("AlterTable: add nested column to map value") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, points map<string, struct<x: double, y: 
double>>) " + s"USING $v2Format") sql(s"ALTER TABLE $t ADD COLUMN points.value.z double") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("points", MapType(StringType, StructType(Seq( StructField("x", DoubleType), StructField("y", DoubleType), StructField("z", DoubleType)))))) } } test("AlterTable: add nested column to array element") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, points array<struct<x: double, y: double>>) USING $v2Format") sql(s"ALTER TABLE $t ADD COLUMN points.element.z double") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("points", ArrayType(StructType(Seq( StructField("x", DoubleType), StructField("y", DoubleType), StructField("z", DoubleType)))))) } } test("AlterTable: add complex column") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") sql(s"ALTER TABLE $t ADD COLUMN points array<struct<x: double, y: double>>") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("points", ArrayType(StructType(Seq( StructField("x", DoubleType), StructField("y", DoubleType)))))) } } test("AlterTable: add nested column with comment") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, points array<struct<x: double, y: double>>) USING $v2Format") sql(s"ALTER TABLE $t ADD COLUMN points.element.z double COMMENT 'doc'") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("points", ArrayType(StructType(Seq( StructField("x", 
DoubleType), StructField("y", DoubleType), StructField("z", DoubleType).withComment("doc")))))) } } test("AlterTable: add nested column parent must exist") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") val exc = intercept[AnalysisException] { sql(s"ALTER TABLE $t ADD COLUMN point.z double") } assert(exc.getMessage.contains("point")) assert(exc.getMessage.contains("missing field")) } } test("AlterTable: add column - new column should not exist") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql( s"""CREATE TABLE $t ( |id int, |point struct<x: double, y: double>, |arr array<struct<x: double, y: double>>, |mk map<struct<x: double, y: double>, string>, |mv map<string, struct<x: double, y: double>> |) |USING $v2Format""".stripMargin) Seq("id", "point.x", "arr.element.x", "mk.key.x", "mv.value.x").foreach { field => val e = intercept[AnalysisException] { sql(s"ALTER TABLE $t ADD COLUMNS $field double") } assert(e.getMessage.contains("add")) assert(e.getMessage.contains(s"$field already exists")) } } } test("AlterTable: update column type int -> long") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") sql(s"ALTER TABLE $t ALTER COLUMN id TYPE bigint") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType().add("id", LongType)) } } test("AlterTable: update column type to interval") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") val e = intercept[AnalysisException](sql(s"ALTER TABLE $t ALTER COLUMN id TYPE interval")) assert(e.getMessage.contains("id to interval type")) } } test("AlterTable: SET/DROP NOT NULL") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id bigint NOT NULL) USING $v2Format") sql(s"ALTER TABLE $t ALTER COLUMN id SET NOT NULL") val 
tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType().add("id", LongType, nullable = false)) sql(s"ALTER TABLE $t ALTER COLUMN id DROP NOT NULL") val table2 = getTableMetadata(tableName) assert(table2.name === tableName) assert(table2.schema === new StructType().add("id", LongType)) val e = intercept[AnalysisException] { sql(s"ALTER TABLE $t ALTER COLUMN id SET NOT NULL") } assert(e.message.contains("Cannot change nullable column to non-nullable")) } } test("AlterTable: update nested type float -> double") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, point struct<x: float, y: double>) USING $v2Format") sql(s"ALTER TABLE $t ALTER COLUMN point.x TYPE double") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("point", StructType(Seq( StructField("x", DoubleType), StructField("y", DoubleType))))) } } test("AlterTable: update column with struct type fails") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, point struct<x: double, y: double>) USING $v2Format") val exc = intercept[AnalysisException] { sql(s"ALTER TABLE $t ALTER COLUMN point TYPE struct<x: double, y: double, z: double>") } assert(exc.getMessage.contains("point")) assert(exc.getMessage.contains("update a struct by updating its fields")) val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("point", StructType(Seq( StructField("x", DoubleType), StructField("y", DoubleType))))) } } test("AlterTable: update column with array type fails") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, points array<int>) USING $v2Format") val exc = 
intercept[AnalysisException] { sql(s"ALTER TABLE $t ALTER COLUMN points TYPE array<long>") } assert(exc.getMessage.contains("update the element by updating points.element")) val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("points", ArrayType(IntegerType))) } } test("AlterTable: update column array element type") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, points array<int>) USING $v2Format") sql(s"ALTER TABLE $t ALTER COLUMN points.element TYPE long") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("points", ArrayType(LongType))) } } test("AlterTable: update column with map type fails") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, m map<string, int>) USING $v2Format") val exc = intercept[AnalysisException] { sql(s"ALTER TABLE $t ALTER COLUMN m TYPE map<string, long>") } assert(exc.getMessage.contains("update a map by updating m.key or m.value")) val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("m", MapType(StringType, IntegerType))) } } test("AlterTable: update column map value type") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, m map<string, int>) USING $v2Format") sql(s"ALTER TABLE $t ALTER COLUMN m.value TYPE long") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("m", MapType(StringType, LongType))) } } test("AlterTable: update nested type in map key") { val t = s"${catalogAndNamespace}table_name" withTable(t) { 
sql(s"CREATE TABLE $t (id int, points map<struct<x: float, y: double>, bigint>) " + s"USING $v2Format") sql(s"ALTER TABLE $t ALTER COLUMN points.key.x TYPE double") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("points", MapType(StructType(Seq( StructField("x", DoubleType), StructField("y", DoubleType))), LongType))) } } test("AlterTable: update nested type in map value") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, points map<string, struct<x: float, y: double>>) " + s"USING $v2Format") sql(s"ALTER TABLE $t ALTER COLUMN points.value.x TYPE double") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("points", MapType(StringType, StructType(Seq( StructField("x", DoubleType), StructField("y", DoubleType)))))) } } test("AlterTable: update nested type in array") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, points array<struct<x: float, y: double>>) USING $v2Format") sql(s"ALTER TABLE $t ALTER COLUMN points.element.x TYPE double") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("points", ArrayType(StructType(Seq( StructField("x", DoubleType), StructField("y", DoubleType)))))) } } test("AlterTable: update column must exist") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") val exc = intercept[AnalysisException] { sql(s"ALTER TABLE $t ALTER COLUMN data TYPE string") } assert(exc.getMessage.contains("data")) assert(exc.getMessage.contains("missing field")) } } test("AlterTable: nested update column must exist") { val t = 
s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") val exc = intercept[AnalysisException] { sql(s"ALTER TABLE $t ALTER COLUMN point.x TYPE double") } assert(exc.getMessage.contains("point.x")) assert(exc.getMessage.contains("missing field")) } } test("AlterTable: update column type must be compatible") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") val exc = intercept[AnalysisException] { sql(s"ALTER TABLE $t ALTER COLUMN id TYPE boolean") } assert(exc.getMessage.contains("id")) assert(exc.getMessage.contains("int cannot be cast to boolean")) } } test("AlterTable: update column comment") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") sql(s"ALTER TABLE $t ALTER COLUMN id COMMENT 'doc'") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === StructType(Seq(StructField("id", IntegerType).withComment("doc")))) } } test("AlterTable: update column position") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (a int, b int, point struct<x: int, y: int, z: int>) USING $v2Format") sql(s"ALTER TABLE $t ALTER COLUMN b FIRST") val tableName = fullTableName(t) assert(getTableMetadata(tableName).schema == new StructType() .add("b", IntegerType) .add("a", IntegerType) .add("point", new StructType() .add("x", IntegerType) .add("y", IntegerType) .add("z", IntegerType))) sql(s"ALTER TABLE $t ALTER COLUMN b AFTER point") assert(getTableMetadata(tableName).schema == new StructType() .add("a", IntegerType) .add("point", new StructType() .add("x", IntegerType) .add("y", IntegerType) .add("z", IntegerType)) .add("b", IntegerType)) val e1 = intercept[AnalysisException]( sql(s"ALTER TABLE $t ALTER COLUMN b AFTER non_exist")) assert(e1.getMessage.contains("Couldn't resolve positional argument")) sql(s"ALTER TABLE 
$t ALTER COLUMN point.y FIRST") assert(getTableMetadata(tableName).schema == new StructType() .add("a", IntegerType) .add("point", new StructType() .add("y", IntegerType) .add("x", IntegerType) .add("z", IntegerType)) .add("b", IntegerType)) sql(s"ALTER TABLE $t ALTER COLUMN point.y AFTER z") assert(getTableMetadata(tableName).schema == new StructType() .add("a", IntegerType) .add("point", new StructType() .add("x", IntegerType) .add("z", IntegerType) .add("y", IntegerType)) .add("b", IntegerType)) val e2 = intercept[AnalysisException]( sql(s"ALTER TABLE $t ALTER COLUMN point.y AFTER non_exist")) assert(e2.getMessage.contains("Couldn't resolve positional argument")) // `AlterTable.resolved` checks column existence. intercept[AnalysisException]( sql(s"ALTER TABLE $t ALTER COLUMN a.y AFTER x")) } } test("AlterTable: update nested column comment") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, point struct<x: double, y: double>) USING $v2Format") sql(s"ALTER TABLE $t ALTER COLUMN point.y COMMENT 'doc'") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("point", StructType(Seq( StructField("x", DoubleType), StructField("y", DoubleType).withComment("doc"))))) } } test("AlterTable: update nested column comment in map key") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, points map<struct<x: double, y: double>, bigint>) " + s"USING $v2Format") sql(s"ALTER TABLE $t ALTER COLUMN points.key.y COMMENT 'doc'") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("points", MapType(StructType(Seq( StructField("x", DoubleType), StructField("y", DoubleType).withComment("doc"))), LongType))) } } test("AlterTable: update nested column comment in map 
value") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, points map<string, struct<x: double, y: double>>) " + s"USING $v2Format") sql(s"ALTER TABLE $t ALTER COLUMN points.value.y COMMENT 'doc'") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("points", MapType(StringType, StructType(Seq( StructField("x", DoubleType), StructField("y", DoubleType).withComment("doc")))))) } } test("AlterTable: update nested column comment in array") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, points array<struct<x: double, y: double>>) USING $v2Format") sql(s"ALTER TABLE $t ALTER COLUMN points.element.y COMMENT 'doc'") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("points", ArrayType(StructType(Seq( StructField("x", DoubleType), StructField("y", DoubleType).withComment("doc")))))) } } test("AlterTable: comment update column must exist") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") val exc = intercept[AnalysisException] { sql(s"ALTER TABLE $t ALTER COLUMN data COMMENT 'doc'") } assert(exc.getMessage.contains("data")) assert(exc.getMessage.contains("missing field")) } } test("AlterTable: nested comment update column must exist") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") val exc = intercept[AnalysisException] { sql(s"ALTER TABLE $t ALTER COLUMN point.x COMMENT 'doc'") } assert(exc.getMessage.contains("point.x")) assert(exc.getMessage.contains("missing field")) } } test("AlterTable: rename column") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") 
sql(s"ALTER TABLE $t RENAME COLUMN id TO user_id") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType().add("user_id", IntegerType)) } } test("AlterTable: rename nested column") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, point struct<x: double, y: double>) USING $v2Format") sql(s"ALTER TABLE $t RENAME COLUMN point.y TO t") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("point", StructType(Seq( StructField("x", DoubleType), StructField("t", DoubleType))))) } } test("AlterTable: rename nested column in map key") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, point map<struct<x: double, y: double>, bigint>) " + s"USING $v2Format") sql(s"ALTER TABLE $t RENAME COLUMN point.key.y TO t") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("point", MapType(StructType(Seq( StructField("x", DoubleType), StructField("t", DoubleType))), LongType))) } } test("AlterTable: rename nested column in map value") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, points map<string, struct<x: double, y: double>>) " + s"USING $v2Format") sql(s"ALTER TABLE $t RENAME COLUMN points.value.y TO t") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("points", MapType(StringType, StructType(Seq( StructField("x", DoubleType), StructField("t", DoubleType)))))) } } test("AlterTable: rename nested column in array element") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE 
$t (id int, points array<struct<x: double, y: double>>) USING $v2Format") sql(s"ALTER TABLE $t RENAME COLUMN points.element.y TO t") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("points", ArrayType(StructType(Seq( StructField("x", DoubleType), StructField("t", DoubleType)))))) } } test("AlterTable: rename column must exist") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") val exc = intercept[AnalysisException] { sql(s"ALTER TABLE $t RENAME COLUMN data TO some_string") } assert(exc.getMessage.contains("data")) assert(exc.getMessage.contains("missing field")) } } test("AlterTable: nested rename column must exist") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") val exc = intercept[AnalysisException] { sql(s"ALTER TABLE $t RENAME COLUMN point.x TO z") } assert(exc.getMessage.contains("point.x")) assert(exc.getMessage.contains("missing field")) } } test("AlterTable: rename column - new name should not exist") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql( s"""CREATE TABLE $t ( |id int, |user_id int, |point struct<x: double, y: double>, |arr array<struct<x: double, y: double>>, |mk map<struct<x: double, y: double>, string>, |mv map<string, struct<x: double, y: double>> |) |USING $v2Format""".stripMargin) Seq( "id" -> "user_id", "point.x" -> "y", "arr.element.x" -> "y", "mk.key.x" -> "y", "mv.value.x" -> "y").foreach { case (field, newName) => val e = intercept[AnalysisException] { sql(s"ALTER TABLE $t RENAME COLUMN $field TO $newName") } assert(e.getMessage.contains("rename")) assert(e.getMessage.contains((field.split("\\\\.").init :+ newName).mkString("."))) assert(e.getMessage.contains("already exists")) } } } test("AlterTable: drop column") { val t = s"${catalogAndNamespace}table_name" withTable(t) 
{ sql(s"CREATE TABLE $t (id int, data string) USING $v2Format") sql(s"ALTER TABLE $t DROP COLUMN data") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType().add("id", IntegerType)) } } test("AlterTable: drop nested column") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, point struct<x: double, y: double, t: double>) " + s"USING $v2Format") sql(s"ALTER TABLE $t DROP COLUMN point.t") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("point", StructType(Seq( StructField("x", DoubleType), StructField("y", DoubleType))))) } } test("AlterTable: drop nested column in map key") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, point map<struct<x: double, y: double>, bigint>) " + s"USING $v2Format") sql(s"ALTER TABLE $t DROP COLUMN point.key.y") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("point", MapType(StructType(Seq( StructField("x", DoubleType))), LongType))) } } test("AlterTable: drop nested column in map value") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, points map<string, struct<x: double, y: double>>) " + s"USING $v2Format") sql(s"ALTER TABLE $t DROP COLUMN points.value.y") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("points", MapType(StringType, StructType(Seq( StructField("x", DoubleType)))))) } } test("AlterTable: drop nested column in array element") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int, points 
array<struct<x: double, y: double>>) USING $v2Format") sql(s"ALTER TABLE $t DROP COLUMN points.element.y") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.schema === new StructType() .add("id", IntegerType) .add("points", ArrayType(StructType(Seq( StructField("x", DoubleType)))))) } } test("AlterTable: drop column must exist") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") val exc = intercept[AnalysisException] { sql(s"ALTER TABLE $t DROP COLUMN data") } assert(exc.getMessage.contains("data")) assert(exc.getMessage.contains("missing field")) } } test("AlterTable: nested drop column must exist") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") val exc = intercept[AnalysisException] { sql(s"ALTER TABLE $t DROP COLUMN point.x") } assert(exc.getMessage.contains("point.x")) assert(exc.getMessage.contains("missing field")) } } test("AlterTable: set location") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") sql(s"ALTER TABLE $t SET LOCATION 's3://bucket/path'") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.properties === withDefaultOwnership(Map("provider" -> v2Format, "location" -> "s3://bucket/path")).asJava) } } test("AlterTable: set partition location") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") val exc = intercept[AnalysisException] { sql(s"ALTER TABLE $t PARTITION(ds='2017-06-10') SET LOCATION 's3://bucket/path'") } assert(exc.getMessage.contains( "ALTER TABLE SET LOCATION does not support partition for v2 tables")) } } test("AlterTable: set table property") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format") 
sql(s"ALTER TABLE $t SET TBLPROPERTIES ('test'='34')") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.properties === withDefaultOwnership(Map("provider" -> v2Format, "test" -> "34")).asJava) } } test("AlterTable: remove table property") { val t = s"${catalogAndNamespace}table_name" withTable(t) { sql(s"CREATE TABLE $t (id int) USING $v2Format TBLPROPERTIES('test' = '34')") val tableName = fullTableName(t) val table = getTableMetadata(tableName) assert(table.name === tableName) assert(table.properties === withDefaultOwnership(Map("provider" -> v2Format, "test" -> "34")).asJava) sql(s"ALTER TABLE $t UNSET TBLPROPERTIES ('test')") val updated = getTableMetadata(tableName) assert(updated.name === tableName) assert(updated.properties === withDefaultOwnership(Map("provider" -> v2Format)).asJava) } } }
Intel-bigdata/OAP
oap-native-sql/core/src/test/scala/org/apache/spark/sql/connector/AlterTableTests.scala
Scala
apache-2.0
40,150
/* * Copyright 2012 Arktekk AS * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package no.arktekk.atom.extension.mediarss import org.specs2.mutable.Specification import java.net.URI import no.arktekk.atom.MediaType /** * @author Erlend Hamnaberg<erlend@hamnaberg.net> */ class MediaContentSpec extends Specification { "media content" should { "be created correct using 'image' factory" in { val uri = URI.create("http://example.com/image/123.jpeg") val mediaType = MediaType("image/jpeg") val mc = MediaContent.image(uri, mediaType) mc.url must beEqualTo(uri) mc.mediaType must beEqualTo(mediaType) } "be created correct using 'image' factory with width and height" in { val uri = URI.create("http://example.com/image/123.jpeg") val mediaType = MediaType("image/jpeg") val mc = MediaContent.image(uri, mediaType, 800, 600) mc.url must beEqualTo(uri) mc.mediaType must beEqualTo(mediaType) mc.width must beEqualTo(Some(800)) mc.height must beEqualTo(Some(600)) } } }
arktekk/scala-atom
src/test/scala/no/arktekk/atom/extension/mediarss/MediaContentSpec.scala
Scala
apache-2.0
1,580
package com.mdataset.service.api.model import com.ecfront.ez.framework.core.helper.TimeHelper import com.ecfront.ez.framework.service.storage.foundation._ import com.ecfront.ez.framework.service.storage.mongo.MongoSecureStorage import com.mdataset.lib.basic.model.MdsCollectStatusDTO import scala.beans.BeanProperty /** * 采集状态实体 * * 用于保存各数据项的数据采集状态 */ @Entity("Collect Status") class MdsCollectStatusEntity extends SecureModel { @Index @Label("数据源code") @BeanProperty var code: String = _ @Index @Label("数据项code") @BeanProperty var item_code: String = _ @Label("最后一次执行是否成功") @BeanProperty var status: Boolean = _ @Label("最后一次成功更新的时间") @BeanProperty var last_success_time: Long = _ @Label("扩展信息") @BeanProperty var info: Map[String, Any] = _ } object MdsCollectStatusEntity extends MongoSecureStorage[MdsCollectStatusEntity] { super.customTableName("source_status") def getByCode(code: String, itemCode: String): MdsCollectStatusEntity = { MdsCollectStatusEntity.getByCond(s"""{"code":"$code","item_code":"$itemCode"}""").body } implicit def toVO(entity: MdsCollectStatusEntity): MdsCollectStatusDTO = { val vo = new MdsCollectStatusDTO vo.code = entity.code vo.item_code = entity.item_code vo.last_update_time = TimeHelper.msf.parse(entity.last_success_time + "") vo.info = entity.info vo } }
MDataSet/mds
modules/service_api/src/main/scala/com/mdataset/service/api/model/MdsCollectStatusEntity.scala
Scala
apache-2.0
1,480
package com.seanshubin.templater.domain import scala.annotation.tailrec sealed trait SequenceComparisonResult[+T] { def areSame: Boolean def toMultipleLineString: Seq[String] } object SequenceComparisonResult { case object Same extends SequenceComparisonResult[Nothing] { def areSame = true def toMultipleLineString = Seq() } case class Missing[T](index: Int, actualSeq: Seq[T], expectedSeq: Seq[T]) extends SequenceComparisonResult[T] { def areSame = false def toMultipleLineString = { val element = expectedSeq(index) val header = s"missing element at index $index: $element" Seq(header) } } case class Extra[T](index: Int, actualSeq: Seq[T], expectedSeq: Seq[T]) extends SequenceComparisonResult[T] { def areSame = false def toMultipleLineString = { val element = actualSeq(index) val header = s"extra element at index $index: $element" Seq(header) } } case class Difference[T](index: Int, actualSeq: Seq[T], expectedSeq: Seq[T]) extends SequenceComparisonResult[T] { def areSame = false def toMultipleLineString = { val actual = actualSeq(index) val expected = expectedSeq(index) val header = s"difference at index $index" Seq(header, s"actual : $actual", s"expected: $expected") } } } object SequenceComparison { def compare[T](actualSeq: Seq[T], expectedSeq: Seq[T]): SequenceComparisonResult[T] = { import SequenceComparisonResult._ @tailrec def compareLists(index: Int, remainingActual: List[T], remainingExpected: List[T]): SequenceComparisonResult[T] = { (remainingActual.headOption, remainingExpected.headOption) match { case (Some(actual), Some(expected)) => if (actual == expected) compareLists(index + 1, remainingActual.tail, remainingExpected.tail) else Difference[T](index, actualSeq, expectedSeq) case (Some(actual), None) => Extra(index, actualSeq, expectedSeq) case (None, Some(expected)) => Missing(index, actualSeq, expectedSeq) case (None, None) => Same } } compareLists(0, actualSeq.toList, expectedSeq.toList) } }
SeanShubin/generate-from-template
domain/src/test/scala/com/seanshubin/templater/domain/SequenceComparison.scala
Scala
unlicense
2,184
/* * Copyright (c) 2015 Mario Pastorelli (pastorelli.mario@gmail.com) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package casecsv import org.scalatest.FunSuite import org.scalatest.Matchers._ import casecsv._ class SimpleSuite extends FunSuite { def testConverter[A](desc: String)(value: A, valueString: String) (implicit conv: StringConverter[A]) = { test(desc ++ " '" ++ valueString ++ "' <-> " ++ value.toString) { conv.from(valueString) should be (Right(value)) conv.to(value) should be (valueString) } } def testsFromConverter[A](desc: String) (valuesAndStrings: (String,A)*) (implicit conv: StringConverter[A]) = { valuesAndStrings.foreach { case (valueString, value) => test(desc ++ " '" ++ valueString ++ "' -> " ++ value.toString ) { conv.from(valueString) should be (Right(value)) } } } } class ReadSuite extends SimpleSuite { testsFromConverter("Boolean is instance of StringConverter") ("1" -> true, "true" -> true, "True" -> true, "TRUE" -> true, "0" -> false, "false" -> false, "False" -> false, "FALSE" -> false) testConverter("Boolean is instance of StringConverter")(true, "true") testConverter("Boolean is instance of StringConverter")(false, "false") testConverter("Double is instance of StringConverter") (1.1, "1.1") testConverter("Float is instance of StringConverter") (1.1f, "1.1") testConverter("Int is instance of StringConverter") (1, "1") testConverter("Long is instance of StringConverter") (1l, "1") testConverter("String is instance of StringConverter") ("foo", 
"foo") testConverter[Option[Int]]("Option is instance of StringConverter") (Some(1),"1") testConverter[Option[Int]]("Option is instance of StringConverter") (None,"") }
melrief/casecsv
src/test/scala/casecsv/StringConverterSuite.scala
Scala
apache-2.0
2,798
/* * Copyright 2010 LinkedIn * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.producer.async /* Indicates that client is sending event to a closed queue */ class QueueClosedException(message: String) extends RuntimeException(message) { def this() = this(null) }
jinfei21/kafka
src/kafka/producer/async/QueueClosedException.scala
Scala
apache-2.0
792
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.hive.online import org.apache.spark.sql.catalyst.util._ import org.apache.spark.sql.hive.HiveContext import org.apache.spark.sql.{DataFrame, Row} import org.apache.spark.{SparkConf, SparkContext} import org.scalatest.FunSuite import org.apache.spark.sql.hive.online.OnlineSQLConf._ import org.apache.spark.sql.hive.online.OnlineSQLFunctions._ class OnlineSQLSuite extends FunSuite { val url = "" val master = s"spark://$url:7077" val sparkContext = new SparkContext(master, "TestSQLContext", new SparkConf().set("spark.sql.test", "")) val sqlContext = new HiveContext(sparkContext) import sqlContext._ def printResult(query: DataFrame, batchId: Int): Unit = { query.queryExecution.executedPlan // force to initialize the query plan var rows: Array[Row] = null benchmark { rows = query.collect() } println(s"============{batch = $batchId}=============") rows.foreach(row => println(row.mkString(", "))) println("====================================") } setConf(STREAMED_RELATIONS, "lineitem") setConf(NUMBER_BATCHES, "6") ignore("Q1") { val query = sql( """ |SELECT l_returnflag | ,l_linestatus | ,sum(l_quantity) AS sum_qty | ,sum(l_extendedprice) AS sum_base_price | ,sum(l_extendedprice 
* (1 - l_discount)) AS sum_disc_price | ,sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) AS sum_charge | ,avg(l_quantity) AS avg_qty | ,avg(l_extendedprice) AS avg_price | ,avg(l_discount) AS avg_disc | ,count(*) AS count_order |FROM lineitem |WHERE l_shipdate <= '1998-09-01' |GROUP BY l_returnflag | ,l_linestatus """.stripMargin).online var batchId = 0 while (query.hasNext) { printResult(query.next(), batchId) batchId += 1 } } ignore("Q3") { val query = sql( """ |SELECT o_orderdate | ,o_shippriority | ,sum(l_extendedprice * (1 - l_discount)) AS revenue |FROM customer | ,orders | ,lineitem |WHERE c_mktsegment = 'BUILDING' | AND c_custkey = o_custkey | AND l_orderkey = o_orderkey | AND o_orderdate < '1995-07-01' | AND o_orderdate > '1994-01-01' | AND l_shipdate > '1994-01-01' |GROUP BY o_orderdate | ,o_shippriority """.stripMargin).online var batchId = 0 while (query.hasNext) { printResult(query.next(), batchId) batchId += 1 } } ignore("Q5") { val query = sql( """ |SELECT n_name | ,sum(l_extendedprice * (1 - l_discount)) AS revenue |FROM customer | ,orders | ,lineitem | ,supplier | ,nation | ,region |WHERE c_custkey = o_custkey | AND l_orderkey = o_orderkey | AND l_suppkey = s_suppkey | AND c_nationkey = s_nationkey | AND s_nationkey = n_nationkey | AND n_regionkey = r_regionkey | AND r_name = 'AMERICA' | AND o_orderdate >= '1995-01-01' | AND o_orderdate < '1996-01-01' |GROUP BY n_name """.stripMargin).online var batchId = 0 while (query.hasNext) { printResult(query.next(), batchId) batchId += 1 } } ignore("Q6") { val query = sql( """ |SELECT sum(l_extendedprice * l_discount) AS revenues |FROM lineitem |WHERE l_shipdate >= '1996-01-01' | AND l_shipdate < '1997-01-01' | AND l_discount BETWEEN 0.06 AND 0.08 | AND l_quantity < 24 """.stripMargin).online var batchId = 0 while (query.hasNext) { printResult(query.next(), batchId) batchId += 1 } } ignore("Q7") { val query = sql( """ |SELECT supp_nation | ,cust_nation | ,l_year | ,sum(volume) AS revenue |FROM ( | 
SELECT n1.n_name AS supp_nation | ,n2.n_name AS cust_nation | ,substring(l_shipdate, 1, 4) AS l_year | ,l_extendedprice * (1 - l_discount) AS volume | FROM supplier | ,lineitem | ,orders | ,customer | ,nation n1 | ,nation n2 | WHERE s_suppkey = l_suppkey | AND o_orderkey = l_orderkey | AND c_custkey = o_custkey | AND s_nationkey = n1.n_nationkey | AND c_nationkey = n2.n_nationkey | AND ( | ( | n1.n_name = 'VIETNAM' | AND n2.n_name = 'KENYA' | ) | OR ( | n1.n_name = 'KENYA' | AND n2.n_name = 'VIETNAM' | ) | ) | AND l_shipdate BETWEEN '1995-01-01' | AND '1996-12-31' | ) AS shipping |GROUP BY supp_nation | ,cust_nation | ,l_year """.stripMargin).online var batchId = 0 while (query.hasNext) { printResult(query.next(), batchId) batchId += 1 } } ignore("Q8") { val query = sql( """ |SELECT o_year | ,sum(CASE | WHEN nation = 'JAPAN' | THEN volume | ELSE 0 | END) / sum(volume) AS mkt_share |FROM ( | SELECT substring(o_orderdate, 1, 4) AS o_year | ,l_extendedprice * (1 - l_discount) AS volume | ,n2.n_name AS nation | FROM lineitem | ,part | ,supplier | ,orders | ,customer | ,nation n1 | ,nation n2 | ,region | WHERE p_partkey = l_partkey | AND s_suppkey = l_suppkey | AND l_orderkey = o_orderkey | AND o_custkey = c_custkey | AND c_nationkey = n1.n_nationkey | AND n1.n_regionkey = r_regionkey | AND r_name = 'ASIA' | AND s_nationkey = n2.n_nationkey | AND o_orderdate BETWEEN '1995-01-01' AND '1996-12-31' | AND p_type = 'LARGE POLISHED BRASS' | ) AS all_nations |GROUP BY o_year """.stripMargin).online var batchId = 0 while (query.hasNext) { printResult(query.next(), batchId) batchId += 1 } } ignore("Q9") { val query = sql( """ |SELECT nation | ,o_year | ,sum(amount) AS sum_profit |FROM ( | SELECT n_name AS nation | ,substring(o_orderdate, 1, 4) AS o_year | ,l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity AS amount | FROM lineitem | ,part | ,supplier | ,partsupp | ,orders | ,nation | WHERE s_suppkey = l_suppkey | AND ps_suppkey = l_suppkey | AND ps_partkey = 
l_partkey | AND p_partkey = l_partkey | AND o_orderkey = l_orderkey | AND s_nationkey = n_nationkey | AND p_name LIKE '%ghost%' | ) AS profit |GROUP BY nation | ,o_year """.stripMargin).online var batchId = 0 while (query.hasNext) { printResult(query.next(), batchId) batchId += 1 } } ignore("Q10") { val query = sql( """ |SELECT n_name | ,sum(l_extendedprice * (1 - l_discount)) AS revenue |FROM customer | ,orders | ,lineitem | ,nation |WHERE c_custkey = o_custkey | AND l_orderkey = o_orderkey | AND o_orderdate >= '1994-10-01' | AND o_orderdate < '1995-01-01' | AND l_returnflag = 'R' | AND c_nationkey = n_nationkey |GROUP BY n_name """.stripMargin).online var batchId = 0 while (query.hasNext) { printResult(query.next(), batchId) batchId += 1 } } ignore("Q11") { setConf(STREAMED_RELATIONS, "partsupp") setConf(NUMBER_BATCHES, "4") val query = sql( """ |SELECT n_nationkey | ,value |FROM ( | SELECT 0 AS KEY | ,n_nationkey | ,sum(ps_supplycost * ps_availqty) AS value | FROM partsupp | ,supplier | ,nation | WHERE ps_suppkey = s_suppkey | AND s_nationkey = n_nationkey | GROUP BY n_nationkey | ) AS A | ,( | SELECT 0 AS KEY | ,sum(ps_supplycost * ps_availqty) * 0.00002 AS threshold | FROM partsupp | ,supplier | ,nation | WHERE ps_suppkey = s_suppkey | AND s_nationkey = n_nationkey | ) AS B |WHERE A.KEY = B.KEY | AND value > threshold """.stripMargin).online var batchId = 0 while (query.hasNext) { printResult(query.next(), batchId) batchId += 1 } } ignore("Q12") { val query = sql( """ |SELECT l_shipmode | ,sum(CASE | WHEN o_orderpriority = '1-URGENT' | OR o_orderpriority = '2-HIGH' | THEN 1 | ELSE 0 | END) AS high_line_count | ,sum(CASE | WHEN o_orderpriority <> '1-URGENT' | AND o_orderpriority <> '2-HIGH' | THEN 1 | ELSE 0 | END) AS low_line_count |FROM orders | ,lineitem |WHERE o_orderkey = l_orderkey | AND l_shipmode IN ( | 'RAIL' | ,'MAIL' | ) | AND l_commitdate < l_receiptdate | AND l_shipdate < l_commitdate | AND l_receiptdate >= '1993-01-01' | AND l_receiptdate < 
'1994-01-01' |GROUP BY l_shipmode """.stripMargin).online var batchId = 0 while (query.hasNext) { printResult(query.next(), batchId) batchId += 1 } } ignore("Q14") { val query = sql( """ |SELECT 100.00 * sum(CASE | WHEN p_type LIKE 'PROMO%' | THEN l_extendedprice * (1 - l_discount) | ELSE 0 | END) / sum(l_extendedprice * (1 - l_discount)) AS promo_revenue |FROM lineitem | ,part |WHERE l_partkey = p_partkey | AND l_shipdate >= '1996-12-01' | AND l_shipdate < '1997-01-01' """.stripMargin).online var batchId = 0 while (query.hasNext) { printResult(query.next(), batchId) batchId += 1 } } ignore("Q16") { setConf(STREAMED_RELATIONS, "partsupp") setConf(NUMBER_BATCHES, "4") val query = sql( """ |SELECT p_type | ,p_size | ,count(ps_suppkey) AS supplier_cnt |FROM ( | SELECT p_brand | ,p_type | ,p_size | ,ps_suppkey | FROM partsupp | ,part | WHERE p_partkey = ps_partkey | AND p_brand <> 'Brand#43' | AND p_type NOT LIKE 'STANDARD BURNISHED%' | AND p_size IN ( | 12 | ,16 | ,19 | ,14 | ,9 | ,11 | ,42 | ,30 | ) | ) A |JOIN ( | SELECT s_suppkey | FROM supplier | WHERE s_comment NOT LIKE '%Customer%Complaints%' | GROUP BY s_suppkey | ) B ON ps_suppkey = s_suppkey |GROUP BY p_brand | ,p_type | ,p_size """.stripMargin).online var batchId = 0 while (query.hasNext) { printResult(query.next(), batchId) batchId += 1 } } ignore("Q17") { val query = sql( """ |SELECT sum(l_extendedprice) / 7.0 AS avg_yearly |FROM ( | SELECT p_mfgr | ,l_quantity | ,l_extendedprice | FROM lineitem | ,part | WHERE p_partkey = l_partkey | AND p_brand = 'Brand#13' | AND p_container = 'JUMBO BOX' | ) AS A | ,( | SELECT p_mfgr | ,12 * avg(l_quantity) AS threshold | FROM lineitem | ,part | WHERE l_partkey = p_partkey | GROUP BY p_mfgr | ) AS B |WHERE A.p_mfgr = B.p_mfgr | AND l_quantity < threshold """.stripMargin).online var batchId = 0 while (query.hasNext) { printResult(query.next(), batchId) batchId += 1 } } ignore("Q18") { val query = sql( """ |SELECT c_nationkey | ,sum(l_quantity) |FROM ( | SELECT 
c_nationkey | ,l_quantity | ,o_orderpriority | FROM customer | ,orders | ,lineitem | WHERE c_custkey = o_custkey | AND o_orderkey = l_orderkey | ) A |JOIN ( |SELECT o_orderpriority |FROM ( | SELECT o_orderpriority, sum(l_quantity) AS tot_qty | FROM orders | ,lineitem | WHERE o_orderkey = l_orderkey | GROUP BY o_orderpriority | ) B |WHERE tot_qty > 31000 | ) C ON (A.o_orderpriority = C.o_orderpriority) |GROUP BY c_nationkey """.stripMargin).online var batchId = 0 while (query.hasNext) { printResult(query.next(), batchId) batchId += 1 } } ignore("Q19") { val query = sql( """ |SELECT sum(l_extendedprice * (1 - l_discount)) AS revenue |FROM lineitem | ,part |WHERE p_partkey = l_partkey | AND l_shipmode IN ( | 'RAIL' | ,'AIR REG' | ) | AND l_shipinstruct = 'DELIVER IN PERSON' | AND ( | ( | p_brand = 'Brand#45' | AND p_container IN ( | 'SM CASE' | ,'SM BOX' | ,'SM PACK' | ,'SM PKG' | ) | AND l_quantity >= 7 | AND l_quantity <= 7 + 10 | AND p_size BETWEEN 1 AND 50 | ) | OR ( | p_brand = 'Brand#51' | AND p_container IN ( | 'MED BAG' | ,'MED BOX' | ,'MED PKG' | ,'MED PACK' | ) | AND l_quantity >= 20 | AND l_quantity <= 20 + 10 | AND p_size BETWEEN 1 AND 10 | ) | OR ( | p_brand = 'Brand#51' | AND p_container IN ( | 'LG CASE' | ,'LG BOX' | ,'LG PACK' | ,'LG PKG' | ) | AND l_quantity >= 28 | AND l_quantity <= 28 + 10 | AND p_size BETWEEN 1 AND 15 | ) | ) """.stripMargin).online var batchId = 0 while (query.hasNext) { printResult(query.next(), batchId) batchId += 1 } } ignore("Q20") { val query = sql( """ |SELECT s_name | ,s_address |FROM ( | SELECT s_name | ,s_address | ,s_suppkey | FROM supplier | ,nation | WHERE s_nationkey = n_nationkey | AND n_name = 'ETHIOPIA' | ) A |JOIN ( | SELECT ps_suppkey | FROM partsupp | JOIN ( | SELECT p_partkey | FROM part | WHERE p_name LIKE 'cornsilk%' | GROUP BY p_partkey | ) C ON (ps_partkey = p_partkey) | JOIN ( | SELECT s_suppkey | ,qty | FROM ( | SELECT s_nationkey AS nationkey | ,sum(l_quantity) * 0.0001 AS qty | FROM lineitem | ,supplier 
| WHERE l_suppkey = s_suppkey | AND l_shipdate >= '1996-01-01' | AND l_shipdate < '1997-01-01' | GROUP BY s_nationkey | ) lqty | ,supplier | WHERE s_nationkey = nationkey | ) D ON (ps_suppkey = s_suppkey) | WHERE ps_availqty > qty | GROUP BY ps_suppkey | ) B ON (s_suppkey = ps_suppkey) """.stripMargin).online var batchId = 0 while (query.hasNext) { printResult(query.next(), batchId) batchId += 1 } } ignore("Q22") { setConf(STREAMED_RELATIONS, "customer") setConf(NUMBER_BATCHES, "3") val query = sql( """ |SELECT cntrycode | ,count(*) AS numcust | ,sum(c_acctbal) AS totacctbal |FROM ( | SELECT 0 AS KEY | ,substring(c_phone, 1, 2) AS cntrycode | ,c_acctbal | FROM customer | WHERE substring(c_phone, 1, 2) IN ( | '25' | ,'23' | ,'11' | ,'18' | ,'32' | ,'33' | ,'27' | ) | ) AS A | ,( | SELECT 0 AS KEY | ,avg(c_acctbal) AS threshold | FROM customer | WHERE c_acctbal > 0.00 | AND substring(c_phone, 1, 2) IN ( | '25' | ,'23' | ,'11' | ,'18' | ,'32' | ,'33' | ,'27' | ) | ) AS B |WHERE A.KEY = B.KEY | AND c_acctbal > threshold |GROUP BY cntrycode """.stripMargin).online var batchId = 0 while (query.hasNext) { printResult(query.next(), batchId) batchId += 1 } } }
andrewor14/iolap
sql/hive/src/test/scala/org/apache/spark/sql/hive/online/OnlineSQLSuite.scala
Scala
apache-2.0
17,695
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 **/
import de.johoop.jacoco4sbt.JacocoPlugin.jacoco
import de.johoop.jacoco4sbt.XMLReport
import sbt.Keys._
import sbt._
import com.typesafe.sbt.pgp._
import sbtrelease.ReleaseStateTransformations._
import sbtrelease.ReleasePlugin.autoImport._
import sbtrelease.{Version => SbtVersion}

/**
 * Shared sbt build configuration for the jsvcgen project:
 * compiler flags, test/coverage settings, release process and Scalate codegen.
 */
object Config extends Build {

  // Flags passed to javac for all projects.
  lazy val javaCompilerOptions = Seq(
    "-source", Version.javaLanguage,
    "-target", Version.javaTarget,
    "-Xlint",
    "-g"
  )

  // JDK 8's doclint rejects otherwise-acceptable javadoc, so it is disabled there.
  val isJdk8 = System.getProperty("java.version").startsWith("1.8")

  lazy val javadocOptions = if (isJdk8) Seq("-Xdoclint:none") else Seq()

  // Full javadoc option set, branded with the current jsvcgen version.
  def allJavadocOptions(jsvcgenVersion: String) = javadocOptions ++ Seq(
    "-noqualifier", "all",
    "-stylesheetfile", "jsvcgen/src/main/resources/javadoc.css",
    "-header", s"""<img><br/><b>jsvcgen</b><br/>v${jsvcgenVersion}"""
  )

  // Strict scalac flags; warnings are fatal.
  lazy val compilerOptions = Seq(
    "-deprecation",
    "-encoding", "UTF-8", // yes, this is 2 args
    "-feature",
    "-language:existentials",
    "-language:higherKinds",
    "-language:implicitConversions",
    "-unchecked",
    "-Xfatal-warnings",
    "-Xlint",
    "-Yno-adapted-args",
    "-Ywarn-numeric-widen",
    "-Ywarn-value-discard",
    "-Xfuture"
  )

  lazy val org = "com.solidfire"
  lazy val orgName = "SolidFire, Inc."

  // create beautiful scala test report
  lazy val unitTestOptions = Seq(
    Tests.Argument(TestFrameworks.ScalaTest, "-h", "target/html-unit-test-report"),
    Tests.Argument(TestFrameworks.ScalaTest, "-u", "target/unit-test-reports"),
    Tests.Argument(TestFrameworks.ScalaTest, "-oD")
  )

  // Same reporters are used when running under jacoco coverage.
  lazy val jacocoTestOptions = Seq(
    Tests.Argument(TestFrameworks.ScalaTest, "-h", "target/html-unit-test-report"),
    Tests.Argument(TestFrameworks.ScalaTest, "-u", "target/unit-test-reports"),
    Tests.Argument(TestFrameworks.ScalaTest, "-oD")
  )

  // Coverage runs fork the JVM, run sequentially, and emit an XML report.
  lazy val jacocoSettings = jacoco.settings ++ Seq(
    Keys.fork in jacoco.Config := true,
    parallelExecution in jacoco.Config := false,
    jacoco.reportFormats in jacoco.Config := Seq(XMLReport(encoding = "utf-8"))
  )

  // Baseline settings applied to every project, including the sbt-release pipeline.
  lazy val projectSettings = Defaults.coreDefaultSettings ++ Seq(
    //populate default set of scalac options for each project
    javacOptions ++= javaCompilerOptions,
    javacOptions in doc := allJavadocOptions((version in ThisBuild).value),
    scalacOptions ++= compilerOptions,
    testOptions in (Test, test) ++= unitTestOptions,
    testOptions in jacoco.Config ++= jacocoTestOptions,
    crossPaths := false,
    scalaVersion := "2.10.6",
    crossScalaVersions := Seq("2.10.6", "2.11.8"),
    isSnapshot := version.value.trim.endsWith("-SNAPSHOT"),
    organization := org,
    resolvers := repositories,
    updateOptions := updateOptions.value.withCachedResolution(true),
    releaseVersionBump := SbtVersion.Bump.Next,
    // Published artifacts are PGP-signed.
    releasePublishArtifactsAction := PgpKeys.publishSigned.value,
    releaseProcess := Seq[ReleaseStep](
      checkSnapshotDependencies,
      inquireVersions,
      runClean,
      runTest,
      setReleaseVersion,
      commitReleaseVersion,
      tagRelease,
      setNextVersion,
      commitNextVersion,
      pushChanges
    ),
    libraryDependencies ++= Seq(
      Dependencies.slf4jApi,
      Dependencies.slf4jSimple,
      Dependencies.scalatest,
      Dependencies.pegdown,
      Dependencies.scalacheck,
      Dependencies.mockito
    )
  )

  lazy val repositories = List(
    "Typesafe" at "http://repo.typesafe.com/typesafe/releases/",
    "Maven Central" at "http://repo1.maven.org/maven2/"
  )

  import com.mojolly.scalate.ScalatePlugin._
  import ScalateKeys._

  val templateSettings = scalateSettings ++ Seq(
    /**
     * Sets the behavior of recompiling template files.
     * Always template files are recompiled when this setting is true.
     * When you set it to false, they are recompiled only when the modified time of
     * a template file is newer than that of a scala file generated by compilation
     * or a compiled scala file corresponding to a template file doesn't exist yet.
     */
    scalateOverwrite := true,
    scalateTemplateConfig in Compile <<= baseDirectory { base =>
      Seq(
        /**
         * A minimal template configuration example.
         * "scalate" is used as a package prefix(the 4th argument of TemplateConfig.apply)
         * if not specified.
         *
         * An example of a scalate usage is as bellow if you have templates/index.ssp.
         *
         *   val engine = new TemplateEngine
         *   engine.layout("/scalate/index.ssp")
         */
        TemplateConfig(
          base / "src/main/resources/codegen",
          Seq(
            "import com.solidfire.jsvcgen._",
            "import com.solidfire.jsvcgen.codegen._"
          ),
          Nil,
          None
        )
      )
    }
  )

  lazy val root = Project("root", file(".")).settings(templateSettings: _*)
}

/** Pinned versions for language level and all third-party dependencies. */
object Version {
  val javaLanguage = "1.7"
  val javaTarget = "1.7"
  val base64 = "2.3.9"
  val gson = "2.6.2"
  val jodaConvert = "1.8.1"
  val jodaTime = "2.9.3"
  val json4s = "3.3.0"
  val scalate = "1.7.1"
  val scopt = "3.4.0"
  val slf4j = "1.6.6"
  val junit = "4.12"
  val scalatest = "2.2.6"
  val scalacheck = "1.12.5"
  val pegdown = "1.6.0"
  val mockito = "1.10.19"
}

/** Dependency coordinates; test-only dependencies carry the "test" scope. */
object Dependencies {
  lazy val base64 = "net.iharder" % "base64" % Version.base64
  lazy val gson = "com.solidfire.code.gson" % "gson" % Version.gson
  lazy val jodaTime = "joda-time" % "joda-time" % Version.jodaTime
  lazy val jodaConvert = "org.joda" % "joda-convert" % Version.jodaConvert
  lazy val json4sJackson = "org.json4s" %% "json4s-jackson" % Version.json4s force()
  lazy val scalateCore = "org.scalatra.scalate" %% "scalate-core" % Version.scalate
  lazy val scopt = "com.github.scopt" %% "scopt" % Version.scopt
  lazy val slf4jApi = "org.slf4j" % "slf4j-api" % Version.slf4j
  lazy val slf4jSimple = "org.slf4j" % "slf4j-simple" % Version.slf4j % "test"
  lazy val junit = "junit" % "junit" % Version.junit % "test"
  lazy val scalatest = "org.scalatest" %% "scalatest" % Version.scalatest % "test"
  lazy val pegdown = "org.pegdown" % "pegdown" % Version.pegdown % "test"
  lazy val scalacheck = "org.scalacheck" %% "scalacheck" % Version.scalacheck % "test"
  lazy val mockito = "org.mockito" % "mockito-all" % Version.mockito % "test"
}
solidfire/jsvcgen
project/common.scala
Scala
apache-2.0
7,682
/*********************************************************************** * Copyright (c) 2013-2015 Commonwealth Computer Research, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Apache License, Version 2.0 which * accompanies this distribution and is available at * http://www.opensource.org/licenses/apache2.0.php. *************************************************************************/ package org.locationtech.geomesa.features package object serialization { type Version = Int // Write a datum. type DatumWriter[Writer, -T] = (Writer, T) => Unit // Read a datum. type DatumReader[Reader, +T] = (Reader) => T } class SerializationException(msg: String, cause: Throwable = null) extends RuntimeException(msg, cause)
drackaer/geomesa
geomesa-features/geomesa-feature-common/src/main/scala/org/locationtech/geomesa/features/serialization/package.scala
Scala
apache-2.0
805
/* Copyright 2016-17, Hasso-Plattner-Institut fuer Softwaresystemtechnik GmbH Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package de.hpi.ingestion.datalake import com.holdenkarau.spark.testing.SharedSparkContext import de.hpi.ingestion.datalake.mock.{MockImport, MockSubjectImport} import de.hpi.ingestion.datalake.models.Subject import org.scalatest.{FlatSpec, Matchers} class DataLakeImportImplementationTest extends FlatSpec with Matchers with SharedSparkContext { "normalizeProperties" should "normalize the properties of an entity" in { val job = new MockImport val entity = TestData.testEntity val mapping = TestData.normalizationMapping val strategies = TestData.strategyMapping val properties = job.normalizeProperties(entity, mapping, strategies) val expected = TestData.propertyMapping properties shouldEqual expected } "Entities" should "be translated" in { val job = new MockSubjectImport job.inputEntities = sc.parallelize(TestData.translationEntities) job.run(sc) val subjects = job.subjects.collect.toList val expectedSubjects = TestData.translatedSubjects subjects shouldEqual expectedSubjects } "filterEntities" should "filter no element by default" in { val job = new MockImport val entities = TestData.testEntities val filteredEntities = entities.filter(job.filterEntities) filteredEntities shouldEqual entities } "extractLegalForm" should "extract the legal form from a given name" in { val job = new MockImport val classifier = job.classifier val companyNames = TestData.companyNames 
companyNames.foreach { case (name, expected) => val legalForms = job.extractLegalForm(name, classifier) legalForms shouldEqual expected } job.extractLegalForm(null, classifier) shouldEqual None } "run" should "import a new datasource to the datalake" in { val job = new MockImport job.inputEntities = sc.parallelize(TestData.testEntities) job.run(sc) val output = job.subjects.collect.toList val expected = TestData.output (output, expected).zipped.foreach { case (subject, expectedSubject) => subject.name shouldEqual expectedSubject.name } } }
bpn1/ingestion
src/test/scala/de/hpi/ingestion/datalake/DataLakeImportImplementationTest.scala
Scala
apache-2.0
2,854
package net.fluxo.blue.downloader /** * Created with IntelliJ IDEA. * User: Ronald Kurniawan (viper) * Date: 25/12/13 * Time: 9:45 PM * */ class VideoDownloadMonitor extends Runnable { def run() { } }
fluxodesign/FluxoBlue
src/main/scala/net/fluxo/blue/downloader/VideoDownloadMonitor.scala
Scala
lgpl-3.0
212
package com.realizationtime.btdogg.hashessource import akka.actor.{Actor, ActorLogging, ActorRef} import com.realizationtime.btdogg.hashessource.HashesSource.{SpottedHash, Subscribe} import com.realizationtime.btdogg.hashessource.SourcesHub._ class SourcesHub extends Actor with ActorLogging { private var subscribers = Set[ActorRef]() override def receive: Receive = { case m: Message => m match { case AddWorkers(hashSources) => hashSources.foreach(_ ! Subscribe(self)) case SubscribePublisher(s) => subscribers += s case UnsubscribePublisher(sub, msg) => msg.foreach(sub ! _) subscribers -= sub } case k: SpottedHash => subscribers.foreach(_ forward k) } } object SourcesHub { sealed trait Message final case class AddWorkers(hashSources: Set[ActorRef]) extends Message final case class SubscribePublisher(mainConsumer: ActorRef) extends Message final case class UnsubscribePublisher(subscriber: ActorRef, endMessage: Option[Any]) extends Message }
bwrega/btdogg
src/main/scala/com/realizationtime/btdogg/hashessource/SourcesHub.scala
Scala
mit
1,028
package services

import controllers.ArrivalGenerator.arrival
import drt.shared.PortCode
import drt.shared.Terminals.{T1, T2}
import drt.shared.api.Arrival
import org.specs2.mutable.Specification

/**
 * Specs for code-share grouping: arrivals sharing scheduled time, terminal
 * and origin are grouped, with the highest-pax flight as the primary.
 * NOTE(review): the "primary flight" choice is inferred from the expected
 * values below (flight with largest actPax wins) — confirm against CodeShares.
 */
class CodeSharesSpec extends Specification {

  import drt.shared.CodeShares._

  "Given one flight " +
    "When we ask for unique arrivals " +
    "Then we should see that flight with zero code shares " >> {
    val flight: Arrival = arrival(iata = "BA0001", schDt = "2016-01-01T10:25Z", actPax = Option(100), terminal = T1, origin = PortCode("JFK"))

    val result = uniqueArrivalsWithCodeShares(identity[Arrival])(Seq(flight))

    val expected = List((flight, Set()))

    result === expected
  }

  "Given two flights which are codeshares of each other " +
    "When we ask for unique flights " +
    "Then we should see a tuple of only one flight with its code share" >> {
    val flight1: Arrival = arrival(iata = "BA0001", schDt = "2016-01-01T10:25Z", actPax = Option(100), terminal = T1, origin = PortCode("JFK"))
    val flight2: Arrival = arrival(iata = "AA8778", schDt = "2016-01-01T10:25Z", actPax = Option(150), terminal = T1, origin = PortCode("JFK"))

    val result = uniqueArrivalsWithCodeShares(identity[Arrival])(Seq(flight1, flight2))

    // flight2 (150 pax) is primary; flight1 is reported as its code share.
    val expected = List((flight2, Set(flight1)))

    result === expected
  }

  "Given three flights which are codeshares of each other " +
    "When we ask for unique flights " +
    "Then we should see a tuple of only one flight with its two code shares" >> {
    val flight1: Arrival = arrival(iata = "BA0001", schDt = "2016-01-01T10:25Z", actPax = Option(100), terminal = T1, origin = PortCode("JFK"))
    val flight2: Arrival = arrival(iata = "AA8778", schDt = "2016-01-01T10:25Z", actPax = Option(150), terminal = T1, origin = PortCode("JFK"))
    val flight3: Arrival = arrival(iata = "ZZ5566", schDt = "2016-01-01T10:25Z", actPax = Option(175), terminal = T1, origin = PortCode("JFK"))

    val result = uniqueArrivalsWithCodeShares(identity[Arrival])(Seq(flight1, flight2, flight3))

    val expected = List((flight3, Set(flight1, flight2)))

    result === expected
  }

  "Given 5 flight, where there are 2 sets of code shares and one unique flight " +
    "When we ask for unique flights " +
    "Then we should see a 3 tuples; one flight with no code shares and 2 flights with their two code shares" >> {
    val flightCS1a: Arrival = arrival(iata = "BA0001", schDt = "2016-01-01T10:25Z", actPax = Option(100), terminal = T1, origin = PortCode("JFK"))
    val flightCS1b: Arrival = arrival(iata = "AA8778", schDt = "2016-01-01T10:25Z", actPax = Option(150), terminal = T1, origin = PortCode("JFK"))
    val flightCS2a: Arrival = arrival(iata = "ZZ5566", schDt = "2016-01-01T10:25Z", actPax = Option(55), terminal = T1, origin = PortCode("CDG"))
    val flightCS2b: Arrival = arrival(iata = "TG8000", schDt = "2016-01-01T10:25Z", actPax = Option(180), terminal = T1, origin = PortCode("CDG"))
    val flight: Arrival = arrival(iata = "KL1010", schDt = "2016-01-01T10:25Z", actPax = Option(175), terminal = T2, origin = PortCode("JFK"))

    val result = uniqueArrivalsWithCodeShares(identity[Arrival])(Seq(flightCS1a, flightCS1b, flightCS2a, flightCS2b, flight)).toSet

    val expected = Set(
      (flightCS1b, Set(flightCS1a)),
      (flightCS2b, Set(flightCS2a)),
      (flight, Set())
    )

    result === expected
  }

  "Given two flights with the same scheduled time, the same terminal, but different origins " +
    "When we ask for unique flights " +
    "Then we should see two tuples, each with one of the flights and no code shares" >> {
    val flight1: Arrival = arrival(iata = "BA0001", schDt = "2016-01-01T10:25Z", actPax = Option(100), terminal = T1, origin = PortCode("JFK"))
    val flight2: Arrival = arrival(iata = "AA8778", schDt = "2016-01-01T10:25Z", actPax = Option(150), terminal = T1, origin = PortCode("CDG"))

    val result = uniqueArrivalsWithCodeShares(identity[Arrival])(Seq(flight1, flight2)).toSet

    val expected = Set(
      (flight1, Set()),
      (flight2, Set())
    )

    result === expected
  }

  "Given two flights with the same scheduled time, the same origins, but different terminals " +
    "When we ask for unique flights " +
    "Then we should see two tuples, each with one of the flights and no code shares" >> {
    val flight1: Arrival = arrival(iata = "BA0001", schDt = "2016-01-01T10:25Z", actPax = Option(100), terminal = T1, origin = PortCode("JFK"))
    val flight2: Arrival = arrival(iata = "AA8778", schDt = "2016-01-01T10:25Z", actPax = Option(150), terminal = T2, origin = PortCode("JFK"))

    val result = uniqueArrivalsWithCodeShares(identity[Arrival])(Seq(flight1, flight2)).toSet

    val expected = Set(
      (flight1, Set()),
      (flight2, Set())
    )

    result === expected
  }

  // NOTE(review): the spec name below says "the same different terminals" — a
  // typo in the description string; it is a runtime string so left unchanged.
  "Given two flights with the same origins, the same different terminals, but different scheduled times" +
    "When we ask for unique flights " +
    "Then we should see two tuples, each with one of the flights and no code shares" >> {
    val flight1: Arrival = arrival(iata = "BA0001", schDt = "2016-01-01T10:30Z", actPax = Option(100), terminal = T1, origin = PortCode("JFK"))
    val flight2: Arrival = arrival(iata = "AA8778", schDt = "2016-01-01T10:25Z", actPax = Option(150), terminal = T1, origin = PortCode("JFK"))

    val result = uniqueArrivalsWithCodeShares(identity[Arrival])(Seq(flight1, flight2)).toSet

    val expected = Set(
      (flight1, Set()),
      (flight2, Set())
    )

    result === expected
  }
}
UKHomeOffice/drt-scalajs-spa-exploration
server/src/test/scala/services/CodeSharesSpec.scala
Scala
apache-2.0
5,560
package feature import com.twitter.finagle.http.Method.{Get, Post} import com.twitter.finagle.http.Request import com.twitter.finagle.http.Status.{Ok, Unauthorized} import env.{ResponseStatusAndContent, RunningTestEnvironment} import example.{EmailAddress, Id, User, Username} import io.circe.generic.auto._ import io.fintrospect.formats.Circe.JsonFormat.{decode, parse} import org.scalatest.{FunSpec, Matchers} class ReportInhabitantsTest extends FunSpec with Matchers with RunningTestEnvironment { describe("whoIsThere endpoint validation") { it("is protected with a secret key") { checkInhabitants("fakeSecret").status shouldBe Unauthorized } } describe("proper usage") { it("initially there is no-one inside") { val inhabitants = checkInhabitants("realSecret") inhabitants.status shouldBe Ok decode[Seq[User]](parse(inhabitants.content)) shouldBe Seq() } it("when a user enters the building") { val user = User(Id(1), Username("Bob"), EmailAddress("bob@bob.com")) env.userDirectory.contains(user) enterBuilding(Option("Bob"), "realSecret") val inhabitants = checkInhabitants("realSecret") inhabitants.status shouldBe Ok decode[Seq[User]](parse(inhabitants.content)) shouldBe Seq(user) } } private def enterBuilding(user: Option[String], secret: String): ResponseStatusAndContent = { val query = user.map("username=" + _).getOrElse("") val request = Request(Post, "/security/knock?" + query) request.headerMap("key") = secret env.responseTo(request) } private def checkInhabitants(secret: String): ResponseStatusAndContent = { val request = Request(Get, "/security/whoIsThere") request.headerMap("key") = secret env.responseTo(request) } }
daviddenton/fintrospect-example-app
src/test/scala/feature/ReportInhabitantsTest.scala
Scala
apache-2.0
1,780
package sandbox

import com.typesafe.config.ConfigFactory
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.kafka.common.serialization.StringSerializer
import org.scalacheck.Gen
import akka.stream.scaladsl.Source
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Keep
import scala.concurrent.duration._
import sandbox.util.DataGen
import akka.kafka.ConsumerSettings
import akka.kafka.ProducerSettings
import org.apache.kafka.common.serialization.ByteArrayDeserializer
import org.apache.kafka.common.serialization.ByteArraySerializer
import org.apache.kafka.clients.producer.ProducerRecord
import akka.kafka.scaladsl._
import akka.kafka.Subscriptions
import org.apache.kafka.clients.producer.ProducerConfig
import scala.concurrent.Future
import scala.concurrent.Await
import java.util.concurrent.TimeoutException

/**
 * Sandbox app: publishes generated names to a local Kafka topic at ~1/sec and
 * consumes them back in batches, committing offsets after each batch.
 * Assumes a broker on localhost:9092 with topic "names" — TODO confirm.
 */
object NameAggregator extends App {
  val config = ConfigFactory.empty
  implicit val system = ActorSystem("NameAggregator", config)
  implicit val ec = system.dispatcher
  implicit val materializer = ActorMaterializer()

  val topicName = "names"
  val consumerGroupId = "name-consumer"
  val broker = "localhost:9092"

  // Consumer reads raw keys, string values.
  val consumerProps = ConsumerSettings(system, new ByteArrayDeserializer, new StringDeserializer)
    .withBootstrapServers(broker)

  // Producer batches sends for up to 3s via linger.ms.
  val producerProps = ProducerSettings(system, new ByteArraySerializer, new StringSerializer)
    .withBootstrapServers(broker)
    .withProperty(ProducerConfig.LINGER_MS_CONFIG, "3000")

  val rng = new scala.util.Random(new java.util.Random())

  try {
    // Infinite source of generated names; terminates only if sampling fails.
    val nameSource = Source.unfold(DataGen.name("US")) { gen =>
      // val delay = Math.abs(rng.nextGaussian()*1000L).toLong
      // Thread.sleep(delay)
      gen.sample.map(n => (gen, n))
    }
    // Zip against a 1/sec tick to throttle production.
    val throttle = Source.tick(0.seconds, 1.second, ())
    val producer = (nameSource zip throttle).map { case (a, b) => a }
      .map(n => new ProducerRecord[Array[Byte], String](topicName, n))
      .alsoTo(Sink.foreach(n => println("Publishing: " + n)))
      .to(Producer.plainSink(producerProps))

    val start = System.currentTimeMillis
    val consumer = Consumer.committableSource(consumerProps.withGroupId(consumerGroupId), Subscriptions.topics(topicName))
    // Batch up to 15 records or 30s, fail the whole stream on "SMITHY",
    // then commit only the last offset of each batch.
    val done = consumer
      .alsoTo(Sink.foreach(rec => println(s"Consuming: ${rec}")))
      .groupedWithin(15, 30.seconds)
      .alsoTo(Sink.foreach(recs => println(s"==> Aggregated: ${recs.map(_.record.value)} - ${recs.size} - ${System.currentTimeMillis - start}")))
      .mapAsync(1)(recs => Future { if (recs.find(_.record.value.contains("SMITHY")).isDefined) throw new Exception("SMITH!!") else recs })
      .map(_.last)
      .alsoTo(Sink.foreach(rec => println(s"===> Committing: offset=${rec.committableOffset.partitionOffset.offset}")))
      .mapAsync(1) { _.committableOffset.commitScaladsl() }
      .runWith(Sink.ignore)

    // Materialize the producer side; its NotUsed value is unused.
    val bar = producer.run()

    try {
      // Run for up to 60s; a timeout here is the expected "happy" exit.
      val res = waitFor(done, 60.seconds)
      println("Success: " + res)
    } catch {
      case to: TimeoutException => println("Success")
      // NOTE(review): catching Throwable swallows fatal errors; prefer NonFatal.
      case ex: Throwable => println("Failure: " + ex.getClass.getSimpleName + " - " + ex.getMessage)
    }
  } finally {
    system.terminate()
  }

  /** Blocks the calling thread until f completes or d elapses. */
  def waitFor[T](f: Future[T], d: Duration): T = Await.result(f, d)
}
jthompson-hiya/akka-streams-sandbox
src/main/scala/sandbox/NameAggregator.scala
Scala
mit
3,351
package org.apache.spark.sql.cassandra

import com.datastax.spark.connector.types.UDTFieldDef
import org.apache.spark.Logging
import org.apache.spark.sql.cassandra.types.{UUIDType, InetAddressType}
import org.apache.spark.sql.types.StructField
import org.apache.spark.sql.{types => catalystTypes}
import com.datastax.spark.connector
import com.datastax.spark.connector.cql.ColumnDef

/** Convert Cassandra data type to Catalyst data type */
object DataTypeConverter extends Logging {

  // One-to-one mapping for Cassandra primitive column types.
  // VarInt/Decimal both widen to unbounded DecimalType; UUID/Inet use
  // connector-specific Catalyst types.
  private[cassandra] val primitiveTypeMap = Map[connector.types.ColumnType[_], catalystTypes.DataType](
    connector.types.TextType -> catalystTypes.StringType,
    connector.types.AsciiType -> catalystTypes.StringType,
    connector.types.VarCharType -> catalystTypes.StringType,

    connector.types.BooleanType -> catalystTypes.BooleanType,
    connector.types.IntType -> catalystTypes.IntegerType,
    connector.types.BigIntType -> catalystTypes.LongType,
    connector.types.CounterType -> catalystTypes.LongType,
    connector.types.FloatType -> catalystTypes.FloatType,
    connector.types.DoubleType -> catalystTypes.DoubleType,

    connector.types.VarIntType -> catalystTypes.DecimalType(), // no native arbitrary-size integer type
    connector.types.DecimalType -> catalystTypes.DecimalType(),

    connector.types.TimestampType -> catalystTypes.TimestampType,
    connector.types.InetType -> InetAddressType,
    connector.types.UUIDType -> UUIDType,
    connector.types.TimeUUIDType -> UUIDType,
    connector.types.BlobType -> catalystTypes.BinaryType
  )

  /** Convert Cassandra data type to Catalyst data type */
  def catalystDataType(cassandraType: connector.types.ColumnType[_], nullable: Boolean): catalystTypes.DataType = {

    // Maps one UDT field to a Catalyst StructField (always nullable).
    def catalystStructField(field: UDTFieldDef): StructField =
      StructField(field.columnName, catalystDataType(field.columnType, nullable = true), nullable = true)

    // NOTE(review): the Set/List/Map cases look up element types directly in
    // primitiveTypeMap rather than recursing via catalystDataType, so a
    // collection whose element is itself a collection or UDT would fail the
    // map lookup — presumably nested collections are unsupported here; confirm.
    cassandraType match {
      case connector.types.SetType(et) => catalystTypes.ArrayType(primitiveTypeMap(et), nullable)
      case connector.types.ListType(et) => catalystTypes.ArrayType(primitiveTypeMap(et), nullable)
      case connector.types.MapType(kt, vt) => catalystTypes.MapType(primitiveTypeMap(kt), primitiveTypeMap(vt), nullable)
      case connector.types.UserDefinedType(_, fields) => catalystTypes.StructType(fields.map(catalystStructField))
      case connector.types.VarIntType =>
        // Warn on lossy-precision mapping before falling through to the map.
        logWarning("VarIntType is mapped to catalystTypes.DecimalType with unlimited values.")
        primitiveTypeMap(cassandraType)
      case _ => primitiveTypeMap(cassandraType)
    }
  }

  /** Create a Catalyst StructField from a Cassandra Column */
  def toStructField(column: ColumnDef): StructField =
    StructField(column.columnName, catalystDataType(column.columnType, nullable = true))
}
IMCG/spark-cassandra
spark-cassandra-connector/src/main/scala/org/apache/spark/sql/cassandra/DataTypeConverter.scala
Scala
apache-2.0
2,974
package models.analysis import play.api.libs.json.{JsObject, Json} object Purposes { sealed trait Purpose { self => val id: Int val noPurpose: String val enPurpose: String } val purposes = List( MaterialDetermination, Dating, ProvenanceDetermination, ContructionUnderstanding ) object Purpose { def purposeIdToNoString(i: Int) = fromInt(i).map(_.noPurpose).getOrElse("") def purposeIdToEnString(i: Int) = fromInt(i).map(_.enPurpose).getOrElse("") def fromInt(i: Int): Option[Purpose] = purposes.find(_.id == i) def fromString(s: String): Option[Purpose] = purposes.find(p => p.enPurpose == s || p.noPurpose == s) def toJson(m: Purpose): JsObject = { Json.obj( "id" -> m.id, "noPurpose" -> m.noPurpose, "enPurpose" -> m.enPurpose ) } } case object MaterialDetermination extends Purpose { override val id = 1 override val noPurpose: String = "Materialbestemmelse" override val enPurpose: String = "Material determination" } case object Dating extends Purpose { override val id = 2 override val noPurpose: String = "Datering" override val enPurpose: String = "Dating" } case object ProvenanceDetermination extends Purpose { override val id = 3 override val noPurpose: String = "Proveniensbestemmelse" override val enPurpose: String = "Provenance determination" } case object ContructionUnderstanding extends Purpose { override val id = 4 override val noPurpose: String = "Konstruksjonsforståelse" override val enPurpose: String = "Construction understanding" } }
MUSIT-Norway/musit
service_backend/app/models/analysis/Purposes.scala
Scala
gpl-2.0
1,726
package org.dberg.hubot import org.dberg.hubot.models.Message object SpecHelpers { val exception = new Exception("Dumb Exception") //Just a helper method to abstract out the response a //listener generates for our tests //Param is kind of redundanat since its in the body but helps //make this easy and generic def generateListenerResponse(message: Message, param: String) = { if (param.isEmpty) Message(message.user, "received", message.messageType) else Message(message.user, "received " + param, message.messageType) } }
denen99/hubot-scala
src/test/scala/org/dberg/hubot/SpecHelpers.scala
Scala
apache-2.0
563
package org.http4s package twirl import cats._ import org.http4s.headers.`Content-Type` import org.http4s.MediaType._ import play.twirl.api._ trait TwirlInstances { implicit def htmlContentEncoder[F[_]: Applicative]( implicit charset: Charset = DefaultCharset): EntityEncoder[F, Html] = contentEncoder(`text/html`) /** * Note: Twirl uses a media type of `text/javascript`. This is obsolete, so we instead return * [[org.http4s.MediaType.application/javascript]]. */ implicit def jsContentEncoder[F[_]: Applicative]( implicit charset: Charset = DefaultCharset): EntityEncoder[F, JavaScript] = contentEncoder(`application/javascript`) implicit def xmlContentEncoder[F[_]: Applicative]( implicit charset: Charset = DefaultCharset): EntityEncoder[F, Xml] = contentEncoder(`application/xml`) implicit def txtContentEncoder[F[_]: Applicative]( implicit charset: Charset = DefaultCharset): EntityEncoder[F, Txt] = contentEncoder(`text/plain`) private def contentEncoder[F[_], C <: Content]( mediaType: MediaType)(implicit F: Applicative[F], charset: Charset): EntityEncoder[F, C] = EntityEncoder .stringEncoder[F] .contramap[C](content => content.body) .withContentType(`Content-Type`(mediaType, charset)) }
reactormonk/http4s
twirl/src/main/scala/org/http4s/twirl/TwirlInstances.scala
Scala
apache-2.0
1,300
/*
 * Copyright (c) 2014 Contributor. All rights reserved.
 */
package org.scalaide.ui.internal.editor.decorators.implicits

import scala.reflect.internal.util.SourceFile

import org.scalaide.ui.internal.editor.decorators.BaseSemanticAction
import org.eclipse.jface.preference.IPreferenceStore
import org.eclipse.jface.text.Position
import org.eclipse.jface.text.Region
import org.eclipse.jface.text.source.Annotation
import org.eclipse.jface.text.source.ISourceViewer
import org.scalaide.core.IScalaPlugin
import org.scalaide.core.internal.compiler.ScalaPresentationCompiler
import org.scalaide.core.internal.jdt.model.ScalaCompilationUnit
import org.scalaide.ui.internal.preferences.ImplicitsPreferencePage
import org.scalaide.core.compiler.IScalaPresentationCompiler

/**
 * Semantic action for highlighting implicit conversions and parameters.
 */
class ImplicitHighlightingPresenter(sourceViewer: ISourceViewer) extends BaseSemanticAction(
  sourceViewer,
  ImplicitAnnotation.ID,
  Some("implicit")) {

  // Delegates the per-compilation-unit scan to the companion object.
  protected override def findAll(compiler: ScalaPresentationCompiler, scu: ScalaCompilationUnit, sourceFile: SourceFile): Map[Annotation, Position] =
    ImplicitHighlightingPresenter.findAllImplicitConversions(compiler, scu, sourceFile)
}

object ImplicitHighlightingPresenter {
  // Separator used between the original text and the expanded form in annotation messages.
  final val DisplayStringSeparator = " => "

  private def pluginStore: IPreferenceStore = IScalaPlugin().getPreferenceStore

  /**
   * Traverses the typed tree of `sourceFile` and returns one annotation per
   * implicit conversion (ApplyImplicitView) and — unless disabled by
   * preference — per implicit argument application (ApplyToImplicitArgs),
   * keyed to the editor position of the affected source text.
   */
  def findAllImplicitConversions(compiler: IScalaPresentationCompiler, scu: ScalaCompilationUnit, sourceFile: SourceFile) = {
    import compiler.Tree
    import compiler.Traverser
    import compiler.Apply
    import compiler.Select
    import compiler.ApplyImplicitView
    import compiler.ApplyToImplicitArgs

    // Builds the editor Position for an annotation; optionally clipped to the
    // first line of the annotated text, per user preference.
    def mkPosition(pos: compiler.Position, txt: String): Position = {
      val start = pos.start
      val end =
        if (pluginStore.getBoolean(ImplicitsPreferencePage.PFirstLineOnly)) {
          // NOTE(review): source shows '\\n' (a two-character literal, which
          // does not compile); presumably '\n' in the original — dump artifact.
          val eol = txt.indexOf('\\n')
          if (eol > -1) eol
          else txt.length
        } else txt.length
      new Position(start, end)
    }

    // Annotation + position for an implicit view application, with a
    // human-readable message computed on the presentation-compiler thread.
    def mkImplicitConversionAnnotation(t: ApplyImplicitView) = {
      import org.scalaide.core.compiler.IScalaPresentationCompiler.Implicits.RichResponse
      val txt = new String(sourceFile.content, t.pos.start, math.max(0, t.pos.end - t.pos.start)).trim()
      val pos = mkPosition(t.pos, txt)
      val region = new Region(pos.offset, pos.getLength)
      val msg = compiler.asyncExec {
        val sname = t.fun.symbol.nameString
        s"Implicit conversion found: `$txt`$DisplayStringSeparator`$sname($txt): ${t.tpe}`"
      }.getOption()
      val annotation = new ImplicitConversionAnnotation(
        () => compiler.mkHyperlink(t.symbol, name = "Open Implicit", region, scu.scalaProject.javaProject),
        msg.getOrElse(""))
      (annotation, pos)
    }

    // Annotation + position for an implicit-argument application.
    def mkImplicitArgumentAnnotation(t: ApplyToImplicitArgs) = {
      val txt = new String(sourceFile.content, t.pos.start, math.max(0, t.pos.end - t.pos.start)).trim()
      // Defensive, but why x.symbol is null (see bug 1000477) for "Some(x.flatten))"
      // TODO find the implicit args value
      val argsStr = t.args match {
        case null => ""
        case l => l.map { x =>
          if ((x.symbol ne null) && (x.symbol ne compiler.NoSymbol)) x.symbol.fullName
          else "<error>"
        }.mkString("( ", ", ", " )")
      }
      val annotation = new ImplicitArgAnnotation(s"Implicit arguments found: `$txt`$DisplayStringSeparator`$txt$argsStr`")
      val pos = mkPosition(t.pos, txt)
      (annotation, pos)
    }

    // Accumulated results; mutated by the traverser below.
    var implicits = Map[Annotation, Position]()

    new Traverser {
      override def traverse(t: Tree): Unit = {
        t match {
          case v: ApplyImplicitView =>
            val (annotation, pos) = mkImplicitConversionAnnotation(v)
            implicits += (annotation -> pos)
          case v: ApplyToImplicitArgs if !pluginStore.getBoolean(ImplicitsPreferencePage.PConversionsOnly) =>
            val (annotation, pos) = mkImplicitArgumentAnnotation(v)
            implicits += (annotation -> pos)
          case _ =>
        }
        super.traverse(t)
      }
      // Traverse the fully-typed tree; fall back to EmptyTree if typing failed.
    }.traverse(compiler.askLoadedTyped(sourceFile, keepLoaded = false).get.fold(identity _, _ => compiler.EmptyTree))

    implicits
  }
}
andrey-ilinykh/scala-ide
org.scala-ide.sdt.core/src/org/scalaide/ui/internal/editor/decorators/implicits/ImplicitHighlightingPresenter.scala
Scala
bsd-3-clause
4,303
package co.aa8y.datadude.config import org.apache.spark.SparkConf import org.apache.spark.sql.SparkSession case class DumperConfig( sparkConf: SparkConf = new SparkConf(), schema: String = "", table: String = "", outputBaseDir: String = "", outputFormat: String = "json", jdbc: JdbcConfig = JdbcConfig() ) { lazy val appName = s"dumper:${jdbc.database}:$schema:$table" lazy val outputDir = { Seq(outputBaseDir, jdbc.database, schema, table).mkString("/") } lazy val spark = SparkSession .builder .config(sparkConf) .appName(appName) .getOrCreate lazy val tableName = s"$schema.$table" } object DumperConfig { private val parser = new scopt.OptionParser[DumperConfig]("scopt") { head("scopt", "3.x") opt[String]('D', "dbType").action { (v, c) => c.copy(jdbc = c.jdbc.copy(dbType = v)) } opt[String]('h', "host").action { (v, c) => c.copy(jdbc = c.jdbc.copy(host = v)) } opt[String]('d', "database").action { (v, c) => c.copy(jdbc = c.jdbc.copy(database = v)) } opt[String]('P', "port").action { (v, c) => c.copy(jdbc = c.jdbc.copy(port = v.toInt)) } opt[String]('u', "username").action { (v, c) => c.copy(jdbc = c.jdbc.copy(username = v)) } opt[String]('p', "password").action { (v, c) => c.copy(jdbc = c.jdbc.copy(password = v)) } opt[String]('s', "schema").action { (v, c) => c.copy(schema = v) } opt[String]('t', "table").action { (v, c) => c.copy(table = v) } opt[String]('m', "master").action { (v, c) => c.copy(sparkConf = c.sparkConf.setMaster(v)) } opt[String]('o', "outputBaseDir").action { (v, c) => c.copy(outputBaseDir = v) } opt[String]('f', "outputFormat").action { (v, c) => c.copy(outputFormat = v) } } def parse(args: Array[String]): Option[DumperConfig] = parser.parse(args, this()) }
aa8y/data-dude
src/main/scala/co/aa8y/datadude/config/DumperConfig.scala
Scala
gpl-3.0
1,810
package controllers

import com.mohiva.play.silhouette.api.Silhouette
import model.actuator.{ActuatorRepository, Command}
import model.location.LocationRepository
import play.api.Logger
import play.api.libs.json.Json
import play.api.mvc.{Action, AnyContent, Controller}
import security.utils.auth.DefaultEnv

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

/**
 * Controller for the Actuators
 */
class ActuatorController(
                          locationRepository: LocationRepository,
                          actuatorRepository: ActuatorRepository,
                          silhouette: Silhouette[DefaultEnv]) extends Controller {

  /** Lists all known actuators as a JSON array. */
  def getAvailableActuators = Action.async {
    Future {
      Ok(Json.toJson(
        actuatorRepository.findAll()
      ))
    }
  }

  /**
   * Executes a command on the actuator identified by location address and name.
   *
   * Responds 204 (NoContent) when the command was dispatched and 400 when the
   * request body is missing or is not a valid [[Command]].
   */
  def execute(locationAddress: String, actuatorName: String): Action[AnyContent] = Action.async { implicit request =>
    Future {
      Logger.info(s"Command received ${request.body}")
      request.body.asJson match {
        case None =>
          // BUG FIX: previously request.body.asJson.get blew up with a 500
          // on a non-JSON body; report the client error explicitly instead.
          BadRequest("Expected a JSON body")
        case Some(json) =>
          // BUG FIX: json.as[Command] threw JsResultException on a malformed
          // payload; validate and surface the errors as a 400 instead.
          json.validate[Command].fold(
            errors => BadRequest(s"Invalid command: $errors"),
            command => {
              // NOTE(review): if no actuator matches, the command is silently
              // dropped and 204 is still returned — preserved from the
              // original behaviour.
              actuatorRepository
                .find(locationRepository.findOrCreateLocation(locationAddress), actuatorName)
                .foreach(actuator => actuator.execute(command))
              NoContent
            }
          )
      }
    }
  }
}
vavravl1/home_center
app/controllers/ActuatorController.scala
Scala
mit
1,280
package V1

import scala.language.implicitConversions

/**
 * Host language:
 * Language in which the interpreter is implemented.
 *
 * Interpreted language:
 * The language that the interpreter evaluates.
 *
 * Metacircular-Interpreter
 * Host language == Interpreted Language
 *
 * Substitution:
 * To substitute identifier i in e with expression v, replace
 * all _free_ instances of i in e with v.
 */
sealed abstract class WAE
case class Num(n: Int) extends WAE
case class Add(lhs: WAE, rhs: WAE) extends WAE
case class Sub(lhs: WAE, rhs: WAE) extends WAE
case class Let(name: Symbol, namedExpr: WAE, body: WAE) extends WAE
case class Id(name: Symbol) extends WAE

object WAEInterp extends App {

  /**
   * This method will substitute/replace all free instances of
   * <code>substId</code> in <code>expr</code> with <code>value</code>.
   */
  def subst(expr: WAE, substId: Symbol, value: WAE): WAE = expr match {
    case Num(n) => Num(n) // same as expr
    case Add(lhs, rhs) =>
      Add(subst(lhs, substId, value), subst(rhs, substId, value))
    case Sub(lhs, rhs) =>
      Sub(subst(lhs, substId, value), subst(rhs, substId, value))
    case Let(boundId, namedExpr, boundExpr) =>
      // The named expression is substituted unconditionally; only the body
      // can be shadowed by the binding occurrence of boundId.
      val substNamedExpr = subst(namedExpr, substId, value)
      // Shadowing: occurrence of inner Let expression, e.g.
      // Let('x, 1, Let('x, 2, Add('x, 'x)))
      if (boundId == substId) {
        // There is another binding of substId, so stop replacing.
        Let(boundId, substNamedExpr, boundExpr)
      } else {
        Let(boundId, substNamedExpr, subst(boundExpr, substId, value))
      }
    case Id(name) =>
      // In case the substId matches the found Id, replace it with the value.
      if (substId == name) value else Id(name)
  }

  /**
   * Eager (call-by-value) strategy: substitutes an expression as soon as
   * possible. This also forces the evaluation of expressions that may not
   * be necessary at run time.
   */
  def eagerCalc(expr: WAE): Int = expr match {
    case Num(n) => n
    case Add(lhs, rhs) => eagerCalc(lhs) + eagerCalc(rhs)
    case Sub(lhs, rhs) => eagerCalc(lhs) - eagerCalc(rhs)
    case Let(boundId, namedExpr, boundExpr) =>
      // The named expression is evaluated *before* substitution.
      eagerCalc(subst(boundExpr, boundId, Num(eagerCalc(namedExpr))))
    case Id(name) => sys.error("Found unbound id " + name)
  }

  /**
   * Lazy (call-by-name) strategy: delays the substitution of an expression
   * until its value is needed.
   */
  def lazyCalc(expr: WAE): Int = expr match {
    case Num(n) => n
    // BUG FIX: these two cases previously recursed into eagerCalc, silently
    // switching evaluation strategy for any Let nested under Add/Sub.
    case Add(lhs, rhs) => lazyCalc(lhs) + lazyCalc(rhs)
    case Sub(lhs, rhs) => lazyCalc(lhs) - lazyCalc(rhs)
    case Let(boundId, namedExpr, boundExpr) =>
      // Substitute the *unevaluated* named expression into the body.
      lazyCalc(subst(boundExpr, boundId, namedExpr))
    case Id(name) => sys.error("Found unbound id " + name)
  }

  // Assertions on the interpreter
  implicit def symbolToWAE(symbol: Symbol) = Id(symbol)
  implicit def intToWAE(n: Int) = Num(n)

  assert(eagerCalc(Let('x, Add(5, 5), Add('x, 'x))) == 20)
  // Shadowing behaviour
  assert(eagerCalc(Let('x, 1, Let('x, 2, Add('x, 'x)))) == 4)
  try {
    // 'z is unbound and eager evaluation forces it, so this must fail.
    eagerCalc(Let('x, Add(3, 'z), Let('y, 100, 'y)))
    assert(false)
  } catch {
    case e: Exception => assert(true)
  }

  assert(lazyCalc(Let('x, Add(5, 5), Add('x, 'x))) == 20)
  // BUG FIX: this assertion previously duplicated the eagerCalc shadowing
  // check; it now exercises shadowing under the lazy strategy.
  assert(lazyCalc(Let('x, 1, Let('x, 2, Add('x, 'x)))) == 4)
  // 'x (and thus unbound 'z) is never needed, so lazy evaluation succeeds
  // where eager evaluation errors out.
  assert(lazyCalc(Let('x, Add(3, 'z), Let('y, 100, 'y))) == 100)
}
Tooa/interpreters
src/V1/WAEInterp.scala
Scala
apache-2.0
3,367
// IntelliJ Scala plugin type-inference test fixture: the framework locates the
// expression between the /*start*/ and /*end*/ markers and checks that its
// inferred type equals the type named in the trailing "//Int" comment.
// Do not change the markers, the code, or the expected-type line.
class A {
  class G
  trait B

  // K's bound requires T to be a supertype of the compound type G with B,
  // so z(new G with B) must resolve to the Int-returning overload.
  class K[T >: G with B] {
    def z(t: T) = 1
    def z(s: String) = "text"
    /*start*/z(new G with B)/*end*/
  }
}
//Int
LPTK/intellij-scala
testdata/typeInference/bugs5/CompoundTypeConformance.scala
Scala
apache-2.0
157
package lib

import io.flow.delta.config.v0.models.{BuildConfig, BuildConfigUndefinedType, Cluster, Config, ConfigError, ConfigProject, ConfigUndefinedType, EcsBuildConfig, K8sBuildConfig}
import io.flow.delta.config.v0.models.json._
import play.api.libs.json.Json

import scala.util.Try

/**
 * Helpers for resolving which cluster a project build is deployed to,
 * starting from either a raw JSON config string, a parsed Config, or a
 * single BuildConfig.
 */
object ProjectConfigUtil {

  // Sentinel cluster used when a build config's type is not recognized.
  val Unknown = Cluster.UNDEFINED("unknown")

  /**
   * Parses the raw JSON configuration and resolves the cluster for the
   * named build. Any parse failure (invalid JSON or schema mismatch) is
   * deliberately swallowed and reported as None.
   */
  def cluster(config: String, buildName: String): Option[Cluster] =
    Try {
      Json.parse(config).asOpt[Config].flatMap(parsed => cluster(parsed, buildName))
    }.getOrElse(None)

  /**
   * Resolves the cluster for the named build within an already-parsed
   * config. Only project configs can carry builds; error and undefined
   * configs yield None.
   */
  def cluster(config: Config, buildName: String): Option[Cluster] =
    config match {
      case project: ConfigProject =>
        BuildConfigUtil.findBuildByName(project.builds, buildName).map(build => cluster(build))
      case _: ConfigUndefinedType | _: ConfigError =>
        None
    }

  /**
   * Cluster for a single build config. ECS builds without an explicit
   * cluster default to Cluster.Ecs; unrecognized configs map to Unknown.
   */
  def cluster(config: BuildConfig): Cluster =
    config match {
      case ecs: EcsBuildConfig => ecs.cluster.getOrElse(Cluster.Ecs)
      case k8s: K8sBuildConfig => k8s.cluster
      case BuildConfigUndefinedType(_) => Unknown
    }
}
flowcommerce/delta
api/app/lib/ProjectConfigUtil.scala
Scala
mit
1,201
import com.amazonaws.auth.profile.ProfileCredentialsProvider
import com.amazonaws.auth.{AWSCredentialsProviderChain, DefaultAWSCredentialsProviderChain}
import com.amazonaws.regions.Regions
import com.amazonaws.services.s3.AmazonS3ClientBuilder
import com.gu.pandomainauth.PanDomainAuthSettingsRefresher
import controllers.AdminController
import play.api.ApplicationLoader.Context
import play.api.libs.ws.ahc.AhcWSComponents
import play.api.routing.Router
import play.api.{Application, ApplicationLoader, BuiltInComponentsFromContext}
import play.filters.HttpFiltersComponents
import router.Routes

// Play application loader for compile-time dependency injection: all
// components are wired by hand in AppComponents below (no runtime DI).
class AppLoader extends ApplicationLoader {
  def load(context: Context): Application = {
    new AppComponents(context).application
  }
}

// Compile-time DI wiring for the example app: builds the S3 client, the
// pan-domain auth settings refresher, the single controller, and the router.
class AppComponents(context: Context) extends BuiltInComponentsFromContext(context)
  with AhcWSComponents
  with HttpFiltersComponents {

  // Change this to point to the S3 bucket containing the settings file
  val bucketName = "pan-domain-auth-settings"
  val region = Regions.EU_WEST_1

  // Customise as appropriate depending on how you manage your AWS credentials.
  // Tries the named "workflow" profile first, then falls back to the default
  // provider chain (env vars, instance profile, etc.).
  val credentials = new AWSCredentialsProviderChain(
    new ProfileCredentialsProvider("workflow"),
    DefaultAWSCredentialsProviderChain.getInstance()
  )

  val s3Client = AmazonS3ClientBuilder.standard().withRegion(region).withCredentials(credentials).build()

  // Loads and periodically refreshes the pan-domain auth settings from S3.
  // NOTE(review): assumes the object "local.dev-gutools.co.uk.settings"
  // exists in the bucket above — confirm for the target environment.
  val panDomainSettings = new PanDomainAuthSettingsRefresher(
    domain = "local.dev-gutools.co.uk",
    system = "example",
    bucketName = bucketName,
    settingsFileKey = "local.dev-gutools.co.uk.settings",
    s3Client = s3Client
  )

  val controller = new AdminController(controllerComponents, configuration, wsClient, panDomainSettings)

  // Generated router wired with the single controller.
  def router: Router = new Routes(
    httpErrorHandler,
    controller
  )
}
guardian/pan-domain-authentication
pan-domain-auth-example/app/di.scala
Scala
apache-2.0
1,802
package com.github.andr83.parsek.spark.sink

import java.util.UUID

import com.github.andr83.parsek._
import com.github.andr83.parsek.formatter.FieldFormatter
import com.github.andr83.parsek.serde.{SerDe, Serializer, StringSerializer}
import com.github.andr83.parsek.spark.util.RDDUtils.{DefaultPartitioner, FieldsPartitioner}
import com.github.andr83.parsek.spark.util.{HadoopUtils, RDDUtils}
import com.typesafe.config.Config
import net.ceedubs.ficus.Ficus._
import org.apache.hadoop.io.compress.CompressionCodec
import org.apache.hadoop.io.{NullWritable, Text}
import org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat
import org.apache.spark.rdd.RDD

import scala.collection.JavaConversions._
import scala.util.control.NonFatal

/**
 * Sink that serializes an RDD of PValue and writes it out as Hadoop
 * sequence files, optionally partitioned into per-key output files.
 *
 * @param path            output directory path
 * @param codec           optional compression codec class
 * @param serializer      factory producing the value serializer
 * @param partitions      field formatters used to build partition keys
 * @param numPartitions   optional number of output partitions
 * @param fileNamePattern optional output file name pattern supporting the
 *                        ${randomUUID} and ${timeInMs} placeholders
 * @author andr83
 */
case class SequenceFileSink(
  path: String,
  codec: Option[Class[_ <: CompressionCodec]],
  serializer: () => Serializer,
  partitions: Seq[FieldFormatter] = Seq.empty[FieldFormatter],
  numPartitions: Option[Int] = None,
  fileNamePattern: Option[String] = None
) extends Sink {

  /** Builds the sink from a Typesafe config block. */
  def this(config: Config) = this(
    path = config.getString("path"),
    codec = config.as[Option[String]]("codec") map HadoopUtils.getCodec,
    serializer = config.as[Option[Config]]("serializer")
      .map(serializerConf => () => SerDe(serializerConf))
      .getOrElse(StringSerializer.factory),
    partitions = if (config.hasPath("partitions"))
      FieldFormatter(config.getList("partitions"))
    else
      Seq.empty[FieldFormatter],
    numPartitions = config.as[Option[Int]]("numPartitions"),
    fileNamePattern = config.as[Option[String]]("fileNamePattern")
  )

  /**
   * Writes the RDD to `path`. With partitions or a file name pattern the
   * output is routed into per-key files; otherwise it is saved as a plain
   * sequence file. Failures are logged (best-effort sink), not rethrown.
   */
  override def sink(rdd: RDD[PValue], time: Long): Unit = {
    try {
      if (partitions.nonEmpty || fileNamePattern.nonEmpty) {
        // Expand the supported placeholders in the file name pattern.
        val pattern = fileNamePattern map (pattern => {
          pattern
            .replaceAllLiterally("${randomUUID}", UUID.randomUUID().toString)
            .replaceAllLiterally("${timeInMs}", time.toString)
        })
        val partitioner = if (partitions.isEmpty)
          DefaultPartitioner(pattern)
        else
          FieldsPartitioner(partitions, pattern)
        RDDUtils
          .serializeAndPartitionBy(rdd, serializer, partitioner, numPartitions)
          .mapValues(new Text(_))
          .saveAsHadoopFile(
            path,
            classOf[NullWritable],
            classOf[Text],
            classOf[SequenceFileSink.RDDMultipleSequenceOutputFormat],
            codec = codec
          )
      } else {
        RDDUtils
          .serialize(rdd, serializer)
          .map(v => NullWritable.get() -> v)
          .saveAsSequenceFile(path, codec)
      }
    } catch {
      // BUG FIX: previously caught all Exceptions, which also swallowed
      // InterruptedException. NonFatal keeps the sink best-effort for
      // ordinary failures while letting fatal/interrupt errors propagate.
      case NonFatal(e) => logger.error(e.toString, e)
    }
  }
}

object SequenceFileSink {

  /** Output format that routes each record to a file named after its key. */
  class RDDMultipleSequenceOutputFormat extends MultipleSequenceFileOutputFormat[Any, Any] {
    // The real key is dropped from the output; records are written with a
    // NullWritable key.
    override def generateActualKey(key: Any, value: Any): Any =
      NullWritable.get()

    override def generateFileNameForKeyValue(key: Any, value: Any, name: String): String = {
      // NOTE(review): assumes the partitioner always emits String keys —
      // confirm against RDDUtils.serializeAndPartitionBy.
      key.asInstanceOf[String]
    }
  }
}
andr83/parsek
spark/src/main/scala/com/github/andr83/parsek/spark/sink/SequenceFileSink.scala
Scala
mit
3,037
// scalastyle:off /* NSC -- new Scala compiler * Copyright 2005-2013 LAMP/EPFL * @author Alexander Spoon */ package org.apache.spark.repl import java.net.URL import scala.reflect.io.AbstractFile import scala.tools.nsc._ import scala.tools.nsc.backend.JavaPlatform import scala.tools.nsc.interpreter._ import scala.tools.nsc.interpreter.{Results => IR} import Predef.{println => _, _} import java.io.{BufferedReader, FileReader} import java.net.URI import java.util.concurrent.locks.ReentrantLock import scala.sys.process.Process import scala.tools.nsc.interpreter.session._ import scala.util.Properties.{jdkHome, javaVersion} import scala.tools.util.{Javap} import scala.annotation.tailrec import scala.collection.mutable.ListBuffer import scala.concurrent.ops import scala.tools.nsc.util._ import scala.tools.nsc.interpreter._ import scala.tools.nsc.io.{File, Directory} import scala.reflect.NameTransformer._ import scala.tools.nsc.util.ScalaClassLoader._ import scala.tools.util._ import scala.language.{implicitConversions, existentials, postfixOps} import scala.reflect.{ClassTag, classTag} import scala.tools.reflect.StdRuntimeTags._ import java.lang.{Class => jClass} import scala.reflect.api.{Mirror, TypeCreator, Universe => ApiUniverse} import org.apache.spark.SparkConf import org.apache.spark.SparkContext import org.apache.spark.annotation.DeveloperApi import org.apache.spark.internal.Logging import org.apache.spark.sql.SparkSession import org.apache.spark.util.Utils /** The Scala interactive shell. It provides a read-eval-print loop * around the Interpreter class. * After instantiation, clients should call the main() method. * * If no in0 is specified, then input will come from the console, and * the class will attempt to provide input editing feature such as * input history. * * @author Moez A. 
Abdel-Gawad * @author Lex Spoon * @version 1.2 */ @DeveloperApi class SparkILoop( private val in0: Option[BufferedReader], protected val out: JPrintWriter, val master: Option[String] ) extends AnyRef with LoopCommands with SparkILoopInit with Logging { def this(in0: BufferedReader, out: JPrintWriter, master: String) = this(Some(in0), out, Some(master)) def this(in0: BufferedReader, out: JPrintWriter) = this(Some(in0), out, None) def this() = this(None, new JPrintWriter(Console.out, true), None) private var in: InteractiveReader = _ // the input stream from which commands come // NOTE: Exposed in package for testing private[repl] var settings: Settings = _ private[repl] var intp: SparkIMain = _ @deprecated("Use `intp` instead.", "2.9.0") def interpreter = intp @deprecated("Use `intp` instead.", "2.9.0") def interpreter_= (i: SparkIMain): Unit = intp = i /** Having inherited the difficult "var-ness" of the repl instance, * I'm trying to work around it by moving operations into a class from * which it will appear a stable prefix. */ private def onIntp[T](f: SparkIMain => T): T = f(intp) class IMainOps[T <: SparkIMain](val intp: T) { import intp._ import global._ def printAfterTyper(msg: => String) = intp.reporter printMessage afterTyper(msg) /** Strip NullaryMethodType artifacts. 
*/ private def replInfo(sym: Symbol) = { sym.info match { case NullaryMethodType(restpe) if sym.isAccessor => restpe case info => info } } def echoTypeStructure(sym: Symbol) = printAfterTyper("" + deconstruct.show(replInfo(sym))) def echoTypeSignature(sym: Symbol, verbose: Boolean) = { if (verbose) SparkILoop.this.echo("// Type signature") printAfterTyper("" + replInfo(sym)) if (verbose) { SparkILoop.this.echo("\\n// Internal Type structure") echoTypeStructure(sym) } } } implicit def stabilizeIMain(intp: SparkIMain) = new IMainOps[intp.type](intp) /** TODO - * -n normalize * -l label with case class parameter names * -c complete - leave nothing out */ private def typeCommandInternal(expr: String, verbose: Boolean): Result = { onIntp { intp => val sym = intp.symbolOfLine(expr) if (sym.exists) intp.echoTypeSignature(sym, verbose) else "" } } // NOTE: Must be public for visibility @DeveloperApi var sparkContext: SparkContext = _ override def echoCommandMessage(msg: String) { intp.reporter printMessage msg } // def isAsync = !settings.Yreplsync.value private[repl] def isAsync = false // lazy val power = new Power(intp, new StdReplVals(this))(tagOfStdReplVals, classTag[StdReplVals]) private def history = in.history /** The context class loader at the time this object was created */ protected val originalClassLoader = Utils.getContextOrSparkClassLoader // classpath entries added via :cp private var addedClasspath: String = "" /** A reverse list of commands to replay if the user requests a :replay */ private var replayCommandStack: List[String] = Nil /** A list of commands to replay if the user requests a :replay */ private def replayCommands = replayCommandStack.reverse /** Record a command for replay should the user request a :replay */ private def addReplay(cmd: String) = replayCommandStack ::= cmd private def savingReplayStack[T](body: => T): T = { val saved = replayCommandStack try body finally replayCommandStack = saved } private def savingReader[T](body: => T): T = 
{ val saved = in try body finally in = saved } private def sparkCleanUp() { echo("Stopping spark context.") intp.beQuietDuring { command("sc.stop()") } } /** Close the interpreter and set the var to null. */ private def closeInterpreter() { if (intp ne null) { sparkCleanUp() intp.close() intp = null } } class SparkILoopInterpreter extends SparkIMain(settings, out) { outer => override private[repl] lazy val formatting = new Formatting { def prompt = SparkILoop.this.prompt } override protected def parentClassLoader = SparkHelper.explicitParentLoader(settings).getOrElse(classOf[SparkILoop].getClassLoader) } /** * Constructs a new interpreter. */ protected def createInterpreter() { require(settings != null) if (addedClasspath != "") settings.classpath.append(addedClasspath) val addedJars = if (Utils.isWindows) { // Strip any URI scheme prefix so we can add the correct path to the classpath // e.g. file:/C:/my/path.jar -> C:/my/path.jar getAddedJars().map { jar => new URI(jar).getPath.stripPrefix("/") } } else { // We need new URI(jar).getPath here for the case that `jar` includes encoded white space (%20). getAddedJars().map { jar => new URI(jar).getPath } } // work around for Scala bug val totalClassPath = addedJars.foldLeft( settings.classpath.value)((l, r) => ClassPath.join(l, r)) this.settings.classpath.value = totalClassPath intp = new SparkILoopInterpreter } /** print a friendly help message */ private def helpCommand(line: String): Result = { if (line == "") helpSummary() else uniqueCommand(line) match { case Some(lc) => echo("\\n" + lc.longHelp) case _ => ambiguousError(line) } } private def helpSummary() = { val usageWidth = commands map (_.usageMsg.length) max val formatStr = "%-" + usageWidth + "s %s %s" echo("All commands can be abbreviated, e.g. :he instead of :help.") echo("Those marked with a * have more detailed help, e.g. 
:help imports.\\n") commands foreach { cmd => val star = if (cmd.hasLongHelp) "*" else " " echo(formatStr.format(cmd.usageMsg, star, cmd.help)) } } private def ambiguousError(cmd: String): Result = { matchingCommands(cmd) match { case Nil => echo(cmd + ": no such command. Type :help for help.") case xs => echo(cmd + " is ambiguous: did you mean " + xs.map(":" + _.name).mkString(" or ") + "?") } Result(true, None) } private def matchingCommands(cmd: String) = commands filter (_.name startsWith cmd) private def uniqueCommand(cmd: String): Option[LoopCommand] = { // this lets us add commands willy-nilly and only requires enough command to disambiguate matchingCommands(cmd) match { case List(x) => Some(x) // exact match OK even if otherwise appears ambiguous case xs => xs find (_.name == cmd) } } private var fallbackMode = false private def toggleFallbackMode() { val old = fallbackMode fallbackMode = !old System.setProperty("spark.repl.fallback", fallbackMode.toString) echo(s""" |Switched ${if (old) "off" else "on"} fallback mode without restarting. | If you have defined classes in the repl, it would |be good to redefine them incase you plan to use them. If you still run |into issues it would be good to restart the repl and turn on `:fallback` |mode as first command. """.stripMargin) } /** Show the history */ private lazy val historyCommand = new LoopCommand("history", "show the history (optional num is commands to show)") { override def usage = "[num]" def defaultLines = 20 def apply(line: String): Result = { if (history eq NoHistory) return "No history available." val xs = words(line) val current = history.index val count = try xs.head.toInt catch { case _: Exception => defaultLines } val lines = history.asStrings takeRight count val offset = current - lines.size + 1 for ((line, index) <- lines.zipWithIndex) echo("%3d %s".format(index + offset, line)) } } // When you know you are most likely breaking into the middle // of a line being typed. This softens the blow. 
private[repl] def echoAndRefresh(msg: String) = { echo("\\n" + msg) in.redrawLine() } private[repl] def echo(msg: String) = { out println msg out.flush() } private def echoNoNL(msg: String) = { out print msg out.flush() } /** Search the history */ private def searchHistory(_cmdline: String) { val cmdline = _cmdline.toLowerCase val offset = history.index - history.size + 1 for ((line, index) <- history.asStrings.zipWithIndex ; if line.toLowerCase contains cmdline) echo("%d %s".format(index + offset, line)) } private var currentPrompt = Properties.shellPromptString /** * Sets the prompt string used by the REPL. * * @param prompt The new prompt string */ @DeveloperApi def setPrompt(prompt: String) = currentPrompt = prompt /** * Represents the current prompt string used by the REPL. * * @return The current prompt string */ @DeveloperApi def prompt = currentPrompt import LoopCommand.{ cmd, nullary } /** Standard commands */ private lazy val standardCommands = List( cmd("cp", "<path>", "add a jar or directory to the classpath", addClasspath), cmd("help", "[command]", "print this summary or command-specific help", helpCommand), historyCommand, cmd("h?", "<string>", "search the history", searchHistory), cmd("imports", "[name name ...]", "show import history, identifying sources of names", importsCommand), cmd("implicits", "[-v]", "show the implicits in scope", implicitsCommand), cmd("javap", "<path|class>", "disassemble a file or class name", javapCommand), cmd("load", "<path>", "load and interpret a Scala file", loadCommand), nullary("paste", "enter paste mode: all input up to ctrl-D compiled together", pasteCommand), // nullary("power", "enable power user mode", powerCmd), nullary("quit", "exit the repl", () => Result(false, None)), nullary("replay", "reset execution and replay all previous commands", replay), nullary("reset", "reset the repl to its initial state, forgetting all session entries", resetCommand), shCommand, nullary("silent", "disable/enable automatic 
printing of results", verbosity), nullary("fallback", """ |disable/enable advanced repl changes, these fix some issues but may introduce others. |This mode will be removed once these fixes stablize""".stripMargin, toggleFallbackMode), cmd("type", "[-v] <expr>", "display the type of an expression without evaluating it", typeCommand), nullary("warnings", "show the suppressed warnings from the most recent line which had any", warningsCommand) ) /** Power user commands */ private lazy val powerCommands: List[LoopCommand] = List( // cmd("phase", "<phase>", "set the implicit phase for power commands", phaseCommand) ) // private def dumpCommand(): Result = { // echo("" + power) // history.asStrings takeRight 30 foreach echo // in.redrawLine() // } // private def valsCommand(): Result = power.valsDescription private val typeTransforms = List( "scala.collection.immutable." -> "immutable.", "scala.collection.mutable." -> "mutable.", "scala.collection.generic." -> "generic.", "java.lang." -> "jl.", "scala.runtime." -> "runtime." 
) private def importsCommand(line: String): Result = { val tokens = words(line) val handlers = intp.languageWildcardHandlers ++ intp.importHandlers val isVerbose = tokens contains "-v" handlers.filterNot(_.importedSymbols.isEmpty).zipWithIndex foreach { case (handler, idx) => val (types, terms) = handler.importedSymbols partition (_.name.isTypeName) val imps = handler.implicitSymbols val found = tokens filter (handler importsSymbolNamed _) val typeMsg = if (types.isEmpty) "" else types.size + " types" val termMsg = if (terms.isEmpty) "" else terms.size + " terms" val implicitMsg = if (imps.isEmpty) "" else imps.size + " are implicit" val foundMsg = if (found.isEmpty) "" else found.mkString(" // imports: ", ", ", "") val statsMsg = List(typeMsg, termMsg, implicitMsg) filterNot (_ == "") mkString ("(", ", ", ")") intp.reporter.printMessage("%2d) %-30s %s%s".format( idx + 1, handler.importString, statsMsg, foundMsg )) } } private def implicitsCommand(line: String): Result = onIntp { intp => import intp._ import global._ def p(x: Any) = intp.reporter.printMessage("" + x) // If an argument is given, only show a source with that // in its name somewhere. val args = line split "\\\\s+" val filtered = intp.implicitSymbolsBySource filter { case (source, syms) => (args contains "-v") || { if (line == "") (source.fullName.toString != "scala.Predef") else (args exists (source.name.toString contains _)) } } if (filtered.isEmpty) return "No implicits have been imported other than those in Predef." 
filtered foreach { case (source, syms) => p("/* " + syms.size + " implicit members imported from " + source.fullName + " */") // This groups the members by where the symbol is defined val byOwner = syms groupBy (_.owner) val sortedOwners = byOwner.toList sortBy { case (owner, _) => afterTyper(source.info.baseClasses indexOf owner) } sortedOwners foreach { case (owner, members) => // Within each owner, we cluster results based on the final result type // if there are more than a couple, and sort each cluster based on name. // This is really just trying to make the 100 or so implicits imported // by default into something readable. val memberGroups: List[List[Symbol]] = { val groups = members groupBy (_.tpe.finalResultType) toList val (big, small) = groups partition (_._2.size > 3) val xss = ( (big sortBy (_._1.toString) map (_._2)) :+ (small flatMap (_._2)) ) xss map (xs => xs sortBy (_.name.toString)) } val ownerMessage = if (owner == source) " defined in " else " inherited from " p(" /* " + members.size + ownerMessage + owner.fullName + " */") memberGroups foreach { group => group foreach (s => p(" " + intp.symbolDefString(s))) p("") } } p("") } } private def findToolsJar() = { val jdkPath = Directory(jdkHome) val jar = jdkPath / "lib" / "tools.jar" toFile; if (jar isFile) Some(jar) else if (jdkPath.isDirectory) jdkPath.deepFiles find (_.name == "tools.jar") else None } private def addToolsJarToLoader() = { val cl = findToolsJar match { case Some(tools) => ScalaClassLoader.fromURLs(Seq(tools.toURL), intp.classLoader) case _ => intp.classLoader } if (Javap.isAvailable(cl)) { logDebug(":javap available.") cl } else { logDebug(":javap unavailable: no tools.jar at " + jdkHome) intp.classLoader } } private def newJavap() = new JavapClass(addToolsJarToLoader(), new SparkIMain.ReplStrippingWriter(intp)) { override def tryClass(path: String): Array[Byte] = { val hd :: rest = path split '.' 
toList; // If there are dots in the name, the first segment is the // key to finding it. if (rest.nonEmpty) { intp optFlatName hd match { case Some(flat) => val clazz = flat :: rest mkString NAME_JOIN_STRING val bytes = super.tryClass(clazz) if (bytes.nonEmpty) bytes else super.tryClass(clazz + MODULE_SUFFIX_STRING) case _ => super.tryClass(path) } } else { // Look for Foo first, then Foo$, but if Foo$ is given explicitly, // we have to drop the $ to find object Foo, then tack it back onto // the end of the flattened name. def className = intp flatName path def moduleName = (intp flatName path.stripSuffix(MODULE_SUFFIX_STRING)) + MODULE_SUFFIX_STRING val bytes = super.tryClass(className) if (bytes.nonEmpty) bytes else super.tryClass(moduleName) } } } // private lazy val javap = substituteAndLog[Javap]("javap", NoJavap)(newJavap()) private lazy val javap = try newJavap() catch { case _: Exception => null } // Still todo: modules. private def typeCommand(line0: String): Result = { line0.trim match { case "" => ":type [-v] <expression>" case s if s startsWith "-v " => typeCommandInternal(s stripPrefix "-v " trim, true) case s => typeCommandInternal(s, false) } } private def warningsCommand(): Result = { if (intp.lastWarnings.isEmpty) "Can't find any cached warnings." else intp.lastWarnings foreach { case (pos, msg) => intp.reporter.warning(pos, msg) } } private def javapCommand(line: String): Result = { if (javap == null) ":javap unavailable, no tools.jar at %s. 
Set JDK_HOME.".format(jdkHome) else if (javaVersion startsWith "1.7") ":javap not yet working with java 1.7" else if (line == "") ":javap [-lcsvp] [path1 path2 ...]" else javap(words(line)) foreach { res => if (res.isError) return "Failed: " + res.value else res.show() } } private def wrapCommand(line: String): Result = { def failMsg = "Argument to :wrap must be the name of a method with signature [T](=> T): T" onIntp { intp => import intp._ import global._ words(line) match { case Nil => intp.executionWrapper match { case "" => "No execution wrapper is set." case s => "Current execution wrapper: " + s } case "clear" :: Nil => intp.executionWrapper match { case "" => "No execution wrapper is set." case s => intp.clearExecutionWrapper() ; "Cleared execution wrapper." } case wrapper :: Nil => intp.typeOfExpression(wrapper) match { case PolyType(List(targ), MethodType(List(arg), restpe)) => intp setExecutionWrapper intp.pathToTerm(wrapper) "Set wrapper to '" + wrapper + "'" case tp => failMsg + "\\nFound: <unknown>" } case _ => failMsg } } } private def pathToPhaseWrapper = intp.pathToTerm("$r") + ".phased.atCurrent" // private def phaseCommand(name: String): Result = { // val phased: Phased = power.phased // import phased.NoPhaseName // if (name == "clear") { // phased.set(NoPhaseName) // intp.clearExecutionWrapper() // "Cleared active phase." // } // else if (name == "") phased.get match { // case NoPhaseName => "Usage: :phase <expr> (e.g. typer, erasure.next, erasure+3)" // case ph => "Active phase is '%s'. (To clear, :phase clear)".format(phased.get) // } // else { // val what = phased.parse(name) // if (what.isEmpty || !phased.set(what)) // "'" + name + "' does not appear to represent a valid phase." 
// else { // intp.setExecutionWrapper(pathToPhaseWrapper) // val activeMessage = // if (what.toString.length == name.length) "" + what // else "%s (%s)".format(what, name) // "Active phase is now: " + activeMessage // } // } // } /** * Provides a list of available commands. * * @return The list of commands */ @DeveloperApi def commands: List[LoopCommand] = standardCommands /*++ ( if (isReplPower) powerCommands else Nil )*/ private val replayQuestionMessage = """|That entry seems to have slain the compiler. Shall I replay |your session? I can re-run each line except the last one. |[y/n] """.trim.stripMargin private def crashRecovery(ex: Throwable): Boolean = { echo(ex.toString) ex match { case _: NoSuchMethodError | _: NoClassDefFoundError => echo("\\nUnrecoverable error.") throw ex case _ => def fn(): Boolean = try in.readYesOrNo(replayQuestionMessage, { echo("\\nYou must enter y or n.") ; fn() }) catch { case _: RuntimeException => false } if (fn()) replay() else echo("\\nAbandoning crashed session.") } true } /** The main read-eval-print loop for the repl. It calls * command() for each line of input, and stops when * command() returns false. 
*/ private def loop() { def readOneLine() = { out.flush() in readLine prompt } // return false if repl should exit def processLine(line: String): Boolean = { if (isAsync) { if (!awaitInitialized()) return false runThunks() } if (line eq null) false // assume null means EOF else command(line) match { case Result(false, _) => false case Result(_, Some(finalLine)) => addReplay(finalLine) ; true case _ => true } } def innerLoop() { val shouldContinue = try { processLine(readOneLine()) } catch {case t: Throwable => crashRecovery(t)} if (shouldContinue) innerLoop() } innerLoop() } /** interpret all lines from a specified file */ private def interpretAllFrom(file: File) { savingReader { savingReplayStack { file applyReader { reader => in = SimpleReader(reader, out, false) echo("Loading " + file + "...") loop() } } } } /** create a new interpreter and replay the given commands */ private def replay() { reset() if (replayCommandStack.isEmpty) echo("Nothing to replay.") else for (cmd <- replayCommands) { echo("Replaying: " + cmd) // flush because maybe cmd will have its own output command(cmd) echo("") } } private def resetCommand() { echo("Resetting repl state.") if (replayCommandStack.nonEmpty) { echo("Forgetting this session history:\\n") replayCommands foreach echo echo("") replayCommandStack = Nil } if (intp.namedDefinedTerms.nonEmpty) echo("Forgetting all expression results and named terms: " + intp.namedDefinedTerms.mkString(", ")) if (intp.definedTypes.nonEmpty) echo("Forgetting defined types: " + intp.definedTypes.mkString(", ")) reset() } private def reset() { intp.reset() // unleashAndSetPhase() } /** fork a shell and run a command */ private lazy val shCommand = new LoopCommand("sh", "run a shell command (result is implicitly => List[String])") { override def usage = "<command line>" def apply(line: String): Result = line match { case "" => showUsage() case _ => val toRun = classOf[ProcessResult].getName + "(" + string2codeQuoted(line) + ")" intp interpret toRun 
() } } private def withFile(filename: String)(action: File => Unit) { val f = File(filename) if (f.exists) action(f) else echo("That file does not exist") } private def loadCommand(arg: String) = { var shouldReplay: Option[String] = None withFile(arg)(f => { interpretAllFrom(f) shouldReplay = Some(":load " + arg) }) Result(true, shouldReplay) } private def addAllClasspath(args: Seq[String]): Unit = { var added = false var totalClasspath = "" for (arg <- args) { val f = File(arg).normalize if (f.exists) { added = true addedClasspath = ClassPath.join(addedClasspath, f.path) totalClasspath = ClassPath.join(settings.classpath.value, addedClasspath) intp.addUrlsToClassPath(f.toURI.toURL) sparkContext.addJar(f.toURI.toURL.getPath) } } } private def addClasspath(arg: String): Unit = { val f = File(arg).normalize if (f.exists) { addedClasspath = ClassPath.join(addedClasspath, f.path) intp.addUrlsToClassPath(f.toURI.toURL) sparkContext.addJar(f.toURI.toURL.getPath) echo("Added '%s'. Your new classpath is:\\n\\"%s\\"".format(f.path, intp.global.classPath.asClasspathString)) } else echo("The path '" + f + "' doesn't seem to exist.") } private def powerCmd(): Result = { if (isReplPower) "Already in power mode." else enablePowerMode(false) } private[repl] def enablePowerMode(isDuringInit: Boolean) = { // replProps.power setValue true // unleashAndSetPhase() // asyncEcho(isDuringInit, power.banner) } // private def unleashAndSetPhase() { // if (isReplPower) { // // power.unleash() // // Set the phase to "typer" // intp beSilentDuring phaseCommand("typer") // } // } private def asyncEcho(async: Boolean, msg: => String) { if (async) asyncMessage(msg) else echo(msg) } private def verbosity() = { // val old = intp.printResults // intp.printResults = !old // echo("Switched " + (if (old) "off" else "on") + " result printing.") } /** * Run one command submitted by the user. Two values are returned: * (1) whether to keep running, (2) the line to record for replay, * if any. 
*/ private[repl] def command(line: String): Result = { if (line startsWith ":") { val cmd = line.tail takeWhile (x => !x.isWhitespace) uniqueCommand(cmd) match { case Some(lc) => lc(line.tail stripPrefix cmd dropWhile (_.isWhitespace)) case _ => ambiguousError(cmd) } } else if (intp.global == null) Result(false, None) // Notice failure to create compiler else Result(true, interpretStartingWith(line)) } private def readWhile(cond: String => Boolean) = { Iterator continually in.readLine("") takeWhile (x => x != null && cond(x)) } private def pasteCommand(): Result = { echo("// Entering paste mode (ctrl-D to finish)\\n") val code = readWhile(_ => true) mkString "\\n" echo("\\n// Exiting paste mode, now interpreting.\\n") intp interpret code () } private object paste extends Pasted { val ContinueString = " | " val PromptString = "scala> " def interpret(line: String): Unit = { echo(line.trim) intp interpret line echo("") } def transcript(start: String) = { echo("\\n// Detected repl transcript paste: ctrl-D to finish.\\n") apply(Iterator(start) ++ readWhile(_.trim != PromptString.trim)) } } import paste.{ ContinueString, PromptString } /** * Interpret expressions starting with the first line. * Read lines until a complete compilation unit is available * or until a syntax error has been seen. If a full unit is * read, go ahead and interpret it. Return the full string * to be recorded for replay, if any. */ private def interpretStartingWith(code: String): Option[String] = { // signal completion non-completion input has been received in.completion.resetVerbosity() def reallyInterpret = { val reallyResult = intp.interpret(code) (reallyResult, reallyResult match { case IR.Error => None case IR.Success => Some(code) case IR.Incomplete => if (in.interactive && code.endsWith("\\n\\n")) { echo("You typed two blank lines. 
Starting a new command.") None } else in.readLine(ContinueString) match { case null => // we know compilation is going to fail since we're at EOF and the // parser thinks the input is still incomplete, but since this is // a file being read non-interactively we want to fail. So we send // it straight to the compiler for the nice error message. intp.compileString(code) None case line => interpretStartingWith(code + "\\n" + line) } }) } /** Here we place ourselves between the user and the interpreter and examine * the input they are ostensibly submitting. We intervene in several cases: * * 1) If the line starts with "scala> " it is assumed to be an interpreter paste. * 2) If the line starts with "." (but not ".." or "./") it is treated as an invocation * on the previous result. * 3) If the Completion object's execute returns Some(_), we inject that value * and avoid the interpreter, as it's likely not valid scala code. */ if (code == "") None else if (!paste.running && code.trim.startsWith(PromptString)) { paste.transcript(code) None } else if (Completion.looksLikeInvocation(code) && intp.mostRecentVar != "") { interpretStartingWith(intp.mostRecentVar + code) } else if (code.trim startsWith "//") { // line comment, do nothing None } else reallyInterpret._2 } // runs :load `file` on any files passed via -i private def loadFiles(settings: Settings) = settings match { case settings: SparkRunnerSettings => for (filename <- settings.loadfiles.value) { val cmd = ":load " + filename command(cmd) addReplay(cmd) echo("") } case _ => } /** Tries to create a JLineReader, falling back to SimpleReader: * unless settings or properties are such that it should start * with SimpleReader. 
*/ private def chooseReader(settings: Settings): InteractiveReader = { if (settings.Xnojline.value || Properties.isEmacsShell) SimpleReader() else try new SparkJLineReader( if (settings.noCompletion.value) NoCompletion else new SparkJLineCompletion(intp) ) catch { case ex @ (_: Exception | _: NoClassDefFoundError) => echo("Failed to created SparkJLineReader: " + ex + "\\nFalling back to SimpleReader.") SimpleReader() } } private val u: scala.reflect.runtime.universe.type = scala.reflect.runtime.universe private val m = u.runtimeMirror(Utils.getSparkClassLoader) private def tagOfStaticClass[T: ClassTag]: u.TypeTag[T] = u.TypeTag[T]( m, new TypeCreator { def apply[U <: ApiUniverse with Singleton](m: Mirror[U]): U # Type = m.staticClass(classTag[T].runtimeClass.getName).toTypeConstructor.asInstanceOf[U # Type] }) private def process(settings: Settings): Boolean = savingContextLoader { this.settings = settings createInterpreter() // sets in to some kind of reader depending on environmental cues in = in0 match { case Some(reader) => SimpleReader(reader, out, true) case None => // some post-initialization chooseReader(settings) match { case x: SparkJLineReader => addThunk(x.consoleReader.postInit) ; x case x => x } } lazy val tagOfSparkIMain = tagOfStaticClass[org.apache.spark.repl.SparkIMain] // Bind intp somewhere out of the regular namespace where // we can get at it in generated code. addThunk(intp.quietBind(NamedParam[SparkIMain]("$intp", intp)(tagOfSparkIMain, classTag[SparkIMain]))) addThunk({ import scala.tools.nsc.io._ import Properties.userHome import scala.compat.Platform.EOL val autorun = replProps.replAutorunCode.option flatMap (f => io.File(f).safeSlurp()) if (autorun.isDefined) intp.quietRun(autorun.get) }) addThunk(printWelcome()) addThunk(initializeSpark()) // it is broken on startup; go ahead and exit if (intp.reporter.hasErrors) return false // This is about the illusion of snappiness. 
We call initialize() // which spins off a separate thread, then print the prompt and try // our best to look ready. The interlocking lazy vals tend to // inter-deadlock, so we break the cycle with a single asynchronous // message to an rpcEndpoint. if (isAsync) { intp initialize initializedCallback() createAsyncListener() // listens for signal to run postInitialization } else { intp.initializeSynchronous() postInitialization() } // printWelcome() loadFiles(settings) try loop() catch AbstractOrMissingHandler() finally closeInterpreter() true } // NOTE: Must be public for visibility @DeveloperApi def createSparkSession(): SparkSession = { val execUri = System.getenv("SPARK_EXECUTOR_URI") val jars = getAddedJars() val conf = new SparkConf() .setMaster(getMaster()) .setJars(jars) .setIfMissing("spark.app.name", "Spark shell") // SparkContext will detect this configuration and register it with the RpcEnv's // file server, setting spark.repl.class.uri to the actual URI for executors to // use. This is sort of ugly but since executors are started as part of SparkContext // initialization in certain cases, there's an initialization order issue that prevents // this from being set after SparkContext is instantiated. 
.set("spark.repl.class.outputDir", intp.outputDir.getAbsolutePath()) if (execUri != null) { conf.set("spark.executor.uri", execUri) } val builder = SparkSession.builder.config(conf) val sparkSession = if (SparkSession.hiveClassesArePresent) { logInfo("Creating Spark session with Hive support") builder.enableHiveSupport().getOrCreate() } else { logInfo("Creating Spark session") builder.getOrCreate() } sparkContext = sparkSession.sparkContext sparkSession } private def getMaster(): String = { val master = this.master match { case Some(m) => m case None => val envMaster = sys.env.get("MASTER") val propMaster = sys.props.get("spark.master") propMaster.orElse(envMaster).getOrElse("local[*]") } master } /** process command-line arguments and do as they request */ def process(args: Array[String]): Boolean = { val command = new SparkCommandLine(args.toList, msg => echo(msg)) def neededHelp(): String = (if (command.settings.help.value) command.usageMsg + "\\n" else "") + (if (command.settings.Xhelp.value) command.xusageMsg + "\\n" else "") // if they asked for no help and command is valid, we call the real main neededHelp() match { case "" => command.ok && process(command.settings) case help => echoNoNL(help) ; true } } @deprecated("Use `process` instead", "2.9.0") private def main(settings: Settings): Unit = process(settings) @DeveloperApi def getAddedJars(): Array[String] = { val conf = new SparkConf().setMaster(getMaster()) val envJars = sys.env.get("ADD_JARS") if (envJars.isDefined) { logWarning("ADD_JARS environment variable is deprecated, use --jar spark submit argument instead") } val jars = { val userJars = Utils.getUserJars(conf, isShell = true) if (userJars.isEmpty) { envJars.getOrElse("") } else { userJars.mkString(",") } } Utils.resolveURIs(jars).split(",").filter(_.nonEmpty) } } object SparkILoop extends Logging { implicit def loopToInterpreter(repl: SparkILoop): SparkIMain = repl.intp private def echo(msg: String) = Console println msg // Designed primarily 
for use by test code: take a String with a // bunch of code, and prints out a transcript of what it would look // like if you'd just typed it into the repl. private[repl] def runForTranscript(code: String, settings: Settings): String = { import java.io.{ BufferedReader, StringReader, OutputStreamWriter } stringFromStream { ostream => Console.withOut(ostream) { val output = new JPrintWriter(new OutputStreamWriter(ostream), true) { override def write(str: String) = { // completely skip continuation lines if (str forall (ch => ch.isWhitespace || ch == '|')) () // print a newline on empty scala prompts else if ((str contains '\\n') && (str.trim == "scala> ")) super.write("\\n") else super.write(str) } } val input = new BufferedReader(new StringReader(code)) { override def readLine(): String = { val s = super.readLine() // helping out by printing the line being interpreted. if (s != null) // scalastyle:off println output.println(s) // scalastyle:on println s } } val repl = new SparkILoop(input, output) if (settings.classpath.isDefault) settings.classpath.value = sys.props("java.class.path") repl.getAddedJars().map(jar => new URI(jar).getPath).foreach(settings.classpath.append(_)) repl process settings } } } /** Creates an interpreter loop with default settings and feeds * the given code to it as input. */ private[repl] def run(code: String, sets: Settings = new Settings): String = { import java.io.{ BufferedReader, StringReader, OutputStreamWriter } stringFromStream { ostream => Console.withOut(ostream) { val input = new BufferedReader(new StringReader(code)) val output = new JPrintWriter(new OutputStreamWriter(ostream), true) val repl = new ILoop(input, output) if (sets.classpath.isDefault) sets.classpath.value = sys.props("java.class.path") repl process sets } } } private[repl] def run(lines: List[String]): String = run(lines map (_ + "\\n") mkString) }
aokolnychyi/spark
repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoop.scala
Scala
apache-2.0
39,818
package org.elasticmq.actor.test

import akka.actor.{ActorRef, ActorSystem, Props}
import org.elasticmq.StrictSQSLimits
import org.elasticmq.actor.QueueManagerActor
import org.elasticmq.util.MutableNowProvider
import org.scalatest.{BeforeAndAfterEach, Suite}

/** Mixin for suites that need a fresh [[QueueManagerActor]] for every test case.
  *
  * Before each test a new [[MutableNowProvider]] and a new queue-manager actor
  * (with strict SQS limits and no queue-event listener) are created; after each
  * test the actor is stopped again so state never leaks between cases.
  */
trait QueueManagerForEachTest extends BeforeAndAfterEach { this: Suite =>
  // Supplied by the concrete test suite mixing this trait in.
  val system: ActorSystem

  var queueManagerActor: ActorRef = _
  var nowProvider: MutableNowProvider = _

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    val provider = new MutableNowProvider
    nowProvider = provider
    queueManagerActor = system.actorOf(Props(new QueueManagerActor(provider, StrictSQSLimits, None)))
  }

  override protected def afterEach(): Unit = {
    system.stop(queueManagerActor)
    super.afterEach()
  }
}
adamw/elasticmq
core/src/test/scala/org/elasticmq/actor/test/QueueManagerForEachTest.scala
Scala
apache-2.0
777
/* sbt -- Simple Build Tool
 * Copyright 2011 Mark Harrah
 */
package sbt

import sbt.util.Logger
import java.io.File
import sbt.librarymanagement.Resolver
import sbt.internal.librarymanagement.{ InlineIvyConfiguration, IvyPaths }

/** Implements the `consoleProject` command: starts a Scala REPL seeded with the
  * current build's state, the extracted project data, and helper implicits.
  */
object ConsoleProject {
  /** Launches the console session.
    *
    * @param state           current sbt [[State]]; bound as `currentState` in the session
    * @param extra           additional init commands appended after the generated imports
    * @param cleanupCommands commands to run when the console session ends
    * @param options         extra scalac options passed to the console compiler
    * @param log             logger used while configuring ivy and the compiler
    */
  def apply(state: State, extra: String, cleanupCommands: String = "", options: Seq[String] = Nil)(implicit log: Logger): Unit = {
    val extracted = Project extract state
    val cpImports = new Imports(extracted, state)
    // Values made visible inside the console session, under these names.
    val bindings = ("currentState" -> state) :: ("extracted" -> extracted) :: ("cpHelpers" -> cpImports) :: Nil
    val unit = extracted.currentUnit
    val localOnly = false
    val lock = None
    val checksums = Nil
    val ivyPaths = new IvyPaths(unit.unit.localBase, bootIvyHome(state.configuration))
    val ivyConfiguration = new InlineIvyConfiguration(ivyPaths, Resolver.withDefaultResolvers(Nil), Nil, Nil, localOnly, lock, checksums, None, log)
    val compiler: sbt.compiler.AnalyzingCompiler = Compiler.compilers(ClasspathOptions.repl, ivyConfiguration)(state.configuration, log).scalac
    // Imports declared by the build definition plus one import per bound name above.
    val imports = BuildUtil.getImports(unit.unit) ++ BuildUtil.importAll(bindings.map(_._1))
    val importString = imports.mkString("", ";\n", ";\n\n")
    val initCommands = importString + extra
    // TODO - Hook up dsl classpath correctly...
    (new Console(compiler))(
      unit.classpath, options, initCommands, cleanupCommands
    )(Some(unit.loader), bindings)
  }

  /** Conveniences for consoleProject that shouldn't normally be used for builds. */
  final class Imports private[sbt] (extracted: Extracted, state: State) {
    // Brings `runTask` and `get` into scope for the conversions below.
    import extracted._
    // Let task and setting keys be evaluated directly in the console session.
    implicit def taskKeyEvaluate[T](t: TaskKey[T]): Evaluate[T] = new Evaluate(runTask(t, state)._2)
    implicit def settingKeyEvaluate[T](s: SettingKey[T]): Evaluate[T] = new Evaluate(get(s))
  }

  // Wrapper holding the eagerly evaluated result of a key.
  final class Evaluate[T] private[sbt] (val eval: T)

  // The launcher's ivy home, when the running launcher version exposes it;
  // older launchers lack the accessor, hence the NoSuchMethodError fallback.
  private def bootIvyHome(app: xsbti.AppConfiguration): Option[File] =
    try { Option(app.provider.scalaProvider.launcher.ivyHome) }
    catch { case _: NoSuchMethodError => None }
}
mdedetrich/sbt
main/src/main/scala/sbt/ConsoleProject.scala
Scala
bsd-3-clause
2,078
/*
 * Licensed to The Apereo Foundation under one or more contributor license
 * agreements. See the NOTICE file distributed with this work for additional
 * information regarding copyright ownership.
 *
 * The Apereo Foundation licenses this file to you under the Apache License,
 * Version 2.0, (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.tle.core.db.tables

import com.tle.core.db.types.InstId

/** One row of the settings table: a (property -> value) pair scoped to an
  * institution.
  *
  * @param institution_id the owning institution
  * @param property       the setting's key
  * @param value          the stored value
  */
case class Setting(institution_id: InstId, property: String, value: String)
equella/Equella
Source/Plugins/Core/com.equella.serverbase/scalasrc/com/tle/core/db/tables/Settings.scala
Scala
apache-2.0
950
package io.taig.gandalf.report.syntax

import io.taig.gandalf.{ Error, Term }
import io.taig.gandalf.report.ops
import shapeless.{ HList, Witness }

import scala.language.implicitConversions

/** Syntax enrichment for [[Term]]: implicitly wraps a `Term.Aux` whose error
  * type is `Error[N, A]` in [[ops.term]], exposing the report operations on it.
  * Mix in the trait or import from the companion object to activate it.
  */
trait term {
  // NOTE(review): type parameter E appears unused in both the signature and the
  // body — TODO confirm whether it can be dropped.
  implicit def termSyntax[N <: String, I, O, A <: HList, E](
      term: Term.Aux[N, I, O, A, Error[N, A]]
  )(
      // Witness for the singleton name type N, required by the ops wrapper.
      implicit w: Witness.Aux[N]
  ): ops.term[N, I, O, A] = new ops.term[N, I, O, A]( term )
}

object term extends term
Taig/Gandalf
report/src/main/scala/io/taig/gandalf/report/syntax/term.scala
Scala
mit
444
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.rdd import java.io.IOException import scala.collection.mutable.{ArrayBuffer, HashSet} import scala.util.Random import org.apache.commons.math3.distribution.{BinomialDistribution, PoissonDistribution} import org.apache.hadoop.conf.{Configurable, Configuration} import org.apache.hadoop.fs.FileSystem import org.apache.hadoop.mapred._ import org.apache.hadoop.mapreduce.{Job => NewJob, JobContext => NewJobContext, OutputCommitter => NewOutputCommitter, OutputFormat => NewOutputFormat, RecordWriter => NewRecordWriter, TaskAttemptContext => NewTaskAttempContext} import org.apache.hadoop.util.Progressable import org.scalatest.Assertions import org.apache.spark._ import org.apache.spark.Partitioner import org.apache.spark.util.Utils class PairRDDFunctionsSuite extends SparkFunSuite with SharedSparkContext { test("aggregateByKey") { val pairs = sc.parallelize(Array((1, 1), (1, 1), (3, 2), (5, 1), (5, 3)), 2) val sets = pairs.aggregateByKey(new HashSet[Int]())(_ += _, _ ++= _).collect() assert(sets.size === 3) val valuesFor1 = sets.find(_._1 == 1).get._2 assert(valuesFor1.toList.sorted === List(1)) val valuesFor3 = sets.find(_._1 == 3).get._2 assert(valuesFor3.toList.sorted === 
List(2)) val valuesFor5 = sets.find(_._1 == 5).get._2 assert(valuesFor5.toList.sorted === List(1, 3)) } test("groupByKey") { val pairs = sc.parallelize(Array((1, 1), (1, 2), (1, 3), (2, 1))) val groups = pairs.groupByKey().collect() assert(groups.size === 2) val valuesFor1 = groups.find(_._1 == 1).get._2 assert(valuesFor1.toList.sorted === List(1, 2, 3)) val valuesFor2 = groups.find(_._1 == 2).get._2 assert(valuesFor2.toList.sorted === List(1)) } test("groupByKey with duplicates") { val pairs = sc.parallelize(Array((1, 1), (1, 2), (1, 3), (1, 1), (2, 1))) val groups = pairs.groupByKey().collect() assert(groups.size === 2) val valuesFor1 = groups.find(_._1 == 1).get._2 assert(valuesFor1.toList.sorted === List(1, 1, 2, 3)) val valuesFor2 = groups.find(_._1 == 2).get._2 assert(valuesFor2.toList.sorted === List(1)) } test("groupByKey with negative key hash codes") { val pairs = sc.parallelize(Array((-1, 1), (-1, 2), (-1, 3), (2, 1))) val groups = pairs.groupByKey().collect() assert(groups.size === 2) val valuesForMinus1 = groups.find(_._1 == -1).get._2 assert(valuesForMinus1.toList.sorted === List(1, 2, 3)) val valuesFor2 = groups.find(_._1 == 2).get._2 assert(valuesFor2.toList.sorted === List(1)) } test("groupByKey with many output partitions") { val pairs = sc.parallelize(Array((1, 1), (1, 2), (1, 3), (2, 1))) val groups = pairs.groupByKey(10).collect() assert(groups.size === 2) val valuesFor1 = groups.find(_._1 == 1).get._2 assert(valuesFor1.toList.sorted === List(1, 2, 3)) val valuesFor2 = groups.find(_._1 == 2).get._2 assert(valuesFor2.toList.sorted === List(1)) } test("sampleByKey") { val defaultSeed = 1L // vary RDD size for (n <- List(100, 1000, 1000000)) { val data = sc.parallelize(1 to n, 2) val fractionPositive = 0.3 val stratifiedData = data.keyBy(StratifiedAuxiliary.stratifier(fractionPositive)) val samplingRate = 0.1 StratifiedAuxiliary.testSample(stratifiedData, samplingRate, defaultSeed, n) } // vary fractionPositive for (fractionPositive <- List(0.1, 
0.3, 0.5, 0.7, 0.9)) { val n = 100 val data = sc.parallelize(1 to n, 2) val stratifiedData = data.keyBy(StratifiedAuxiliary.stratifier(fractionPositive)) val samplingRate = 0.1 StratifiedAuxiliary.testSample(stratifiedData, samplingRate, defaultSeed, n) } // Use the same data for the rest of the tests val fractionPositive = 0.3 val n = 100 val data = sc.parallelize(1 to n, 2) val stratifiedData = data.keyBy(StratifiedAuxiliary.stratifier(fractionPositive)) // vary seed for (seed <- defaultSeed to defaultSeed + 5L) { val samplingRate = 0.1 StratifiedAuxiliary.testSample(stratifiedData, samplingRate, seed, n) } // vary sampling rate for (samplingRate <- List(0.01, 0.05, 0.1, 0.5)) { StratifiedAuxiliary.testSample(stratifiedData, samplingRate, defaultSeed, n) } } test("sampleByKeyExact") { val defaultSeed = 1L // vary RDD size for (n <- List(100, 1000, 1000000)) { val data = sc.parallelize(1 to n, 2) val fractionPositive = 0.3 val stratifiedData = data.keyBy(StratifiedAuxiliary.stratifier(fractionPositive)) val samplingRate = 0.1 StratifiedAuxiliary.testSampleExact(stratifiedData, samplingRate, defaultSeed, n) } // vary fractionPositive for (fractionPositive <- List(0.1, 0.3, 0.5, 0.7, 0.9)) { val n = 100 val data = sc.parallelize(1 to n, 2) val stratifiedData = data.keyBy(StratifiedAuxiliary.stratifier(fractionPositive)) val samplingRate = 0.1 StratifiedAuxiliary.testSampleExact(stratifiedData, samplingRate, defaultSeed, n) } // Use the same data for the rest of the tests val fractionPositive = 0.3 val n = 100 val data = sc.parallelize(1 to n, 2) val stratifiedData = data.keyBy(StratifiedAuxiliary.stratifier(fractionPositive)) // vary seed for (seed <- defaultSeed to defaultSeed + 5L) { val samplingRate = 0.1 StratifiedAuxiliary.testSampleExact(stratifiedData, samplingRate, seed, n) } // vary sampling rate for (samplingRate <- List(0.01, 0.05, 0.1, 0.5)) { StratifiedAuxiliary.testSampleExact(stratifiedData, samplingRate, defaultSeed, n) } } test("reduceByKey") { val 
pairs = sc.parallelize(Array((1, 1), (1, 2), (1, 3), (1, 1), (2, 1))) val sums = pairs.reduceByKey(_ + _).collect() assert(sums.toSet === Set((1, 7), (2, 1))) } test("reduceByKey with collectAsMap") { val pairs = sc.parallelize(Array((1, 1), (1, 2), (1, 3), (1, 1), (2, 1))) val sums = pairs.reduceByKey(_ + _).collectAsMap() assert(sums.size === 2) assert(sums(1) === 7) assert(sums(2) === 1) } test("reduceByKey with many output partitions") { val pairs = sc.parallelize(Array((1, 1), (1, 2), (1, 3), (1, 1), (2, 1))) val sums = pairs.reduceByKey(_ + _, 10).collect() assert(sums.toSet === Set((1, 7), (2, 1))) } test("reduceByKey with partitioner") { val p = new Partitioner() { def numPartitions = 2 def getPartition(key: Any) = key.asInstanceOf[Int] } val pairs = sc.parallelize(Array((1, 1), (1, 2), (1, 1), (0, 1))).partitionBy(p) val sums = pairs.reduceByKey(_ + _) assert(sums.collect().toSet === Set((1, 4), (0, 1))) assert(sums.partitioner === Some(p)) // count the dependencies to make sure there is only 1 ShuffledRDD val deps = new HashSet[RDD[_]]() def visit(r: RDD[_]) { for (dep <- r.dependencies) { deps += dep.rdd visit(dep.rdd) } } visit(sums) assert(deps.size === 2) // ShuffledRDD, ParallelCollection. } test("countApproxDistinctByKey") { def error(est: Long, size: Long): Double = math.abs(est - size) / size.toDouble /* Since HyperLogLog unique counting is approximate, and the relative standard deviation is * only a statistical bound, the tests can fail for large values of relativeSD. We will be using * relatively tight error bounds to check correctness of functionality rather than checking * whether the approximation conforms with the requested bound. */ val p = 20 val sp = 0 // When p = 20, the relative accuracy is about 0.001. So with high probability, the // relative error should be smaller than the threshold 0.01 we use here. val relativeSD = 0.01 // For each value i, there are i tuples with first element equal to i. 
// Therefore, the expected count for key i would be i. val stacked = (1 to 100).flatMap(i => (1 to i).map(j => (i, j))) val rdd1 = sc.parallelize(stacked) val counted1 = rdd1.countApproxDistinctByKey(p, sp).collect() counted1.foreach { case (k, count) => assert(error(count, k) < relativeSD) } val rnd = new Random(42) // The expected count for key num would be num val randStacked = (1 to 100).flatMap { i => val num = rnd.nextInt() % 500 (1 to num).map(j => (num, j)) } val rdd2 = sc.parallelize(randStacked) val counted2 = rdd2.countApproxDistinctByKey(relativeSD).collect() counted2.foreach { case (k, count) => assert(error(count, k) < relativeSD, s"${error(count, k)} < $relativeSD") } } test("join") { val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1))) val rdd2 = sc.parallelize(Array((1, 'x'), (2, 'y'), (2, 'z'), (4, 'w'))) val joined = rdd1.join(rdd2).collect() assert(joined.size === 4) assert(joined.toSet === Set( (1, (1, 'x')), (1, (2, 'x')), (2, (1, 'y')), (2, (1, 'z')) )) } test("join all-to-all") { val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (1, 3))) val rdd2 = sc.parallelize(Array((1, 'x'), (1, 'y'))) val joined = rdd1.join(rdd2).collect() assert(joined.size === 6) assert(joined.toSet === Set( (1, (1, 'x')), (1, (1, 'y')), (1, (2, 'x')), (1, (2, 'y')), (1, (3, 'x')), (1, (3, 'y')) )) } test("leftOuterJoin") { val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1))) val rdd2 = sc.parallelize(Array((1, 'x'), (2, 'y'), (2, 'z'), (4, 'w'))) val joined = rdd1.leftOuterJoin(rdd2).collect() assert(joined.size === 5) assert(joined.toSet === Set( (1, (1, Some('x'))), (1, (2, Some('x'))), (2, (1, Some('y'))), (2, (1, Some('z'))), (3, (1, None)) )) } // See SPARK-9326 test("cogroup with empty RDD") { import scala.reflect.classTag val intPairCT = classTag[(Int, Int)] val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1))) val rdd2 = sc.emptyRDD[(Int, Int)](intPairCT) val joined = rdd1.cogroup(rdd2).collect() assert(joined.size > 0) } // 
See SPARK-9326 test("cogroup with groupByed RDD having 0 partitions") { import scala.reflect.classTag val intCT = classTag[Int] val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1))) val rdd2 = sc.emptyRDD[Int](intCT).groupBy((x) => 5) val joined = rdd1.cogroup(rdd2).collect() assert(joined.size > 0) } // See SPARK-22465 test("cogroup between multiple RDD " + "with an order of magnitude difference in number of partitions") { val rdd1 = sc.parallelize((1 to 1000).map(x => (x, x)), 1000) val rdd2 = sc .parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1))) .partitionBy(new HashPartitioner(10)) val joined = rdd1.cogroup(rdd2) assert(joined.getNumPartitions == rdd1.getNumPartitions) } // See SPARK-22465 test("cogroup between multiple RDD" + " with number of partitions similar in order of magnitude") { val rdd1 = sc.parallelize((1 to 1000).map(x => (x, x)), 20) val rdd2 = sc .parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1))) .partitionBy(new HashPartitioner(10)) val joined = rdd1.cogroup(rdd2) assert(joined.getNumPartitions == rdd2.getNumPartitions) } test("rightOuterJoin") { val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1))) val rdd2 = sc.parallelize(Array((1, 'x'), (2, 'y'), (2, 'z'), (4, 'w'))) val joined = rdd1.rightOuterJoin(rdd2).collect() assert(joined.size === 5) assert(joined.toSet === Set( (1, (Some(1), 'x')), (1, (Some(2), 'x')), (2, (Some(1), 'y')), (2, (Some(1), 'z')), (4, (None, 'w')) )) } test("fullOuterJoin") { val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1))) val rdd2 = sc.parallelize(Array((1, 'x'), (2, 'y'), (2, 'z'), (4, 'w'))) val joined = rdd1.fullOuterJoin(rdd2).collect() assert(joined.size === 6) assert(joined.toSet === Set( (1, (Some(1), Some('x'))), (1, (Some(2), Some('x'))), (2, (Some(1), Some('y'))), (2, (Some(1), Some('z'))), (3, (Some(1), None)), (4, (None, Some('w'))) )) } test("join with no matches") { val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1))) val rdd2 = sc.parallelize(Array((4, 
'x'), (5, 'y'), (5, 'z'), (6, 'w'))) val joined = rdd1.join(rdd2).collect() assert(joined.size === 0) } test("join with many output partitions") { val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1))) val rdd2 = sc.parallelize(Array((1, 'x'), (2, 'y'), (2, 'z'), (4, 'w'))) val joined = rdd1.join(rdd2, 10).collect() assert(joined.size === 4) assert(joined.toSet === Set( (1, (1, 'x')), (1, (2, 'x')), (2, (1, 'y')), (2, (1, 'z')) )) } test("groupWith") { val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1))) val rdd2 = sc.parallelize(Array((1, 'x'), (2, 'y'), (2, 'z'), (4, 'w'))) val joined = rdd1.groupWith(rdd2).collect() assert(joined.size === 4) val joinedSet = joined.map(x => (x._1, (x._2._1.toList, x._2._2.toList))).toSet assert(joinedSet === Set( (1, (List(1, 2), List('x'))), (2, (List(1), List('y', 'z'))), (3, (List(1), List())), (4, (List(), List('w'))) )) } test("groupWith3") { val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1))) val rdd2 = sc.parallelize(Array((1, 'x'), (2, 'y'), (2, 'z'), (4, 'w'))) val rdd3 = sc.parallelize(Array((1, 'a'), (3, 'b'), (4, 'c'), (4, 'd'))) val joined = rdd1.groupWith(rdd2, rdd3).collect() assert(joined.size === 4) val joinedSet = joined.map(x => (x._1, (x._2._1.toList, x._2._2.toList, x._2._3.toList))).toSet assert(joinedSet === Set( (1, (List(1, 2), List('x'), List('a'))), (2, (List(1), List('y', 'z'), List())), (3, (List(1), List(), List('b'))), (4, (List(), List('w'), List('c', 'd'))) )) } test("groupWith4") { val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1))) val rdd2 = sc.parallelize(Array((1, 'x'), (2, 'y'), (2, 'z'), (4, 'w'))) val rdd3 = sc.parallelize(Array((1, 'a'), (3, 'b'), (4, 'c'), (4, 'd'))) val rdd4 = sc.parallelize(Array((2, '@'))) val joined = rdd1.groupWith(rdd2, rdd3, rdd4).collect() assert(joined.size === 4) val joinedSet = joined.map(x => (x._1, (x._2._1.toList, x._2._2.toList, x._2._3.toList, x._2._4.toList))).toSet assert(joinedSet === Set( (1, (List(1, 
2), List('x'), List('a'), List())), (2, (List(1), List('y', 'z'), List(), List('@'))), (3, (List(1), List(), List('b'), List())), (4, (List(), List('w'), List('c', 'd'), List())) )) } test("zero-partition RDD") { val emptyDir = Utils.createTempDir() try { val file = sc.textFile(emptyDir.getAbsolutePath) assert(file.partitions.isEmpty) assert(file.collect().toList === Nil) // Test that a shuffle on the file works, because this used to be a bug assert(file.map(line => (line, 1)).reduceByKey(_ + _).collect().toList === Nil) } finally { Utils.deleteRecursively(emptyDir) } } test("keys and values") { val rdd = sc.parallelize(Array((1, "a"), (2, "b"))) assert(rdd.keys.collect().toList === List(1, 2)) assert(rdd.values.collect().toList === List("a", "b")) } test("default partitioner uses partition size") { // specify 2000 partitions val a = sc.makeRDD(Array(1, 2, 3, 4), 2000) // do a map, which loses the partitioner val b = a.map(a => (a, (a * 2).toString)) // then a group by, and see we didn't revert to 2 partitions val c = b.groupByKey() assert(c.partitions.size === 2000) } test("default partitioner uses largest partitioner") { val a = sc.makeRDD(Array((1, "a"), (2, "b")), 2) val b = sc.makeRDD(Array((1, "a"), (2, "b")), 2000) val c = a.join(b) assert(c.partitions.size === 2000) } test("subtract") { val a = sc.parallelize(Array(1, 2, 3), 2) val b = sc.parallelize(Array(2, 3, 4), 4) val c = a.subtract(b) assert(c.collect().toSet === Set(1)) assert(c.partitions.size === a.partitions.size) } test("subtract with narrow dependency") { // use a deterministic partitioner val p = new Partitioner() { def numPartitions = 5 def getPartition(key: Any) = key.asInstanceOf[Int] } // partitionBy so we have a narrow dependency val a = sc.parallelize(Array((1, "a"), (2, "b"), (3, "c"))).partitionBy(p) // more partitions/no partitioner so a shuffle dependency val b = sc.parallelize(Array((2, "b"), (3, "cc"), (4, "d")), 4) val c = a.subtract(b) assert(c.collect().toSet === Set((1, "a"), 
(3, "c"))) // Ideally we could keep the original partitioner... assert(c.partitioner === None) } test("subtractByKey") { val a = sc.parallelize(Array((1, "a"), (1, "a"), (2, "b"), (3, "c")), 2) val b = sc.parallelize(Array((2, 20), (3, 30), (4, 40)), 4) val c = a.subtractByKey(b) assert(c.collect().toSet === Set((1, "a"), (1, "a"))) assert(c.partitions.size === a.partitions.size) } test("subtractByKey with narrow dependency") { // use a deterministic partitioner val p = new Partitioner() { def numPartitions = 5 def getPartition(key: Any) = key.asInstanceOf[Int] } // partitionBy so we have a narrow dependency val a = sc.parallelize(Array((1, "a"), (1, "a"), (2, "b"), (3, "c"))).partitionBy(p) // more partitions/no partitioner so a shuffle dependency val b = sc.parallelize(Array((2, "b"), (3, "cc"), (4, "d")), 4) val c = a.subtractByKey(b) assert(c.collect().toSet === Set((1, "a"), (1, "a"))) assert(c.partitioner.get === p) } test("foldByKey") { val pairs = sc.parallelize(Array((1, 1), (1, 2), (1, 3), (1, 1), (2, 1))) val sums = pairs.foldByKey(0)(_ + _).collect() assert(sums.toSet === Set((1, 7), (2, 1))) } test("foldByKey with mutable result type") { val pairs = sc.parallelize(Array((1, 1), (1, 2), (1, 3), (1, 1), (2, 1))) val bufs = pairs.mapValues(v => ArrayBuffer(v)).cache() // Fold the values using in-place mutation val sums = bufs.foldByKey(new ArrayBuffer[Int])(_ ++= _).collect() assert(sums.toSet === Set((1, ArrayBuffer(1, 2, 3, 1)), (2, ArrayBuffer(1)))) // Check that the mutable objects in the original RDD were not changed assert(bufs.collect().toSet === Set( (1, ArrayBuffer(1)), (1, ArrayBuffer(2)), (1, ArrayBuffer(3)), (1, ArrayBuffer(1)), (2, ArrayBuffer(1)))) } test("saveNewAPIHadoopFile should call setConf if format is configurable") { val pairs = sc.parallelize(Array((new Integer(1), new Integer(1)))) // No error, non-configurable formats still work pairs.saveAsNewAPIHadoopFile[NewFakeFormat]("ignored") /* * Check that configurable formats get 
configured: * ConfigTestFormat throws an exception if we try to write * to it when setConf hasn't been called first. * Assertion is in ConfigTestFormat.getRecordWriter. */ pairs.saveAsNewAPIHadoopFile[ConfigTestFormat]("ignored") } test("The JobId on the driver and executors should be the same during the commit") { // Create more than one rdd to mimic stageId not equal to rddId val pairs = sc.parallelize(Array((1, 2), (2, 3)), 2) .map { p => (new Integer(p._1 + 1), new Integer(p._2 + 1)) } .filter { p => p._1 > 0 } pairs.saveAsNewAPIHadoopFile[YetAnotherFakeFormat]("ignored") assert(JobID.jobid != -1) } test("saveAsHadoopFile should respect configured output committers") { val pairs = sc.parallelize(Array((new Integer(1), new Integer(1)))) val conf = new JobConf() conf.setOutputCommitter(classOf[FakeOutputCommitter]) FakeOutputCommitter.ran = false pairs.saveAsHadoopFile( "ignored", pairs.keyClass, pairs.valueClass, classOf[FakeOutputFormat], conf) assert(FakeOutputCommitter.ran, "OutputCommitter was never called") } test("failure callbacks should be called before calling writer.close() in saveNewAPIHadoopFile") { val pairs = sc.parallelize(Array((new Integer(1), new Integer(2))), 1) FakeWriterWithCallback.calledBy = "" FakeWriterWithCallback.exception = null val e = intercept[SparkException] { pairs.saveAsNewAPIHadoopFile[NewFakeFormatWithCallback]("ignored") } assert(e.getCause.getMessage contains "failed to write") assert(FakeWriterWithCallback.calledBy === "write,callback,close") assert(FakeWriterWithCallback.exception != null, "exception should be captured") assert(FakeWriterWithCallback.exception.getMessage contains "failed to write") } test("failure callbacks should be called before calling writer.close() in saveAsHadoopFile") { val pairs = sc.parallelize(Array((new Integer(1), new Integer(2))), 1) val conf = new JobConf() FakeWriterWithCallback.calledBy = "" FakeWriterWithCallback.exception = null val e = intercept[SparkException] { pairs.saveAsHadoopFile( 
"ignored", pairs.keyClass, pairs.valueClass, classOf[FakeFormatWithCallback], conf) } assert(e.getCause.getMessage contains "failed to write") assert(FakeWriterWithCallback.calledBy === "write,callback,close") assert(FakeWriterWithCallback.exception != null, "exception should be captured") assert(FakeWriterWithCallback.exception.getMessage contains "failed to write") } test("saveAsNewAPIHadoopDataset should support invalid output paths when " + "there are no files to be committed to an absolute output location") { val pairs = sc.parallelize(Array((new Integer(1), new Integer(2))), 1) def saveRddWithPath(path: String): Unit = { val job = NewJob.getInstance(new Configuration(sc.hadoopConfiguration)) job.setOutputKeyClass(classOf[Integer]) job.setOutputValueClass(classOf[Integer]) job.setOutputFormatClass(classOf[NewFakeFormat]) if (null != path) { job.getConfiguration.set("mapred.output.dir", path) } else { job.getConfiguration.unset("mapred.output.dir") } val jobConfiguration = job.getConfiguration // just test that the job does not fail with java.lang.IllegalArgumentException. pairs.saveAsNewAPIHadoopDataset(jobConfiguration) } saveRddWithPath(null) saveRddWithPath("") saveRddWithPath("::invalid::") } // In spark 2.1, only null was supported - not other invalid paths. // org.apache.hadoop.mapred.FileOutputFormat.getOutputPath fails with IllegalArgumentException // for non-null invalid paths. 
test("saveAsHadoopDataset should respect empty output directory when " + "there are no files to be committed to an absolute output location") { val pairs = sc.parallelize(Array((new Integer(1), new Integer(2))), 1) val conf = new JobConf() conf.setOutputKeyClass(classOf[Integer]) conf.setOutputValueClass(classOf[Integer]) conf.setOutputFormat(classOf[FakeOutputFormat]) conf.setOutputCommitter(classOf[FakeOutputCommitter]) FakeOutputCommitter.ran = false pairs.saveAsHadoopDataset(conf) assert(FakeOutputCommitter.ran, "OutputCommitter was never called") } test("lookup") { val pairs = sc.parallelize(Array((1, 2), (3, 4), (5, 6), (5, 7))) assert(pairs.partitioner === None) assert(pairs.lookup(1) === Seq(2)) assert(pairs.lookup(5) === Seq(6, 7)) assert(pairs.lookup(-1) === Seq()) } test("lookup with partitioner") { val pairs = sc.parallelize(Array((1, 2), (3, 4), (5, 6), (5, 7))) val p = new Partitioner { def numPartitions: Int = 2 def getPartition(key: Any): Int = Math.abs(key.hashCode() % 2) } val shuffled = pairs.partitionBy(p) assert(shuffled.partitioner === Some(p)) assert(shuffled.lookup(1) === Seq(2)) assert(shuffled.lookup(5) === Seq(6, 7)) assert(shuffled.lookup(-1) === Seq()) } test("lookup with bad partitioner") { val pairs = sc.parallelize(Array((1, 2), (3, 4), (5, 6), (5, 7))) val p = new Partitioner { def numPartitions: Int = 2 def getPartition(key: Any): Int = key.hashCode() % 2 } val shuffled = pairs.partitionBy(p) assert(shuffled.partitioner === Some(p)) assert(shuffled.lookup(1) === Seq(2)) intercept[IllegalArgumentException] {shuffled.lookup(-1)} } private object StratifiedAuxiliary { def stratifier (fractionPositive: Double): (Int) => String = { (x: Int) => if (x % 10 < (10 * fractionPositive).toInt) "1" else "0" } def assertBinomialSample( exact: Boolean, actual: Int, trials: Int, p: Double): Unit = { if (exact) { assert(actual == math.ceil(p * trials).toInt) } else { val dist = new BinomialDistribution(trials, p) val q = 
dist.cumulativeProbability(actual) withClue(s"p = $p: trials = $trials") { assert(q >= 0.001 && q <= 0.999) } } } def assertPoissonSample( exact: Boolean, actual: Int, trials: Int, p: Double): Unit = { if (exact) { assert(actual == math.ceil(p * trials).toInt) } else { val dist = new PoissonDistribution(p * trials) val q = dist.cumulativeProbability(actual) withClue(s"p = $p: trials = $trials") { assert(q >= 0.001 && q <= 0.999) } } } def testSampleExact(stratifiedData: RDD[(String, Int)], samplingRate: Double, seed: Long, n: Long): Unit = { testBernoulli(stratifiedData, true, samplingRate, seed, n) testPoisson(stratifiedData, true, samplingRate, seed, n) } def testSample(stratifiedData: RDD[(String, Int)], samplingRate: Double, seed: Long, n: Long): Unit = { testBernoulli(stratifiedData, false, samplingRate, seed, n) testPoisson(stratifiedData, false, samplingRate, seed, n) } // Without replacement validation def testBernoulli(stratifiedData: RDD[(String, Int)], exact: Boolean, samplingRate: Double, seed: Long, n: Long): Unit = { val trials = stratifiedData.countByKey() val fractions = Map("1" -> samplingRate, "0" -> samplingRate) val sample = if (exact) { stratifiedData.sampleByKeyExact(false, fractions, seed) } else { stratifiedData.sampleByKey(false, fractions, seed) } val sampleCounts = sample.countByKey() val takeSample = sample.collect() sampleCounts.foreach { case (k, v) => assertBinomialSample(exact = exact, actual = v.toInt, trials = trials(k).toInt, p = samplingRate) } assert(takeSample.size === takeSample.toSet.size) takeSample.foreach { x => assert(1 <= x._2 && x._2 <= n, s"elements not in [1, $n]") } } // With replacement validation def testPoisson(stratifiedData: RDD[(String, Int)], exact: Boolean, samplingRate: Double, seed: Long, n: Long): Unit = { val trials = stratifiedData.countByKey() val expectedSampleSize = stratifiedData.countByKey().mapValues(count => math.ceil(count * samplingRate).toInt) val fractions = Map("1" -> samplingRate, "0" -> 
samplingRate) val sample = if (exact) { stratifiedData.sampleByKeyExact(true, fractions, seed) } else { stratifiedData.sampleByKey(true, fractions, seed) } val sampleCounts = sample.countByKey() val takeSample = sample.collect() sampleCounts.foreach { case (k, v) => assertPoissonSample(exact, actual = v.toInt, trials = trials(k).toInt, p = samplingRate) } val groupedByKey = takeSample.groupBy(_._1) for ((key, v) <- groupedByKey) { if (expectedSampleSize(key) >= 100 && samplingRate >= 0.1) { // sample large enough for there to be repeats with high likelihood assert(v.toSet.size < expectedSampleSize(key)) } else { if (exact) { assert(v.toSet.size <= expectedSampleSize(key)) } else { assertPoissonSample(false, actual = v.toSet.size, trials(key).toInt, p = samplingRate) } } } takeSample.foreach(x => assert(1 <= x._2 && x._2 <= n, s"elements not in [1, $n]")) } } } /* These classes are fakes for testing saveAsHadoopFile/saveNewAPIHadoopFile. Unfortunately, they have to be top level classes, and not defined in the test method, because otherwise Scala won't generate no-args constructors and the test will therefore throw InstantiationException when saveAsNewAPIHadoopFile tries to instantiate them with Class.newInstance. */ /* * Original Hadoop API */ class FakeWriter extends RecordWriter[Integer, Integer] { override def write(key: Integer, value: Integer): Unit = () override def close(reporter: Reporter): Unit = () } class FakeOutputCommitter() extends OutputCommitter() { override def setupJob(jobContext: JobContext): Unit = () override def needsTaskCommit(taskContext: TaskAttemptContext): Boolean = true override def setupTask(taskContext: TaskAttemptContext): Unit = () override def commitTask(taskContext: TaskAttemptContext): Unit = { FakeOutputCommitter.ran = true () } override def abortTask(taskContext: TaskAttemptContext): Unit = () } /* * Used to communicate state between the test harness and the OutputCommitter. 
*/
// Holds the flag set by FakeOutputCommitter.commitTask; tests reset it to false
// before a job and assert it afterwards to prove the committer actually ran.
object FakeOutputCommitter {
  var ran = false
}

// Old-API OutputFormat whose writer does nothing; used when the test only cares
// that the configured OutputCommitter is invoked.
class FakeOutputFormat() extends OutputFormat[Integer, Integer]() {
  override def getRecordWriter(
      ignored: FileSystem,
      job: JobConf,
      name: String,
      progress: Progressable): RecordWriter[Integer, Integer] = {
    new FakeWriter()
  }
  override def checkOutputSpecs(ignored: FileSystem, job: JobConf): Unit = ()
}

/*
 * New-style Hadoop API
 */
// No-op record writer for the new (mapreduce) API.
class NewFakeWriter extends NewRecordWriter[Integer, Integer] {
  def close(p1: NewTaskAttempContext): Unit = ()
  def write(p1: Integer, p2: Integer): Unit = ()
}

// No-op committer for the new API; never requests a task commit.
class NewFakeCommitter extends NewOutputCommitter {
  def setupJob(p1: NewJobContext): Unit = ()
  def needsTaskCommit(p1: NewTaskAttempContext): Boolean = false
  def setupTask(p1: NewTaskAttempContext): Unit = ()
  def commitTask(p1: NewTaskAttempContext): Unit = ()
  def abortTask(p1: NewTaskAttempContext): Unit = ()
}

// New-API OutputFormat wiring the no-op writer and committer together.
// Must stay a top-level class with a no-arg constructor so
// saveAsNewAPIHadoopFile can instantiate it reflectively (see file comment).
class NewFakeFormat() extends NewOutputFormat[Integer, Integer]() {
  def checkOutputSpecs(p1: NewJobContext): Unit = ()
  def getRecordWriter(p1: NewTaskAttempContext): NewRecordWriter[Integer, Integer] = {
    new NewFakeWriter()
  }
  def getOutputCommitter(p1: NewTaskAttempContext): NewOutputCommitter = {
    new NewFakeCommitter()
  }
}

// Shared mutable state recording the order in which write/failure-callback/close
// happen; the failure-callback tests assert calledBy === "write,callback,close".
object FakeWriterWithCallback {
  var calledBy: String = ""
  var exception: Throwable = _

  def onFailure(ctx: TaskContext, e: Throwable): Unit = {
    calledBy += "callback,"
    exception = e
  }
}

// Old-API writer that registers a task-failure listener and then throws from
// write(), letting tests verify the listener fires before close().
class FakeWriterWithCallback extends FakeWriter {

  override def close(p1: Reporter): Unit = {
    FakeWriterWithCallback.calledBy += "close"
  }

  override def write(p1: Integer, p2: Integer): Unit = {
    FakeWriterWithCallback.calledBy += "write,"
    TaskContext.get().addTaskFailureListener { (t: TaskContext, e: Throwable) =>
      FakeWriterWithCallback.onFailure(t, e)
    }
    throw new IOException("failed to write")
  }
}

// Old-API format producing the always-failing callback writer above.
class FakeFormatWithCallback() extends FakeOutputFormat {
  override def getRecordWriter(
      ignored: FileSystem,
      job: JobConf,
      name: String,
      progress: Progressable): RecordWriter[Integer, Integer] = {
    new FakeWriterWithCallback()
  }
}
// New-API counterpart of FakeWriterWithCallback: registers a failure listener,
// then throws from write() so tests can check the callback runs before close().
// Shares its bookkeeping state with the old-API version via the
// FakeWriterWithCallback companion object.
class NewFakeWriterWithCallback extends NewFakeWriter {
  override def close(p1: NewTaskAttempContext): Unit = {
    FakeWriterWithCallback.calledBy += "close"
  }

  override def write(p1: Integer, p2: Integer): Unit = {
    FakeWriterWithCallback.calledBy += "write,"
    TaskContext.get().addTaskFailureListener { (t: TaskContext, e: Throwable) =>
      FakeWriterWithCallback.onFailure(t, e)
    }
    throw new IOException("failed to write")
  }
}

// New-API format producing the always-failing callback writer above.
class NewFakeFormatWithCallback() extends NewFakeFormat {
  override def getRecordWriter(p1: NewTaskAttempContext): NewRecordWriter[Integer, Integer] = {
    new NewFakeWriterWithCallback()
  }
}

// Committer that records the driver-side job id in setupJob and asserts each
// executor-side task sees the same id in setupTask (used by the
// "JobId on the driver and executors should be the same" test).
class YetAnotherFakeCommitter extends NewOutputCommitter with Assertions {
  def setupJob(j: NewJobContext): Unit = {
    JobID.jobid = j.getJobID().getId
  }

  def needsTaskCommit(t: NewTaskAttempContext): Boolean = false

  def setupTask(t: NewTaskAttempContext): Unit = {
    val jobId = t.getTaskAttemptID().getJobID().getId
    assert(jobId === JobID.jobid)
  }

  def commitTask(t: NewTaskAttempContext): Unit = {}
  def abortTask(t: NewTaskAttempContext): Unit = {}
}

// Format wiring a no-op writer to the job-id-checking committer above.
class YetAnotherFakeFormat() extends NewOutputFormat[Integer, Integer]() {
  def checkOutputSpecs(j: NewJobContext): Unit = {}

  def getRecordWriter(t: NewTaskAttempContext): NewRecordWriter[Integer, Integer] = {
    new NewFakeWriter()
  }

  def getOutputCommitter(t: NewTaskAttempContext): NewOutputCommitter = {
    new YetAnotherFakeCommitter()
  }
}

// -1 means "setupJob has not run yet"; the test asserts jobid != -1 afterwards.
object JobID {
  var jobid = -1
}

// Configurable format: getRecordWriter asserts that setConf was called first,
// verifying that saveAsNewAPIHadoopFile configures Configurable formats.
class ConfigTestFormat() extends NewFakeFormat() with Configurable {

  var setConfCalled = false

  def setConf(p1: Configuration): Unit = {
    setConfCalled = true
    ()
  }

  def getConf: Configuration = null

  override def getRecordWriter(p1: NewTaskAttempContext): NewRecordWriter[Integer, Integer] = {
    assert(setConfCalled, "setConf was never called")
    super.getRecordWriter(p1)
  }
}
saltstar/spark
core/src/test/scala/org/apache/spark/rdd/PairRDDFunctionsSuite.scala
Scala
apache-2.0
34,535
/*
 * Copyright 2001-2013 Artima, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.scalatest

import java.io.PrintStream
import java.io.ByteArrayOutputStream
import org.scalatest.events._
import org.scalatest.funspec.AnyFunSpec

/**
 * Verifies that a WrapperCatchReporter swallows exceptions thrown by the
 * reporter it wraps, for every kind of event and for dispose().
 */
class CatchReporterSpec extends AnyFunSpec {

  it("the CatchReporter should catch exceptions") {

    // A reporter that fails on every event and on dispose.
    val buggyReporter = new ResourcefulReporter {
      override def apply(event: Event): Unit = {
        throw new RuntimeException
      }
      override def dispose(): Unit = {
        throw new RuntimeException
      }
    }

    // Pass in a PrintStream so you don't get an ugly msg to the standard error stream
    val catchReporter = new WrapperCatchReporter(buggyReporter, new PrintStream(new ByteArrayOutputStream))

    // For each event: the raw reporter must throw, and the catch reporter
    // must swallow the identical failure instead of propagating it.
    def check(event: Event): Unit = {
      intercept[RuntimeException] {
        buggyReporter(event)
      }
      catchReporter(event)
    }

    check(RunStarting(new Ordinal(99), 1, ConfigMap()))
    check(TestStarting(new Ordinal(99), "suite name", "suite ID", Some("suite.className"), "test name", "test name"))
    check(TestSucceeded(new Ordinal(99), "suite name", "suite ID", Some("suite.className"), "test name", "test name", Vector.empty))
    check(TestIgnored(new Ordinal(99), "suite name", "suite ID", Some("suite.className"), "test name", "test name"))
    check(TestFailed(new Ordinal(99), "message", "suite name", "suite ID", Some("suite.className"), "test name", "test name", Vector.empty, Vector.empty, None))
    check(SuiteStarting(new Ordinal(99), "suite name", "suite ID", None, None))
    check(SuiteCompleted(new Ordinal(99), "suite name", "suite ID", None, None))
    check(SuiteAborted(new Ordinal(99), "msg", "suiteName", "suite ID", None, None))
    check(InfoProvided(new Ordinal(99), "msg", None))
    check(RunStopped(new Ordinal(99)))
    check(RunAborted(new Ordinal(99), "", None))
    check(RunCompleted(new Ordinal(99)))

    // dispose() is not an event, so it is exercised separately.
    intercept[RuntimeException] {
      buggyReporter.dispose()
    }
    catchReporter.dispose()
  }
}
scalatest/scalatest
jvm/scalatest-test/src/test/scala/org/scalatest/CatchReporterSpec.scala
Scala
apache-2.0
4,125
package flags import scala.language/*=>scala.language.*/.experimental/*=>scala.language.experimental.*/.macros/*=>scala.language.experimental.macros.*/ package object p/*<=flags.p.package.*/ { private lazy val x/*<=flags.p.package.x.*/ = 1 protected implicit var y/*<=flags.p.package.y().*/: Int/*=>scala.Int#*/ = 2 def z/*<=flags.p.package.z().*/(pp/*<=flags.p.package.z().(pp)*/: Int/*=>scala.Int#*/) = 3 def m/*<=flags.p.package.m().*/[TT/*<=flags.p.package.m().[TT]*/]: Int/*=>scala.Int#*/ = macro ???/*=>scala.Predef.`???`().*/ abstract class C/*<=flags.p.package.C#*/[+T/*<=flags.p.package.C#[T]*/, -U/*<=flags.p.package.C#[U]*/, V/*<=flags.p.package.C#[V]*/](x/*<=flags.p.package.C#x.*/: T/*=>flags.p.package.C#[T]*/, y/*<=flags.p.package.C#y.*/: U/*=>flags.p.package.C#[U]*/, z/*<=flags.p.package.C#z.*/: V/*=>flags.p.package.C#[V]*/) { def this/*<=flags.p.package.C#`<init>`(+1).*/() = this(???/*=>scala.Predef.`???`().*/, ???/*=>scala.Predef.`???`().*/, ???/*=>scala.Predef.`???`().*/) def w/*<=flags.p.package.C#w().*/: Int/*=>scala.Int#*/ } type T1/*<=flags.p.package.T1#*/ = Int/*=>scala.Int#*/ type T2/*<=flags.p.package.T2#*/[T/*<=flags.p.package.T2#[T]*/] = S/*=>flags.p.package.S#*/[T/*=>flags.p.package.T2#[T]*/] type U/*<=flags.p.package.U#*/ <: Int/*=>scala.Int#*/ type V/*<=flags.p.package.V#*/ >: Int/*=>scala.Int#*/ case object X/*<=flags.p.package.X.*/ final class Y/*<=flags.p.package.Y#*/ sealed trait Z/*<=flags.p.package.Z#*/ class AA/*<=flags.p.package.AA#*/(x/*<=flags.p.package.AA#x.*/: Int/*=>scala.Int#*/, val y/*<=flags.p.package.AA#y.*/: Int/*=>scala.Int#*/, var z/*<=flags.p.package.AA#z().*/: Int/*=>scala.Int#*/) class S/*<=flags.p.package.S#*/[@specialized/*=>scala.specialized#*/ T/*<=flags.p.package.S#[T]*/] val List/*=>scala.collection.immutable.List.*/(xs1/*<=flags.p.package.xs1.*/) = ???/*=>scala.Predef.`???`().*/ ???/*=>scala.Predef.`???`().*/ match { case List/*=>scala.collection.immutable.List.*/(xs2/*<=local1*/) => ???/*=>scala.Predef.`???`().*/ 
} ???/*=>scala.Predef.`???`().*/ match { case _: List/*=>scala.package.List#*/[t/*<=local2*/] => ???/*=>scala.Predef.`???`().*/ } }
scalameta/scalameta
tests/jvm/src/test/resources/example/Flags.scala
Scala
bsd-3-clause
2,177
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.catalyst.analysis

import java.io.File
import java.net.URI

import org.mockito.Mockito._
import org.scalatest.Matchers

import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier}
import org.apache.spark.sql.catalyst.catalog.{CatalogDatabase, CatalogStorageFormat, CatalogTable, CatalogTableType, ExternalCatalog, InMemoryCatalog, SessionCatalog}
import org.apache.spark.sql.catalyst.expressions.{Alias, AttributeReference}
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, Project}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils

/**
 * Checks that resolving built-in functions never touches the external catalog
 * (verified with Mockito spies over an InMemoryCatalog).
 */
class AnalysisExternalCatalogSuite extends AnalysisTest with Matchers {

  // Builds an Analyzer over a SessionCatalog backed by `externCatalog`, with a
  // "default" database (rooted at `databasePath`) and one managed table t1(a INT).
  private def getAnalyzer(externCatalog: ExternalCatalog, databasePath: File): Analyzer = {
    val conf = new SQLConf()
    val catalog = new SessionCatalog(externCatalog, FunctionRegistry.builtin, conf)
    catalog.createDatabase(
      CatalogDatabase("default", "", databasePath.toURI, Map.empty),
      ignoreIfExists = false)
    catalog.createTable(
      CatalogTable(
        TableIdentifier("t1", Some("default")),
        CatalogTableType.MANAGED,
        CatalogStorageFormat.empty,
        StructType(Seq(StructField("a", IntegerType, nullable = true)))),
      ignoreIfExists = false)
    new Analyzer(catalog, conf)
  }

  test("query builtin functions don't call the external catalog") {
    withTempDir { tempDir =>
      val inMemoryCatalog = new InMemoryCatalog
      val catalog = spy(inMemoryCatalog)
      val analyzer = getAnalyzer(catalog, tempDir)
      // Clear interactions recorded during setup so only analysis-time calls count.
      reset(catalog)
      val testRelation = LocalRelation(AttributeReference("a", IntegerType, nullable = true)())
      // sum(a) is a builtin aggregate; resolving it must not hit the external catalog.
      val func =
        Alias(UnresolvedFunction("sum", Seq(UnresolvedAttribute("a")), isDistinct = false), "s")()
      val plan = Project(Seq(func), testRelation)
      analyzer.execute(plan)
      verifyNoInteractions(catalog)
    }
  }

  test("check the existence of builtin functions don't call the external catalog") {
    withTempDir { tempDir =>
      val inMemoryCatalog = new InMemoryCatalog
      val externCatalog = spy(inMemoryCatalog)
      // NOTE(review): `conf` is not defined locally here (unlike in getAnalyzer);
      // presumably it is inherited from AnalysisTest/PlanTest — TODO confirm.
      val catalog = new SessionCatalog(externCatalog, FunctionRegistry.builtin, conf)
      catalog.createDatabase(
        CatalogDatabase("default", "", new URI(tempDir.toString), Map.empty),
        ignoreIfExists = false)
      // Drop the setup interactions before the call under test.
      reset(externCatalog)
      catalog.functionExists(FunctionIdentifier("sum"))
      verifyNoInteractions(externCatalog)
    }
  }
}
goldmedal/spark
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisExternalCatalogSuite.scala
Scala
apache-2.0
3,340
package org.jetbrains.plugins.scala
package annotator

/** Runs the shared [[ApplicationTestBase]] fixtures against Scala 2.11 and earlier. */
class ApplicationTestBase_2_11 extends ApplicationTestBase {
  override protected def supportedIn(version: ScalaVersion): Boolean = version <= LatestScalaVersions.Scala_2_11
}

/** Runs the shared [[ApplicationTestBase]] fixtures against Scala 2.12 and later. */
class ApplicationTestBase_2_12 extends ApplicationTestBase {
  override protected def supportedIn(version: ScalaVersion): Boolean = version >= LatestScalaVersions.Scala_2_12
}

/**
 * Annotator regression tests around method application and overload
 * resolution. Each test passes a code fixture to `checkTextHasNoErrors` and
 * expects the annotator to produce no error highlighting; most test names
 * reference the YouTrack issue (SCL-*) they guard against.
 */
abstract class ApplicationTestBase extends AnnotatorLightCodeInsightFixtureTestAdapter {

  // SCL-9931: call resolved through an implicit conversion on a companion object.
  def testSCL9931(): Unit = {
    checkTextHasNoErrors(
      """
        |trait Foo {
        | def foo(a: Int) = 1
        |}
        |
        |object Foo{
        | def foo = 0.2
        |
        | implicit def defImpl(x: Foo.type):Foo = FooImpl
        |}
        |
        |object FooImpl extends Foo
        |
        |object Bar {
        | Foo.foo(1) //in (1): Application does not takes parameters
        |}
      """.stripMargin)
  }

  // SCL-3878: eta-expansion must pick the right overload of `prop` for `map`.
  def testSCL3878(): Unit = checkTextHasNoErrors(
    """class Test {
      | def prop: Vector[Int] = Vector.empty[Int] // def or val, doesn't matter
      | def prop(x: String) = ""
      | def test1 = List("1", "2", "3").map(prop) // prop is red (Cannot resolve symbol prop)
      | def test2 = List(1, 2, 3).map(prop) // this one is ok
      |}
    """.stripMargin)

  // SCL-11063: `update` sugar (`map(k) = v`) on a value class wrapping a mutable Map.
  def testScl11063(): Unit = {
    checkTextHasNoErrors(
      """
        |import scala.collection.mutable
        |
        |class DynamicMap[A](val self: mutable.Map[String, A]) extends AnyVal {
        | def apply(key: Int): A = self(key.toString)
        | def apply(key: Float): A = self(key.toString)
        |
        | def update(key: String, value: A): Unit = self(key) = value
        |}
        |
        |object Example {
        | val map = new DynamicMap(new mutable.HashMap[String, Int])
        | <caret>map("a") = 5
        |}
      """.stripMargin)
  }

  // Right-associative operators spelled with backticks, both in symbolic and
  // name-mangled ($greater$colon...) form, with and without an implicit arg list.
  def testDecodeRightAssoc(): Unit = {
    checkTextHasNoErrors(
      """object BacktickedRightAssoc {
        | class Options
        | implicit val opt: Options = new Options
        |
        | implicit class SymbolicOperations(val arr: Array[Byte]) {
        | def `>:`(i: Int): arr.type = ???
        |
        | def `>::`(i: Int)(implicit opt: Options): arr.type = ???
        | }
        |
        | val a: Array[Byte] = ???
        |
        | 1 `>:` a
        |
        | 1 `$greater$colon` a
        |
        | 1 `>::` a
        |
        | 1 `$greater$colon$colon` a
        |}
        |""".stripMargin
    )
  }

  //adapted from `better-files` project
  // Methods with an implicit parameter list that has a default value must not
  // be treated as SAM-convertible when passed as operator operands.
  def testImplicitArgNotSAM(): Unit = {
    checkTextHasNoErrors(
      """
        |package test
        |
        |import java.io._
        |
        |abstract class FileTest {
        | class Options
        |
        | def output1 (implicit options: Options = new Options): OutputStream
        | def output2[T](implicit options: Options = new Options): OutputStream
        | def input (implicit options: Options = new Options): InputStream
        |
        | implicit class InputStreamOps(in: InputStream) {
        | def >>(out: OutputStream): Unit = ???
        | }
        |
        | def useDefault(): Unit = {
        | input >> output1
        | input >> output1()
        |
        | input >> output2
        | input >> output2()
        |
        | input >> output2[Nothing]
        | input >> output2[Nothing]()
        | }
        |
        | def passImplicit(implicit opt: Options): Unit = {
        | input >> output1
        | input >> output1()
        |
        | input >> output2
        | input >> output2()
        |
        | input >> output2[Nothing]
        | input >> output2[Nothing]()
        | }
        |
        | def explicit(): Unit = {
        | input >> output1(new Options)
        | input >> output2(new Options)
        | input >> output2[Nothing](new Options)
        | }
        |}""".stripMargin)
  }

  // SCL-13027: overloads disambiguated by implicit parameters, called with
  // both dot notation and infix (operator) notation.
  def testSCL13027(): Unit = {
    checkTextHasNoErrors(
      """
        |object Test {
        | class returnType[T]
        | object myObject {
        | // implicit not related to this issue, but required for compilation
        | implicit object intType
        | def myFunction(fun: Int => Unit)(implicit i: intType.type): returnType[Int] = new returnType[Int]
        | implicit object strType
        | def myFunction(fun: String => Unit)(implicit i: strType.type): returnType[String] = new returnType[String]
        | }
        | (myObject.myFunction(_ + 1)): returnType[Int] // works
        | (myObject.myFunction(_.toUpperCase + 1)): returnType[String] // works
        | (myObject myFunction (_ + 1)): returnType[Int] // compiles, but red "Cannot resolve reference myFunction with such signature"
        | (myObject myFunction (_.toUpperCase + 1)): returnType[String] // compiles, but red "Cannot resolve reference myFunction with such signature"
        |}
      """.stripMargin)
  }

  // SCL-10352: implicit constructor parameters with overriding and ClassTag bounds.
  def testSCL10352(): Unit = {
    checkTextHasNoErrors(
      """
        |abstract class BlockModel[T <: Block[_]] (implicit c: scala.reflect.ClassTag[T]){}
        |
        |class Block[R <: BlockModel[_]](implicit val repr: R) {???}
        |
        |abstract class Screen[R <: BlockModel[_]](override implicit val repr: R) extends Block[R] {}
        |
        |object TabsDemoScreen {
        | implicit object TabsDemoScreenModel extends BlockModel[TabsDemoScreen] {
        | }
        |}
        |class TabsDemoScreen extends Screen[TabsDemoScreen.TabsDemoScreenModel.type] {}
      """.stripMargin
    )
  }
}
JetBrains/intellij-scala
scala/scala-impl/test/org/jetbrains/plugins/scala/annotator/ApplicationTest.scala
Scala
apache-2.0
5,653
package infra.hubs.pubsub import akka.actor._ import scala.concurrent.duration.FiniteDuration import scala.Some import infra.hubs.HubTopic /** * @author alari * @since 12/19/13 */ trait PubSubTopic[T] extends HubTopic[T] { topic: Actor => private var listeners = Set[ActorRef]() private var timeout: Option[Cancellable] = None def timeoutDelay = FiniteDuration(10, "seconds") /** * Should actor die when there's no listeners connected or not */ val timeoutsEnabled = true protected def actualListeners = listeners /** * Clears or launches timeout */ def checkTimeout() { if (listeners.isEmpty) launchTimeout() else clearTimeout() } def clearTimeout() { timeout.map(_.cancel()) timeout = None } def launchTimeout() { import context.dispatcher if (timeout.isEmpty && timeoutsEnabled) timeout = Some(context.system.scheduler.scheduleOnce(timeoutDelay, self, PoisonPill)) } launchTimeout() def join(actor: ActorRef) { listeners += actor context.watch(actor) clearTimeout() } def leave(actor: ActorRef) { listeners -= actor context.unwatch(actor) checkTimeout() } def broadcast(message: Any) { listeners.foreach(_ ! message) } type CanSubscribe = PartialFunction[T, Boolean] val canSubscribe: CanSubscribe abstract override def topicBehaviour: Receive = super.topicBehaviour orElse pubSubBehaviour val pubSubBehaviour: Receive = { case Terminated(a) if listeners.contains(a) => leave(a) case PubSubTopic.Broadcast(m) => broadcast(m) case PubSubTopic.Join(state: T) if canSubscribe.applyOrElse(state, { _: T => false: Boolean }) => join(sender) case PubSubTopic.Leave if listeners.contains(sender) => leave(sender) } } object PubSubTopic { /** * Broadcast the message to all topic listeners * @param message message */ case class Broadcast(message: Any) /** * Subscribe to a topic, if permitted * @param listenerState state to check permissions against */ case class Join(listenerState: Any) /** * Ubsubscribe the sender */ case object Leave }
alari/play-hubs
module-code/app/infra/hubs/pubsub/PubSubTopic.scala
Scala
mit
2,167
package repositories trait Repository[M, I] { def getOneById(id: I): M }
manuelkiessling/play2-compiletime-cassandra-di
app/repositories/Repository.scala
Scala
mit
76