code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Copyright 2016 Miroslav Janíček
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.sandius.rembulan.test.fragments
import net.sandius.rembulan.test.FragmentExecTestSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
/** JUnit-runnable spec that executes the module-library Lua fragments
  * through the shared [[FragmentExecTestSuite]] harness.
  */
@RunWith(classOf[JUnitRunner])
class ModuleLibFragmentsRunSpec extends FragmentExecTestSuite {
// The fragment bundle under test also provides its own expected results.
override def bundles = Seq(ModuleLibFragments)
override def expectations = Seq(ModuleLibFragments)
// Run each fragment in both contexts; presumably module-only vs. full stdlib — confirm in FragmentExecTestSuite.
override def contexts = Seq(Mod, Full)
// Step budgets: single-step execution and effectively unbounded execution.
override def steps = Seq(1, Int.MaxValue)
}
| mjanicek/rembulan | rembulan-tests/src/test/scala/net/sandius/rembulan/test/fragments/ModuleLibFragmentsRunSpec.scala | Scala | apache-2.0 | 1,063 |
/**
*
* ${FILE_NAME}
* Ledger wallet
*
* Created by Pierre Pollastri on 09/01/15.
*
* The MIT License (MIT)
*
* Copyright (c) 2015 Ledger
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
package co.ledger.wallet.app.base
import android.app.Fragment
import co.ledger.wallet.core.utils.logs.Loggable
/** Common base for the app's fragments: an Android [[Fragment]] mixed with
  * the project's Loggable and RichFragment helpers.
  */
class BaseFragment extends Fragment with Loggable with RichFragment {
// Tag is empty by default; subclasses may override. NOTE(review): confirm whether Loggable expects a non-empty tag.
def tag = ""
} | LedgerHQ/ledger-wallet-android | app/src/main/scala/co/ledger/wallet/app/base/BaseFragment.scala | Scala | mit | 1,431 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.geotools.tools
import com.beust.jcommander.JCommander
import org.locationtech.geomesa.tools._
import org.locationtech.geomesa.tools.export.{ConvertCommand, GenerateAvroSchemaCommand}
import org.locationtech.geomesa.tools.status._
object GeoToolsRunner extends Runner {

  override val name: String = "geomesa-gt"

  /** Assembles the complete command list for the geomesa-gt CLI.
    *
    * @param jc the JCommander instance, needed by the help command
    * @return all commands, in their display order
    */
  override def createCommands(jc: JCommander): Seq[Command] = {
    // GeoTools-specific schema/data management commands
    val schemaCommands = Seq[Command](
      new data.GeoToolsCreateSchemaCommand,
      new data.GeoToolsDeleteFeaturesCommand,
      new data.GeoToolsDescribeSchemaCommand,
      new data.GeoToolsGetSftConfigCommand,
      new data.GeoToolsGetTypeNamesCommand,
      new data.GeoToolsRemoveSchemaCommand,
      new data.GeoToolsUpdateSchemaCommand
    )
    // GeoTools-specific import/export commands
    val ioCommands = Seq[Command](
      new export.GeoToolsExportCommand,
      new export.GeoToolsPlaybackCommand,
      new ingest.GeoToolsIngestCommand
    )
    // common commands, placeholders for script functions
    val commonCommands = Seq[Command](
      new ConvertCommand,
      new ConfigureCommand,
      new ClasspathCommand,
      new EnvironmentCommand,
      new GenerateAvroSchemaCommand,
      new HelpCommand(this, jc),
      new ScalaConsoleCommand,
      new VersionCommand
    )
    schemaCommands ++ ioCommands ++ commonCommands
  }
}
| elahrvivaz/geomesa | geomesa-gt/geomesa-gt-tools/src/main/scala/org/locationtech/geomesa/geotools/tools/GeoToolsRunner.scala | Scala | apache-2.0 | 1,581 |
/*
* Copyright 2014 Databricks, 2016 crealytics
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.crealytics.spark.excel
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types._
private[excel] object InferSchema {
  type CellType = Int

  /** Infers a column schema in the same spirit as the JSON inference in
    * [[org.apache.spark.sql.execution.datasources.json.InferSchema]]:
    * 1. infer a type per cell, 2. merge the per-row types into a common type,
    * 3. fall back to string for any column whose type stayed null.
    */
  def apply(rowsRDD: RDD[Seq[DataType]]): Array[DataType] = {
    val zero: Array[DataType] = Array.empty
    val merged = rowsRDD.aggregate(zero)(inferRowType, mergeRowTypes)
    merged.map {
      case _: NullType => StringType
      case concrete => concrete
    }
  }

  /** Folds one row of cell types into the accumulated per-column types,
    * padding the shorter side with NullType so ragged rows line up.
    */
  private def inferRowType(rowSoFar: Array[DataType], next: Seq[DataType]): Array[DataType] = {
    val width = math.max(rowSoFar.length, next.size)
    Array.tabulate(width) { i =>
      val accumulated = if (i < rowSoFar.length) rowSoFar(i) else NullType
      val cell = if (i < next.size) next(i) else NullType
      inferField(accumulated, cell)
    }
  }

  /** Combines two partially aggregated rows column by column. */
  private[excel] def mergeRowTypes(first: Array[DataType], second: Array[DataType]): Array[DataType] =
    first.zipAll(second, NullType, NullType).map { case (left, right) =>
      findTightestCommonType(left, right).getOrElse(NullType)
    }

  /** Merges a known column type with one freshly observed cell type. Given known
    * type Double and a string "1", there is no point checking for Int, as the
    * final type must be Double or higher.
    */
  private[excel] def inferField(typeSoFar: DataType, field: DataType): DataType = {
    // Returning the StringType constant through a local function works around a
    // Scala compiler issue which leads to runtime incompatibilities with certain
    // Spark versions; see issue #128 for more details. Do not inline this.
    def stringType(): DataType = {
      StringType
    }
    if (field == NullType) {
      typeSoFar
    } else {
      (typeSoFar, field) match {
        case (NullType, observed) => observed
        case (DoubleType, DoubleType) => DoubleType
        case (BooleanType, BooleanType) => BooleanType
        case (TimestampType, TimestampType) => TimestampType
        case (StringType, _) => stringType()
        case _ => stringType()
      }
    }
  }

  /** Numeric widening order, copied from the internal Spark api
    * [[org.apache.spark.sql.catalyst.analysis.HiveTypeCoercion]].
    */
  private val numericPrecedence: IndexedSeq[DataType] =
    IndexedSeq[DataType](ByteType, ShortType, IntegerType, LongType, FloatType, DoubleType, TimestampType)

  /** Tightest common supertype of two types, copied from the internal Spark api
    * [[org.apache.spark.sql.catalyst.analysis.HiveTypeCoercion]].
    */
  val findTightestCommonType: (DataType, DataType) => Option[DataType] = {
    case (t1, t2) if t1 == t2 => Some(t1)
    case (NullType, t) => Some(t)
    case (t, NullType) => Some(t)
    case (StringType, _) => Some(StringType)
    case (_, StringType) => Some(StringType)
    // Promote numeric types to the highest of the two
    case (t1, t2) if Seq(t1, t2).forall(numericPrecedence.contains) =>
      val widest = numericPrecedence.lastIndexWhere(t => t == t1 || t == t2)
      Some(numericPrecedence(widest))
    case _ => None
  }
}
| crealytics/spark-excel | src/main/scala/com/crealytics/spark/excel/InferSchema.scala | Scala | apache-2.0 | 3,968 |
package scorex.crypto.ads.merkle
import java.io.File
import org.mapdb.{DBMaker, HTreeMap, Serializer}
import scorex.crypto.hash.CryptographicHash.Digest
import scorex.storage.Storage
import scorex.utils.ScorexLogging
import scala.util.{Failure, Success, Try}
/** Persistent key/value storage for a Merkle tree: one MapDB file per tree
  * level, each holding a position -> digest map.
  *
  * @param fileName path prefix for the per-level ".mapDB" files
  * @param levels   highest tree level; `levels + 1` databases are opened (0 to levels)
  */
@deprecated("Use tree storage from scrypto library", "1.2.2")
class TreeStorage(fileName: String, levels: Int) extends Storage[(Int, Long), Array[Byte]] with ScorexLogging {

  import TreeStorage._

  // One memory-mapped, checksummed MapDB database per level.
  private val dbs =
    (0 to levels) map { n: Int =>
      DBMaker.fileDB(new File(fileName + n + ".mapDB"))
        .fileMmapEnableIfSupported()
        .closeOnJvmShutdown()
        .checksumEnable()
        .make()
    }

  // level -> (position -> digest), each map backed by that level's database.
  private val maps: Map[Int, HTreeMap[Long, Digest]] = {
    val t = (0 to levels) map { n: Int =>
      val m: HTreeMap[Long, Digest] = dbs(n).hashMapCreate("map_" + n)
        .keySerializer(Serializer.LONG)
        .valueSerializer(Serializer.BYTE_ARRAY)
        .makeOrGet()
      n -> m
    }
    t.toMap
  }

  /** Stores `value` under `key` = (level, position). Failures are logged and
    * swallowed (the method returns Unit either way).
    */
  override def set(key: Key, value: Digest): Unit = Try {
    // key._1 is already an Int (Level = Int), no cast needed.
    maps(key._1).put(key._2, value)
  }.recoverWith { case t: Throwable =>
    log.warn("Failed to set key:" + key, t)
    Failure(t)
  }

  /** Flushes pending writes in every level's database. */
  override def commit(): Unit = dbs.foreach(_.commit())

  /** Commits, then closes all underlying databases. */
  override def close(): Unit = {
    commit()
    dbs.foreach(_.close())
  }

  /** Reads the digest at `key` = (level, position); None when absent or on failure. */
  override def get(key: Key): Option[Digest] = {
    Try {
      maps(key._1).get(key._2)
    } match {
      case Success(v) =>
        // MapDB returns null for a missing key; Option(...) maps that to None.
        Option(v)
      case Failure(_) =>
        if (key._1 == 0) {
          log.debug("Unable to load key for level 0: " + key)
        }
        None
    }
  }
}
/** Type aliases describing the tree-storage key space. */
object TreeStorage {
// Tree level index (0 to `levels`).
type Level = Int
// Node position within a level.
type Position = Long
// Composite key: which level, and where in it.
type Key = (Level, Position)
// Stored payload: a cryptographic hash digest.
type Value = Digest
}
| ScorexProject/Scorex-Lagonaki | scorex-basics/src/main/scala/scorex/crypto/ads/merkle/TreeStorage.scala | Scala | cc0-1.0 | 1,770 |
/*
Copyright 2016-17, Hasso-Plattner-Institut fuer Softwaresystemtechnik GmbH
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package de.hpi.ingestion.textmining.nel
import java.util.UUID
import de.hpi.fgis.utils.text.annotation.NamedEntity
import de.hpi.ingestion.datalake.models.Subject
import de.hpi.ingestion.textmining.ClassifierTraining
import de.hpi.ingestion.textmining.TestData.{extendedClassifierFeatureEntries, rfModel}
import de.hpi.ingestion.textmining.models.{TrieAlias, TrieAliasArticle, _}
import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.ml.feature.{IndexToString, StringIndexer}
import org.apache.spark.sql.SparkSession
// scalastyle:off line.size.limit
// scalastyle:off method.length
/** Hand-built fixtures for the named-entity-linking (NEL) test suites:
  * articles, trie aliases, links, TF-IDF contexts, feature entries, a cached
  * random-forest pipeline and fuzzy-matching subjects/matches.
  */
object TestData {
// Wikipedia articles reduced to id/title/text, without alias or link annotations.
def reducedWikipediaArticles(): Set[TrieAliasArticle] = {
Set(
TrieAliasArticle(
id = "Audi Test mit Link",
title = Option("Audi Test mit Link"),
text = Option("Hier ist Audi verlinkt.")
),
TrieAliasArticle(
id = "Audi Test ohne Link",
title = Option("Audi Test ohne Link"),
text = Option("Hier ist Audi nicht verlinkt.")
),
TrieAliasArticle(
id = "Streitberg (Brachttal)",
title = Option("Streitberg (Brachttal)"),
text = Option("""Streitberg ist einer von sechs Ortsteilen der Gemeinde Brachttal, Main-Kinzig-Kreis in Hessen. Es ist zugleich der kleinste Ortsteil mit einer Einwohnerzahl von ca. 270. Die erste nachweisliche Erwähnung stammt aus dem Jahre 1377. Im Jahre 1500 ist von Stridberg die Rede, ein Jahr später taucht die Bezeichnung Streidtburgk auf und weitere Namensvarianten sind Stripurgk (1528) und Steytberg (1554). Danach hat sich der Ortsname Streitberg eingebürgert. Vom Mittelalter bis ins 19. Jahrhundert hatte der Ort Waldrechte (Holz- und Huterechte) im Büdinger Wald.""")
),
TrieAliasArticle(
id = "Testartikel",
title = Option("Testartikel"),
text = Option("Links: Audi, Brachttal, historisches Jahr.\\nKeine Links: Hessen, Main-Kinzig-Kreis, Büdinger Wald, Backfisch und nochmal Hessen.")
)
)
}
// The same articles with candidate links found for their aliases (several candidates per alias possible).
def articlesWithFoundLinks(): Set[TrieAliasArticle] = {
Set(
TrieAliasArticle(
id = "Audi Test mit Link",
title = Option("Audi Test mit Link"),
text = Option("Hier ist Audi verlinkt."),
List(),
List(
Link("Audi", "Audi", Option(9), article = Option("Audi Test mit Link")),
Link("Audi", "BMW", Option(9), article = Option("Audi Test mit Link")),
Link("Audi", "Ferrari", Option(9), article = Option("Audi Test mit Link"))
)),
TrieAliasArticle(
id = "Audi Test ohne Link",
title = Option("Audi Test ohne Link"),
text = Option("Hier ist Audi nicht verlinkt."),
List(),
List()
),
TrieAliasArticle(
id = "Streitberg (Brachttal)",
title = Option("Streitberg (Brachttal)"),
text = Option("""Streitberg ist einer von sechs Ortsteilen der Gemeinde Brachttal, Main-Kinzig-Kreis in Hessen. Es ist zugleich der kleinste Ortsteil mit einer Einwohnerzahl von ca. 270. Die erste nachweisliche Erwähnung stammt aus dem Jahre 1377. Im Jahre 1500 ist von Stridberg die Rede, ein Jahr später taucht die Bezeichnung Streidtburgk auf und weitere Namensvarianten sind Stripurgk (1528) und Steytberg (1554). Danach hat sich der Ortsname Streitberg eingebürgert. Vom Mittelalter bis ins 19. Jahrhundert hatte der Ort Waldrechte (Holz- und Huterechte) im Büdinger Wald."""),
List(),
List(
Link("Brachttal", "Brachttal", Option(55), article = Option("Streitberg (Brachttal)")),
Link("Main-Kinzig-Kreis", "Main-Kinzig-Kreis", Option(66), article = Option("Streitberg (Brachttal)")),
Link("Hessen", "Hessen", Option(87), article = Option("Streitberg (Brachttal)")),
Link("Hessen", "Hessen (Stamm)", Option(87), article = Option("Streitberg (Brachttal)")),
Link("Hessen", "Kanton Hessen", Option(87), article = Option("Streitberg (Brachttal)")),
Link("1377", "1377", Option(225), article = Option("Streitberg (Brachttal)")),
Link("Büdinger Wald", "Büdinger Wald", Option(546), article = Option("Streitberg (Brachttal)"))
)),
TrieAliasArticle(
id = "Testartikel",
title = Option("Testartikel"),
text = Option("Links: Audi, Brachttal, historisches Jahr.\\nKeine Links: Hessen, Main-Kinzig-Kreis, Büdinger Wald, Backfisch und nochmal Hessen."),
List(),
List(
Link("Audi", "Audi", Option(7), article = Option("Testartikel")),
Link("Brachttal", "Brachttal", Option(13), article = Option("Testartikel")),
Link("historisches Jahr", "1377", Option(24), article = Option("Testartikel")),
Link("historisches Jahr", "1996", Option(24), article = Option("Testartikel")),
Link("historisches Jahr", "2017", Option(24), article = Option("Testartikel"))
))
)
}
// Synthetic company-name articles used as input for the trie alias search.
def aliasSearchArticles(): List[TrieAliasArticle] = {
List(
TrieAliasArticle(
id = "1",
title = Option("1"),
text = Option("Dieser Artikel schreibt über Audi.")
),
TrieAliasArticle(
id = "2",
title = Option("2"),
text = Option("Die Audi AG ist ein Tochterunternehmen der Volkswagen AG.")
),
TrieAliasArticle(
id = "3",
title = Option("3"),
text = Option("Die Volkswagen Aktiengesellschaft ist nicht sehr umweltfreundlich.")
),
TrieAliasArticle(
id = "4",
title = Option("4"),
text = Option("Dieser Satz enthält Audi. Dieser Satz enthält Audi AG. Dieser Satz enthält Volkswagen.")
),
TrieAliasArticle(
id = "5",
title = Option("5"),
text = Option("-buch aktuell -Der Audio Verlag DER SPIEGEL Dein")
),
TrieAliasArticle(
id = "6",
title = Option("6"),
text = None
)
)
}
// Wikipedia-derived articles (same content as reducedWikipediaArticles) as alias-search input.
def realAliasSearchArticles(): List[TrieAliasArticle] = {
List(
TrieAliasArticle(
id = "Audi Test mit Link",
title = Option("Audi Test mit Link"),
text = Option("Hier ist Audi verlinkt.")
),
TrieAliasArticle(
id = "Audi Test ohne Link",
title = Option("Audi Test ohne Link"),
text = Option("Hier ist Audi nicht verlinkt.")
),
TrieAliasArticle(
id = "Streitberg (Brachttal)",
title = Option("Streitberg (Brachttal)"),
text = Option("""Streitberg ist einer von sechs Ortsteilen der Gemeinde Brachttal, Main-Kinzig-Kreis in Hessen. Es ist zugleich der kleinste Ortsteil mit einer Einwohnerzahl von ca. 270. Die erste nachweisliche Erwähnung stammt aus dem Jahre 1377. Im Jahre 1500 ist von Stridberg die Rede, ein Jahr später taucht die Bezeichnung Streidtburgk auf und weitere Namensvarianten sind Stripurgk (1528) und Steytberg (1554). Danach hat sich der Ortsname Streitberg eingebürgert. Vom Mittelalter bis ins 19. Jahrhundert hatte der Ort Waldrechte (Holz- und Huterechte) im Büdinger Wald.""")
),
TrieAliasArticle(
id = "Testartikel",
title = Option("Testartikel"),
text = Option("Links: Audi, Brachttal, historisches Jahr.\\nKeine Links: Hessen, Main-Kinzig-Kreis, Büdinger Wald, Backfisch und nochmal Hessen.")
)
)
}
// Expected trie aliases (with character offsets) per synthetic article id.
def foundTrieAliases(): Set[(String, List[TrieAlias])] = {
Set(
("1", List(
TrieAlias("Audi", Option(29))
)),
("2", List(
TrieAlias("Audi AG", Option(4)),
TrieAlias("Volkswagen AG", Option(43))
)),
("3", List(
TrieAlias("Volkswagen", Option(4))
)),
("4", List(
TrieAlias("Audi", Option(20)),
TrieAlias("Audi AG", Option(46)),
TrieAlias("Volkswagen", Option(75))
)),
("5", List(
TrieAlias("Der Audio Verlag", Option(15))
)),
("6", List())
)
}
// Expected trie aliases per Wikipedia-derived article id.
def realFoundTrieAliases(): Set[(String, List[TrieAlias])] = {
Set(
("Audi Test mit Link", List(
TrieAlias("Audi", Option(9))
)),
("Audi Test ohne Link", List(
TrieAlias("Audi", Option(9))
)),
("Streitberg (Brachttal)", List(
TrieAlias("Brachttal", Option(55)),
TrieAlias("Main-Kinzig-Kreis", Option(66)),
TrieAlias("Hessen", Option(87)),
TrieAlias("1377", Option(225)),
TrieAlias("Büdinger Wald", Option(546))
)),
("Testartikel", List(
TrieAlias("Audi", Option(7)),
TrieAlias("Brachttal", Option(13)),
TrieAlias("historisches Jahr", Option(24)),
TrieAlias("Hessen", Option(56)),
TrieAlias("Main-Kinzig-Kreis", Option(64)),
TrieAlias("Büdinger Wald", Option(83)),
TrieAlias("Hessen", Option(120))
))
)
}
// Synthetic articles with their trie aliases already attached.
def foundAliasArticles(): List[TrieAliasArticle] = {
List(
TrieAliasArticle(
id = "1",
title = Option("1"),
text = Option("Dieser Artikel schreibt über Audi."),
triealiases = List(
TrieAlias("Audi", Option(29))
)
),
TrieAliasArticle(
id = "2",
title = Option("2"),
text = Option("Die Audi AG ist ein Tochterunternehmen der Volkswagen AG."),
triealiases = List(
TrieAlias("Audi AG", Option(4)),
TrieAlias("Volkswagen AG", Option(43))
)
),
TrieAliasArticle(
id = "3",
title = Option("3"),
text = Option("Die Volkswagen Aktiengesellschaft ist nicht sehr umweltfreundlich."),
triealiases = List(
TrieAlias("Volkswagen", Option(4))
)
),
TrieAliasArticle(
id = "4",
title = Option("4"),
text = Option("Dieser Satz enthält Audi. Dieser Satz enthält Audi AG. Dieser Satz enthält Volkswagen."),
triealiases = List(
TrieAlias("Audi", Option(20)),
TrieAlias("Audi AG", Option(46)),
TrieAlias("Volkswagen", Option(75))
)
),
TrieAliasArticle(
id = "5",
title = Option("5"),
text = Option("-buch aktuell -Der Audio Verlag DER SPIEGEL Dein"),
triealiases = List(
TrieAlias("Der Audio Verlag", Option(15))
)
),
TrieAliasArticle(
id = "6",
title = Option("6"),
text = None
)
)
}
// Articles with trie aliases whose links are not yet resolved ("incomplete").
def incompleteFoundAliasArticles(): List[TrieAliasArticle] = {
List(
TrieAliasArticle(
id = "Audi Test ohne Link",
title = Option("Audi Test ohne Link"),
text = Option("Hier ist Audi nicht verlinkt."),
List(
TrieAlias("Audi", Option(9))
)
),
TrieAliasArticle(
id = "Testartikel",
title = Option("Testartikel"),
text = Option("Links: Audi, Brachttal, historisches Jahr.\\nKeine Links: Hessen, Main-Kinzig-Kreis, Büdinger Wald, Backfisch und nochmal Hessen."),
List(
TrieAlias("Audi", Option(7)),
TrieAlias("Brachttal", Option(13)),
TrieAlias("historisches Jahr", Option(24)),
TrieAlias("Hessen", Option(56)),
TrieAlias("Main-Kinzig-Kreis", Option(64)),
TrieAlias("Büdinger Wald", Option(83)),
TrieAlias("Hessen", Option(120))
)
),
TrieAliasArticle(
id = "Toan Anh",
title = Option("Toan Anh"),
text = Option("REDIRECT Van Toan Nguyen"),
List(
TrieAlias("Van", Option(9))
)
)
)
}
// Term-frequency contexts (stemmed tokens -> counts) for candidate articles.
def contextArticles(): List[ArticleTfIdf] = {
List(
ArticleTfIdf(
"Audi Test mit Link",
Map("audi" -> 1, "verlink" -> 1)
),
ArticleTfIdf(
"Audi Test ohne Link",
Map("audi" -> 1, "verlink" -> 1)
),
// NOTE(review): "stamm" appears twice in this Map literal; the later entry wins — confirm intended.
ArticleTfIdf(
"Streitberg (Brachttal)",
Map("1554" -> 1, "waldrech" -> 1, "einwohnerzahl" -> 1, "streidtburgk" -> 1, "19" -> 1, "brachttal" -> 1, "ort" -> 1, "jahrhu" -> 1, "nachweislich" -> 1, "-lrb-" -> 3, "huterech" -> 1, "eingeburg" -> 1, "steytberg" -> 1, "erwahnung" -> 1, "ortsteil" -> 2, "bezeichnung" -> 1, "jahr" -> 3, "270" -> 1, "-" -> 1, "stridberg" -> 1, "kleinst" -> 1, "-rrb-" -> 3, "stamm" -> 1, "hess" -> 1, "holx" -> 1, "buding" -> 1, "tauch" -> 1, "stripurgk" -> 1, "1500" -> 1, "gemei" -> 1, "1377" -> 1, "wald" -> 1, "main-kinzig-kreis" -> 1, "1528" -> 1, "namensvaria" -> 1, "ortsnam" -> 1, "streitberg" -> 2, "mittelal" -> 1, "red" -> 1, "stamm" -> 1)
),
ArticleTfIdf(
"Testartikel",
Map("audi" -> 1, "brachttal" -> 1, "historisch" -> 1, "jahr" -> 1, "hess" -> 2, "main-kinzig-kreis" -> 1, "buding" -> 1, "wald" -> 1, "backfisch" -> 1, "nochmal" -> 1)
),
ArticleTfIdf(
"Toan Anh",
Map("nguy" -> 1, "redirec" -> 1, "toa" -> 1, "van" -> 1)
),
ArticleTfIdf("Chevrolet"),
ArticleTfIdf("Chevrolet Van"),
ArticleTfIdf("Flughafen Ferit Melen"),
ArticleTfIdf("Kastenwagen"),
ArticleTfIdf("Reliant"),
ArticleTfIdf("Reliant Van"),
ArticleTfIdf("Toyota"),
ArticleTfIdf("Tušpa"),
ArticleTfIdf("Türkisch Van"),
ArticleTfIdf("Van"),
ArticleTfIdf("Van (Automobil)"),
ArticleTfIdf("Van (Provinz)"),
ArticleTfIdf("Van (Türkei)"),
ArticleTfIdf("Vansee"),
ArticleTfIdf("Vilâyet Van")
)
}
// A real-world alias ("Van") with its page -> link-count distribution and occurrence totals.
def alias(): Alias = {
Alias(
"Van",
Map("Chevrolet" -> 1, "Chevrolet Van" -> 9, "Flughafen Ferit Melen" -> 1, "Kastenwagen" -> 1, "Reliant" -> 7, "Reliant Van" -> 2, "Toyota" -> 1, "Tušpa" -> 2, "Türkisch Van" -> 12, "Van" -> 159, "Van (Automobil)" -> 443, "Van (Provinz)" -> 83, "Van (Türkei)" -> 292, "Vansee" -> 5, "Vilâyet Van" -> 13),
linkoccurrences = Option(1040),
totaloccurrences = Option(20508)
)
}
// Unresolved links (page = null) paired with their surrounding token bag.
def aliasContexts(): List[(Link, Bag[String, Int])] = {
List(
(Link("Audi", null, Option(29), Map(), Option("1")), Bag("artikel" -> 1, "schreib" -> 1, "." -> 1)),
(Link("Audi AG", null, Option(4), Map(), Option("2")), Bag("tochterunternehm" -> 1, "volkswag" -> 1, "ag" -> 1, "." -> 1)),
(Link("Volkswagen AG", null, Option(43), Map(), Option("2")), Bag("audi" -> 1, "tochterunternehm" -> 1, "ag" -> 1, "." -> 1)),
(Link("Volkswagen", null, Option(4), Map(), Option("3")), Bag("aktiengesellschaf" -> 1, "umweltfreundlich" -> 1, "." -> 1)),
(Link("Audi", null, Option(20), Map(), Option("4")), Bag("audi" -> 1, "ag" -> 1, "satx" -> 3, "enthal" -> 3, "volkswag" -> 1, "." -> 3)),
(Link("Audi AG", null, Option(46), Map(), Option("4")), Bag("audi" -> 1, "satx" -> 3, "enthal" -> 3, "volkswag" -> 1, "." -> 3)),
(Link("Volkswagen", null, Option(75), Map(), Option("4")), Bag("audi" -> 2, "ag" -> 1, "satx" -> 3, "enthal" -> 3, "." -> 3)),
(Link("Der Audio Verlag", null, Option(15), Map(), Option("5")), Bag("der" -> 1, "buch" -> 1, "aktuell" -> 1, "-" -> 2, "spiegel" -> 1))
)
}
// alias -> candidate pages with (link score, cosine score) tuples.
def aliasMap(): Map[String, List[(String, Double, Double)]] = {
Map(
"Audi" -> List(("Audi", 1.0, 1.0)),
"Audi AG" -> List(("Audi", 1.0, 1.0)),
"Volkswagen" -> List(("Volkswagen", 1.0, 1.0)),
"Volkswagen AG" -> List(("Volkswagen", 1.0, 1.0)),
"Der Audio Verlag" -> List(("Der Audio Verlag", 1.0, 1.0))
)
}
// Minimal Alias records (each alias resolves to exactly one page once).
def rawAliases(): List[Alias] = {
List(
Alias("Audi", Map("Audi" -> 1), Map("Audi" -> 1), Option(1), Option(1)),
Alias("Audi AG", Map("Audi" -> 1), Map("Audi" -> 1), Option(1), Option(1)),
Alias("Volkswagen", Map("Volkswagen" -> 1), Map("Volkswagen" -> 1), Option(1), Option(1)),
Alias("Volkswagen AG", Map("Volkswagen" -> 1), Map("Volkswagen" -> 1), Option(1), Option(1)),
Alias("Der Audio Verlag", Map("Der Audio Verlag" -> 1), Map("Der Audio Verlag" -> 1), Option(1), Option(1))
)
}
// TF-IDF article stubs (names only, empty contexts).
def tfidfArticles(): List[ArticleTfIdf] = {
List(
ArticleTfIdf("Audi"),
ArticleTfIdf("Volkswagen"),
ArticleTfIdf("Der Audio Verlag")
)
}
// Classifier feature entries for each (article, offset, alias, candidate page) tuple.
def featureEntries(): List[FeatureEntry] = {
List(
FeatureEntry("1", 29, "Audi", "Audi", 1.0, MultiFeature(1.0, 1, Double.PositiveInfinity, Double.PositiveInfinity), MultiFeature(0.0, 1, Double.PositiveInfinity, Double.PositiveInfinity), false),
FeatureEntry("2", 4, "Audi AG", "Audi", 1.0, MultiFeature(1.0, 1, Double.PositiveInfinity, Double.PositiveInfinity), MultiFeature(0.0, 1, Double.PositiveInfinity, Double.PositiveInfinity), false),
FeatureEntry("2", 43, "Volkswagen AG", "Volkswagen", 1.0, MultiFeature(1.0, 1, Double.PositiveInfinity, Double.PositiveInfinity), MultiFeature(0.0, 1, Double.PositiveInfinity, Double.PositiveInfinity), false),
FeatureEntry("3", 4, "Volkswagen", "Volkswagen", 1.0, MultiFeature(1.0, 1, Double.PositiveInfinity, Double.PositiveInfinity), MultiFeature(0.0, 1, Double.PositiveInfinity, Double.PositiveInfinity), false),
FeatureEntry("4", 20, "Audi", "Audi", 1.0, MultiFeature(1.0, 1, Double.PositiveInfinity, Double.PositiveInfinity), MultiFeature(0.0, 1, Double.PositiveInfinity, Double.PositiveInfinity), false),
FeatureEntry("4", 46, "Audi AG", "Audi", 1.0, MultiFeature(1.0, 1, Double.PositiveInfinity, Double.PositiveInfinity), MultiFeature(0.0, 1, Double.PositiveInfinity, Double.PositiveInfinity), false),
FeatureEntry("4", 75, "Volkswagen", "Volkswagen", 1.0, MultiFeature(1.0, 1, Double.PositiveInfinity, Double.PositiveInfinity), MultiFeature(0.0, 1, Double.PositiveInfinity, Double.PositiveInfinity), false),
FeatureEntry("5", 15, "Der Audio Verlag", "Der Audio Verlag", 1.0, MultiFeature(1.0, 1, Double.PositiveInfinity, Double.PositiveInfinity), MultiFeature(0.0, 1, Double.PositiveInfinity, Double.PositiveInfinity), false)
)
}
// Expected fully linked entities per synthetic article id.
def linkedEntities(): Set[(String, List[Link])] = {
Set(
("1", List(Link("Audi", "Audi", Option(29)))),
("2", List(
Link("Audi AG", "Audi", Option(4)),
Link("Volkswagen AG", "Volkswagen", Option(43))
)),
("3", List(Link("Volkswagen", "Volkswagen", Option(4)))),
("4", List(
Link("Audi", "Audi", Option(20)),
Link("Audi AG", "Audi", Option(46)),
Link("Volkswagen", "Volkswagen", Option(75))
)),
("5", List(Link("Der Audio Verlag", "Der Audio Verlag", Option(15))))
)
}
// NOTE(review): identical to linkedEntities() above — kept separate, presumably for a different test; confirm.
def linkedEntitiesForAllArticles(): Set[(String, List[Link])] = {
Set(
("1", List(Link("Audi", "Audi", Option(29)))),
("2", List(
Link("Audi AG", "Audi", Option(4)),
Link("Volkswagen AG", "Volkswagen", Option(43))
)),
("3", List(Link("Volkswagen", "Volkswagen", Option(4)))),
("4", List(
Link("Audi", "Audi", Option(20)),
Link("Audi AG", "Audi", Option(46)),
Link("Volkswagen", "Volkswagen", Option(75))
)),
("5", List(Link("Der Audio Verlag", "Der Audio Verlag", Option(15))))
)
}
// Expected linked entities for the incomplete articles (as Sets; ambiguous aliases keep several links).
def linkedEntitiesForIncompleteArticles(): Set[(String, Set[Link])] = {
Set(
("Audi Test ohne Link", Set(
Link("Audi", "Audi Test mit Link", Option(9))
)),
("Testartikel", Set(
Link("Audi", "Audi Test mit Link", Option(7)),
Link("Brachttal", "Audi Test ohne Link", Option(13)),
Link("historisches Jahr", "Testartikel", Option(24)),
Link("Hessen", "Streitberg (Brachttal)", Option(56)),
Link("Main-Kinzig-Kreis", "Streitberg (Brachttal)", Option(64)),
Link("Büdinger Wald", "Testartikel", Option(83)),
Link("Hessen", "Streitberg (Brachttal)", Option(120))
)),
("Toan Anh", Set(
Link("Van", "Van (Automobil)", Option(9)),
Link("Van", "Van (Türkei)", Option(9))
))
)
}
// Trains a tiny random-forest pipeline once and caches it in the shared `rfModel`
// option; returns a thunk yielding the cached model.
// NOTE(review): the cache write is unsynchronized — fine for single-threaded tests.
def randomForestModel(session: SparkSession): () => PipelineModel = {
if(rfModel.isEmpty) {
val featureEntries = session.sparkContext.parallelize(extendedClassifierFeatureEntries(10))
val training = ClassifierTraining.labeledPointDF(featureEntries, session)
val classifier = ClassifierTraining.randomForestDFModel(1, 2, 1)
.setFeaturesCol("features")
val labelIndexer = new StringIndexer()
.setInputCol("label")
.setOutputCol("indexedLabel")
.fit(training)
val labelConverter = new IndexToString()
.setInputCol("prediction")
.setOutputCol("predictedLabel")
.setLabels(labelIndexer.labels)
val pipeline = new Pipeline().setStages(Array(labelIndexer, classifier, labelConverter))
rfModel = Option(pipeline.fit(training))
}
() => rfModel.get
}
// NER-annotated articles (offset/length/tag entities) for fuzzy matching.
def fuzzyMatchingArticles: List[NERAnnotatedArticle] = {
List(
NERAnnotatedArticle(
id = "1",
uuid = UUID.fromString("b2a14feb-ab8c-451f-9332-52c5da55b92f"),
text = Option("This an article about Volkswagen and Audi."),
nerentities = List(
NamedEntity(22, 10, "company"),
NamedEntity(37, 4, "company")
)
),
NERAnnotatedArticle(
id = "2",
uuid = UUID.fromString("a8363f40-ed6f-479c-a9f3-596c354fa8db"),
text = Option("This article is about ASUS and Acer."),
nerentities = List(
NamedEntity(22, 4, "company"),
NamedEntity(31, 4, "company")
)
)
)
}
// The entity surface strings extracted from fuzzyMatchingArticles.
def fuzzyMatchingEntities: List[String] = {
List("Volkswagen", "Audi", "ASUS", "Acer")
}
// Subjects built from the extracted entities (uuid = owning article's uuid).
def fuzzyMatchingEntitySubjects: List[Subject] = {
List(
Subject(
master = UUID.fromString("b2a14feb-ab8c-451f-9332-52c5da55b92f"),
id = UUID.fromString("b2a14feb-ab8c-451f-9332-52c5da55b92f"),
datasource = "test",
name = Option("Volkswagen")
),
Subject(
master = UUID.fromString("b2a14feb-ab8c-451f-9332-52c5da55b92f"),
id = UUID.fromString("b2a14feb-ab8c-451f-9332-52c5da55b92f"),
datasource = "test",
name = Option("Audi")
),
Subject(
master = UUID.fromString("a8363f40-ed6f-479c-a9f3-596c354fa8db"),
id = UUID.fromString("a8363f40-ed6f-479c-a9f3-596c354fa8db"),
datasource = "test",
name = Option("ASUS")
),
Subject(
master = UUID.fromString("a8363f40-ed6f-479c-a9f3-596c354fa8db"),
id = UUID.fromString("a8363f40-ed6f-479c-a9f3-596c354fa8db"),
datasource = "test",
name = Option("Acer")
)
)
}
// Knowledge-base subjects (implisense/wikidata) to match the entities against.
def fuzzyMatchingSubjects: List[Subject] = {
List(
Subject(
master = UUID.fromString("73416060-c0cd-4105-80c8-19c26c3fa640"),
id = UUID.fromString("73416060-c0cd-4105-80c8-19c26c3fa641"),
datasource = "implisense",
name = Option("Volkswagen AG")
),
Subject(
master = UUID.fromString("73416060-c0cd-4105-80c8-19c26c3fa640"),
id = UUID.fromString("73416060-c0cd-4105-80c8-19c26c3fa642"),
datasource = "wikidata",
name = Option("Volkswagen")
),
Subject(
master = UUID.fromString("67c83d81-1091-4bb3-ba6e-ccc646f38150"),
id = UUID.fromString("67c83d81-1091-4bb3-ba6e-ccc646f38151"),
datasource = "implisense",
name = Option("Volkswagen Versicherung AG")
),
Subject(
master = UUID.fromString("0aece747-62cf-401a-9022-f880eda20df0"),
id = UUID.fromString("0aece747-62cf-401a-9022-f880eda20df1"),
datasource = "wikidata",
name = Option("Audi AG")
),
Subject(
master = UUID.fromString("39afe68e-3cb5-4a7b-b9ea-10893c67e0a0"),
id = UUID.fromString("39afe68e-3cb5-4a7b-b9ea-10893c67e0a1"),
datasource = "implisense",
name = Option("ASUS")
)
)
}
// Expected fuzzy matches: (subject id, subject name, entity uuid, entity name, datasource, score).
def fuzzyMatchingMatches: Set[EntitySubjectMatch] = {
Set(
EntitySubjectMatch(
UUID.fromString("73416060-c0cd-4105-80c8-19c26c3fa642"),
Option("Volkswagen"),
UUID.fromString("b2a14feb-ab8c-451f-9332-52c5da55b92f"),
"Volkswagen",
"test",
1.0
),
EntitySubjectMatch(
UUID.fromString("0aece747-62cf-401a-9022-f880eda20df1"),
Option("Audi AG"),
UUID.fromString("b2a14feb-ab8c-451f-9332-52c5da55b92f"),
"Audi",
"test",
0.94
),
EntitySubjectMatch(
UUID.fromString("39afe68e-3cb5-4a7b-b9ea-10893c67e0a1"),
Option("ASUS"),
UUID.fromString("a8363f40-ed6f-479c-a9f3-596c354fa8db"),
"ASUS",
"test",
1.0
)
)
}
}
// scalastyle:on method.length
// scalastyle:on line.size.limit
| bpn1/ingestion | src/test/scala/de/hpi/ingestion/textmining/nel/TestData.scala | Scala | apache-2.0 | 28,566 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.spark
import org.apache.spark.sql.sources.{BaseRelation, CreatableRelationProvider, RelationProvider}
import org.apache.spark.sql.{DataFrame, SQLContext, SaveMode}
/**
 * Spark SQL data source provider for Apache Phoenix.
 *
 * Implements [[RelationProvider]] so `sqlContext.read` / `DataFrame.load()` can create a
 * [[PhoenixRelation]], and [[CreatableRelationProvider]] so `df.write` / `DataFrame.save()`
 * can persist a DataFrame into a Phoenix table. Required options: "table" and "zkUrl".
 */
class DefaultSource extends RelationProvider with CreatableRelationProvider {

  // Override 'RelationProvider.createRelation', this enables DataFrame.load()
  override def createRelation(sqlContext: SQLContext, parameters: Map[String, String]): BaseRelation = {
    verifyParameters(parameters)

    new PhoenixRelation(
      parameters("table"),
      parameters("zkUrl"),
      // Mere presence of the "dateAsTimestamp" key enables the flag; its value is ignored.
      parameters.contains("dateAsTimestamp")
    )(sqlContext)
  }

  // Override 'CreatableRelationProvider.createRelation', this enables DataFrame.save()
  override def createRelation(sqlContext: SQLContext, mode: SaveMode,
                              parameters: Map[String, String], data: DataFrame): BaseRelation = {

    if (!mode.equals(SaveMode.Overwrite)) {
      // Message fixed: the enum constant is spelled 'Overwrite', not 'OverWrite'.
      throw new Exception("SaveMode other than SaveMode.Overwrite is not supported")
    }

    verifyParameters(parameters)

    // Save the DataFrame to Phoenix.
    // NOTE(review): the tenant-id option key is read as "TenantId" (mixed case) while the
    // other option keys are lowerCamel — confirm this matches the documented option name.
    data.saveToPhoenix(parameters("table"), zkUrl = parameters.get("zkUrl"), tenantId = parameters.get("TenantId"))

    // Return a relation of the saved data
    createRelation(sqlContext, parameters)
  }

  /** Fails fast with a RuntimeException if a mandatory option ("table", "zkUrl") is missing. */
  def verifyParameters(parameters: Map[String, String]): Unit = {
    if (parameters.get("table").isEmpty) throw new RuntimeException("No Phoenix 'table' option defined")
    if (parameters.get("zkUrl").isEmpty) throw new RuntimeException("No Phoenix 'zkUrl' option defined")
  }
}
| joinany/phoenix-for-cloudera | phoenix-spark/src/main/scala/org/apache/phoenix/spark/DefaultSource.scala | Scala | apache-2.0 | 2,442 |
package co.spendabit.webapp.forms.v3.controls
/** A control that accepts a value as free-form text (e.g., <input type="text"/>, <textarea/>),
  * though it may validate that the entered text matches a specific format. For example, it may
  * require the value to be a valid URL, or that the value parses to an integer.
  */
abstract class TextEntryControl[T] extends TextBasedInput[T] {

  /** Parse/validate the raw submitted text: `Right` with the typed value on success,
    * `Left` with a human-readable error message on failure. */
  def validate(s: String): Either[String, T]

  // NOTE(review): dead code kept for reference — a multi-value variant that dispatched on
  // how many values were submitted for this field. Consider deleting if it stays unused.
//  def validate(params: Map[String, Seq[String]]): Either[String, T] = {
//
//    params.get(name) match {
//
//      case Some(Seq(v)) =>
//        validate(v)
//
//      case Some(vs) if vs.length > 1 =>
//        // TODO: We should really be returning a 400 response code
//        Left(s"Multiple values for field $label?")
//
//      case _ =>
//        // TODO: We should really be returning a 400 response code
//        Left(s"No value provided for field $label")
//    }
//  }
}
| spendabit/webapp-tools | src/co/spendabit/webapp/forms/v3/controls/TextEntryControl.scala | Scala | unlicense | 924 |
package pimpathon.java.io
import scala.language.implicitConversions
import java.io.{IOException, BufferedOutputStream, InputStream, OutputStream}
import java.util.zip.GZIPOutputStream
import scala.util.Try
import pimpathon.any._
// Default pimp configuration: close both streams after draining, 8 KiB buffers.
object outputStream extends OutputStreamUtils(closeOut = true, closeIn = true, bufferSize = 8192)
/** Configuration carrier for the OutputStream pimps: default close behaviour and buffer size.
  * Importing members of an instance brings the implicit enrichment below into scope. */
case class OutputStreamUtils(closeOut: Boolean, closeIn: Boolean, bufferSize: Int) {
  // Implicitly enrich any OutputStream subtype with the operations defined in OutputStreamPimps.
  implicit def outputStreamPimps[OS <: OutputStream](os: OS): OutputStreamPimps[OS] =
    new OutputStreamPimps[OS](os, this)
}
/** Enrichment methods for `OutputStream`, parameterised by the defaults in `utils`.
  * Relies on combinators from `pimpathon.any` (`tap`, `tapIf`, `withFinally`, `|>`, ...),
  * which are defined elsewhere in the project. */
class OutputStreamPimps[OS <: OutputStream](os: OS, utils: OutputStreamUtils) {
  import utils._

  // Copy everything from `is` into this stream, leaving both streams open.
  def <<(is: InputStream): OS = drain(is, closeOut = false, closeIn = false)

  // Copy everything from `is` into this stream; close behaviour defaults to `utils`.
  // NOTE(review): the argument order passed to is.drain is (out, closeIn, closeOut) here —
  // confirm against InputStream.drain's signature, which is defined elsewhere.
  def drain(is: InputStream, closeOut: Boolean = closeOut, closeIn: Boolean = closeIn): OS =
    os.tap(is.drain(_, closeIn, closeOut))

  // Run `f`, then close this stream (best-effort) in a finally block.
  def closeAfter[A](f: OS ⇒ A): A = os.withFinally(_.attemptClose())(f)
  def closeIf(condition: Boolean): OS = os.tapIf(_ ⇒ condition)(_.close())
  def closeUnless(condition: Boolean): OS = os.tapUnless(_ ⇒ condition)(_.close())
  // Close wrapped in Try so callers can ignore or inspect the failure.
  def attemptClose(): Try[Unit] = Try(os.close())

  def buffered: BufferedOutputStream = new BufferedOutputStream(os, bufferSize)
  def gzip: GZIPOutputStream = new GZIPOutputStream(os, bufferSize)

  // Write exactly `n` bytes from `is`, failing with IOException if fewer were available.
  def writeN(is: InputStream, n: Long): OS = os.tap(_.writeUpToN(is, n) |> (count ⇒ if (count != n)
    throw new IOException(s"Failed to write $n only $count were available")
  ))

  // Write at most `n` bytes from `is`; returns the number actually transferred.
  def writeUpToN(is: InputStream, n: Long): Long = is.readUpToN(os, n)
}
package reactivemongo.api
import reactivemongo.bson.{ BSONArray, BSONDocument, BSONDocumentWriter }
/**
 * MongoDB Read Preferences enable to read from primary or secondaries
 * with a predefined strategy.
 */
sealed trait ReadPreference {
  /** Whether reads may be served by secondary nodes; only the Primary preference overrides this to false. */
  def slaveOk: Boolean = true
  /** Optional predicate over a replica-set member's tag document, restricting eligible nodes. */
  def filterTag: Option[BSONDocument => Boolean]
}
/** Concrete read preferences plus factory methods mirroring the MongoDB read modes. */
object ReadPreference {
  /** Read only from the primary. This is the default choice. */
  object Primary extends ReadPreference {
    override def slaveOk = false
    override def filterTag = None
  }

  /** Read from the primary if it is available, or secondaries if it is not. */
  case class PrimaryPreferred(filterTag: Option[BSONDocument => Boolean]) extends ReadPreference

  /** Read only from any secondary. */
  case class Secondary(filterTag: Option[BSONDocument => Boolean]) extends ReadPreference

  /** Read from any secondary, or from the primary if they are not available. */
  case class SecondaryPreferred(filterTag: Option[BSONDocument => Boolean]) extends ReadPreference

  /**
   * Read from the faster node (ie the node which replies faster than all others), regardless its status
   * (primary or secondary).
   */
  case class Nearest(filterTag: Option[BSONDocument => Boolean]) extends ReadPreference

  /** Structural containment: every element of `doc` must appear (with an equal value) in `underlying`. */
  private implicit class BSONDocumentWrapper(val underlying: BSONDocument) extends AnyVal {
    def contains(doc: BSONDocument): Boolean = {
      val els = underlying.elements
      doc.elements.forall { element =>
        els.find {
          case (name, value) => element._1 == name && ((element._2, value) match {
            // Documents/arrays are compared structurally rather than by reference.
            case (d1: BSONDocument, d2: BSONDocument) => d1.elements == d2.elements
            case (a1: BSONArray, a2: BSONArray) => a1.values == a2.values
            case (v1, v2) => v1 == v2
          })
        }.isDefined
      }
    }
  }

  // Builds the tag predicate shared by every tag-aware factory below.
  // (Replaces four inline copies; the previously unused private `defaultFilterTag` was removed.)
  private def tagFilter[T](tag: T)(implicit writer: BSONDocumentWriter[T]): Option[BSONDocument => Boolean] =
    Some(doc => doc.contains(writer.write(tag)))

  /** Read only from the primary. This is the default choice. */
  def primary: Primary.type = Primary

  /** Read from the primary if it is available, or secondaries if it is not. */
  def primaryPreferred: PrimaryPreferred = new PrimaryPreferred(None)

  /** Read from any node that has the given `tag` in the replica set (preferably the primary). */
  def primaryPreferred[T](tag: T)(implicit writer: BSONDocumentWriter[T]): PrimaryPreferred =
    new PrimaryPreferred(tagFilter(tag))

  /** Read only from any secondary. */
  def secondary: Secondary = new Secondary(None)

  /** Read from a secondary that has the given `tag` in the replica set. */
  def secondary[T](tag: T)(implicit writer: BSONDocumentWriter[T]): Secondary =
    new Secondary(tagFilter(tag))

  /** Read from any secondary, or from the primary if they are not available. */
  def secondaryPreferred: SecondaryPreferred = new SecondaryPreferred(None)

  /** Read from any node that has the given `tag` in the replica set (preferably a secondary). */
  def secondaryPreferred[T](tag: T)(implicit writer: BSONDocumentWriter[T]): SecondaryPreferred =
    new SecondaryPreferred(tagFilter(tag))

  /**
   * Read from the fastest node (ie the node which replies faster than all others), regardless its status
   * (primary or secondary).
   */
  def nearest: Nearest = new Nearest(None)

  /**
   * Read from the fastest node (ie the node which replies faster than all others) that has the given `tag`,
   * regardless its status (primary or secondary).
   */
  def nearest[T](tag: T)(implicit writer: BSONDocumentWriter[T]): Nearest =
    new Nearest(tagFilter(tag))
}
| maowug/ReactiveMongo | driver/src/main/scala/api/ReadPreference.scala | Scala | apache-2.0 | 3,677 |
package dotty.tools.benchmarks
import java.lang.annotation.Annotation
import java.lang.reflect.Method
import org.junit.runner.Request
import org.junit.runner.notification.RunNotifier
import org.scalameter.PerformanceTest.OnlineRegressionReport
import org.scalameter.api._
import org.scalameter.reporting.RegressionReporter.Tester
import scala.collection.mutable.ListBuffer
/**
 * Wraps a JUnit test class as a ScalaMeter performance test: every method carrying
 * `filterAnnot` (by default `@org.junit.Test`) becomes one measured curve.
 *
 * @param targetClass the JUnit class whose test methods are benchmarked
 * @param filterAnnot annotation marking the methods to include (null selects all methods)
 */
abstract class TestsToBenchmarkConverter
(targetClass: Class[_],
 filterAnnot: Class[_ <: java.lang.annotation.Annotation] = classOf[org.junit.Test].asInstanceOf[Class[_ <: java.lang.annotation.Annotation]])
  extends OnlineRegressionReport {

  // accept all the results, do not fail
  override def tester: Tester = new Tester.Accepter

  override def executor: Executor = LocalExecutor(warmer, aggregator, measurer)

  // Sorted to keep the benchmark ordering stable across runs.
  val testNames = getMethodsAnnotatedWith(targetClass, filterAnnot).map(_.getName).sorted

  // One single-element generator per test; the JUnit Runner is rebuilt by the generator.
  // (An unused local `runner` that eagerly built an extra Runner per test was removed.)
  val tests = testNames.map { name =>
    (name, Gen.single("test")(name).map(Request.method(targetClass, _).getRunner))
  }.toMap

  performance of targetClass.getSimpleName config (Context(reports.resultDir -> "./tmp")) in {
    for (test <- testNames)
      measure.method(test) in {
        using(tests(test)) curve test in {
          r =>
            // Run the JUnit test; results go to a notifier we deliberately discard.
            val dummy = new RunNotifier()
            r.run(dummy)
        }
      }
  }

  /**
   * Collects all declared methods of `clazz` and its superclasses (up to, but excluding,
   * `AnyRef`) that carry `annotation`; a null annotation selects every method.
   */
  def getMethodsAnnotatedWith(clazz: Class[_], annotation: Class[_ <: java.lang.annotation.Annotation]): List[Method] = {
    val methods = ListBuffer[Method]()
    var klass: Class[_] = clazz
    while (klass ne classOf[AnyRef]) {
      for (method <- klass.getDeclaredMethods) {
        if (annotation == null || method.isAnnotationPresent(annotation)) {
          // `+=` replaces the previous `methods.add(...)`, which only compiled via the
          // deprecated JavaConversions implicit (now unnecessary and removed).
          methods += method
        }
      }
      klass = klass.getSuperclass
    }
    methods.toList
  }
}
// Benchmarks every @Test method of dotc.tests through ScalaMeter.
object dotcTests extends TestsToBenchmarkConverter(classOf[dotc.tests])
| yusuke2255/dotty | bench/src/test/scala/TestsAsBenchmarks.scala | Scala | bsd-3-clause | 2,121 |
package example
import spray.json._
/** Coarse availability level shown to customers instead of the raw stock count. */
sealed trait StockLevel

object StockLevel {
  // Collapse a raw stock count: more than 3 in stock is plenty, 1..3 is low, otherwise sold out.
  def apply(level: Int) =
    level match {
      case n if n > 3 => InStock
      case n if n > 0 => LowStock
      case _          => SoldOut
    }
}

case object InStock extends StockLevel
case object LowStock extends StockLevel
case object SoldOut extends StockLevel
/** Public-facing view of an item: the raw stock count is collapsed into a [[StockLevel]]. */
case class PublicItem(id: Int, stockLevel: StockLevel, title: String, desc: String)

object PublicItem {
  // Converts the internal Item (project type exposing id/stock/title/desc) into its public view.
  def apply(i: Item): PublicItem = PublicItem(i.id, StockLevel(i.stock), i.title, i.desc)
}
/** Abbreviated public view (no description), used for list endpoints. */
case class PublicItemSummary(id: Int, stockLevel: StockLevel, title: String)

object PublicItemSummary {
  // Converts the internal ItemSummary (project type exposing id/stock/title) into its public view.
  def apply(i: ItemSummary): PublicItemSummary = PublicItemSummary(i.id, StockLevel(i.stock), i.title)
}
/** spray-json (de)serialization for the public item model. */
trait ServiceJsonProtocol extends DefaultJsonProtocol {

  /** StockLevel is serialized as its case-object name ("InStock" / "LowStock" / "SoldOut"). */
  implicit object StockLevelFmt extends JsonFormat[StockLevel] {
    def write(obj: StockLevel) = JsString(obj.toString)

    def read(json: JsValue): StockLevel = json match {
      case JsString("InStock") => InStock
      case JsString("LowStock") => LowStock
      case JsString("SoldOut") => SoldOut
      // Use spray-json's standard error channel (throws DeserializationException) instead of
      // a bare Exception, so callers can distinguish malformed input from programmer error.
      case _ => deserializationError("Unsupported StockLevel")
    }
  }

  implicit val publicItemFmt = jsonFormat4(PublicItem.apply)
  implicit val publicItemSummaryFmt = jsonFormat3(PublicItemSummary.apply)
}
| stig/spray-example | src/main/scala/example/ServiceJsonProtocol.scala | Scala | mit | 1,286 |
/*
* Copyright 2012 Miklos Juhasz (mjuhasz)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mjuhasz.moviecatalog.web
import scala.util.control.NonFatal
import xml.{Xhtml, NodeSeq}

import com.mjuhasz.moviecatalog.movies.MovieService
import com.mjuhasz.moviecatalog.movies.MovieInformation
import net.liftweb.json.Serialization.write
import net.liftweb.json.{NoTypeHints, Serialization}
import org.scalatra.{UrlGeneratorSupport, Route, UrlSupport, ScalatraFilter}
import org.springframework.web.context.support.WebApplicationContextUtils
// Marker wrapper: values of this type are rendered as application/json by the filter below.
case class JsonResponse[A](content: A)
/**
 * Scalatra filter serving the movie-catalog web UI and a small JSON API.
 *
 * Routes:
 *  - GET /                        redirect to the movie index
 *  - GET /movies.json             search results as JSON (query param "q")
 *  - GET /movies/:title.json      details for a single movie as JSON
 *  - GET /movies.html             search page / result list
 *  - GET /movies/:title.html      per-movie overview ("temperature") page
 *  - GET /movies/spec/:title.html per-movie technical-spec page
 */
class MovieCatalogFilter extends ScalatraFilter with UrlSupport with UrlGeneratorSupport with MovieCatalogRoutes with Menu {

  // lift-json configuration used when serializing JsonResponse payloads.
  implicit val formats = Serialization.formats(NoTypeHints)

  override def contextPath = servletContext.getContextPath()

  // Pick the Content-Type from the rendered value's runtime type.
  override def contentTypeInferrer = {
    case _: JsonResponse[_] => "application/json"
    case _: NodeSeq => "text/html"
    case any => super.contentTypeInferrer(any)
  }

  // Serialize JsonResponse with lift-json and XML trees as XHTML strings.
  override def renderPipeline = {
    case json: JsonResponse[_] => write(json)
    case xhtml: NodeSeq => Xhtml.toXhtml(xhtml)
    case any => super.renderPipeline(any)
  }

  /** Looks up the Spring-managed MovieService from the web application context. */
  protected def movieService: MovieService = {
    WebApplicationContextUtils.getWebApplicationContext(servletContext).getBean("movieService", classOf[MovieService])
  }

  get("/") {
    redirect(url(movieIndexRoute))
  }

  get("/movies.json") {
    val query = params.get("q").getOrElse("")
    JsonResponse(movieService.search(query).results)
  }

  get("/movies/:title.json") {
    JsonResponse(movieService.find(params("title")))
  }

  /** Search page: 404 on no match, direct redirect on a single exact match, list otherwise. */
  val movieIndexRoute: Route = get("/movies.html") {
    val query = params.get("q").getOrElse("")
    val movies = movieService.search(query)
    if (movies.results.isEmpty) {
      status(404)
      withSidebarLayout("Movie Catalog", None, Some(query), None, <div class="content"><h1>No movie matches '{ query }'</h1></div>)
    } else if (movies.results.size == 1 && movies.results.head.title == query) {
      // Exact single hit: jump straight to the movie page.
      redirect(url(movieTemperatureRoute, "title" -> movies.results.head.title))
    } else {
      withSidebarLayout("Movie Catalog", None, None, None,
        <div class="content">
          <h2>Use the search bar to find information for a specific movie</h2>
          <h2>Matching movies ({ movies.results.size } out of { movies.totalSize }):</h2>
          <ul>{
            for (movie <- movies.results) yield {
              <li><a href={ url(movieTemperatureRoute, "title" -> movie.title) }>{ movie.title }</a> - { movie.title_hu }</li>
            }
          }</ul>
        </div>)
    }
  }

  /** Per-movie overview page (audio/subtitle/runtime/storage/source). */
  val movieTemperatureRoute: Route = get("/movies/:title.html") {
    renderWithMovieInformation(params("title"), movieTemperatureRoute) { info =>
      // Fixed: the storage suffix 'if' previously had no 'else', so when
      // storage_a == storage_b the embedded Unit value rendered as the text "()".
      <div class="content">
        <h1>{ info.title }</h1>
        <h3>{ info.title_hu }</h3>
        <table>
          <tr><td>Audio:</td><td><strong>{ info.audio }</strong></td></tr>
          <tr><td>Subtitle</td><td><strong>{ info.subtitle }</strong></td></tr>
          <tr><td>Runtime</td><td><strong>{ info.runtime }</strong></td></tr>
          <tr><td>Storage</td><td><strong>{ info.storage_a }{ if (info.storage_b != info.storage_a) " (" + info.storage_b + ")" else "" }</strong></td></tr>
          <tr><td>Source</td><td><strong>{ info.source }</strong></td></tr>
        </table>
      </div>
    }
  }

  /** Per-movie technical-spec page (size/resolution/aspect ratio/framerate). */
  val movieTechnicalRoute: Route = get("/movies/spec/:title.html") {
    renderWithMovieInformation(params("title"), movieTechnicalRoute) { info =>
      <div class="content">
        <h1>{ info.title }</h1>
        <h3>{ info.title_hu }</h3>
        <table>
          <tr><td>Size:</td><td><strong>{ info.size }</strong></td></tr>
          <tr><td>Resolution:</td><td><strong>{ info.resolution }</strong></td></tr>
          <tr><td>Aspect Ratio:</td><td><strong>{ info.aspect_ratio }</strong></td></tr>
          <tr><td>Framerate:</td><td><strong>{ info.framerate }</strong></td></tr>
        </table>
      </div>
    }
  }

  /**
   * Looks up the movie and renders `body` with it; 404 page when not found,
   * 500 page (with the exception shown) on lookup failure.
   */
  private def renderWithMovieInformation(title: String, route: Route)(body: MovieInformation => NodeSeq): NodeSeq = {
    try {
      movieService.find(title) match {
        case None =>
          status(404)
          withSidebarLayout("Movie Catalog", None, Some(title), None, <div class="content"><h1>Movie '{ title }' not found</h1></div>)
        case Some(info) =>
          withSidebarLayout("Movie Catalog", Some(info.title), None, Some(route), body(info))
      }
    } catch {
      // Narrowed from a catch-all: fatal errors (OutOfMemoryError etc.) now propagate.
      case NonFatal(e) =>
        e.printStackTrace()
        status(500)
        withSidebarLayout("Movie Catalog", None, Some(title), None,
          <div class="content">
            <h1>Error accessing movie '{ title }'</h1>
            <pre>{ e }</pre>
          </div>)
    }
  }

  /** Page shell with search bar; renders the sidebar menu only when a movie title is given. */
  private def withSidebarLayout(pageTitle: String, title: Option[String], query: Option[String], selected: Option[Route], body: NodeSeq) = {
    withBasicLayout(pageTitle,
      <form id="movieSearch" action={ url(movieIndexRoute) } method="GET">
        <input type="text" name="q" value={ query.getOrElse("") } placeholder="Search for a movie..." autofocus=""/>
      </form>,
      <div class="container-fluid">
        { title.map(renderMenu(_, selected)).getOrElse(NodeSeq.Empty) }
        { body }
      </div>)
  }

  /** Sidebar menu for the given movie; the entry matching `selected` is highlighted. */
  private def renderMenu(title: String, selected: Option[Route]) = {
    <div class="sidebar">
      <div class="well">{
        for (entry <- menuEntries) yield {
          <h4 class={ if (entry.route == selected) "selected" else "" }>{ entry.toXhtml(title) }</h4> ++ (if (entry.children.isEmpty) NodeSeq.Empty else {
            <ul class="unstyled">{
              for (child <- entry.children) yield {
                <li class={ if (child.route == selected) "selected" else "" }>{ child.toXhtml(title) }</li>
              }
            }</ul>
          })
        }
      }</div>
    </div>
  }

  /** Full HTML document skeleton: head (CSS/JS includes), top bar, then `body`. */
  private def withBasicLayout(pageTitle: String, topbar: NodeSeq, body: NodeSeq) = {
    <html lang="en">
      <head>
        <meta charset="utf-8"/>
        <title>{ pageTitle }</title>
        <link rel="stylesheet" href={ url("/css/bootstrap.min.css") } />
        <link rel="stylesheet" href={ url("/css/moviecatalog.css") } />
        <link rel="stylesheet" href={ url("/css/ui-darkness/jquery-ui-1.8.17.custom.css") } />
        <script type="text/javascript" src={ url("/js/jquery-1.7.1.min.js") }></script>
        <script type="text/javascript" src={ url("/js/jquery-ui-1.8.17.custom.min.js") }></script>
        <script type="text/javascript" src={ url("/js/moviecatalog.js") }></script>
      </head>
      <body>
        <div class="topbar">
          <div class="topbar-inner">
            <div class="container-fluid">
              <a class="brand" href={ url(movieIndexRoute) }>Movie Catalog</a>
              { topbar }
            </div>
          </div>
        </div>
        { body }
      </body>
    </html>
  }
}
| mjuhasz/Movie-Catalog | src/main/scala/com/mjuhasz/moviecatalog/web/MovieCatalogFilter.scala | Scala | apache-2.0 | 7,446 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.time.{Duration, Period}
import scala.util.Random
import org.scalatest.matchers.must.Matchers.the
import org.apache.spark.SparkException
import org.apache.spark.sql.execution.WholeStageCodegenExec
import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
import org.apache.spark.sql.execution.aggregate.{HashAggregateExec, ObjectHashAggregateExec, SortAggregateExec}
import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.test.SQLTestData.DecimalData
import org.apache.spark.sql.types._
import org.apache.spark.sql.types.YearMonthIntervalType.{MONTH, YEAR}
// One sensor reading used by the cube tests: a temperature sample for a room at date/hour/minute.
case class Fact(date: Int, hour: Int, minute: Int, room_name: String, temp: Double)
class DataFrameAggregateSuite extends QueryTest
with SharedSparkSession
with AdaptiveSparkPlanHelper {
import testImplicits._
val absTol = 1e-8
test("groupBy") {
checkAnswer(
testData2.groupBy("a").agg(sum($"b")),
Seq(Row(1, 3), Row(2, 3), Row(3, 3))
)
checkAnswer(
testData2.groupBy("a").agg(sum($"b").as("totB")).agg(sum($"totB")),
Row(9)
)
checkAnswer(
testData2.groupBy("a").agg(count("*")),
Row(1, 2) :: Row(2, 2) :: Row(3, 2) :: Nil
)
checkAnswer(
testData2.groupBy("a").agg(Map("*" -> "count")),
Row(1, 2) :: Row(2, 2) :: Row(3, 2) :: Nil
)
checkAnswer(
testData2.groupBy("a").agg(Map("b" -> "sum")),
Row(1, 3) :: Row(2, 3) :: Row(3, 3) :: Nil
)
val df1 = Seq(("a", 1, 0, "b"), ("b", 2, 4, "c"), ("a", 2, 3, "d"))
.toDF("key", "value1", "value2", "rest")
checkAnswer(
df1.groupBy("key").min(),
df1.groupBy("key").min("value1", "value2").collect()
)
checkAnswer(
df1.groupBy("key").min("value2"),
Seq(Row("a", 0), Row("b", 4))
)
checkAnswer(
decimalData.groupBy("a").agg(sum("b")),
Seq(Row(new java.math.BigDecimal(1), new java.math.BigDecimal(3)),
Row(new java.math.BigDecimal(2), new java.math.BigDecimal(3)),
Row(new java.math.BigDecimal(3), new java.math.BigDecimal(3)))
)
val decimalDataWithNulls = spark.sparkContext.parallelize(
DecimalData(1, 1) ::
DecimalData(1, null) ::
DecimalData(2, 1) ::
DecimalData(2, null) ::
DecimalData(3, 1) ::
DecimalData(3, 2) ::
DecimalData(null, 2) :: Nil).toDF()
checkAnswer(
decimalDataWithNulls.groupBy("a").agg(sum("b")),
Seq(Row(new java.math.BigDecimal(1), new java.math.BigDecimal(1)),
Row(new java.math.BigDecimal(2), new java.math.BigDecimal(1)),
Row(new java.math.BigDecimal(3), new java.math.BigDecimal(3)),
Row(null, new java.math.BigDecimal(2)))
)
}
test("SPARK-17124 agg should be ordering preserving") {
val df = spark.range(2)
val ret = df.groupBy("id").agg("id" -> "sum", "id" -> "count", "id" -> "min")
assert(ret.schema.map(_.name) == Seq("id", "sum(id)", "count(id)", "min(id)"))
checkAnswer(
ret,
Row(0, 0, 1, 0) :: Row(1, 1, 1, 1) :: Nil
)
}
test("SPARK-18952: regexes fail codegen when used as keys due to bad forward-slash escapes") {
val df = Seq(("some[thing]", "random-string")).toDF("key", "val")
checkAnswer(
df.groupBy(regexp_extract($"key", "([a-z]+)\\\\[", 1)).count(),
Row("some", 1) :: Nil
)
}
test("rollup") {
checkAnswer(
courseSales.rollup("course", "year").sum("earnings"),
Row("Java", 2012, 20000.0) ::
Row("Java", 2013, 30000.0) ::
Row("Java", null, 50000.0) ::
Row("dotNET", 2012, 15000.0) ::
Row("dotNET", 2013, 48000.0) ::
Row("dotNET", null, 63000.0) ::
Row(null, null, 113000.0) :: Nil
)
}
test("cube") {
checkAnswer(
courseSales.cube("course", "year").sum("earnings"),
Row("Java", 2012, 20000.0) ::
Row("Java", 2013, 30000.0) ::
Row("Java", null, 50000.0) ::
Row("dotNET", 2012, 15000.0) ::
Row("dotNET", 2013, 48000.0) ::
Row("dotNET", null, 63000.0) ::
Row(null, 2012, 35000.0) ::
Row(null, 2013, 78000.0) ::
Row(null, null, 113000.0) :: Nil
)
val df0 = spark.sparkContext.parallelize(Seq(
Fact(20151123, 18, 35, "room1", 18.6),
Fact(20151123, 18, 35, "room2", 22.4),
Fact(20151123, 18, 36, "room1", 17.4),
Fact(20151123, 18, 36, "room2", 25.6))).toDF()
val cube0 = df0.cube("date", "hour", "minute", "room_name").agg(Map("temp" -> "avg"))
assert(cube0.where("date IS NULL").count > 0)
}
test("grouping and grouping_id") {
checkAnswer(
courseSales.cube("course", "year")
.agg(grouping("course"), grouping("year"), grouping_id("course", "year")),
Row("Java", 2012, 0, 0, 0) ::
Row("Java", 2013, 0, 0, 0) ::
Row("Java", null, 0, 1, 1) ::
Row("dotNET", 2012, 0, 0, 0) ::
Row("dotNET", 2013, 0, 0, 0) ::
Row("dotNET", null, 0, 1, 1) ::
Row(null, 2012, 1, 0, 2) ::
Row(null, 2013, 1, 0, 2) ::
Row(null, null, 1, 1, 3) :: Nil
)
// use column reference in `grouping_id` instead of column name
checkAnswer(
courseSales.cube("course", "year")
.agg(grouping_id(courseSales("course"), courseSales("year"))),
Row("Java", 2012, 0) ::
Row("Java", 2013, 0) ::
Row("Java", null, 1) ::
Row("dotNET", 2012, 0) ::
Row("dotNET", 2013, 0) ::
Row("dotNET", null, 1) ::
Row(null, 2012, 2) ::
Row(null, 2013, 2) ::
Row(null, null, 3) :: Nil
)
intercept[AnalysisException] {
courseSales.groupBy().agg(grouping("course")).explain()
}
intercept[AnalysisException] {
courseSales.groupBy().agg(grouping_id("course")).explain()
}
}
test("grouping/grouping_id inside window function") {
val w = Window.orderBy(sum("earnings"))
checkAnswer(
courseSales.cube("course", "year")
.agg(sum("earnings"),
grouping_id("course", "year"),
rank().over(Window.partitionBy(grouping_id("course", "year")).orderBy(sum("earnings")))),
Row("Java", 2012, 20000.0, 0, 2) ::
Row("Java", 2013, 30000.0, 0, 3) ::
Row("Java", null, 50000.0, 1, 1) ::
Row("dotNET", 2012, 15000.0, 0, 1) ::
Row("dotNET", 2013, 48000.0, 0, 4) ::
Row("dotNET", null, 63000.0, 1, 2) ::
Row(null, 2012, 35000.0, 2, 1) ::
Row(null, 2013, 78000.0, 2, 2) ::
Row(null, null, 113000.0, 3, 1) :: Nil
)
}
test("SPARK-21980: References in grouping functions should be indexed with semanticEquals") {
checkAnswer(
courseSales.cube("course", "year")
.agg(grouping("CouRse"), grouping("year")),
Row("Java", 2012, 0, 0) ::
Row("Java", 2013, 0, 0) ::
Row("Java", null, 0, 1) ::
Row("dotNET", 2012, 0, 0) ::
Row("dotNET", 2013, 0, 0) ::
Row("dotNET", null, 0, 1) ::
Row(null, 2012, 1, 0) ::
Row(null, 2013, 1, 0) ::
Row(null, null, 1, 1) :: Nil
)
}
test("rollup overlapping columns") {
checkAnswer(
testData2.rollup($"a" + $"b" as "foo", $"b" as "bar").agg(sum($"a" - $"b") as "foo"),
Row(2, 1, 0) :: Row(3, 2, -1) :: Row(3, 1, 1) :: Row(4, 2, 0) :: Row(4, 1, 2) :: Row(5, 2, 1)
:: Row(2, null, 0) :: Row(3, null, 0) :: Row(4, null, 2) :: Row(5, null, 1)
:: Row(null, null, 3) :: Nil
)
checkAnswer(
testData2.rollup("a", "b").agg(sum("b")),
Row(1, 1, 1) :: Row(1, 2, 2) :: Row(2, 1, 1) :: Row(2, 2, 2) :: Row(3, 1, 1) :: Row(3, 2, 2)
:: Row(1, null, 3) :: Row(2, null, 3) :: Row(3, null, 3)
:: Row(null, null, 9) :: Nil
)
}
test("cube overlapping columns") {
checkAnswer(
testData2.cube($"a" + $"b", $"b").agg(sum($"a" - $"b")),
Row(2, 1, 0) :: Row(3, 2, -1) :: Row(3, 1, 1) :: Row(4, 2, 0) :: Row(4, 1, 2) :: Row(5, 2, 1)
:: Row(2, null, 0) :: Row(3, null, 0) :: Row(4, null, 2) :: Row(5, null, 1)
:: Row(null, 1, 3) :: Row(null, 2, 0)
:: Row(null, null, 3) :: Nil
)
checkAnswer(
testData2.cube("a", "b").agg(sum("b")),
Row(1, 1, 1) :: Row(1, 2, 2) :: Row(2, 1, 1) :: Row(2, 2, 2) :: Row(3, 1, 1) :: Row(3, 2, 2)
:: Row(1, null, 3) :: Row(2, null, 3) :: Row(3, null, 3)
:: Row(null, 1, 3) :: Row(null, 2, 6)
:: Row(null, null, 9) :: Nil
)
}
test("spark.sql.retainGroupColumns config") {
checkAnswer(
testData2.groupBy("a").agg(sum($"b")),
Seq(Row(1, 3), Row(2, 3), Row(3, 3))
)
spark.conf.set(SQLConf.DATAFRAME_RETAIN_GROUP_COLUMNS.key, false)
checkAnswer(
testData2.groupBy("a").agg(sum($"b")),
Seq(Row(3), Row(3), Row(3))
)
spark.conf.set(SQLConf.DATAFRAME_RETAIN_GROUP_COLUMNS.key, true)
}
test("agg without groups") {
checkAnswer(
testData2.agg(sum($"b")),
Row(9)
)
}
test("agg without groups and functions") {
checkAnswer(
testData2.agg(lit(1)),
Row(1)
)
}
test("average") {
checkAnswer(
testData2.agg(avg($"a"), mean($"a")),
Row(2.0, 2.0))
checkAnswer(
testData2.agg(avg($"a"), sumDistinct($"a")), // non-partial and test deprecated version
Row(2.0, 6.0) :: Nil)
checkAnswer(
decimalData.agg(avg($"a")),
Row(new java.math.BigDecimal(2)))
checkAnswer(
decimalData.agg(avg($"a"), sum_distinct($"a")), // non-partial
Row(new java.math.BigDecimal(2), new java.math.BigDecimal(6)) :: Nil)
checkAnswer(
decimalData.agg(avg($"a" cast DecimalType(10, 2))),
Row(new java.math.BigDecimal(2)))
// non-partial
checkAnswer(
decimalData.agg(
avg($"a" cast DecimalType(10, 2)), sum_distinct($"a" cast DecimalType(10, 2))),
Row(new java.math.BigDecimal(2), new java.math.BigDecimal(6)) :: Nil)
}
test("null average") {
checkAnswer(
testData3.agg(avg($"b")),
Row(2.0))
checkAnswer(
testData3.agg(avg($"b"), count_distinct($"b")),
Row(2.0, 1))
checkAnswer(
testData3.agg(avg($"b"), sum_distinct($"b")), // non-partial
Row(2.0, 2.0))
}
test("zero average") {
val emptyTableData = Seq.empty[(Int, Int)].toDF("a", "b")
checkAnswer(
emptyTableData.agg(avg($"a")),
Row(null))
checkAnswer(
emptyTableData.agg(avg($"a"), sum_distinct($"b")), // non-partial
Row(null, null))
}
test("count") {
assert(testData2.count() === testData2.rdd.map(_ => 1).count())
checkAnswer(
testData2.agg(count($"a"), sum_distinct($"a")), // non-partial
Row(6, 6.0))
}
test("null count") {
checkAnswer(
testData3.groupBy($"a").agg(count($"b")),
Seq(Row(1, 0), Row(2, 1))
)
checkAnswer(
testData3.groupBy($"a").agg(count($"a" + $"b")),
Seq(Row(1, 0), Row(2, 1))
)
checkAnswer(
testData3.agg(
count($"a"), count($"b"), count(lit(1)), count_distinct($"a"), count_distinct($"b")),
Row(2, 1, 2, 2, 1)
)
checkAnswer(
testData3.agg(count($"b"), count_distinct($"b"), sum_distinct($"b")), // non-partial
Row(1, 1, 2)
)
}
test("multiple column distinct count") {
val df1 = Seq(
("a", "b", "c"),
("a", "b", "c"),
("a", "b", "d"),
("x", "y", "z"),
("x", "q", null.asInstanceOf[String]))
.toDF("key1", "key2", "key3")
checkAnswer(
df1.agg(count_distinct($"key1", $"key2")),
Row(3)
)
checkAnswer(
df1.agg(count_distinct($"key1", $"key2", $"key3")),
Row(3)
)
checkAnswer(
df1.groupBy($"key1").agg(count_distinct($"key2", $"key3")),
Seq(Row("a", 2), Row("x", 1))
)
}
test("zero count") {
val emptyTableData = Seq.empty[(Int, Int)].toDF("a", "b")
checkAnswer(
emptyTableData.agg(count($"a"), sum_distinct($"a")), // non-partial
Row(0, null))
}
test("stddev") {
val testData2ADev = math.sqrt(4.0 / 5.0)
checkAnswer(
testData2.agg(stddev($"a"), stddev_pop($"a"), stddev_samp($"a")),
Row(testData2ADev, math.sqrt(4 / 6.0), testData2ADev))
checkAnswer(
testData2.agg(stddev("a"), stddev_pop("a"), stddev_samp("a")),
Row(testData2ADev, math.sqrt(4 / 6.0), testData2ADev))
}
test("zero stddev") {
val emptyTableData = Seq.empty[(Int, Int)].toDF("a", "b")
checkAnswer(
emptyTableData.agg(stddev($"a"), stddev_pop($"a"), stddev_samp($"a")),
Row(null, null, null))
}
test("zero sum") {
val emptyTableData = Seq.empty[(Int, Int)].toDF("a", "b")
checkAnswer(
emptyTableData.agg(sum($"a")),
Row(null))
}
test("zero sum distinct") {
val emptyTableData = Seq.empty[(Int, Int)].toDF("a", "b")
checkAnswer(
emptyTableData.agg(sum_distinct($"a")),
Row(null))
}
test("moments") {
val sparkVariance = testData2.agg(variance($"a"))
checkAggregatesWithTol(sparkVariance, Row(4.0 / 5.0), absTol)
val sparkVariancePop = testData2.agg(var_pop($"a"))
checkAggregatesWithTol(sparkVariancePop, Row(4.0 / 6.0), absTol)
val sparkVarianceSamp = testData2.agg(var_samp($"a"))
checkAggregatesWithTol(sparkVarianceSamp, Row(4.0 / 5.0), absTol)
val sparkSkewness = testData2.agg(skewness($"a"))
checkAggregatesWithTol(sparkSkewness, Row(0.0), absTol)
val sparkKurtosis = testData2.agg(kurtosis($"a"))
checkAggregatesWithTol(sparkKurtosis, Row(-1.5), absTol)
}
test("zero moments") {
withSQLConf(SQLConf.LEGACY_STATISTICAL_AGGREGATE.key -> "true") {
val input = Seq((1, 2)).toDF("a", "b")
checkAnswer(
input.agg(stddev($"a"), stddev_samp($"a"), stddev_pop($"a"), variance($"a"),
var_samp($"a"), var_pop($"a"), skewness($"a"), kurtosis($"a")),
Row(Double.NaN, Double.NaN, 0.0, Double.NaN, Double.NaN, 0.0,
Double.NaN, Double.NaN))
checkAnswer(
input.agg(
expr("stddev(a)"),
expr("stddev_samp(a)"),
expr("stddev_pop(a)"),
expr("variance(a)"),
expr("var_samp(a)"),
expr("var_pop(a)"),
expr("skewness(a)"),
expr("kurtosis(a)")),
Row(Double.NaN, Double.NaN, 0.0, Double.NaN, Double.NaN, 0.0,
Double.NaN, Double.NaN))
}
}
test("SPARK-13860: zero moments LEGACY_STATISTICAL_AGGREGATE off") {
withSQLConf(SQLConf.LEGACY_STATISTICAL_AGGREGATE.key -> "false") {
val input = Seq((1, 2)).toDF("a", "b")
checkAnswer(
input.agg(stddev($"a"), stddev_samp($"a"), stddev_pop($"a"), variance($"a"),
var_samp($"a"), var_pop($"a"), skewness($"a"), kurtosis($"a")),
Row(null, null, 0.0, null, null, 0.0,
null, null))
checkAnswer(
input.agg(
expr("stddev(a)"),
expr("stddev_samp(a)"),
expr("stddev_pop(a)"),
expr("variance(a)"),
expr("var_samp(a)"),
expr("var_pop(a)"),
expr("skewness(a)"),
expr("kurtosis(a)")),
Row(null, null, 0.0, null, null, 0.0,
null, null))
}
}
test("null moments") {
val emptyTableData = Seq.empty[(Int, Int)].toDF("a", "b")
checkAnswer(emptyTableData.agg(
variance($"a"), var_samp($"a"), var_pop($"a"), skewness($"a"), kurtosis($"a")),
Row(null, null, null, null, null))
checkAnswer(
emptyTableData.agg(
expr("variance(a)"),
expr("var_samp(a)"),
expr("var_pop(a)"),
expr("skewness(a)"),
expr("kurtosis(a)")),
Row(null, null, null, null, null))
}
test("collect functions") {
val df = Seq((1, 2), (2, 2), (3, 4)).toDF("a", "b")
checkAnswer(
df.select(collect_list($"a"), collect_list($"b")),
Seq(Row(Seq(1, 2, 3), Seq(2, 2, 4)))
)
checkAnswer(
df.select(collect_set($"a"), collect_set($"b")),
Seq(Row(Seq(1, 2, 3), Seq(2, 4)))
)
checkDataset(
df.select(collect_set($"a").as("aSet")).as[Set[Int]],
Set(1, 2, 3))
checkDataset(
df.select(collect_set($"b").as("bSet")).as[Set[Int]],
Set(2, 4))
checkDataset(
df.select(collect_set($"a"), collect_set($"b")).as[(Set[Int], Set[Int])],
Seq(Set(1, 2, 3) -> Set(2, 4)): _*)
}
test("collect functions structs") {
val df = Seq((1, 2, 2), (2, 2, 2), (3, 4, 1))
.toDF("a", "x", "y")
.select($"a", struct($"x", $"y").as("b"))
checkAnswer(
df.select(collect_list($"a"), sort_array(collect_list($"b"))),
Seq(Row(Seq(1, 2, 3), Seq(Row(2, 2), Row(2, 2), Row(4, 1))))
)
checkAnswer(
df.select(collect_set($"a"), sort_array(collect_set($"b"))),
Seq(Row(Seq(1, 2, 3), Seq(Row(2, 2), Row(4, 1))))
)
}
test("SPARK-31500: collect_set() of BinaryType returns duplicate elements") {
val bytesTest1 = "test1".getBytes
val bytesTest2 = "test2".getBytes
val df = Seq(bytesTest1, bytesTest1, bytesTest2).toDF("a")
checkAnswer(df.select(size(collect_set($"a"))), Row(2) :: Nil)
val a = "aa".getBytes
val b = "bb".getBytes
val c = "cc".getBytes
val d = "dd".getBytes
val df1 = Seq((a, b), (a, b), (c, d))
.toDF("x", "y")
.select(struct($"x", $"y").as("a"))
checkAnswer(df1.select(size(collect_set($"a"))), Row(2) :: Nil)
}
test("collect_set functions cannot have maps") {
val df = Seq((1, 3, 0), (2, 3, 0), (3, 4, 1))
.toDF("a", "x", "y")
.select($"a", map($"x", $"y").as("b"))
val error = intercept[AnalysisException] {
df.select(collect_set($"a"), collect_set($"b"))
}
assert(error.message.contains("collect_set() cannot have map type data"))
}
test("SPARK-17641: collect functions should not collect null values") {
val df = Seq(("1", 2), (null, 2), ("1", 4)).toDF("a", "b")
checkAnswer(
df.select(collect_list($"a"), collect_list($"b")),
Seq(Row(Seq("1", "1"), Seq(2, 2, 4)))
)
checkAnswer(
df.select(collect_set($"a"), collect_set($"b")),
Seq(Row(Seq("1"), Seq(2, 4)))
)
}
test("collect functions should be able to cast to array type with no null values") {
val df = Seq(1, 2).toDF("a")
checkAnswer(df.select(collect_list("a") cast ArrayType(IntegerType, false)),
Seq(Row(Seq(1, 2))))
checkAnswer(df.select(collect_set("a") cast ArrayType(FloatType, false)),
Seq(Row(Seq(1.0, 2.0))))
}
test("SPARK-14664: Decimal sum/avg over window should work.") {
checkAnswer(
spark.sql("select sum(a) over () from values 1.0, 2.0, 3.0 T(a)"),
Row(6.0) :: Row(6.0) :: Row(6.0) :: Nil)
checkAnswer(
spark.sql("select avg(a) over () from values 1.0, 2.0, 3.0 T(a)"),
Row(2.0) :: Row(2.0) :: Row(2.0) :: Nil)
}
test("SQL decimal test (used for catching certain decimal handling bugs in aggregates)") {
checkAnswer(
decimalData.groupBy($"a" cast DecimalType(10, 2)).agg(avg($"b" cast DecimalType(10, 2))),
Seq(Row(new java.math.BigDecimal(1), new java.math.BigDecimal("1.5")),
Row(new java.math.BigDecimal(2), new java.math.BigDecimal("1.5")),
Row(new java.math.BigDecimal(3), new java.math.BigDecimal("1.5"))))
}
test("SPARK-17616: distinct aggregate combined with a non-partial aggregate") {
val df = Seq((1, 3, "a"), (1, 2, "b"), (3, 4, "c"), (3, 4, "c"), (3, 5, "d"))
.toDF("x", "y", "z")
checkAnswer(
df.groupBy($"x").agg(count_distinct($"y"), sort_array(collect_list($"z"))),
Seq(Row(1, 2, Seq("a", "b")), Row(3, 2, Seq("c", "c", "d"))))
}
test("SPARK-18004 limit + aggregates") {
val df = Seq(("a", 1), ("b", 2), ("c", 1), ("d", 5)).toDF("id", "value")
val limit2Df = df.limit(2)
checkAnswer(
limit2Df.groupBy("id").count().select($"id"),
limit2Df.select($"id"))
}
test("SPARK-17237 remove backticks in a pivot result schema") {
val df = Seq((2, 3, 4), (3, 4, 5)).toDF("a", "x", "y")
withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") {
checkAnswer(
df.groupBy("a").pivot("x").agg(count("y"), avg("y")).na.fill(0),
Seq(Row(3, 0, 0.0, 1, 5.0), Row(2, 1, 4.0, 0, 0.0))
)
}
}
  test("aggregate function in GROUP BY") {
    // Grouping by an aggregate expression is illegal; analysis must reject it
    // with a clear message rather than failing later at execution time.
    val e = intercept[AnalysisException] {
      testData.groupBy(sum($"key")).count()
    }
    assert(e.message.contains("aggregate functions are not allowed in GROUP BY"))
  }
  /**
   * Runs an aggregation containing column `c` under every combination of
   * whole-stage codegen and object-hash aggregation, asserting both that
   * collection succeeds (no runtime exception) and that the expected physical
   * aggregate operator was actually planned for each configuration.
   */
  private def assertNoExceptions(c: Column): Unit = {
    for ((wholeStage, useObjectHashAgg) <-
      Seq((true, true), (true, false), (false, true), (false, false))) {
      withSQLConf(
        (SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key, wholeStage.toString),
        (SQLConf.USE_OBJECT_HASH_AGG.key, useObjectHashAgg.toString)) {
        val df = Seq(("1", 1), ("1", 2), ("2", 3), ("2", 4)).toDF("x", "y")
        // test case for HashAggregate: sum(y) is hash-aggregatable, so the
        // plan must contain a HashAggregateExec (wrapped in a whole-stage
        // codegen node when codegen is enabled).
        val hashAggDF = df.groupBy("x").agg(c, sum("y"))
        hashAggDF.collect()
        val hashAggPlan = hashAggDF.queryExecution.executedPlan
        if (wholeStage) {
          assert(find(hashAggPlan) {
            case WholeStageCodegenExec(_: HashAggregateExec) => true
            case _ => false
          }.isDefined)
        } else {
          assert(stripAQEPlan(hashAggPlan).isInstanceOf[HashAggregateExec])
        }
        // test case for ObjectHashAggregate and SortAggregate: collect_list
        // is not hash-aggregatable, so planning falls back to one of these
        // two operators depending on the USE_OBJECT_HASH_AGG flag.
        val objHashAggOrSortAggDF = df.groupBy("x").agg(c, collect_list("y"))
        objHashAggOrSortAggDF.collect()
        val objHashAggOrSortAggPlan =
          stripAQEPlan(objHashAggOrSortAggDF.queryExecution.executedPlan)
        if (useObjectHashAgg) {
          assert(objHashAggOrSortAggPlan.isInstanceOf[ObjectHashAggregateExec])
        } else {
          assert(objHashAggOrSortAggPlan.isInstanceOf[SortAggregateExec])
        }
      }
    }
  }
test("SPARK-19471: AggregationIterator does not initialize the generated result projection" +
" before using it") {
Seq(
monotonically_increasing_id(), spark_partition_id(),
rand(Random.nextLong()), randn(Random.nextLong())
).foreach(assertNoExceptions)
}
test("SPARK-21580 ints in aggregation expressions are taken as group-by ordinal.") {
checkAnswer(
testData2.groupBy(lit(3), lit(4)).agg(lit(6), lit(7), sum("b")),
Seq(Row(3, 4, 6, 7, 9)))
checkAnswer(
testData2.groupBy(lit(3), lit(4)).agg(lit(6), $"b", sum("b")),
Seq(Row(3, 4, 6, 1, 3), Row(3, 4, 6, 2, 6)))
checkAnswer(
spark.sql("SELECT 3, 4, SUM(b) FROM testData2 GROUP BY 1, 2"),
Seq(Row(3, 4, 9)))
checkAnswer(
spark.sql("SELECT 3 AS c, 4 AS d, SUM(b) FROM testData2 GROUP BY c, d"),
Seq(Row(3, 4, 9)))
}
test("SPARK-22223: ObjectHashAggregate should not introduce unnecessary shuffle") {
withSQLConf(SQLConf.USE_OBJECT_HASH_AGG.key -> "true") {
val df = Seq(("1", "2", 1), ("1", "2", 2), ("2", "3", 3), ("2", "3", 4)).toDF("a", "b", "c")
.repartition(col("a"))
val objHashAggDF = df
.withColumn("d", expr("(a, b, c)"))
.groupBy("a", "b").agg(collect_list("d").as("e"))
.withColumn("f", expr("(b, e)"))
.groupBy("a").agg(collect_list("f").as("g"))
val aggPlan = objHashAggDF.queryExecution.executedPlan
val sortAggPlans = collect(aggPlan) {
case sortAgg: SortAggregateExec => sortAgg
}
assert(sortAggPlans.isEmpty)
val objHashAggPlans = collect(aggPlan) {
case objHashAgg: ObjectHashAggregateExec => objHashAgg
}
assert(objHashAggPlans.nonEmpty)
val exchangePlans = collect(aggPlan) {
case shuffle: ShuffleExchangeExec => shuffle
}
assert(exchangePlans.length == 1)
}
}
testWithWholeStageCodegenOnAndOff("SPARK-22951: dropDuplicates on empty dataFrames " +
"should produce correct aggregate") { _ =>
// explicit global aggregations
val emptyAgg = Map.empty[String, String]
checkAnswer(spark.emptyDataFrame.agg(emptyAgg), Seq(Row()))
checkAnswer(spark.emptyDataFrame.groupBy().agg(emptyAgg), Seq(Row()))
checkAnswer(spark.emptyDataFrame.groupBy().agg(count("*")), Seq(Row(0)))
checkAnswer(spark.emptyDataFrame.dropDuplicates().agg(emptyAgg), Seq(Row()))
checkAnswer(spark.emptyDataFrame.dropDuplicates().groupBy().agg(emptyAgg), Seq(Row()))
checkAnswer(spark.emptyDataFrame.dropDuplicates().groupBy().agg(count("*")), Seq(Row(0)))
// global aggregation is converted to grouping aggregation:
assert(spark.emptyDataFrame.dropDuplicates().count() == 0)
}
test("SPARK-21896: Window functions inside aggregate functions") {
def checkWindowError(df: => DataFrame): Unit = {
val thrownException = the [AnalysisException] thrownBy {
df.queryExecution.analyzed
}
assert(thrownException.message.contains("not allowed to use a window function"))
}
checkWindowError(testData2.select(min(avg($"b").over(Window.partitionBy($"a")))))
checkWindowError(testData2.agg(sum($"b"), max(rank().over(Window.orderBy($"a")))))
checkWindowError(testData2.groupBy($"a").agg(sum($"b"), max(rank().over(Window.orderBy($"b")))))
checkWindowError(testData2.groupBy($"a").agg(max(sum(sum($"b")).over(Window.orderBy($"a")))))
checkWindowError(testData2.groupBy($"a").agg(
sum($"b").as("s"), max(count("*").over())).where($"s" === 3))
checkAnswer(testData2.groupBy($"a").agg(
max($"b"), sum($"b").as("s"), count("*").over()).where($"s" === 3),
Row(1, 2, 3, 3) :: Row(2, 2, 3, 3) :: Row(3, 2, 3, 3) :: Nil)
checkWindowError(sql("SELECT MIN(AVG(b) OVER(PARTITION BY a)) FROM testData2"))
checkWindowError(sql("SELECT SUM(b), MAX(RANK() OVER(ORDER BY a)) FROM testData2"))
checkWindowError(sql("SELECT SUM(b), MAX(RANK() OVER(ORDER BY b)) FROM testData2 GROUP BY a"))
checkWindowError(sql("SELECT MAX(SUM(SUM(b)) OVER(ORDER BY a)) FROM testData2 GROUP BY a"))
checkWindowError(
sql("SELECT MAX(RANK() OVER(ORDER BY b)) FROM testData2 GROUP BY a HAVING SUM(b) = 3"))
checkAnswer(
sql("SELECT a, MAX(b), RANK() OVER(ORDER BY a) FROM testData2 GROUP BY a HAVING SUM(b) = 3"),
Row(1, 2, 1) :: Row(2, 2, 2) :: Row(3, 2, 3) :: Nil)
}
test("SPARK-24788: RelationalGroupedDataset.toString with unresolved exprs should not fail") {
// Checks if these raise no exception
assert(testData.groupBy($"key").toString.contains(
"[grouping expressions: [key], value: [key: int, value: string], type: GroupBy]"))
assert(testData.groupBy(col("key")).toString.contains(
"[grouping expressions: [key], value: [key: int, value: string], type: GroupBy]"))
assert(testData.groupBy(current_date()).toString.contains(
"grouping expressions: [current_date(None)], value: [key: int, value: string], " +
"type: GroupBy]"))
}
test("SPARK-26021: NaN and -0.0 in grouping expressions") {
checkAnswer(
Seq(0.0f, -0.0f, 0.0f/0.0f, Float.NaN).toDF("f").groupBy("f").count(),
Row(0.0f, 2) :: Row(Float.NaN, 2) :: Nil)
checkAnswer(
Seq(0.0d, -0.0d, 0.0d/0.0d, Double.NaN).toDF("d").groupBy("d").count(),
Row(0.0d, 2) :: Row(Double.NaN, 2) :: Nil)
// test with complicated type grouping expressions
checkAnswer(
Seq(0.0f, -0.0f, 0.0f/0.0f, Float.NaN).toDF("f")
.groupBy(array("f"), struct("f")).count(),
Row(Seq(0.0f), Row(0.0f), 2) ::
Row(Seq(Float.NaN), Row(Float.NaN), 2) :: Nil)
checkAnswer(
Seq(0.0d, -0.0d, 0.0d/0.0d, Double.NaN).toDF("d")
.groupBy(array("d"), struct("d")).count(),
Row(Seq(0.0d), Row(0.0d), 2) ::
Row(Seq(Double.NaN), Row(Double.NaN), 2) :: Nil)
checkAnswer(
Seq(0.0f, -0.0f, 0.0f/0.0f, Float.NaN).toDF("f")
.groupBy(array(struct("f")), struct(array("f"))).count(),
Row(Seq(Row(0.0f)), Row(Seq(0.0f)), 2) ::
Row(Seq(Row(Float.NaN)), Row(Seq(Float.NaN)), 2) :: Nil)
checkAnswer(
Seq(0.0d, -0.0d, 0.0d/0.0d, Double.NaN).toDF("d")
.groupBy(array(struct("d")), struct(array("d"))).count(),
Row(Seq(Row(0.0d)), Row(Seq(0.0d)), 2) ::
Row(Seq(Row(Double.NaN)), Row(Seq(Double.NaN)), 2) :: Nil)
// test with complicated type grouping columns
val df = Seq(
(Array(-0.0f, 0.0f), Tuple2(-0.0d, Double.NaN), Seq(Tuple2(-0.0d, Double.NaN))),
(Array(0.0f, -0.0f), Tuple2(0.0d, Double.NaN), Seq(Tuple2(0.0d, 0.0/0.0)))
).toDF("arr", "stru", "arrOfStru")
checkAnswer(
df.groupBy("arr", "stru", "arrOfStru").count(),
Row(Seq(0.0f, 0.0f), Row(0.0d, Double.NaN), Seq(Row(0.0d, Double.NaN)), 2)
)
}
test("SPARK-27581: DataFrame count_distinct(\\"*\\") shouldn't fail with AnalysisException") {
val df = sql("select id % 100 from range(100000)")
val distinctCount1 = df.select(expr("count(distinct(*))"))
val distinctCount2 = df.select(countDistinct("*"))
checkAnswer(distinctCount1, distinctCount2)
val countAndDistinct = df.select(count("*"), countDistinct("*"))
checkAnswer(countAndDistinct, Row(100000, 100))
}
test("max_by") {
val yearOfMaxEarnings =
sql("SELECT course, max_by(year, earnings) FROM courseSales GROUP BY course")
checkAnswer(yearOfMaxEarnings, Row("dotNET", 2013) :: Row("Java", 2013) :: Nil)
checkAnswer(
sql("SELECT max_by(x, y) FROM VALUES (('a', 10)), (('b', 50)), (('c', 20)) AS tab(x, y)"),
Row("b") :: Nil
)
checkAnswer(
sql("SELECT max_by(x, y) FROM VALUES (('a', 10)), (('b', null)), (('c', 20)) AS tab(x, y)"),
Row("c") :: Nil
)
checkAnswer(
sql("SELECT max_by(x, y) FROM VALUES (('a', null)), (('b', null)), (('c', 20)) AS tab(x, y)"),
Row("c") :: Nil
)
checkAnswer(
sql("SELECT max_by(x, y) FROM VALUES (('a', 10)), (('b', 50)), (('c', null)) AS tab(x, y)"),
Row("b") :: Nil
)
checkAnswer(
sql("SELECT max_by(x, y) FROM VALUES (('a', null)), (('b', null)) AS tab(x, y)"),
Row(null) :: Nil
)
// structs as ordering value.
checkAnswer(
sql("select max_by(x, y) FROM VALUES (('a', (10, 20))), (('b', (10, 50))), " +
"(('c', (10, 60))) AS tab(x, y)"),
Row("c") :: Nil
)
checkAnswer(
sql("select max_by(x, y) FROM VALUES (('a', (10, 20))), (('b', (10, 50))), " +
"(('c', null)) AS tab(x, y)"),
Row("b") :: Nil
)
withTempView("tempView") {
val dfWithMap = Seq((0, "a"), (1, "b"), (2, "c"))
.toDF("x", "y")
.select($"x", map($"x", $"y").as("y"))
.createOrReplaceTempView("tempView")
val error = intercept[AnalysisException] {
sql("SELECT max_by(x, y) FROM tempView").show
}
assert(
error.message.contains("function max_by does not support ordering on type map<int,string>"))
}
}
test("min_by") {
val yearOfMinEarnings =
sql("SELECT course, min_by(year, earnings) FROM courseSales GROUP BY course")
checkAnswer(yearOfMinEarnings, Row("dotNET", 2012) :: Row("Java", 2012) :: Nil)
checkAnswer(
sql("SELECT min_by(x, y) FROM VALUES (('a', 10)), (('b', 50)), (('c', 20)) AS tab(x, y)"),
Row("a") :: Nil
)
checkAnswer(
sql("SELECT min_by(x, y) FROM VALUES (('a', 10)), (('b', null)), (('c', 20)) AS tab(x, y)"),
Row("a") :: Nil
)
checkAnswer(
sql("SELECT min_by(x, y) FROM VALUES (('a', null)), (('b', null)), (('c', 20)) AS tab(x, y)"),
Row("c") :: Nil
)
checkAnswer(
sql("SELECT min_by(x, y) FROM VALUES (('a', 10)), (('b', 50)), (('c', null)) AS tab(x, y)"),
Row("a") :: Nil
)
checkAnswer(
sql("SELECT min_by(x, y) FROM VALUES (('a', null)), (('b', null)) AS tab(x, y)"),
Row(null) :: Nil
)
// structs as ordering value.
checkAnswer(
sql("select min_by(x, y) FROM VALUES (('a', (10, 20))), (('b', (10, 50))), " +
"(('c', (10, 60))) AS tab(x, y)"),
Row("a") :: Nil
)
checkAnswer(
sql("select min_by(x, y) FROM VALUES (('a', null)), (('b', (10, 50))), " +
"(('c', (10, 60))) AS tab(x, y)"),
Row("b") :: Nil
)
withTempView("tempView") {
val dfWithMap = Seq((0, "a"), (1, "b"), (2, "c"))
.toDF("x", "y")
.select($"x", map($"x", $"y").as("y"))
.createOrReplaceTempView("tempView")
val error = intercept[AnalysisException] {
sql("SELECT min_by(x, y) FROM tempView").show
}
assert(
error.message.contains("function min_by does not support ordering on type map<int,string>"))
}
}
test("count_if") {
withTempView("tempView") {
Seq(("a", None), ("a", Some(1)), ("a", Some(2)), ("a", Some(3)),
("b", None), ("b", Some(4)), ("b", Some(5)), ("b", Some(6)))
.toDF("x", "y")
.createOrReplaceTempView("tempView")
checkAnswer(
sql("SELECT COUNT_IF(NULL), COUNT_IF(y % 2 = 0), COUNT_IF(y % 2 <> 0), " +
"COUNT_IF(y IS NULL) FROM tempView"),
Row(0L, 3L, 3L, 2L))
checkAnswer(
sql("SELECT x, COUNT_IF(NULL), COUNT_IF(y % 2 = 0), COUNT_IF(y % 2 <> 0), " +
"COUNT_IF(y IS NULL) FROM tempView GROUP BY x"),
Row("a", 0L, 1L, 2L, 1L) :: Row("b", 0L, 2L, 1L, 1L) :: Nil)
checkAnswer(
sql("SELECT x FROM tempView GROUP BY x HAVING COUNT_IF(y % 2 = 0) = 1"),
Row("a"))
checkAnswer(
sql("SELECT x FROM tempView GROUP BY x HAVING COUNT_IF(y % 2 = 0) = 2"),
Row("b"))
checkAnswer(
sql("SELECT x FROM tempView GROUP BY x HAVING COUNT_IF(y IS NULL) > 0"),
Row("a") :: Row("b") :: Nil)
checkAnswer(
sql("SELECT x FROM tempView GROUP BY x HAVING COUNT_IF(NULL) > 0"),
Nil)
val error = intercept[AnalysisException] {
sql("SELECT COUNT_IF(x) FROM tempView")
}
assert(error.message.contains("function count_if requires boolean type"))
}
}
Seq(true, false).foreach { value =>
test(s"SPARK-31620: agg with subquery (whole-stage-codegen = $value)") {
withSQLConf(
SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> value.toString) {
withTempView("t1", "t2") {
sql("create temporary view t1 as select * from values (1, 2) as t1(a, b)")
sql("create temporary view t2 as select * from values (3, 4) as t2(c, d)")
// test without grouping keys
checkAnswer(sql("select sum(if(c > (select a from t1), d, 0)) as csum from t2"),
Row(4) :: Nil)
// test with grouping keys
checkAnswer(sql("select c, sum(if(c > (select a from t1), d, 0)) as csum from " +
"t2 group by c"), Row(3, 4) :: Nil)
// test with distinct
checkAnswer(sql("select avg(distinct(d)), sum(distinct(if(c > (select a from t1)," +
" d, 0))) as csum from t2 group by c"), Row(4, 4) :: Nil)
// test subquery with agg
checkAnswer(sql("select sum(distinct(if(c > (select sum(distinct(a)) from t1)," +
" d, 0))) as csum from t2 group by c"), Row(4) :: Nil)
// test SortAggregateExec
var df = sql("select max(if(c > (select a from t1), 'str1', 'str2')) as csum from t2")
assert(find(df.queryExecution.executedPlan)(_.isInstanceOf[SortAggregateExec]).isDefined)
checkAnswer(df, Row("str1") :: Nil)
// test ObjectHashAggregateExec
df = sql("select collect_list(d), sum(if(c > (select a from t1), d, 0)) as csum from t2")
assert(
find(df.queryExecution.executedPlan)(_.isInstanceOf[ObjectHashAggregateExec]).isDefined)
checkAnswer(df, Row(Array(4), 4) :: Nil)
}
}
}
}
test("SPARK-32038: NormalizeFloatingNumbers should work on distinct aggregate") {
withTempView("view") {
val nan1 = java.lang.Float.intBitsToFloat(0x7f800001)
val nan2 = java.lang.Float.intBitsToFloat(0x7fffffff)
Seq(("mithunr", Float.NaN),
("mithunr", nan1),
("mithunr", nan2),
("abellina", 1.0f),
("abellina", 2.0f)).toDF("uid", "score").createOrReplaceTempView("view")
val df = spark.sql("select uid, count(distinct score) from view group by 1 order by 1 asc")
checkAnswer(df, Row("abellina", 2) :: Row("mithunr", 1) :: Nil)
}
}
test("SPARK-32136: NormalizeFloatingNumbers should work on null struct") {
val df = Seq(
A(None),
A(Some(B(None))),
A(Some(B(Some(1.0))))).toDF
val groupBy = df.groupBy("b").agg(count("*"))
checkAnswer(groupBy, Row(null, 1) :: Row(Row(null), 1) :: Row(Row(1.0), 1) :: Nil)
}
test("SPARK-32344: Unevaluable's set to FIRST/LAST ignoreNullsExpr in distinct aggregates") {
val queryTemplate = (agg: String) =>
s"SELECT $agg(DISTINCT v) FROM (SELECT v FROM VALUES 1, 2, 3 t(v) ORDER BY v)"
checkAnswer(sql(queryTemplate("FIRST")), Row(1))
checkAnswer(sql(queryTemplate("LAST")), Row(3))
}
test("SPARK-32906: struct field names should not change after normalizing floats") {
val df = Seq(Tuple1(Tuple2(-0.0d, Double.NaN)), Tuple1(Tuple2(0.0d, Double.NaN))).toDF("k")
val aggs = df.distinct().queryExecution.sparkPlan.collect { case a: HashAggregateExec => a }
assert(aggs.length == 2)
assert(aggs.head.output.map(_.dataType.simpleString).head ===
aggs.last.output.map(_.dataType.simpleString).head)
}
test("SPARK-33726: Aggregation on a table where a column name is reused") {
val query =
"""|with T as (
|select id as a, -id as x from range(3)),
|U as (
|select id as b, cast(id as string) as x from range(3))
|select T.x, U.x, min(a) as ma, min(b) as mb
|from T join U on a=b
|group by U.x, T.x
""".stripMargin
val df = spark.sql(query)
checkAnswer(df, Row(0, "0", 0, 0) :: Row(-1, "1", 1, 1) :: Row(-2, "2", 2, 2) :: Nil)
}
test("SPARK-34713: group by CreateStruct with ExtractValue") {
val structDF = Seq(Tuple1(1 -> 1)).toDF("col")
checkAnswer(structDF.groupBy(struct($"col._1")).count().select("count"), Row(1))
val arrayOfStructDF = Seq(Tuple1(Seq(1 -> 1))).toDF("col")
checkAnswer(arrayOfStructDF.groupBy(struct($"col._1")).count().select("count"), Row(1))
val mapDF = Seq(Tuple1(Map("a" -> "a"))).toDF("col")
checkAnswer(mapDF.groupBy(struct($"col.a")).count().select("count"), Row(1))
val nonStringMapDF = Seq(Tuple1(Map(1 -> 1))).toDF("col")
// Spark implicit casts string literal "a" to int to match the key type.
checkAnswer(nonStringMapDF.groupBy(struct($"col.a")).count().select("count"), Row(1))
val arrayDF = Seq(Tuple1(Seq(1))).toDF("col")
val e = intercept[AnalysisException](arrayDF.groupBy(struct($"col.a")).count())
assert(e.message.contains("requires integral type"))
}
test("SPARK-34716: Support ANSI SQL intervals by the aggregate function `sum`") {
val df = Seq(
(1, Period.ofMonths(10), Period.ofYears(8), Period.ofMonths(10), Duration.ofDays(10)),
(2, Period.ofMonths(1), Period.ofYears(1), Period.ofMonths(1), Duration.ofDays(1)),
(2, null, null, null, null),
(3, Period.ofMonths(-3), Period.ofYears(-12), Period.ofMonths(-3), Duration.ofDays(-6)),
(3, Period.ofMonths(21), Period.ofYears(30), Period.ofMonths(5), Duration.ofDays(-5)))
.toDF("class", "year-month", "year", "month", "day-time")
.select(
$"class",
$"year-month",
$"year" cast YearMonthIntervalType(YEAR) as "year",
$"month" cast YearMonthIntervalType(MONTH) as "month",
$"day-time")
val df2 = Seq((Period.ofMonths(Int.MaxValue), Duration.ofDays(106751991)),
(Period.ofMonths(10), Duration.ofDays(10)))
.toDF("year-month", "day-time")
val sumDF = df.select(sum($"year-month"), sum($"year"), sum($"month"), sum($"day-time"))
checkAnswer(sumDF,
Row(Period.of(2, 5, 0), Period.ofYears(27), Period.of(1, 1, 0), Duration.ofDays(0)))
assert(find(sumDF.queryExecution.executedPlan)(_.isInstanceOf[HashAggregateExec]).isDefined)
assert(sumDF.schema == StructType(Seq(
StructField("sum(year-month)", YearMonthIntervalType()),
StructField("sum(year)", YearMonthIntervalType(YEAR)),
StructField("sum(month)", YearMonthIntervalType(MONTH)),
// TODO(SPARK-35729): Check all day-time interval types in aggregate expressions
StructField("sum(day-time)", DayTimeIntervalType()))))
val sumDF2 =
df.groupBy($"class").agg(sum($"year-month"), sum($"year"), sum($"month"), sum($"day-time"))
checkAnswer(sumDF2,
Row(1, Period.ofMonths(10), Period.ofYears(8), Period.ofMonths(10), Duration.ofDays(10)) ::
Row(2, Period.ofMonths(1), Period.ofYears(1), Period.ofMonths(1), Duration.ofDays(1)) ::
Row(3, Period.of(1, 6, 0), Period.ofYears(18), Period.ofMonths(2), Duration.ofDays(-11)) ::
Nil)
assert(find(sumDF2.queryExecution.executedPlan)(_.isInstanceOf[HashAggregateExec]).isDefined)
assert(sumDF2.schema == StructType(Seq(StructField("class", IntegerType, false),
StructField("sum(year-month)", YearMonthIntervalType()),
StructField("sum(year)", YearMonthIntervalType(YEAR)),
StructField("sum(month)", YearMonthIntervalType(MONTH)),
// TODO(SPARK-35729): Check all day-time interval types in aggregate expressions
StructField("sum(day-time)", DayTimeIntervalType()))))
val error = intercept[SparkException] {
checkAnswer(df2.select(sum($"year-month")), Nil)
}
assert(error.toString contains "java.lang.ArithmeticException: integer overflow")
val error2 = intercept[SparkException] {
checkAnswer(df2.select(sum($"day-time")), Nil)
}
assert(error2.toString contains "java.lang.ArithmeticException: long overflow")
}
test("SPARK-34837: Support ANSI SQL intervals by the aggregate function `avg`") {
val df = Seq(
(1, Period.ofMonths(10), Period.ofYears(8), Period.ofMonths(10), Duration.ofDays(10)),
(2, Period.ofMonths(1), Period.ofYears(1), Period.ofMonths(1), Duration.ofDays(1)),
(2, null, null, null, null),
(3, Period.ofMonths(-3), Period.ofYears(-12), Period.ofMonths(-3), Duration.ofDays(-6)),
(3, Period.ofMonths(21), Period.ofYears(30), Period.ofMonths(5), Duration.ofDays(-5)),
(3, null, Period.ofYears(1), null, null))
.toDF("class", "year-month", "year", "month", "day-time")
.select(
$"class",
$"year-month",
$"year" cast YearMonthIntervalType(YEAR) as "year",
$"month" cast YearMonthIntervalType(MONTH) as "month",
$"day-time")
val df2 = Seq((Period.ofMonths(Int.MaxValue), Duration.ofDays(106751991)),
(Period.ofMonths(10), Duration.ofDays(10)))
.toDF("year-month", "day-time")
val avgDF = df.select(avg($"year-month"), avg($"year"), avg($"month"), avg($"day-time"))
checkAnswer(avgDF,
Row(Period.ofMonths(7), Period.of(5, 7, 0), Period.ofMonths(3), Duration.ofDays(0)))
assert(find(avgDF.queryExecution.executedPlan)(_.isInstanceOf[HashAggregateExec]).isDefined)
assert(avgDF.schema == StructType(Seq(
StructField("avg(year-month)", YearMonthIntervalType()),
StructField("avg(year)", YearMonthIntervalType()),
StructField("avg(month)", YearMonthIntervalType()),
// TODO(SPARK-35729): Check all day-time interval types in aggregate expressions
StructField("avg(day-time)", DayTimeIntervalType()))))
val avgDF2 =
df.groupBy($"class").agg(avg($"year-month"), avg($"year"), avg($"month"), avg($"day-time"))
checkAnswer(avgDF2,
Row(1, Period.ofMonths(10), Period.ofYears(8), Period.ofMonths(10), Duration.ofDays(10)) ::
Row(2, Period.ofMonths(1), Period.ofYears(1), Period.ofMonths(1), Duration.ofDays(1)) ::
Row(3, Period.ofMonths(9), Period.of(6, 4, 0), Period.ofMonths(1),
Duration.ofDays(-5).plusHours(-12)) :: Nil)
assert(find(avgDF2.queryExecution.executedPlan)(_.isInstanceOf[HashAggregateExec]).isDefined)
assert(avgDF2.schema == StructType(Seq(StructField("class", IntegerType, false),
StructField("avg(year-month)", YearMonthIntervalType()),
StructField("avg(year)", YearMonthIntervalType()),
StructField("avg(month)", YearMonthIntervalType()),
// TODO(SPARK-35729): Check all day-time interval types in aggregate expressions
StructField("avg(day-time)", DayTimeIntervalType()))))
val error = intercept[SparkException] {
checkAnswer(df2.select(avg($"year-month")), Nil)
}
assert(error.toString contains "java.lang.ArithmeticException: integer overflow")
val error2 = intercept[SparkException] {
checkAnswer(df2.select(avg($"day-time")), Nil)
}
assert(error2.toString contains "java.lang.ArithmeticException: long overflow")
val df3 = df.filter($"class" > 4)
val avgDF3 = df3.select(avg($"year-month"), avg($"day-time"))
checkAnswer(avgDF3, Row(null, null) :: Nil)
val avgDF4 = df3.groupBy($"class").agg(avg($"year-month"), avg($"day-time"))
checkAnswer(avgDF4, Nil)
}
  test("SPARK-35412: groupBy of year-month/day-time intervals should work") {
    // Grouping on ANSI interval-typed columns must round-trip the interval
    // value through groupBy/count: java.time.Duration for day-time intervals,
    // java.time.Period for year-month intervals.
    val df1 = Seq(Duration.ofDays(1)).toDF("a").groupBy("a").count()
    checkAnswer(df1, Row(Duration.ofDays(1), 1))
    val df2 = Seq(Period.ofYears(1)).toDF("a").groupBy("a").count()
    checkAnswer(df2, Row(Period.ofYears(1), 1))
  }
}
// Inner holder with an optional double, used by the SPARK-32136 test in this
// suite to build rows whose struct field may itself contain a null.
case class B(c: Option[Double])
// Outer wrapper with an optional struct field: grouping on "b" must
// distinguish a null struct (b = None) from a struct with a null field
// (b = Some(B(None))) when normalizing floating-point values.
case class A(b: Option[B])
| maropu/spark | sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala | Scala | apache-2.0 | 46,412 |
/*
* Copyright (c) 2015 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.schemaguru
package webui
// specs2
import org.specs2.Specification
import org.specs2.matcher.JsonMatchers
import scala.concurrent.duration._
// spray
import spray.http._
import spray.testkit.Specs2RouteTest
// Route-level test for the schema-guru web UI: posts a JSON document to the
// /upload endpoint and checks that a JSON Schema is derived from it.
class ProcessSpec extends Specification with Specs2RouteTest with JsonMatchers with SchemaGuruRoutes { def is = s2"""
  Check API endpoint
    process request with one JSON      $processRequestWithJson
  """
  implicit def actorRefFactory = system
  val entryPointUrl = "/upload"
  // 4 * 1000 * 1000 microseconds = 4 seconds; schema derivation can be slow.
  implicit val routeTestTimeout = RouteTestTimeout((4 * 1000 * 1000).microsecond)
  def processRequestWithJson = {
    val json = """{"referrer":"127.0.0.1", "id":42 }"""
    // Wrap the JSON in a multipart form upload, as the browser UI would send it.
    val payload = MultipartFormData(
      Seq(BodyPart(HttpEntity(MediaTypes.`multipart/form-data`, json), "test.json"))
    )
    // The response body must be a JSON Schema describing an object.
    Post(entryPointUrl, payload) ~> rootRoute ~> check {
      body.data.asString must /("schema") /("type" -> "object")
    }
  }
}
package com.skittr.actor
/* *\\
(c) 2007 WorldWide Conferencing, LLC
Distributed under an Apache License
http://www.apache.org/licenses/LICENSE-2.0
\\* */
import _root_.scala.actors._
import _root_.scala.actors.Actor._
import _root_.com.skittr.model._
import _root_.scala.collection.mutable.{HashMap}
import _root_.net.liftweb.mapper._
import _root_.java.util.concurrent.locks.ReentrantReadWriteLock
import _root_.net.liftweb.util._
import _root_.net.liftweb.util.Helpers._
/**
* A singleton the holds the map between user names and Actors that service
* the users. Right now, this assumes that the Actors are all local, but
* it could also be extended to choose other machines (remote actors)
* by using a hash on the username to choose the machine that's hosting
* the user
*/
object UserList {
  private val set = new HashMap[String, UserActor]() // maps username -> servicing Actor
  private val rwl = new ReentrantReadWriteLock // lots of readers, few writers
  private val r = rwl.readLock
  private val w = rwl.writeLock
  /**
   * Load all the users from the database and create actors for each of them.
   * Follower wiring happens in a second pass so every actor already exists
   * when ConfigFollowers is delivered.
   */
  def create {
    def userToUserActor(u: User) = {
      val ua = new UserActor // create a new Actor
      ua.start // start it up
      ua !? Setup(u.id, u.name, u.wholeName) // synchronous (!?) so setup completes before use
      Full(ua) // return it boxed for findMap
    }
    // load all the users
    User.findMap()(userToUserActor).foreach (_ !? ConfigFollowers) // for each of the UserActors, tell them to configure their followers
  }
  // We've just added a new user to the system:
  // spin up and wire an actor for it (asynchronously, via plain `!`).
  def startUser(who: User) {
    if (who.shouldStart_?) {
      val ua = new UserActor
      ua.start
      ua ! Setup(who.id, who.name, who.wholeName)
      ua ! ConfigFollowers
    }
  }
  def shutdown = foreach(_ ! Bye) // shutdown by telling each of the Actors a "Bye" message
  // Run f while holding the write lock; always released in finally.
  private def writeLock[T](f: => T): T = {
    w.lock
    try {f} finally {w.unlock}
  }
  // Run f while holding the read lock; always released in finally.
  private def readLock[T](f: => T): T = {
    r.lock
    try {f} finally {r.unlock}
  }
  // iterate over all the actors in the system
  // and perform a function
  def foreach(f: UserActor => Any) = readLock(set.foreach(i => f(i._2)))
  // add a user to the list by mapping
  // the name to the UserActor
  def add(name: String, who: UserActor) = writeLock(set(name) = who)
  // find a user by name
  def find(name: String): Box[UserActor] = readLock(Box(set.get(name)))
  // remove a user
  def remove(name: String) = writeLock(set -= name)
  // Find a random set of about cnt users: keep each user with probability
  // ~ cnt / set.size. NOTE(review): `set.size` is read before taking the read
  // lock, so the percentage may be computed against a slightly stale size;
  // `shouldShow` presumably draws a random boolean with that probability
  // (from Helpers) — confirm.
  def randomUsers(cnt: Int) = {
    val percent = if (set.size == 0) 1.d else cnt.toDouble / set.size.toDouble
    readLock(set.filter(z => shouldShow(percent)).map(_._1))
  }
}
| andreum/liftweb | sites/skittr/src/main/scala/com/skittr/actor/UserList.scala | Scala | apache-2.0 | 2,867 |
package com.twitter.finagle.http
import com.twitter.conversions.StorageUnitOps._
import com.twitter.conversions.DurationOps._
import com.twitter.finagle
import com.twitter.finagle._
import com.twitter.finagle.builder.ClientBuilder
import com.twitter.finagle.context.{Contexts, Deadline, Retries}
import com.twitter.finagle.filter.ServerAdmissionControl
import com.twitter.finagle.http.service.{HttpResponseClassifier, NullService}
import com.twitter.finagle.http2.param.EncoderIgnoreMaxHeaderListSize
import com.twitter.finagle.liveness.{FailureAccrualFactory, FailureDetector}
import com.twitter.finagle.service._
import com.twitter.finagle.stats.{InMemoryStatsReceiver, NullStatsReceiver}
import com.twitter.finagle.tracing.Trace
import com.twitter.finagle.util.DefaultTimer
import com.twitter.io.{Buf, Pipe, Reader, ReaderDiscardedException, Writer}
import com.twitter.util._
import io.netty.buffer.PooledByteBufAllocator
import java.io.{PrintWriter, StringWriter}
import java.net.{InetAddress, InetSocketAddress}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference}
import org.scalactic.source.Position
import org.scalatest.{BeforeAndAfter, FunSuite, OneInstancePerTest, Tag}
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import scala.language.reflectiveCalls
abstract class AbstractEndToEndTest
extends FunSuite
with BeforeAndAfter
with Eventually
with IntegrationPatience
with OneInstancePerTest {
  // Optional transport capabilities. Concrete subclasses report support via
  // `featureImplemented`; tests gated with `testIfImplemented` are ignored
  // when the corresponding feature is absent.
  sealed trait Feature
  object ClientAbort extends Feature
  object NoBodyMessage extends Feature
  object MaxHeaderSize extends Feature
  object RequiresAsciiFilter extends Feature
  // Copy of Dtab.base saved in `before` so `after` can restore it.
  var saveBase: Dtab = Dtab.empty
  // Per-test in-memory stats sink; recreated in both `before` and `after`.
  var statsRecv: InMemoryStatsReceiver = new InMemoryStatsReceiver()
  // Install a known delegation table and a fresh stats receiver before each test.
  before {
    saveBase = Dtab.base
    Dtab.base = Dtab.read("/foo=>/bar; /baz=>/biz")
    statsRecv = new InMemoryStatsReceiver()
  }
  // Restore the original base Dtab and drop accumulated stats after each test.
  after {
    Dtab.base = saveBase
    statsRecv = new InMemoryStatsReceiver()
  }
  // Convenience aliases used throughout the suite.
  type HttpService = Service[Request, Response]
  type HttpTest = (HttpService => HttpService) => Unit
  // Block (up to 30s) for a future's result; test-only helper.
  def await[T](f: Future[T]): T = Await.result(f, 30.seconds)
  // Write "*" chunks forever; completes only when the writer is discarded or fails.
  def drip(w: Writer[Buf]): Future[Unit] = w.write(buf("*")) before drip(w)
  // UTF-8 encode a string into a Buf.
  def buf(msg: String): Buf = Buf.Utf8(msg)
  // Human-readable transport name (used as a prefix in test names).
  def implName: String
  // When true, the `test` override below registers every test as ignored.
  def skipWholeTest: Boolean = false
  // Client/server factories supplied by concrete transport subclasses.
  def clientImpl(): finagle.Http.Client
  def serverImpl(): finagle.Http.Server
  // Hook invoked right after a client is built (e.g. to prime a connection).
  def initClient(client: HttpService): Unit = {}
  // Placeholder service installed until the real service is swapped in via a
  // ServiceFactoryRef; any premature request fails loudly.
  def initService: HttpService = Service.mk { _: Request =>
    Future.exception(new Exception("boom!"))
  }
  // Whether the transport under test supports the given optional Feature.
  def featureImplemented(feature: Feature): Boolean
def testIfImplemented(feature: Feature)(name: String)(testFn: => Unit): Unit = {
if (!featureImplemented(feature)) ignore(name)(testFn) else test(name)(testFn)
}
/**
* Read `n` number of bytes from the bytestream represented by `r`.
*/
def readNBytes(n: Int, r: Reader[Buf]): Future[Buf] = {
def loop(left: Buf): Future[Buf] = n - left.length match {
case x if x > 0 =>
r.read().flatMap {
case Some(right) => loop(left.concat(right))
case None => Future.value(left)
}
case _ => Future.value(left)
}
loop(Buf.Empty)
}
private def requestWith(status: Status): Request =
Request("/", ("statusCode", status.code.toString))
private val statusCodeSvc = new HttpService {
def apply(request: Request): Future[Response] = {
val statusCode = request.getIntParam("statusCode", Status.BadRequest.code)
Future.value(Response(Status.fromCode(statusCode)))
}
}
  // ScalaTest hook: when `skipWholeTest` is set by a subclass, register every
  // test in this suite as ignored instead of running it.
  override def test(
    testName: String,
    testTags: Tag*
  )(testFun: => Any
  )(
    implicit pos: Position
  ): Unit = {
    if (skipWholeTest)
      ignore(testName)(testFun)
    else
      super.test(testName, testTags: _*)(testFun)
  }
/**
* Run the tests using the supplied connection generation function
*/
def run(tests: HttpTest*)(connect: HttpService => HttpService): Unit = {
tests.foreach(t => t(connect))
}
  /**
   * Create a new non-streaming HTTP client/server pair and attach the service to the client
   */
  def nonStreamingConnect(service: HttpService): HttpService = {
    // Serve the placeholder first; the real service is swapped in only after
    // `initClient` runs, so any warm-up traffic hits `initService`.
    val ref = new ServiceFactoryRef(ServiceFactory.const(initService))
    val server = serverImpl()
      .withLabel("server")
      .withStatsReceiver(statsRecv)
      .withMaxHeaderSize(8.kilobytes)
      .withMaxRequestSize(200.bytes)
      .serve("localhost:*", ref)
    val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
    val client = clientImpl()
      .withStatsReceiver(statsRecv)
      // Disable session-level failure detection so tests fully control failures.
      .configured(FailureDetector.Param(FailureDetector.NullConfig))
      .newService("%s:%d".format(addr.getHostName, addr.getPort), "client")
    // Closing the returned service tears down both the client and the server.
    val ret = new ServiceProxy(client) {
      override def close(deadline: Time) =
        Closable.all(client, server).close(deadline)
    }
    initClient(client)
    ref() = ServiceFactory.const(service)
    ret
  }
  /**
   * Create a new streaming HTTP client/server pair and attach the service to the client
   */
  def streamingConnect(service: HttpService): HttpService = {
    // Placeholder service until `initClient` has run (see nonStreamingConnect).
    val ref = new ServiceFactoryRef(ServiceFactory.const(initService))
    val server = serverImpl()
      // Messages larger than 8 KB stay chunked instead of being aggregated.
      .withStreaming(8.kilobytes)
      .withLabel("server")
      .withStatsReceiver(statsRecv)
      .serve("localhost:*", ref)
    val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
    val client = clientImpl()
      .withStreaming(8.kilobytes)
      .withStatsReceiver(statsRecv)
      .newService("%s:%d".format(addr.getHostName, addr.getPort), "client")
    initClient(client)
    ref() = ServiceFactory.const(service)
    // Closing the returned service tears down both the client and the server.
    new ServiceProxy(client) {
      override def close(deadline: Time) =
        Closable.all(client, server).close(deadline)
    }
  }
  /**
   * Error-path tests shared by all transports (run over non-streaming connect):
   * oversized headers, response classification defaults, unhandled exceptions,
   * and payload-size limits for both fixed-length and chunked requests.
   */
  def standardErrors(connect: HttpService => HttpService): Unit = {
    test(implName + ": request header fields too large") {
      val service = new HttpService {
        def apply(request: Request) = Future.value(Response())
      }
      val client = connect(service)
      val request = Request("/")
      // Exceeds the 8 KB max header size configured in nonStreamingConnect.
      request.headerMap.add("header", "a" * 8192)
      val response = await(client(request))
      assert(response.status == Status.RequestHeaderFieldsTooLarge)
      await(client.close())
    }
    test(implName + ": with default client-side ResponseClassifier") {
      val client = connect(statusCodeSvc)
      await(client(requestWith(Status.Ok)))
      assert(statsRecv.counters(Seq("client", "requests")) == 1)
      assert(statsRecv.counters(Seq("client", "success")) == 1)
      await(client(requestWith(Status.ServiceUnavailable)))
      assert(statsRecv.counters(Seq("client", "requests")) == 2)
      // by default 500s are treated as unsuccessful
      assert(statsRecv.counters(Seq("client", "success")) == 1)
      await(client.close())
    }
    test(implName + ": with default server-side ResponseClassifier") {
      val client = connect(statusCodeSvc)
      await(client(requestWith(Status.Ok)))
      assert(statsRecv.counters(Seq("server", "requests")) == 1)
      assert(statsRecv.counters(Seq("server", "success")) == 1)
      await(client(requestWith(Status.ServiceUnavailable)))
      assert(statsRecv.counters(Seq("server", "requests")) == 2)
      // by default 500s are treated as unsuccessful
      assert(statsRecv.counters(Seq("server", "success")) == 1)
      await(client.close())
    }
    test(implName + ": unhandled exceptions are converted into 500s") {
      val service = new HttpService {
        def apply(request: Request) = Future.exception(new IllegalArgumentException("bad news"))
      }
      val client = connect(service)
      val response = await(client(Request("/")))
      assert(response.status == Status.InternalServerError)
      await(client.close())
    }
    if (!sys.props.contains("SKIP_FLAKY_TRAVIS"))
      test(implName + ": return 413s for fixed-length requests with too large payloads") {
        val service = new HttpService {
          def apply(request: Request) = Future.value(Response())
        }
        val client = connect(service)
        // The server is configured with a 200-byte max request size.
        val tooBig = Request("/")
        tooBig.content = Buf.ByteArray.Owned(new Array[Byte](300))
        val justRight = Request("/")
        justRight.content = Buf.ByteArray.Owned(new Array[Byte](200))
        assert(await(client(tooBig)).status == Status.RequestEntityTooLarge)
        assert(await(client(justRight)).status == Status.Ok)
        await(client.close())
      }
    if (!sys.props.contains("SKIP_FLAKY_TRAVIS"))
      test(
        implName +
          ": return 413s for chunked requests which stream too much data"
      ) {
        val service = new HttpService {
          def apply(request: Request) = Future.value(Response())
        }
        val client = connect(service)
        val justRight = Request("/")
        assert(await(client(justRight)).status == Status.Ok)
        val tooMuch = Request("/")
        tooMuch.setChunked(true)
        val w = tooMuch.writer
        w.write(buf("a" * 1000)).before(w.close)
        val res = client(tooMuch)
        Await.ready(res, 5.seconds)
        // Depending on the transport, the server either responds 413 or simply
        // hangs up mid-stream; both are acceptable here.
        res.poll.get match {
          case Return(resp) =>
            assert(resp.status == Status.RequestEntityTooLarge)
          case Throw(_: ChannelClosedException) =>
            ()
          case t =>
            fail(s"expected a 413 or a ChannelClosedException, saw $t")
        }
        await(client.close())
      }
  }
def standardBehaviour(connect: HttpService => HttpService): Unit = {
test(implName + ": client stack observes max header size") {
val service = new HttpService {
def apply(req: Request) = {
val res = Response()
res.headerMap.put("Foo", ("*" * 8192) + "Bar: a")
Future.value(res)
}
}
val client = connect(service)
// Whether this fails or not, which determined by configuration of max
// header size in client configuration, there should definitely be no
// "Bar" header.
val hasBar = client(Request()).transform {
case Throw(_) => Future.False
case Return(res) =>
val names = res.headerMap.keys
Future.value(names.exists(_.contains("Bar")))
}
assert(!await(hasBar))
await(client.close())
}
test(implName + ": client sets content length") {
val service = new HttpService {
def apply(request: Request) = {
val response = Response()
val len = request.headerMap.get(Fields.ContentLength)
response.contentString = len.getOrElse("")
Future.value(response)
}
}
val body = "hello"
val client = connect(service)
val req = Request()
req.contentString = body
assert(await(client(req)).contentString == body.length.toString)
await(client.close())
}
test(implName + ": echo") {
val service = new HttpService {
def apply(request: Request) = {
val response = Response()
response.contentString = request.uri
Future.value(response)
}
}
val client = connect(service)
val response = client(Request("123"))
assert(await(response).contentString == "123")
await(client.close())
}
test(implName + ": dtab") {
val service = new HttpService {
def apply(request: Request) = {
val stringer = new StringWriter
val printer = new PrintWriter(stringer)
Dtab.local.print(printer)
val response = Response(request)
response.contentString = stringer.toString
Future.value(response)
}
}
val client = connect(service)
Dtab.unwind {
Dtab.local ++= Dtab.read("/a=>/b; /c=>/d")
val res = await(client(Request("/")))
assert(res.contentString == "Dtab(2)\n\t/a => /b\n\t/c => /d\n")
}
await(client.close())
}
test(implName + ": (no) dtab") {
val service = new HttpService {
def apply(request: Request) = {
val stringer = new StringWriter
val response = Response(request)
response.contentString = "%d".format(Dtab.local.length)
Future.value(response)
}
}
val client = connect(service)
val res = await(client(Request("/")))
assert(res.contentString == "0")
await(client.close())
}
test(implName + ": context") {
val writtenDeadline = Deadline.ofTimeout(5.seconds)
val service = new HttpService {
def apply(request: Request) = {
val deadline = Deadline.current.get
assert(deadline.deadline == writtenDeadline.deadline)
val retries = Retries.current.get
assert(retries == Retries(0))
val response = Response(request)
Future.value(response)
}
}
Contexts.broadcast.let(Deadline, writtenDeadline) {
val req = Request()
val client = connect(service)
val res = await(client(Request("/")))
assert(res.status == Status.Ok)
await(client.close())
}
}
if (!sys.props.contains("SKIP_FLAKY"))
testIfImplemented(ClientAbort)(implName + ": client abort") {
import com.twitter.conversions.DurationOps._
val timer = new JavaTimer
val promise = new Promise[Response]
val service = new HttpService {
def apply(request: Request) = promise
}
val client = connect(service)
client(Request())
await(timer.doLater(20.milliseconds) {
await(client.close(20.milliseconds))
intercept[CancelledRequestException] {
promise.isInterrupted match {
case Some(intr) => throw intr
case _ =>
}
}
})
}
test(implName + ": measure payload size") {
val service = new HttpService {
def apply(request: Request) = {
val rep = Response()
rep.content = request.content.concat(request.content)
Future.value(rep)
}
}
val client = connect(service)
val req = Request()
req.content = Buf.Utf8("." * 10)
await(client(req))
eventually {
assert(statsRecv.stat("client", "request_payload_bytes")() == Seq(10.0f))
// the payloadsize filter measures response sizes as a side-effect (.respond)
// so for h2c we sometimes see the warmup request's response payload despite
// clearing the stats
val clientResponse = statsRecv.stat("client", "response_payload_bytes")()
assert(clientResponse == Seq(20.0f) || clientResponse == Seq(0f, 20.0f))
assert(statsRecv.stat("server", "request_payload_bytes")() == Seq(10.0f))
assert(statsRecv.stat("server", "response_payload_bytes")() == Seq(20.0f))
}
await(client.close())
}
test(implName + ": interrupt requests") {
val p = Promise[Unit]()
val interrupted = Promise[Unit]()
val service = new HttpService {
def apply(request: Request) = {
p.setDone()
val interruptee = Promise[Response]()
interruptee.setInterruptHandler {
case exn: Throwable =>
interrupted.setDone()
}
interruptee
}
}
val client = connect(service)
val req = Request()
req.content = Buf.Utf8("." * 10)
val f = client(req)
await(p)
assert(!f.isDefined)
val e = new Exception("boom!")
f.raise(e)
val actual = intercept[Exception] {
await(f)
}
assert(actual == e)
await(interrupted)
await(client.close())
}
test(implName + ": interrupting requests doesn't interfere with others") {
val p = Promise[Unit]()
val interrupted = Promise[Unit]()
val second = Promise[Response]
val service = new HttpService {
def apply(request: Request) = {
if (!p.isDefined) {
p.setDone()
val interruptee = Promise[Response]()
interruptee.setInterruptHandler {
case exn: Throwable =>
interrupted.setDone()
}
interruptee
} else {
second
}
}
}
val client = connect(service)
val req = Request()
req.content = Buf.Utf8("." * 10)
val f1 = client(req)
await(p)
val f2 = client(req)
assert(!f1.isDefined)
assert(!f2.isDefined)
val e = new Exception("boom!")
f1.raise(e)
val actual = intercept[Exception] {
await(f1)
}
assert(actual == e)
await(interrupted)
assert(!f2.isDefined)
second.setValue(req.response)
assert(await(f2).status == Status.Ok)
await(client.close())
}
test(implName + ": aggregates trailers when streams are aggregated") {
val service = new HttpService {
def apply(req: Request): Future[Response] = {
assert(req.trailers.size == 2)
assert(req.trailers("foo") == "bar")
assert(req.trailers("bar") == "baz")
Future.value(Response())
}
}
val client = connect(service)
val req = Request()
req.setChunked(true)
val rep = client(req)
val out = for {
_ <- req.chunkWriter.write(Chunk.last(HeaderMap("foo" -> "bar", "bar" -> "baz")))
_ <- req.chunkWriter.close()
} yield ()
await(out.before(rep))
await(client.close())
}
}
  /**
   * Tests exercising chunked/streaming bodies (run over streaming connect):
   * reader/writer plumbing, decompression, discard/close propagation,
   * aggregation thresholds, chunk payload stats, and trailing headers.
   */
  def streaming(connect: HttpService => HttpService): Unit = {
    test(s"$implName (streaming)" + ": stream") {
      def service(r: Reader[Buf]) = new HttpService {
        def apply(request: Request) = {
          val response = Response.chunked(Version.Http11, Status.Ok, r)
          Future.value(response)
        }
      }
      val writer = new Pipe[Buf]()
      val client = connect(service(writer))
      val reader = await(client(Request())).reader
      // Bytes written on the server side must be readable incrementally.
      await(writer.write(buf("hello")))
      assert(await(readNBytes(5, reader)) == Buf.Utf8("hello"))
      await(writer.write(buf("world")))
      assert(await(readNBytes(5, reader)) == Buf.Utf8("world"))
      await(client.close())
    }
    test(s"$implName (streaming)" + ": stream via ResponseProxy filter") {
      class ResponseProxyFilter extends SimpleFilter[Request, Response] {
        override def apply(
          request: Request,
          service: Service[Request, Response]
        ): Future[Response] = {
          service(request).map { responseOriginal =>
            new ResponseProxy {
              override val response = responseOriginal
            }
          }
        }
      }
      def service = new HttpService {
        def apply(request: Request) = {
          val response = Response()
          response.setChunked(true)
          response.writer.write(buf("goodbye")).before {
            response.writer.write(buf("world")).before {
              response.close()
            }
          }
          Future.value(response)
        }
      }
      val serviceWithResponseProxy = (new ResponseProxyFilter).andThen(service)
      val client = connect(serviceWithResponseProxy)
      val response = await(client(Request()))
      val Buf.Utf8(actual) = await(Reader.readAll(response.reader))
      assert(actual == "goodbyeworld")
      await(client.close())
    }
    test(s"$implName (streaming): aggregates responses that must not have a body") {
      val service = new HttpService {
        def apply(request: Request): Future[Response] = {
          val resp = Response()
          resp.status = Status.NoContent
          Future.value(resp)
        }
      }
      val client = connect(service)
      val resp = await(client(Request()))
      // 204 No Content responses are never delivered chunked.
      assert(!resp.isChunked)
      assert(resp.content.isEmpty)
    }
    test(s"$implName (streaming)" + ": stream via ResponseProxy class") {
      case class EnrichedResponse(resp: Response) extends ResponseProxy {
        override val response = resp
      }
      // Test streaming partial data separated in time
      def service = new HttpService {
        def apply(request: Request) = {
          val response = EnrichedResponse(Response(Version.Http11, Status.Ok))
          response.setChunked(true)
          response.writer.write(Buf.Utf8("hello")) before {
            Future.sleep(Duration.fromSeconds(3))(DefaultTimer) before {
              response.writer.write(Buf.Utf8("world")) ensure {
                response.close()
              }
            }
          }
          Future.value(response)
        }
      }
      val client = connect(service)
      val response = await(client(Request()))
      val Buf.Utf8(actual) = await(Reader.readAll(response.reader))
      assert(actual == "helloworld")
      await(client.close())
    }
    test(s"$implName (streaming)" + ": streaming clients can decompress content") {
      val svc = new Service[Request, Response] {
        def apply(request: Request) = {
          val response = Response()
          response.contentString = "raw content"
          Future.value(response)
        }
      }
      val client = connect(svc)
      val req = Request("/")
      req.headerMap.set("accept-encoding", "gzip")
      val content = await(client(req).flatMap { rep =>
        Reader.readAll(rep.reader)
      })
      assert(Buf.Utf8.unapply(content).get == "raw content")
      await(client.close())
    }
    test(s"$implName (streaming)" + ": symmetric reader and getContent") {
      val s = Service.mk[Request, Response] { req =>
        Reader.readAll(req.reader).map { buf =>
          assert(buf == Buf.Utf8("hello"))
          if (!req.isChunked) {
            assert(req.contentString == "hello")
          }
          val resp = Response(req)
          resp.content = buf
          resp
        }
      }
      val req = Request()
      req.contentString = "hello"
      req.headerMap.put("Content-Length", "5")
      val client = connect(s)
      val res = await(client(req))
      val buf = await(Reader.readAll(res.reader))
      assert(buf == Buf.Utf8("hello"))
      assert(res.contentString == "hello")
    }
    test(
      s"$implName (streaming): transport closure propagates to request stream reader"
    ) {
      val p = new Promise[Buf]
      val s = Service.mk[Request, Response] { req =>
        p.become(Reader.readAll(req.reader))
        Future.value(Response())
      }
      val client = connect(s)
      val req = Request()
      req.setChunked(true)
      await(client(req))
      await(client.close())
      // A torn-down transport must fail the server's in-progress read.
      intercept[ChannelClosedException] { await(p) }
    }
    test(
      s"$implName (streaming)" +
        ": transport closure propagates to request stream producer"
    ) {
      val s = Service.mk[Request, Response] { _ =>
        Future.value(Response())
      }
      val client = connect(s)
      val req = Request()
      req.setChunked(true)
      await(client(req))
      await(client.close())
      // A torn-down transport must fail the client's writer.
      intercept[ReaderDiscardedException] { await(drip(req.writer)) }
    }
    test(
      s"$implName (streaming): " +
        "request discard terminates remote stream producer"
    ) {
      val s = Service.mk[Request, Response] { req =>
        val res = Response()
        res.setChunked(true)
        def go =
          for {
            Some(c) <- req.reader.read()
            _ <- res.writer.write(c)
            _ <- res.close()
          } yield ()
        // discard the reader, which should terminate the drip.
        go ensure req.reader.discard()
        Future.value(res)
      }
      val client = connect(s)
      val req = Request()
      req.setChunked(true)
      val resf = client(req)
      await(req.writer.write(buf("hello")))
      val contentf = resf flatMap { res =>
        Reader.readAll(res.reader)
      }
      assert(await(contentf) == Buf.Utf8("hello"))
      // drip should terminate because the request is discarded.
      intercept[ReaderDiscardedException] { await(drip(req.writer)) }
    }
    test(
      s"$implName (streaming): " +
        "client discard terminates stream and frees up the connection"
    ) {
      val s = new Service[Request, Response] {
        var rep: Response = null
        def apply(req: Request) = {
          rep = Response()
          rep.setChunked(true)
          // Make sure the body is fully read.
          // Then we hang forever.
          val body = Reader.readAll(req.reader)
          Future.value(rep)
        }
      }
      val client = connect(s)
      val rep = await(client(Request()))
      assert(s.rep != null)
      rep.reader.discard()
      s.rep = null
      // Now, make sure the connection doesn't clog up.
      await(client(Request()))
      assert(s.rep != null)
    }
    test(s"$implName (streaming)" + ": two fixed-length requests") {
      val svc = Service.mk[Request, Response] { _ =>
        Future.value(Response())
      }
      val client = connect(svc)
      await(client(Request()))
      await(client(Request()))
      await(client.close())
    }
    test(s"$implName (streaming)" + ": measure chunk payload size") {
      val svc = Service.mk[Request, Response] { req =>
        req.reader.read()
        val rep = Response()
        rep.setChunked(true)
        rep.writer.write(Buf.Utf8("01234"))
        Future.value(rep)
      }
      val req = Request()
      req.setChunked(true)
      req.writer.write(Buf.Utf8("0123456789"))
      val client = connect(svc)
      val response = await(client(req))
      response.reader.read()
      eventually {
        assert(statsRecv.stat("client", "stream", "request", "chunk_payload_bytes")() == Seq(10f))
        assert(statsRecv.stat("client", "stream", "response", "chunk_payload_bytes")() == Seq(5f))
        assert(statsRecv.stat("server", "stream", "request", "chunk_payload_bytes")() == Seq(10f))
        assert(statsRecv.stat("server", "stream", "response", "chunk_payload_bytes")() == Seq(5f))
      }
      await(client.close())
    }
    // Helper: a service whose fixed-length response body is `size` bytes.
    def makeService(size: Int): Service[Request, Response] = {
      Service.mk[Request, Response] { req =>
        val resp = Response()
        resp.contentString = "*" * size
        resp.contentLength = size
        Future.value(resp)
      }
    }
    // The next three tests pin the 8 KB aggregation threshold set in
    // streamingConnect: strictly larger stays chunked, <= 8 KB is aggregated.
    test("Responses with Content-length header larger than 8 KB are not aggregated") {
      val svc = makeService(8 * 1024 + 1)
      val client = connect(svc)
      val resp = await(client(Request()))
      assert(resp.isChunked)
      assert(resp.content.isEmpty)
      assert(resp.contentLength == Some(8 * 1024 + 1))
    }
    test("Responses with Content-length header equal to 8 KB are aggregated") {
      val svc = makeService(8 * 1024)
      val client = connect(svc)
      val resp = await(client(Request()))
      assert(!resp.isChunked)
      assert(!resp.content.isEmpty)
      assert(resp.contentLength == Some(8 * 1024))
    }
    test("Responses with Content-length header smaller than 8 KB are aggregated") {
      val svc = makeService(8 * 1024 - 1)
      val client = connect(svc)
      val resp = await(client(Request()))
      assert(!resp.isChunked)
      assert(!resp.content.isEmpty)
      assert(resp.contentLength == Some(8 * 1024 - 1))
    }
    // A retryable rejection must not be requeued once the request body has
    // started streaming (the stream cannot be replayed).
    test(implName + ": streaming requests can't be retried") {
      val failService = new HttpService {
        def apply(req: Request): Future[Response] =
          req.reader.read().flatMap { _ =>
            Future.exception(Failure("try again", FailureFlags.Retryable | FailureFlags.Rejected))
          }
      }
      val client = connect(failService)
      val e = intercept[FailureFlags[_]] {
        val out = new Pipe[Buf]
        val req = Request(Version.Http11, Method.Post, "/", out)
        val rep = client(req)
        await(out.write(Buf.Utf8("foo")))
        await(rep)
      }
      assert(e.isFlagged(FailureFlags.Rejected))
      eventually {
        assert(
          !statsRecv.counters.contains(Seq("client", "retries", "requeues")) ||
            statsRecv.counters(Seq("client", "retries", "requeues")) == 0)
        assert(statsRecv.counters(Seq("client", "failures")) == 1)
        assert(statsRecv.counters(Seq("client", "requests")) == 1)
      }
      await(client.close())
    }
    test(implName + ": streaming session bi-directionally transmit trailing headers") {
      val service = new HttpService {
        def apply(req: Request): Future[Response] = {
          val rep = Response()
          rep.setChunked(true)
          for {
            ts <- req.chunkReader.read().map(_.get.trailers)
            _ <- rep.chunkWriter.write(Chunk.last(ts.add("bar", "baz")))
            _ <- req.chunkReader.read()
            _ <- rep.chunkWriter.close()
          } yield ()
          Future.value(rep)
        }
      }
      val client = connect(service)
      val req = Request()
      req.setChunked(true)
      val rep = await(client(req))
      val trailersIn = HeaderMap.apply("foo" -> "bar")
      val out = for {
        _ <- req.chunkWriter.write(Chunk.last(trailersIn))
        _ <- req.chunkWriter.close()
      } yield ()
      await(out)
      val trailersOut = await(rep.chunkReader.read()).get.trailers
      assert(await(rep.chunkReader.read()).isEmpty)
      assert(trailersOut.size == 2)
      assert(trailersOut("foo") == "bar")
      assert(trailersOut("bar") == "baz")
      await(client.close())
    }
    test(implName + ": invalid trailer causes server to hang up") {
      val observed = new Promise[HeaderMap]
      val service = new HttpService {
        def apply(req: Request): Future[Response] = {
          observed.become(req.chunkReader.read().map(_.get.trailers))
          Future.value(Response())
        }
      }
      val client = connect(service)
      val req = Request()
      req.setChunked(true)
      val rep = await(client(req))
      assert(rep.status == Status.Ok)
      val trailers = HeaderMap.newHeaderMap
      // addUnsafe bypasses validation so the illegal bytes reach the wire.
      illegalHeaders.foreach { case (k, v) => trailers.addUnsafe(k, v) }
      val out = for {
        _ <- req.chunkWriter.write(Chunk.last(trailers))
        _ <- req.chunkWriter.close()
      } yield ()
      await(out)
      intercept[ChannelException](await(observed))
      await(client.close())
    }
  }
  /**
   * Verifies trace propagation across a two-hop call: the inner service must
   * share the outer trace id, and its parent span must be the outer span.
   */
  def tracing(connect: HttpService => HttpService): Unit = {
    test(implName + ": trace") {
      var (outerTrace, outerSpan) = ("", "")
      val inner = connect(new HttpService {
        def apply(request: Request) = {
          val response = Response(request)
          // Report the inner hop's trace identifiers back in the body.
          response.contentString = Seq(
            Trace.id.traceId.toString,
            Trace.id.spanId.toString,
            Trace.id.parentId.toString
          ).mkString(".")
          Future.value(response)
        }
      })
      val outer = connect(new HttpService {
        def apply(request: Request) = {
          outerTrace = Trace.id.traceId.toString
          outerSpan = Trace.id.spanId.toString
          inner(request)
        }
      })
      val response = await(outer(Request()))
      val Seq(innerTrace, innerSpan, innerParent) =
        response.contentString.split('.').toSeq
      assert(innerTrace == outerTrace, "traceId")
      assert(outerSpan == innerParent, "outer span vs inner parent")
      assert(innerSpan != outerSpan, "inner (%s) vs outer (%s) spanId".format(innerSpan, outerSpan))
      await(outer.close())
      await(inner.close())
    }
  }
  // use 1 less than the requeue limit so that we trigger failure accrual
  // before we run into the requeue limit.
  private val failureAccrualFailures = 19
  // Register the shared test batteries against both connect styles.
  run(standardErrors, standardBehaviour, tracing)(nonStreamingConnect(_))
  run(streaming)(streamingConnect(_))
  // Creating a server must configure Netty's global pooled allocator with
  // maxOrder == 7.
  test(
    implName + ": PooledByteBufAllocator maxOrder " +
      "is 7 for servers"
  ) {
    // this will set the default order
    val server = serverImpl().serve(
      new InetSocketAddress(InetAddress.getLoopbackAddress, 0),
      new ConstantService[Request, Response](Future.value(Response()))
    )
    assert(PooledByteBufAllocator.defaultMaxOrder == 7)
    await(server.close())
  }
  // Creating (and exercising) a client must configure Netty's global pooled
  // allocator with maxOrder == 7.
  test(
    implName + ": PooledByteBufAllocator maxOrder " +
      "is 7 for clients"
  ) {
    val server = serverImpl().serve(
      new InetSocketAddress(InetAddress.getLoopbackAddress, 0),
      new ConstantService[Request, Response](Future.value(Response()))
    )
    // this will set the default order
    val client = clientImpl().newService(
      Name.bound(Address(server.boundAddress.asInstanceOf[InetSocketAddress])),
      "client"
    )
    // NOTE(review): the response future is intentionally discarded — the call
    // only forces client/allocator initialization.
    client.apply(Request())
    assert(PooledByteBufAllocator.defaultMaxOrder == 7)
    await(Closable.all(client, server).close())
  }
  // Repeated rejections must be requeued until failure accrual removes the
  // endpoint; verifies the requeue/failure-accrual stat accounting.
  if (!sys.props.contains("SKIP_FLAKY_TRAVIS"))
    test(implName + ": Status.busy propagates along the Stack") {
      val failService = new HttpService {
        def apply(req: Request): Future[Response] =
          Future.exception(Failure.rejected("unhappy"))
      }
      val clientName = "http"
      val server = serverImpl().serve(new InetSocketAddress(0), failService)
      val client = clientImpl()
        .configured(FailureAccrualFactory.Param(failureAccrualFailures, () => 1.minute))
        .withStatsReceiver(statsRecv)
        .newService(
          Name.bound(Address(server.boundAddress.asInstanceOf[InetSocketAddress])),
          clientName
        )
      val e = intercept[FailureFlags[_]] {
        await(client(Request("/")))
      }
      assert(e.isFlagged(FailureFlags.Rejected))
      assert(statsRecv.counters(Seq(clientName, "failure_accrual", "removals")) == 1)
      assert(
        statsRecv.counters(Seq(clientName, "retries", "requeues")) == failureAccrualFailures - 1
      )
      assert(
        statsRecv.counters(Seq(clientName, "failures", "restartable")) == failureAccrualFailures
      )
      await(Closable.all(client, server).close())
    }
  // A NonRetryable rejection must fail immediately: no requeues and no
  // failure-accrual removal.
  test(implName + ": nonretryable isn't retried") {
    val failService = new HttpService {
      def apply(req: Request): Future[Response] =
        Future.exception(Failure("unhappy", FailureFlags.NonRetryable | FailureFlags.Rejected))
    }
    val clientName = "http"
    val server = serverImpl().serve(new InetSocketAddress(0), failService)
    val client = clientImpl()
      .configured(FailureAccrualFactory.Param(failureAccrualFailures, () => 1.minute))
      .withStatsReceiver(statsRecv)
      .newService(
        Name.bound(Address(server.boundAddress.asInstanceOf[InetSocketAddress])),
        clientName
      )
    val e = intercept[FailureFlags[_]] {
      val req = Request("/")
      await(client(req))
    }
    assert(e.isFlagged(FailureFlags.Rejected))
    assert(statsRecv.counters(Seq(clientName, "failure_accrual", "removals")) == 0)
    assert(statsRecv.counters(Seq(clientName, "retries", "requeues")) == 0)
    assert(!statsRecv.counters.contains(Seq(clientName, "failures", "restartable")))
    assert(statsRecv.counters(Seq(clientName, "failures")) == 1)
    assert(statsRecv.counters(Seq(clientName, "requests")) == 1)
    await(Closable.all(client, server).close())
  }
test("Client-side ResponseClassifier based on status code") {
val classifier = HttpResponseClassifier {
case (_, r: Response) if r.status == Status.ServiceUnavailable =>
ResponseClass.NonRetryableFailure
}
val server = serverImpl()
.withStatsReceiver(NullStatsReceiver)
.serve("localhost:*", statusCodeSvc)
val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
val client = clientImpl()
.withStatsReceiver(statsRecv)
.withResponseClassifier(classifier)
.newService("%s:%d".format(addr.getHostName, addr.getPort), "client")
val rep1 = await(client(requestWith(Status.Ok)))
assert(statsRecv.counters(Seq("client", "requests")) == 1)
assert(statsRecv.counters(Seq("client", "success")) == 1)
val rep2 = await(client(requestWith(Status.ServiceUnavailable)))
assert(statsRecv.counters(Seq("client", "requests")) == 2)
assert(statsRecv.counters(Seq("client", "success")) == 1)
await(client.close())
await(server.close())
}
  // A server-side ResponseClassifier that marks 503s as failures must drive
  // the server's success/failures stats accordingly.
  test("server-side ResponseClassifier based on status code") {
    val classifier = HttpResponseClassifier {
      case (_, r: Response) if r.status == Status.ServiceUnavailable =>
        ResponseClass.NonRetryableFailure
    }
    val server = serverImpl()
      .withResponseClassifier(classifier)
      .withStatsReceiver(statsRecv)
      .withLabel("server")
      .serve("localhost:*", statusCodeSvc)
    val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
    val client = clientImpl()
      .newService("%s:%d".format(addr.getHostName, addr.getPort), "client")
    await(client(requestWith(Status.Ok)))
    assert(statsRecv.counters(Seq("server", "requests")) == 1)
    assert(statsRecv.counters(Seq("server", "success")) == 1)
    await(client(requestWith(Status.ServiceUnavailable)))
    assert(statsRecv.counters(Seq("server", "requests")) == 2)
    assert(statsRecv.counters(Seq("server", "success")) == 1)
    assert(statsRecv.counters(Seq("server", "failures")) == 1)
    await(client.close())
    await(server.close())
  }
  // Message-size limits above 2 GB must be rejected at configuration time.
  test("codec should require a message size be less than 2Gb") {
    intercept[IllegalArgumentException] {
      serverImpl().withMaxRequestSize(2049.megabytes)
    }
    intercept[IllegalArgumentException] {
      clientImpl().withMaxResponseSize(3000.megabytes)
    }
  }
  // A response body exceeding the client's configured max response size must
  // fail the request with TooLongMessageException.
  test("client respects MaxResponseSize") {
    val svc = new HttpService {
      def apply(request: Request): Future[Response] = {
        val response = Response()
        response.contentString = "*" * 600.kilobytes.bytes.toInt
        Future.value(response)
      }
    }
    val server = serverImpl()
      .withStatsReceiver(NullStatsReceiver)
      .serve("localhost:*", svc)
    val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
    val client = clientImpl()
      .withMaxResponseSize(500.kilobytes) // wontfix: doesn't work on netty3 with limit <= 8 KB
      .withStatsReceiver(NullStatsReceiver)
      .newService(s"${addr.getHostName}:${addr.getPort}", "client")
    val req = Request("/")
    intercept[TooLongMessageException] {
      await(client(req))
    }
    await(client.close())
    await(server.close())
  }
val illegalHeaders = for {
k <- Seq("FgR", "a\fb")
v <- Seq("a\u000bb", "a\fb") // vtab
} yield k -> v
  // Requests carrying illegal header bytes (injected via addUnsafe so the
  // client doesn't validate them) must be rejected by the server with a 400.
  test("server rejects illegal headers with a 400") {
    val service = nonStreamingConnect(Service.mk(_ => Future.value(Response())))
    illegalHeaders.foreach {
      case (k, v) =>
        val badRequest = Request()
        badRequest.headerMap.addUnsafe(k, v)
        val resp = await(service(badRequest))
        assert(resp.status == Status.BadRequest)
    }
    await(service.close())
  }
  // Chunked requests ending in illegal trailer bytes must be rejected with a 400.
  test("server rejects illegal trailers with a 400") {
    val service = nonStreamingConnect(Service.mk(_ => Future.value(Response())))
    illegalHeaders.foreach {
      case (k, v) =>
        val badRequest = Request()
        badRequest.setChunked(true)
        val trailers = HeaderMap.newHeaderMap
        trailers.addUnsafe(k, v)
        // NOTE(review): this write/close future is discarded, so the request
        // below may race the trailer write — confirm intended.
        for {
          _ <- badRequest.chunkWriter.write(Chunk.last(trailers))
          _ <- badRequest.chunkWriter.close()
        } yield ()
        val resp = await(service(badRequest))
        assert(resp.status == Status.BadRequest)
    }
    await(service.close())
  }
  // Responses carrying illegal header bytes must fail on the client side.
  test("client rejects illegal headers with an exception") {
    // Mutable slot so each iteration serves a different bad header pair.
    val current = new AtomicReference("a" -> "b")
    val service = nonStreamingConnect(Service.mk { _ =>
      val resp = Response()
      val (k, v) = current.get
      resp.headerMap.addUnsafe(k, v)
      Future.value(resp)
    })
    illegalHeaders.foreach { kv =>
      current.set(kv)
      intercept[Exception](await(service(Request())))
    }
    await(service.close())
  }
  // Chunked responses ending in illegal trailer bytes must fail on the client side.
  test("client rejects illegal trailer with an exception") {
    // Mutable slot so each iteration serves a different bad trailer pair.
    val current = new AtomicReference("a" -> "b")
    val service = nonStreamingConnect(Service.mk { _ =>
      val rep = Response()
      rep.setChunked(true)
      val (k, v) = current.get
      val trailers = HeaderMap.newHeaderMap
      trailers.addUnsafe(k, v)
      for {
        _ <- rep.chunkWriter.write(Chunk.last(trailers))
        _ <- rep.chunkWriter.close()
      } yield ()
      Future.value(rep)
    })
    illegalHeaders.foreach { kv =>
      current.set(kv)
      intercept[Exception](await(service(Request())))
    }
    await(service.close())
  }
  // Obsolete line folding (CRLF + space continuation) in a received header
  // value must be collapsed to a single space on the client side.
  test("obs-fold sequences are 'fixed' when received by clients") {
    val service = nonStreamingConnect(Service.mk { _ =>
      val resp = Response()
      resp.headerMap.addUnsafe("foo", "biz\r\n baz")
      Future.value(resp)
    })
    val resp = await(service(Request()))
    assert(resp.headerMap.get("foo") == Some("biz baz"))
    await(service.close())
  }
  // Obsolete line folding in a received header value must be collapsed to a
  // single space on the server side; the server echoes it back for inspection.
  test("obs-fold sequences are 'fixed' when received by servers") {
    val service = nonStreamingConnect(Service.mk { req =>
      val resp = Response()
      req.headerMap.get("foo").foreach { v =>
        resp.contentString = v
      }
      Future.value(resp)
    })
    val req = Request()
    req.headerMap.addUnsafe("foo", "biz\r\n baz")
    val resp = await(service(req))
    assert(resp.contentString == "biz baz")
    await(service.close())
  }
  // Transports requiring the ASCII URI filter must reject non-ASCII request
  // URIs with a 400.
  testIfImplemented(RequiresAsciiFilter)(
    "server responds with 400 Bad Request if non-ascii character is present in uri") {
    val service = NullService
    val server = serverImpl().withStatsReceiver(NullStatsReceiver).serve("localhost:*", service)
    val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
    val client = clientImpl()
      .withStatsReceiver(NullStatsReceiver)
      .newService(s"${addr.getHostName}:${addr.getPort}", "client")
    try {
      val rep = await(client(Request("/DSC02175拷貝.jpg")))
      assert(rep.status == Status.BadRequest)
    } finally {
      await(client.close())
      await(server.close())
    }
  }
test("server responds 500 if an invalid header is being served") {
val service = new HttpService {
def apply(request: Request): Future[Response] = {
val response = Response()
response.headerMap.add("foo", "|\f") // these are prohibited in N3
Future.value(response)
}
}
val server = serverImpl()
.withStatsReceiver(NullStatsReceiver)
.serve("localhost:*", service)
val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
val client = clientImpl()
.withStatsReceiver(NullStatsReceiver)
.newService(s"${addr.getHostName}:${addr.getPort}", "client")
val rep = await(client(Request("/")))
assert(rep.status == Status.InternalServerError)
}
  // The client-side decoder must enforce withMaxHeaderSize on responses. The
  // server is configured to ignore its own header-list limit so the oversized
  // header actually reaches the client.
  testIfImplemented(MaxHeaderSize)("client respects MaxHeaderSize in response") {
    // The ref lets us swap in the oversized-header service after the session
    // has been primed with the suite-provided initService/initClient fixtures.
    val ref = new ServiceFactoryRef(ServiceFactory.const(initService))
    val server = serverImpl()
      .withStatsReceiver(NullStatsReceiver)
      // we need to ignore header list size so we can examine behavior on the client-side
      .configured(EncoderIgnoreMaxHeaderListSize(true))
      .serve("localhost:*", ref)

    val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
    val client = clientImpl()
      .withMaxHeaderSize(1.kilobyte)
      .newService(s"${addr.getHostName}:${addr.getPort}", "client")
    initClient(client)

    val svc = new Service[Request, Response] {
      def apply(request: Request) = {
        val response = Response()
        // One kilobyte of value plus the header name exceeds the client limit.
        response.headerMap.set("foo", "*" * 1.kilobytes.bytes.toInt)
        Future.value(response)
      }
    }
    ref() = ServiceFactory.const(svc)

    val req = Request("/")
    intercept[TooLongMessageException] {
      await(client(req))
    }

    await(client.close())
    await(server.close())
  }
test("non-streaming clients can decompress content") {
val svc = new Service[Request, Response] {
def apply(request: Request) = {
val response = Response()
response.contentString = "raw content"
Future.value(response)
}
}
val server = serverImpl()
.withStatsReceiver(NullStatsReceiver)
.withCompressionLevel(5)
.serve("localhost:*", svc)
val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
val client = clientImpl()
.withStatsReceiver(NullStatsReceiver)
.newService(s"${addr.getHostName}:${addr.getPort}", "client")
val req = Request("/")
req.headerMap.set("accept-encoding", "gzip")
assert(await(client(req)).contentString == "raw content")
await(client.close())
await(server.close())
}
test("non-streaming clients can disable decompression") {
val svc = new Service[Request, Response] {
def apply(request: Request) = {
val response = Response()
response.contentString = "raw content"
Future.value(response)
}
}
val server = serverImpl()
.withStatsReceiver(NullStatsReceiver)
.withCompressionLevel(5)
.serve("localhost:*", svc)
val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
val client = clientImpl()
.withDecompression(false)
.withStatsReceiver(NullStatsReceiver)
.newService(s"${addr.getHostName}:${addr.getPort}", "client")
val req = Request("/")
req.headerMap.set("accept-encoding", "gzip")
val rep = await(client(req))
assert(rep.headerMap("content-encoding") == "gzip")
assert(rep.contentString != "raw content")
await(client.close())
await(server.close())
}
test("removing the compressor works") {
val svc = new Service[Request, Response] {
def apply(request: Request) = {
val response = Response()
response.contentString = "raw content"
Future.value(response)
}
}
val server = serverImpl()
.withCompressionLevel(0)
.serve("localhost:*", svc)
val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
val client = clientImpl()
.newService(s"${addr.getHostName}:${addr.getPort}", "client")
val req = Request("/")
val rep = await(client(req))
assert(rep.contentString == "raw content")
await(client.close())
await(server.close())
}
test("request remote address") {
val svc = new Service[Request, Response] {
def apply(request: Request) = {
val response = Response()
response.contentString = request.remoteAddress.toString
Future.value(response)
}
}
val server = serverImpl()
.serve("localhost:*", svc)
val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
val client = clientImpl()
.newService(s"${addr.getHostName}:${addr.getPort}", "client")
assert(await(client(Request("/"))).contentString.startsWith("/127.0.0.1"))
await(client.close())
await(server.close())
}
  // Two sessions checked out of the same client factory must be usable in any
  // order: completing a request on the second session before the first
  // exercises that checked-out connections are independent.
  test("out of order client requests are OK") {
    val svc = new Service[Request, Response] {
      def apply(request: Request): Future[Response] = {
        Future.value(Response())
      }
    }
    val server = serverImpl()
      .serve("localhost:*", svc)

    val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
    // newClient (not newService) so individual sessions can be checked out.
    val factory = clientImpl()
      .newClient(s"${addr.getHostName}:${addr.getPort}", "client")

    val client1 = await(factory())
    val client2 = await(factory())

    // Issue on the second session first, then the first.
    await(client2(Request("/")))
    await(client1(Request("/")))

    await(client1.close())
    await(client2.close())
    await(factory.close())
    await(server.close())
  }
test(s"$implName client handles cut connection properly") {
val svc = Service.mk[Request, Response] { _: Request =>
Future.value(Response())
}
val server1 = serverImpl()
.serve("localhost:*", svc)
val addr = server1.boundAddress.asInstanceOf[InetSocketAddress]
val client = clientImpl()
.newService("%s:%d".format(addr.getHostName, addr.getPort), "client")
val rep1 = await(client(Request("/")))
assert(rep1.status == Status.Ok)
await(server1.close())
// we wait to ensure the client has been informed the connection has been dropped
Thread.sleep(20)
val server2 = serverImpl()
.serve("localhost:%d".format(addr.getPort), svc)
val rep2 = await(client(Request("/")))
assert(rep2.status == Status.Ok)
}
  // When sessions are checked out explicitly via newClient (no
  // FactoryToService wrapping), service acquisition must not be requeued
  // behind the caller's back: the requeue counter stays at zero.
  test("Does not retry service acquisition many times when not using FactoryToService") {
    val svc = Service.mk[Request, Response] { _: Request =>
      Future.value(Response())
    }
    val sr = new InMemoryStatsReceiver
    val server = serverImpl()
      .serve("localhost:*", svc)

    val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
    val client = clientImpl()
      .withStatsReceiver(sr)
      .newClient("%s:%d".format(addr.getHostName, addr.getPort), "client")

    val conn = await(client())
    assert(sr.counters(Seq("client", "retries", "requeues")) == 0)

    await(conn.close())
    await(server.close())
    await(client.close())
  }
  /**
   * Shared body for the MethodBuilder per-request-timeout tests. Callers pair
   * this with a service that takes ~50ms per request, so a 5ms timeout must
   * always fail and a 5s timeout must always succeed. Asserts on the
   * per-method "logical" stats scoped under the client label "a_label".
   */
  private def testMethodBuilderTimeouts(
    stats: InMemoryStatsReceiver,
    server: ListeningServer,
    builder: MethodBuilder
  ): Unit = {
    // these should never complete within the timeout
    val shortTimeout: Service[Request, Response] = builder
      .withTimeoutPerRequest(5.millis)
      .newService("fast")

    intercept[IndividualRequestTimeoutException] {
      await(shortTimeout(Request()))
    }
    // Stats are updated asynchronously; poll until visible.
    eventually {
      assert(stats.counter("a_label", "fast", "logical", "requests")() == 1)
      assert(stats.counter("a_label", "fast", "logical", "success")() == 0)
    }

    // these should always complete within the timeout
    val longTimeout: Service[Request, Response] = builder
      .withTimeoutPerRequest(5.seconds)
      .newService("slow")

    assert("ok" == await(longTimeout(Request())).contentString)
    eventually {
      assert(stats.counter("a_label", "slow", "logical", "requests")() == 1)
      assert(stats.counter("a_label", "slow", "logical", "success")() == 1)
    }

    await(Future.join(Seq(longTimeout.close(), shortTimeout.close())))
    await(server.close())
  }
  // Draining semantics: requests dispatched before server.close() must be
  // allowed to complete, while requests issued after the listener stops must
  // be rejected.
  test(s"$implName: Graceful shutdown & draining") {
    val p = new Promise[Unit]
    @volatile var holdResponses = false
    val service = new HttpService {
      def apply(request: Request) = {
        val response = Response()
        response.contentString = request.uri
        // Once holdResponses is set, park the response on `p` so that a
        // request is still in flight when the server begins draining.
        if (holdResponses) p.map { _ =>
          response
        } else Future.value(response)
      }
    }
    val server = serverImpl().serve(new InetSocketAddress(0), service)
    val client = clientImpl().newService(
      Name.bound(Address(server.boundAddress.asInstanceOf[InetSocketAddress])),
      "client"
    )

    await(client(Request("/1")))
    holdResponses = true
    val rep = client(Request("/2"))
    // Give the dispatch a moment to reach the server before draining begins.
    Thread.sleep(100)
    server.close(5.seconds)
    Thread.sleep(100)
    p.setDone()

    // The in-flight request drains to completion.
    assert(await(rep).contentString == "/2")

    // A new request after close must fail.
    val f = intercept[FailureFlags[_]] {
      await(client(Request("/3")))
    }
    // Connection refused
    assert(f.isFlagged(FailureFlags.Rejected))
  }
  // MethodBuilder derived directly from the client Stack, exercised against a
  // server that sleeps 50ms per request (see testMethodBuilderTimeouts).
  test(implName + ": methodBuilder timeouts from Stack") {
    import DefaultTimer.Implicit
    val svc = new Service[Request, Response] {
      def apply(req: Request): Future[Response] = {
        Future.sleep(50.millis).before {
          val rep = Response()
          rep.setContentString("ok")
          Future.value(rep)
        }
      }
    }
    val server = serverImpl()
      .withStatsReceiver(NullStatsReceiver)
      .serve("localhost:*", svc)

    val stats = new InMemoryStatsReceiver()
    // Label must be "a_label": the shared assertions read stats under it.
    val client = clientImpl()
      .withStatsReceiver(stats)
      .configured(com.twitter.finagle.param.Timer(DefaultTimer))
      .withLabel("a_label")
    val name = Name.bound(Address(server.boundAddress.asInstanceOf[InetSocketAddress]))
    val builder: MethodBuilder = client.methodBuilder(name)

    testMethodBuilderTimeouts(stats, server, builder)
  }
  // Same as the Stack variant, but the MethodBuilder is derived from the
  // legacy ClientBuilder API; behavior and stats must match.
  test(implName + ": methodBuilder timeouts from ClientBuilder") {
    import DefaultTimer.Implicit
    val svc = new Service[Request, Response] {
      def apply(req: Request): Future[Response] = {
        Future.sleep(50.millis).before {
          val rep = Response()
          rep.setContentString("ok")
          Future.value(rep)
        }
      }
    }
    val server = serverImpl()
      .withStatsReceiver(NullStatsReceiver)
      .serve("localhost:*", svc)

    val stats = new InMemoryStatsReceiver()
    val client = clientImpl()
      .configured(com.twitter.finagle.param.Timer(DefaultTimer))
    // Name must be "a_label": the shared assertions read stats under it.
    val clientBuilder = ClientBuilder()
      .reportTo(stats)
      .name("a_label")
      .stack(client)
      .dest(Name.bound(Address(server.boundAddress.asInstanceOf[InetSocketAddress])))
    val builder: MethodBuilder = MethodBuilder.from(clientBuilder)

    testMethodBuilderTimeouts(stats, server, builder)
  }
  /**
   * Shared body for the MethodBuilder retry tests. Callers pair this with a
   * server that echoes the status code named in the request body
   * ("500"/"503"). Verifies that a retryable-5xx classifier produces retries
   * and a failed logical request, and that composing an "ok 503" classifier
   * in front makes 503 a success with no retries while 500 still retries.
   */
  private[this] def testMethodBuilderRetries(
    stats: InMemoryStatsReceiver,
    server: ListeningServer,
    builder: MethodBuilder
  ): Unit = {
    // Any 5xx response is a retryable failure.
    val retry500sClassifier: ResponseClassifier = {
      case ReqRep(_, Return(r: Response)) if r.statusCode / 100 == 5 =>
        ResponseClass.RetryableFailure
    }
    // ...except 503, which is treated as success when this classifier is
    // composed in front of the one above.
    val ok503sClassifier: ResponseClassifier = {
      case ReqRep(_, Return(r: Response)) if r.statusCode == 503 =>
        ResponseClass.Success
    }

    val retry5xxs = builder
      .withRetryForClassifier(retry500sClassifier)
      .newService("5xx")

    val ok503s = builder
      .withRetryForClassifier(ok503sClassifier.orElse(retry500sClassifier))
      .newService("503")

    val req500 = Request()
    req500.contentString = "500"

    val req503 = Request()
    req503.contentString = "503"

    assert(500 == await(retry5xxs(req500)).statusCode)
    eventually {
      // One logical request, zero successes, two retries recorded.
      assert(stats.counter("a_label", "5xx", "logical", "requests")() == 1)
      assert(stats.counter("a_label", "5xx", "logical", "success")() == 0)
      assert(stats.stat("a_label", "5xx", "retries")() == Seq(2))
    }

    assert(503 == await(ok503s(req503)).statusCode)
    eventually {
      // 503 classified as success: no retries.
      assert(stats.counter("a_label", "503", "logical", "requests")() == 1)
      assert(stats.counter("a_label", "503", "logical", "success")() == 1)
      assert(stats.stat("a_label", "503", "retries")() == Seq(0))
    }

    assert(500 == await(ok503s(req500)).statusCode)
    eventually {
      // 500 still retries through the composed classifier.
      assert(stats.counter("a_label", "503", "logical", "requests")() == 2)
      assert(stats.counter("a_label", "503", "logical", "success")() == 1)
      assert(stats.stat("a_label", "503", "retries")() == Seq(0, 2))
    }

    await(Future.join(Seq(retry5xxs.close(), ok503s.close())))
    await(server.close())
  }
  if (!sys.props.contains("SKIP_FLAKY_TRAVIS")) // Maybe netty4 http/2 only
    test(implName + ": methodBuilder retries from Stack") {
      // Server echoes back the status code named in the request body so the
      // shared retry assertions can drive 500s and 503s.
      val svc = new Service[Request, Response] {
        def apply(req: Request): Future[Response] = {
          val rep = Response()
          rep.contentString = req.contentString
          req.contentString match {
            case "500" => rep.statusCode = 500
            case "503" => rep.statusCode = 503
            case _ => ()
          }
          Future.value(rep)
        }
      }
      val server = serverImpl()
        .withStatsReceiver(NullStatsReceiver)
        .serve("localhost:*", svc)

      val stats = new InMemoryStatsReceiver()
      // Label must be "a_label": testMethodBuilderRetries reads stats under it.
      val client = clientImpl()
        .withStatsReceiver(stats)
        .withLabel("a_label")
      val name = Name.bound(Address(server.boundAddress.asInstanceOf[InetSocketAddress]))
      val builder: MethodBuilder = client.methodBuilder(name)

      testMethodBuilderRetries(stats, server, builder)
    }
  if (!sys.props.contains("SKIP_FLAKY_TRAVIS")) // Maybe netty4 http/2 only
    test(implName + ": methodBuilder retries from ClientBuilder") {
      // Status-echoing server; here the MethodBuilder is derived from the
      // legacy ClientBuilder API rather than the client Stack.
      val svc = new Service[Request, Response] {
        def apply(req: Request): Future[Response] = {
          val rep = Response()
          rep.contentString = req.contentString
          req.contentString match {
            case "500" => rep.statusCode = 500
            case "503" => rep.statusCode = 503
            case _ => ()
          }
          Future.value(rep)
        }
      }
      val server = serverImpl()
        .withStatsReceiver(NullStatsReceiver)
        .serve("localhost:*", svc)

      val stats = new InMemoryStatsReceiver()
      val client = clientImpl()
      // Name must be "a_label": testMethodBuilderRetries reads stats under it.
      val clientBuilder = ClientBuilder()
        .reportTo(stats)
        .name("a_label")
        .stack(client)
        .dest(Name.bound(Address(server.boundAddress.asInstanceOf[InetSocketAddress])))
      val builder: MethodBuilder = MethodBuilder.from(clientBuilder)

      testMethodBuilderRetries(stats, server, builder)
    }
  // Per HTTP semantics, 1xx, 204 and 304 responses carry no message body.
  // Verify the client sees an empty, non-chunked body and no Content-Length.
  // 101 SwitchingProtocols is excluded (only valid replying to an upgrade).
  testIfImplemented(NoBodyMessage)(
    "response with status code {1xx, 204 and 304} must not have a message body nor Content-Length header field"
  ) {
    def check(resStatus: Status): Unit = {
      val svc = new Service[Request, Response] {
        def apply(request: Request) = {
          val response = Response(Version.Http11, resStatus)
          Future.value(response)
        }
      }
      val server = serverImpl()
        .serve("localhost:*", svc)

      val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
      val client = clientImpl()
        .newService(s"${addr.getHostName}:${addr.getPort}", "client")
      val res = await(client(Request(Method.Get, "/")))
      assert(res.status == resStatus)
      assert(!res.isChunked)
      assert(res.content.isEmpty)
      assert(res.contentLength.isEmpty)

      await(client.close())
      await(server.close())
    }

    List(
      Status.Continue, /*Status.SwitchingProtocols,*/ Status.Processing,
      Status.NoContent,
      Status.NotModified
    ).foreach {
      check(_)
    }
  }
  // As above, but the service actually attaches a body. The transport must
  // drop it for these status codes rather than send it to the client.
  testIfImplemented(NoBodyMessage)(
    "response with status code {1xx, 204 and 304} must not have a message body nor Content-Length header field" +
      "when non-empty body is returned"
  ) {
    def check(resStatus: Status): Unit = {
      val svc = new Service[Request, Response] {
        def apply(request: Request) = {
          val body = Buf.Utf8("some data")
          val response = Response(Version.Http11, resStatus)
          response.content = body
          Future.value(response)
        }
      }
      val server = serverImpl()
        .serve("localhost:*", svc)

      val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
      val client = clientImpl()
        .newService(s"${addr.getHostName}:${addr.getPort}", "client")
      val res = await(client(Request(Method.Get, "/")))
      assert(res.status == resStatus)
      assert(!res.isChunked)
      // Both the body and any implied Content-Length must be absent.
      assert(res.content.isEmpty)
      assert(res.contentLength.isEmpty)

      await(client.close())
      await(server.close())
    }

    List(
      Status.Continue, /*Status.SwitchingProtocols,*/ Status.Processing,
      Status.NoContent,
      Status.NotModified
    ).foreach {
      check(_)
    }
  }
  // We exclude SwitchingProtocols(101) since it should only be sent in response to an upgrade request.
  // 304 is covered by a separate test because it retains the Content-Length header.
  List(Status.Continue, Status.Processing, Status.NoContent)
    .foreach { resStatus =>
      testIfImplemented(NoBodyMessage)(
        s"response with status code ${resStatus.code} must not have a message body nor " +
          "Content-Length header field when non-empty body with explicit Content-Length is returned"
      ) {
        val svc = new Service[Request, Response] {
          def apply(request: Request) = {
            val body = Buf.Utf8("some data")
            val response = Response(Version.Http11, resStatus)
            response.content = body
            // Even an explicitly set Content-Length must be stripped.
            response.headerMap.set(Fields.ContentLength, body.length.toString)
            Future.value(response)
          }
        }
        val server = serverImpl()
          .serve("localhost:*", svc)

        val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
        val client = clientImpl()
          .newService(s"${addr.getHostName}:${addr.getPort}", "client")
        val res = await(client(Request(Method.Get, "/")))
        assert(res.status == resStatus)
        assert(!res.isChunked)
        assert(res.length == 0)
        assert(res.contentLength.isEmpty)

        await(client.close())
        await(server.close())
      }
    }
  // 304 Not Modified is the exception: the body must still be dropped, but an
  // explicitly set Content-Length header is retained on the wire.
  testIfImplemented(NoBodyMessage)(
    "response with status code 304 must not have a message body *BUT* Content-Length " +
      "header field when non-empty body with explicit Content-Length is returned"
  ) {
    val body = Buf.Utf8("some data")
    val svc = new Service[Request, Response] {
      def apply(request: Request) = {
        val response = Response(Version.Http11, Status.NotModified)
        response.content = body
        response.headerMap.set(Fields.ContentLength, body.length.toString)
        Future.value(response)
      }
    }
    val server = serverImpl()
      .serve("localhost:*", svc)

    val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
    val client = clientImpl()
      .newService(s"${addr.getHostName}:${addr.getPort}", "client")
    val res = await(client(Request(Method.Get, "/")))
    assert(res.status == Status.NotModified)
    assert(!res.isChunked)
    // No body bytes, yet the Content-Length header survives.
    assert(res.length == 0)
    assert(res.contentLength.contains(body.length.toLong))

    await(client.close())
    await(server.close())
  }
  if (!sys.props.contains("SKIP_FLAKY"))
    test("ServerAdmissionControl doesn't filter requests with a chunked body") {
      // A chunked request cannot be safely nacked/retried once its body has
      // started streaming, so admission control must let it through even
      // though the installed filter would otherwise nack the first request.
      val responseString = "a response"
      val svc = Service.mk[Request, Response] { _ =>
        val response = Response()
        response.contentString = responseString
        Future.value(response)
      }

      val nacked = new AtomicBoolean(false)
      val filter = new Filter.TypeAgnostic {
        override def toFilter[Req, Rep]: Filter[Req, Rep, Req, Rep] = new SimpleFilter[Req, Rep] {
          // nacks them all
          def apply(request: Req, service: Service[Req, Rep]): Future[Rep] = {
            if (nacked.compareAndSet(false, true)) Future.exception(Failure.rejected)
            else service(request)
          }
        }
      }

      val server = serverImpl()
        .configured(ServerAdmissionControl.Filters(Some(Seq(_ => filter))))
        .serve("localhost:*", svc)

      val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
      val client = clientImpl()
        .newService(s"${addr.getHostName}:${addr.getPort}", "client")

      // first, a request with a body
      val reqWithBody = Request(Method.Post, "/")
      reqWithBody.setChunked(true)
      val writer = reqWithBody.writer
      writer.write(Buf.Utf8("data")).before(writer.close())

      // Shouldn't be nacked
      assert(await(client(reqWithBody)).contentString == responseString)
      assert(!nacked.get)

      // Should be nacked the first time; the client retries transparently.
      val reqWithoutBody = Request(Method.Get, "/")
      assert(await(client(reqWithoutBody)).contentString == responseString)
      assert(nacked.get)

      await(client.close())
      await(server.close())
    }
  if (!sys.props.contains("SKIP_FLAKY"))
    test("ServerAdmissionControl can filter requests with the magic header") {
      // A non-chunked request with a body is retryable, so admission control
      // is allowed to nack it; the client then retries and succeeds.
      val responseString = "a response"
      val svc = Service.mk[Request, Response] { _ =>
        val response = Response()
        response.contentString = responseString
        Future.value(response)
      }

      val nacked = new AtomicBoolean(false)
      val filter = new Filter.TypeAgnostic {
        override def toFilter[Req, Rep]: Filter[Req, Rep, Req, Rep] = new SimpleFilter[Req, Rep] {
          // nacks them all
          def apply(request: Req, service: Service[Req, Rep]): Future[Rep] = {
            if (nacked.compareAndSet(false, true)) Future.exception(Failure.rejected)
            else service(request)
          }
        }
      }

      val server = serverImpl()
        .configured(ServerAdmissionControl.Filters(Some(Seq(_ => filter))))
        .serve("localhost:*", svc)

      val addr = server.boundAddress.asInstanceOf[InetSocketAddress]
      val client = clientImpl()
        .newService(s"${addr.getHostName}:${addr.getPort}", "client")

      // first, a request with a body
      val reqWithBody = Request(Method.Post, "/")
      reqWithBody.contentString = "not-empty"
      // Header should be there so we can nack it; the retry still succeeds.
      assert(await(client(reqWithBody)).contentString == responseString)
      assert(nacked.get)

      await(client.close())
      await(server.close())
    }
}
| luciferous/finagle | finagle-http/src/test/scala/com/twitter/finagle/http/AbstractEndToEndTest.scala | Scala | apache-2.0 | 64,649 |
/* sbt -- Simple Build Tool
* Copyright 2011 Mark Harrah
*/
package sbt
import java.io.File
import java.net.URI
import Def.{ displayFull, ScopedKey, ScopeLocal, Setting }
import BuildPaths.outputDirectory
import Scope.GlobalScope
import BuildStreams.Streams
import sbt.io.Path._
import sbt.internal.util.{ Attributed, AttributeEntry, AttributeKey, AttributeMap, Settings }
import sbt.internal.util.Attributed.data
import sbt.util.Logger
/**
 * The fully loaded and resolved build: all build units keyed by URI, the
 * evaluated settings and data, derived lookup indices, per-key streams, and
 * scope-delegation functions.
 */
final class BuildStructure(val units: Map[URI, LoadedBuildUnit], val root: URI, val settings: Seq[Setting[_]], val data: Settings[Scope], val index: StructureIndex, val streams: State => Streams, val delegates: Scope => Seq[Scope], val scopeLocal: ScopeLocal) {
  // Maps a build URI to the ID of its default root project.
  val rootProject: URI => String = Load getRootProject units
  /** All resolved projects across every build unit. */
  def allProjects: Seq[ResolvedProject] = units.values.flatMap(_.defined.values).toSeq
  /** Resolved projects of a single build; empty if `build` is unknown. */
  def allProjects(build: URI): Seq[ResolvedProject] = units.get(build).toList.flatMap(_.defined.values)
  def allProjectRefs: Seq[ProjectRef] = units.toSeq flatMap { case (build, unit) => refs(build, unit.defined.values.toSeq) }
  def allProjectRefs(build: URI): Seq[ProjectRef] = refs(build, allProjects(build))
  val extra: BuildUtil[ResolvedProject] = BuildUtil(root, units, index.keyIndex, data)
  // Pairs each project with a fully qualified ProjectRef for its build.
  private[this] def refs(build: URI, projects: Seq[ResolvedProject]): Seq[ProjectRef] = projects.map { p => ProjectRef(build, p.id) }
}
/**
 * Information that is not original, but can be reconstructed from the rest of
 * [[BuildStructure]]: lookup indices and task/trigger maps derived after loading.
 */
final class StructureIndex(
  val keyMap: Map[String, AttributeKey[_]],
  val taskToKey: Map[Task[_], ScopedKey[Task[_]]],
  val triggers: Triggers[Task],
  val keyIndex: KeyIndex,
  val aggregateKeyIndex: KeyIndex)
/**
 * A resolved build unit. (`ResolvedBuildUnit` would be a better name to distinguish it from the loaded, but unresolved `BuildUnit`.)
 * @param unit The loaded, but unresolved [[BuildUnit]] this was resolved from.
 * @param defined The definitive map from project IDs to resolved projects.
 *                These projects have had [[Reference]]s resolved and [[AutoPlugin]]s evaluated.
 * @param rootProjects The list of project IDs for the projects considered roots of this build.
 *                     The first root project is used as the default in several situations where a project is not otherwise selected.
 * @param buildSettings Settings declared at the build level for this unit.
 */
final class LoadedBuildUnit(val unit: BuildUnit, val defined: Map[String, ResolvedProject], val rootProjects: Seq[String], val buildSettings: Seq[Setting[_]]) extends BuildUnitBase {
  // A build unit without at least one root project is unusable: `root` below
  // would be undefined.
  assert(rootProjects.nonEmpty, "No root projects defined for build unit " + unit)

  /**
   * The project to use as the default when one is not otherwise selected.
   * [[LocalRootProject]] resolves to this from within the same build.
   */
  val root = rootProjects.head

  /** The base directory of the build unit (not the build definition).*/
  def localBase = unit.localBase

  /**
   * The classpath to use when compiling against this build unit's publicly visible code.
   * It includes build definition and plugin classes and classes for .sbt file statements and expressions.
   */
  def classpath: Seq[File] = unit.definitions.target ++ unit.plugins.classpath ++ unit.definitions.dslDefinitions.classpath

  /**
   * The class loader to use for this build unit's publicly visible code.
   * It includes build definition and plugin classes and classes for .sbt file statements and expressions.
   */
  def loader = unit.definitions.dslDefinitions.classloader(unit.definitions.loader)

  /** The imports to use for .sbt files, `consoleProject` and other contexts that use code from the build definition. */
  def imports = BuildUtil.getImports(unit)

  override def toString = unit.toString
}
// TODO: figure out how to deprecate and drop buildNames
/**
 * The built and loaded build definition, including loaded but unresolved [[Project]]s, for a build unit (for a single URI).
 *
 * @param base The base directory of the build definition, typically `<build base>/project/`.
 * @param target The output directories containing the compiled build definition classes.
 * @param loader The ClassLoader containing all classes and plugins for the build definition project.
 *               Note that this does not include classes for .sbt files.
 * @param builds The list of [[Build]]s for the build unit.
 *               In addition to auto-discovered [[Build]]s, this includes any auto-generated default [[Build]]s.
 * @param projects The list of all [[Project]]s from all [[Build]]s.
 *                 These projects have not yet been resolved, but they have had auto-plugins applied.
 *                 In particular, each [[Project]]'s `autoPlugins` field is populated according to their configured `plugins`
 *                 and their `settings` and `configurations` updated as appropriate.
 * @param buildNames No longer used and will be deprecated once feasible.
 * @param dslDefinitions Values defined by .sbt DSL statements, carrying their own classpath and loader wrapper.
 */
final class LoadedDefinitions(
    val base: File,
    val target: Seq[File],
    val loader: ClassLoader,
    val builds: Seq[Build],
    val projects: Seq[Project],
    val buildNames: Seq[String],
    val dslDefinitions: DefinedSbtValues) {
  // Binary-compatible secondary constructor predating dslDefinitions.
  def this(base: File,
    target: Seq[File],
    loader: ClassLoader,
    builds: Seq[Build],
    projects: Seq[Project],
    buildNames: Seq[String]) = this(base, target, loader, builds, projects, buildNames, DefinedSbtValues.empty)
}
/** Auto-detected top-level modules (as in `object X`) of type `T` paired with their source names. */
final class DetectedModules[T](val modules: Seq[(String, T)]) {

  /**
   * The source names of the modules ("X" in `object X`, as opposed to the
   * implementation class name "X$"). Order is stable, so `names zip values`
   * pairs each name with its module.
   */
  def names: Seq[String] = modules.map { case (name, _) => name }

  /**
   * The singleton values of the modules, in the same stable order as `names`.
   */
  def values: Seq[T] = modules.map { case (_, value) => value }
}
/**
 * Auto-detected auto plugin.
 *
 * @param name the module's source name (e.g. "MyPlugin" for `object MyPlugin`)
 * @param value the [[AutoPlugin]] singleton instance
 * @param hasAutoImport whether the plugin declares an `autoImport` member whose
 *                      contents should be imported into .sbt files
 */
case class DetectedAutoPlugin(name: String, value: AutoPlugin, hasAutoImport: Boolean)
/**
 * Auto-discovered modules for the build definition project. These include modules defined in build definition sources
 * as well as modules in binary dependencies.
 *
 * @param plugins The (deprecated-style) [[Plugin]] modules detected.
 * @param autoPlugins The [[AutoPlugin]] modules detected, with their names and auto-import flags.
 * @param builds The [[Build]]s detected in the build definition. This does not include the default [[Build]] that sbt creates if none is defined.
 */
final class DetectedPlugins(val plugins: DetectedModules[Plugin], val autoPlugins: Seq[DetectedAutoPlugin], val builds: DetectedModules[Build]) {

  /** Sequence of import expressions for the build definition. This includes the names of the [[Plugin]], [[Build]], and [[AutoImport]] modules, but not the [[AutoPlugin]] modules. */
  lazy val imports: Seq[String] = BuildUtil.getImports(plugins.names ++ builds.names) ++
    BuildUtil.importAllRoot(autoImports(autoPluginAutoImports)) ++
    BuildUtil.importAll(autoImports(topLevelAutoPluginAutoImports)) ++
    BuildUtil.importNamesRoot(autoPlugins.map(_.name).filter(nonTopLevelPlugin))

  // Names of plugins that declare an autoImport, split into nested
  // (package-qualified) vs. top-level, since each group is imported differently.
  // Fix: use `collect` instead of flatMap-over-Option with an unused binding.
  private[this] lazy val (autoPluginAutoImports, topLevelAutoPluginAutoImports) =
    autoPlugins.collect {
      case DetectedAutoPlugin(name, _, true) => name
    }.partition(nonTopLevelPlugin)

  /** A function to select the right [[AutoPlugin]]s from [[autoPlugins]] for a [[Project]]. */
  @deprecated("Use deducePluginsFromProject", "0.13.8")
  lazy val deducePlugins: (Plugins, Logger) => Seq[AutoPlugin] = Plugins.deducer(autoPlugins.toList map { _.value })

  /** Selects the right [[AutoPlugin]]s from a [[Project]]. */
  def deducePluginsFromProject(p: Project, log: Logger): Seq[AutoPlugin] =
    {
      val ps0 = p.plugins
      val allDetected = autoPlugins.toList map { _.value }
      // Generated root projects must not pick up the IvyPlugin.
      val detected = p match {
        case _: GeneratedRootProject => allDetected filterNot { _ == sbt.plugins.IvyPlugin }
        case _ => allDetected
      }
      Plugins.deducer(detected)(ps0, log)
    }

  private[this] def autoImports(pluginNames: Seq[String]) = pluginNames.map(_ + ".autoImport")

  // A plugin name containing '.' is package-qualified, i.e. not top-level.
  private[this] def nonTopLevelPlugin(name: String) = name.contains('.')
}
/**
 * The built and loaded build definition project.
 * @param base The base directory for the build definition project (not the base of the project itself).
 * @param pluginData Evaluated tasks/settings from the build definition for later use.
 *                   This is necessary because the build definition project is discarded.
 * @param loader The class loader for the build definition project, notably excluding classes used for .sbt files.
 * @param detected Auto-detected modules in the build definition.
 */
final class LoadedPlugins(val base: File, val pluginData: PluginData, val loader: ClassLoader, val detected: DetectedPlugins) {
  // Binary-compatible constructor predating DetectedPlugins; wraps the legacy
  // plugin list in an empty detection result.
  @deprecated("Use the primary constructor.", "0.13.2")
  def this(base: File, pluginData: PluginData, loader: ClassLoader, plugins: Seq[Plugin], pluginNames: Seq[String]) =
    this(base, pluginData, loader,
      new DetectedPlugins(new DetectedModules(pluginNames zip plugins), Nil, new DetectedModules(Nil))
    )

  @deprecated("Use detected.plugins.values.", "0.13.2")
  val plugins: Seq[Plugin] = detected.plugins.values

  @deprecated("Use detected.plugins.names.", "0.13.2")
  val pluginNames: Seq[String] = detected.plugins.names

  /** Classpath of the build definition, with source/doc attachments attributed. */
  def fullClasspath: Seq[Attributed[File]] = pluginData.classpath

  /** Plain files of `fullClasspath` with attribution stripped. */
  def classpath = data(fullClasspath)
}
/**
 * The loaded, but unresolved build unit.
 * @param uri The uniquely identifying URI for the build.
 * @param localBase The working location of the build on the filesystem.
 *                  For local URIs, this is the same as `uri`, but for remote URIs, this is the local copy or workspace allocated for the build.
 */
final class BuildUnit(val uri: URI, val localBase: File, val definitions: LoadedDefinitions, val plugins: LoadedPlugins) {
  // Local builds render just their directory; remote builds show both forms.
  override def toString =
    if (uri.getScheme == "file") localBase.toString
    else s"$uri (locally: $localBase)"
}
/** The loaded build: resolved build units keyed by URI, cycle-checked at construction. */
final class LoadedBuild(val root: URI, val units: Map[URI, LoadedBuildUnit]) {
  // Fails fast on inter-unit dependency cycles.
  BuildUtil.checkCycles(units)
  /** Every resolved project across all units, paired with its fully qualified reference. */
  def allProjectRefs: Seq[(ProjectRef, ResolvedProject)] = for ((uri, unit) <- units.toSeq; (id, proj) <- unit.defined) yield ProjectRef(uri, id) -> proj
  def extra(data: Settings[Scope])(keyIndex: KeyIndex): BuildUtil[ResolvedProject] = BuildUtil(root, units, keyIndex, data)
  private[sbt] def autos = GroupedAutoPlugins(units)
}
/** A partially loaded build: its units still contain unresolved [[Project]]s (see [[PartBuildUnit]]). */
final class PartBuild(val root: URI, val units: Map[URI, PartBuildUnit])

/** Common interface of resolved ([[LoadedBuildUnit]]) and unresolved ([[PartBuildUnit]]) build units. */
sealed trait BuildUnitBase { def rootProjects: Seq[String]; def buildSettings: Seq[Setting[_]] }
/** A build unit whose projects are loaded but not yet resolved; [[LoadedBuildUnit]] is the resolved form. */
final class PartBuildUnit(val unit: BuildUnit, val defined: Map[String, Project], val rootProjects: Seq[String], val buildSettings: Seq[Setting[_]]) extends BuildUnitBase {
  /** Resolves every project with `f`, producing the final [[LoadedBuildUnit]]. */
  def resolve(f: Project => ResolvedProject): LoadedBuildUnit = new LoadedBuildUnit(unit, defined mapValues f toMap, rootProjects, buildSettings)
  /** Resolves by mapping each project's references through `f`. */
  def resolveRefs(f: ProjectReference => ProjectRef): LoadedBuildUnit = resolve(_ resolve f)
}
/**
 * Computes the on-disk location of per-key stream (log/cache) data and
 * constructs the Streams manager for a State.
 */
object BuildStreams {
  type Streams = std.Streams[ScopedKey[_]]

  // Path segments used for unscoped (Global) axes and the streams root dir.
  final val GlobalPath = "$global"
  final val BuildUnitPath = "$build"
  final val StreamsDirectory = "streams"

  // Returns the State's existing Streams if present, otherwise constructs one
  // rooted at the per-scope path with LogManager-backed logging.
  def mkStreams(units: Map[URI, LoadedBuildUnit], root: URI, data: Settings[Scope]): State => Streams = s =>
    s get Keys.stateStreams getOrElse std.Streams(path(units, root, data), displayFull, LogManager.construct(data, s))

  // Full stream path for a scoped key: project-derived base plus the
  // config/task/extra/key segments.
  def path(units: Map[URI, LoadedBuildUnit], root: URI, data: Settings[Scope])(scoped: ScopedKey[_]): File =
    resolvePath(projectPath(units, root, scoped, data), nonProjectPath(scoped))

  // Left-folds path components onto `base` as nested directories.
  def resolvePath(base: File, components: Seq[String]): File =
    (base /: components)((b, p) => new File(b, p))

  // Renders one scope axis as a path segment. `This` must have been resolved
  // by this point, so encountering it is an error.
  def pathComponent[T](axis: ScopeAxis[T], scoped: ScopedKey[_], label: String)(show: T => String): String =
    axis match {
      case Global => GlobalPath
      case This => sys.error("Unresolved This reference for " + label + " in " + displayFull(scoped))
      case Select(t) => show(t)
    }

  // Path segments for the non-project axes (config, task, extra) followed by
  // the key's own label.
  def nonProjectPath[T](scoped: ScopedKey[T]): Seq[String] =
    {
      val scope = scoped.scope
      pathComponent(scope.config, scoped, "config")(_.name) ::
      pathComponent(scope.task, scoped, "task")(_.label) ::
      pathComponent(scope.extra, scoped, "extra")(showAMap) ::
      scoped.key.label ::
      Nil
    }

  // Stable textual form of the extra-attributes axis: key=value pairs sorted
  // by key, space-separated.
  def showAMap(a: AttributeMap): String =
    a.entries.toSeq.sortBy(_.key.label).map { case AttributeEntry(key, value) => key.label + "=" + value.toString } mkString (" ")

  // Base directory for the project axis: the referenced project's (or build's,
  // or global) streams target, with $global/$build sub-paths disambiguating.
  def projectPath(units: Map[URI, LoadedBuildUnit], root: URI, scoped: ScopedKey[_], data: Settings[Scope]): File =
    scoped.scope.project match {
      case Global => refTarget(GlobalScope, units(root).localBase, data) / GlobalPath
      case Select(br @ BuildRef(uri)) => refTarget(br, units(uri).localBase, data) / BuildUnitPath
      case Select(pr @ ProjectRef(uri, id)) => refTarget(pr, units(uri).defined(id).base, data)
      case Select(pr) => sys.error("Unresolved project reference (" + pr + ") in " + displayFull(scoped))
      case This => sys.error("Unresolved project reference (This) in " + displayFull(scoped))
    }

  def refTarget(ref: ResolvedReference, fallbackBase: File, data: Settings[Scope]): File =
    refTarget(GlobalScope.copy(project = Select(ref)), fallbackBase, data)

  // The scope's configured `target` directory (or the default output directory
  // under `fallbackBase` if unset), plus the "streams" subdirectory.
  def refTarget(scope: Scope, fallbackBase: File, data: Settings[Scope]): File =
    (Keys.target in scope get data getOrElse outputDirectory(fallbackBase).asFile) / StreamsDirectory
}
| dansanduleac/sbt | main/src/main/scala/sbt/BuildStructure.scala | Scala | bsd-3-clause | 13,668 |
package org.scalatra
package test
import servlet.{ ScalatraAsyncSupport, HasMultipartConfig }
import javax.servlet.{ DispatcherType, Filter }
import javax.servlet.http.{ HttpServlet }
import java.util.EnumSet
import org.eclipse.jetty.servlet._
import java.util
// Test-harness mixin that mounts servlets and filters into an embedded Jetty
// ServletContextHandler. Contains local patches (marked *** PATCH ***) that
// additionally recognize skinny.engine's multipart/async marker traits.
trait JettyContainer extends Container {
  // Filters are dispatched for plain requests and async re-dispatches by default.
  private val DefaultDispatcherTypes: EnumSet[DispatcherType] = EnumSet.of(DispatcherType.REQUEST, DispatcherType.ASYNC)
  def servletContextHandler: ServletContextHandler
  // When true, the catch-all DefaultServlet registered at the bottom of this trait is skipped.
  def skipDefaultServlet: Boolean = false
  // Mounts a class that is either an HttpServlet or a Filter; anything else is rejected.
  def mount(klass: Class[_], path: String) = klass match {
    case servlet if classOf[HttpServlet].isAssignableFrom(servlet) =>
      addServlet(servlet.asInstanceOf[Class[_ <: HttpServlet]], path)
    case filter if classOf[Filter].isAssignableFrom(filter) =>
      addFilter(filter.asInstanceOf[Class[_ <: Filter]], path)
    case _ =>
      throw new IllegalArgumentException(klass + " is not assignable to either HttpServlet or Filter")
  }
  def mount(servlet: HttpServlet, path: String) { addServlet(servlet, path) }
  def mount(servlet: HttpServlet, path: String, name: String) { addServlet(servlet, path, name) }
  def mount(app: Filter, path: String, dispatches: EnumSet[DispatcherType] = DefaultDispatcherTypes) =
    addFilter(app, path, dispatches)
  def addServlet(servlet: HttpServlet, path: String) { addServlet(servlet, path, servlet.getClass.getName) }
  // Registers a servlet instance, enabling multipart config and async support
  // when the instance advertises them via the recognized marker traits.
  def addServlet(servlet: HttpServlet, path: String, name: String) {
    val holder = new ServletHolder(name, servlet)
    servlet match {
      case s: HasMultipartConfig =>
        holder.getRegistration.setMultipartConfig(s.multipartConfig.toMultipartConfigElement)
      // *** PATCH ***
      case s: skinny.engine.multipart.HasMultipartConfig =>
        holder.getRegistration.setMultipartConfig(s.multipartConfig.toMultipartConfigElement)
      case s: ScalatraAsyncSupport =>
        holder.getRegistration.setAsyncSupported(true)
      // *** PATCH ***
      case s: skinny.engine.async.AsyncSupported =>
        holder.getRegistration.setAsyncSupported(true)
      case _ =>
    }
    // Normalize the mapping so every servlet is mounted with a "/*" suffix.
    servletContextHandler.addServlet(holder, if (path.endsWith("/*")) path else path + "/*")
  }
  def addServlet(servlet: Class[_ <: HttpServlet], path: String) =
    servletContextHandler.addServlet(servlet, path)
  def addFilter(filter: Filter, path: String, dispatches: util.EnumSet[DispatcherType] = DefaultDispatcherTypes): FilterHolder = {
    val holder = new FilterHolder(filter)
    servletContextHandler.addFilter(holder, path, dispatches)
    holder
  }
  def addFilter(filter: Class[_ <: Filter], path: String): FilterHolder =
    addFilter(filter, path, DefaultDispatcherTypes)
  def addFilter(filter: Class[_ <: Filter], path: String, dispatches: util.EnumSet[DispatcherType]): FilterHolder =
    servletContextHandler.addFilter(filter, path, dispatches)
  // Add a default servlet. If there is no underlying servlet, then
  // filters just return 404.
  if (!skipDefaultServlet) servletContextHandler.addServlet(new ServletHolder("default", classOf[DefaultServlet]), "/")
  protected def ensureSessionIsSerializable() {
    servletContextHandler.getSessionHandler.addEventListener(SessionSerializingListener)
  }
}
| holycattle/skinny-framework | engine/src/test/scala/org/scalatra/test/JettyContainer.scala | Scala | mit | 3,227 |
package at.forsyte.apalache.tla.assignments
import at.forsyte.apalache.tla.lir.oper.{BmcOper, TlaActionOper, TlaOper}
import at.forsyte.apalache.tla.lir._
/**
* Instead of relying on the automatic inference of assignment sites, users may instead choose to
* manually annotate where they expect assignments in their specifications (with BmcOper.assign)
*
* This changes automatic assignment detection in the following way:
* If, for a given (primed) variable `v`, there exists at least one manual assignment
* [ v' := ... ], then all expressions of the form [ v' = ... ] are ignored as assignment candidates
* Manual assignments are still required to satisfy both the acyclicity and the covering properties,
* or assignment finding will fail.
*
* Any variable, for which no manual assignemt exists, is handled in the standard way.
*
* @author Jure Kukovec
*/
object ManualAssignments {
  type varnameT = String

  /**
   * Collects the names of all variables with at least one manual assignment
   * site, i.e. an application of BmcOper.assign whose first argument is a
   * primed variable name. The right-hand side of a matched assignment is not
   * searched further; all other operator applications and LET-IN definitions
   * are traversed recursively.
   */
  def findAll(ex: TlaEx): Set[varnameT] = ex match {
    case OperEx(BmcOper.assign, OperEx(TlaActionOper.prime, NameEx(name)), _) =>
      Set(name)
    case OperEx(_, args @ _*) =>
      args.flatMap(findAll).toSet
    case LetInEx(body, defs @ _*) =>
      findAll(body) ++ defs.flatMap(decl => findAll(decl.body))
    case _ =>
      Set.empty[varnameT]
  }
}
| konnov/dach | tla-assignments/src/main/scala/at/forsyte/apalache/tla/assignments/ManualAssignments.scala | Scala | apache-2.0 | 1,441 |
package java.util
import scala.concurrent._
import scala.concurrent.duration._
import org.scalatest._
import org.scalatest.concurrent.AsyncTimeLimitedTests
class TimerTest extends AsyncFunSuite with AsyncTimeLimitedTests {
  def timeLimit = 10 seconds // per-test upper bound enforced by AsyncTimeLimitedTests
  // Run the async test futures on the global execution context.
  implicit override def executionContext =
    scala.concurrent.ExecutionContext.Implicits.global
test("be scheduled once") {
val done = Promise[Boolean]()
val timer = new Timer
timer.schedule(new TimerTask {
def run() {
done.success(true)
}
}, 1000)
done.future.map(t => assert(t))
}
} | storm-enroute/reactive-collections | reactors-common/js/src/test/scala/java/util/java-util-timer-tests.scala | Scala | bsd-3-clause | 592 |
package is.hail.expr.ir.functions
import is.hail.annotations.Region
import is.hail.asm4s._
import is.hail.expr.ir._
import is.hail.types.coerce
import is.hail.types.physical.{PArray, PCode, PFloat64, PType}
import is.hail.types.virtual._
import is.hail.utils._
// Registers Hail's array-valued IR functions (element-wise arithmetic, folds,
// slicing, argmin/argmax, median, correlation, ...) with the function registry.
object ArrayFunctions extends RegistryFunctions {
  // (name, element type, result element type, IR constructor) tuples for the
  // element-wise binary operations registered in registerAll below.
  val arrayOps: Array[(String, Type, Type, (IR, IR) => IR)] =
    Array(
      ("mul", tnum("T"), tv("T"), ApplyBinaryPrimOp(Multiply(), _, _)),
      ("div", TInt32, TFloat32, ApplyBinaryPrimOp(FloatingPointDivide(), _, _)),
      ("div", TInt64, TFloat32, ApplyBinaryPrimOp(FloatingPointDivide(), _, _)),
      ("div", TFloat32, TFloat32, ApplyBinaryPrimOp(FloatingPointDivide(), _, _)),
      ("div", TFloat64, TFloat64, ApplyBinaryPrimOp(FloatingPointDivide(), _, _)),
      ("floordiv", tnum("T"), tv("T"), ApplyBinaryPrimOp(RoundToNegInfDivide(), _, _)),
      ("add", tnum("T"), tv("T"), ApplyBinaryPrimOp(Add(), _, _)),
      ("sub", tnum("T"), tv("T"), ApplyBinaryPrimOp(Subtract(), _, _)),
      ("pow", tnum("T"), TFloat64, (ir1: IR, ir2: IR) => Apply("pow", Seq(), Seq(ir1, ir2), TFloat64)),
      ("mod", tnum("T"), tv("T"), (ir1: IR, ir2: IR) => Apply("mod", Seq(), Seq(ir1, ir2), ir2.typ)))
  // Arithmetic mean as IR: a single fold accumulating (count, sum), then a
  // Float64 division of sum by count.
  def mean(args: Seq[IR]): IR = {
    val Seq(a) = args
    val t = coerce[TArray](a.typ).elementType
    val elt = genUID()
    val n = genUID()
    val sum = genUID()
    StreamFold2(
      ToStream(a),
      FastIndexedSeq((n, I32(0)), (sum, zero(t))),
      elt,
      FastIndexedSeq(Ref(n, TInt32) + I32(1), Ref(sum, t) + Ref(elt, t)),
      Cast(Ref(sum, t), TFloat64) / Cast(Ref(n, TInt32), TFloat64)
    )
  }
  // True iff the array has length 0 (missing arrays propagate missingness).
  def isEmpty(a: IR): IR = ApplyComparisonOp(EQ(TInt32), ArrayLen(a), I32(0))
  // Concatenation of two arrays; missing if either input array is missing.
  def extend(a1: IR, a2: IR): IR = {
    val uid = genUID()
    val typ = a1.typ
    If(IsNA(a1),
      NA(typ),
      If(IsNA(a2),
        NA(typ),
        ToArray(StreamFlatMap(
          MakeStream(Seq(a1, a2), TStream(typ)),
          uid,
          ToStream(Ref(uid, a1.typ))))))
  }
  // Fold of `cond` over the array elements, combined with logical OR.
  def exists(a: IR, cond: IR => IR): IR = {
    val t = coerce[TArray](a.typ).elementType
    StreamFold(
      ToStream(a),
      False(),
      "acc",
      "elt",
      invoke("lor",TBoolean,
        Ref("acc", TBoolean),
        cond(Ref("elt", t))))
  }
  // Membership test using missingness-aware equality (EQWithNA).
  def contains(a: IR, value: IR): IR = {
    exists(a, elt => ApplyComparisonOp(
      EQWithNA(elt.typ, value.typ),
      elt,
      value))
  }
  // Sum of the elements, starting from 0 cast to the element type.
  def sum(a: IR): IR = {
    val t = coerce[TArray](a.typ).elementType
    val sum = genUID()
    val v = genUID()
    val zero = Cast(I64(0), t)
    StreamFold(ToStream(a), zero, sum, v, ApplyBinaryPrimOp(Add(), Ref(sum, t), Ref(v, t)))
  }
  // Product of the elements, starting from 1 cast to the element type.
  def product(a: IR): IR = {
    val t = coerce[TArray](a.typ).elementType
    val product = genUID()
    val v = genUID()
    val one = Cast(I64(1), t)
    StreamFold(ToStream(a), one, product, v, ApplyBinaryPrimOp(Multiply(), Ref(product, t), Ref(v, t)))
  }
  // Registers every array function with the registry; called once at startup.
  def registerAll() {
    registerIR1("isEmpty", TArray(tv("T")), TBoolean)((_, a) => isEmpty(a))
    registerIR2("extend", TArray(tv("T")), TArray(tv("T")), TArray(tv("T")))((_, a, b) => extend(a, b))
    registerIR2("append", TArray(tv("T")), tv("T"), TArray(tv("T"))) { (_, a, c) =>
      extend(a, MakeArray(Seq(c), TArray(c.typ)))
    }
    registerIR2("contains", TArray(tv("T")), tv("T"), TBoolean) { (_, a, e) => contains(a, e) }
    // Each binary op gets three registrations: array-scalar, scalar-array, and
    // array-array (zipped, asserting equal lengths).
    for ((stringOp, argType, retType, irOp) <- arrayOps) {
      registerIR2(stringOp, TArray(argType), argType, TArray(retType)) { (_, a, c) =>
        val i = genUID()
        ToArray(StreamMap(ToStream(a), i, irOp(Ref(i, c.typ), c)))
      }
      registerIR2(stringOp, argType, TArray(argType), TArray(retType)) { (_, c, a) =>
        val i = genUID()
        ToArray(StreamMap(ToStream(a), i, irOp(c, Ref(i, c.typ))))
      }
      registerIR2(stringOp, TArray(argType), TArray(argType), TArray(retType)) { (_, array1, array2) =>
        val a1id = genUID()
        val e1 = Ref(a1id, coerce[TArray](array1.typ).elementType)
        val a2id = genUID()
        val e2 = Ref(a2id, coerce[TArray](array2.typ).elementType)
        ToArray(StreamZip(FastIndexedSeq(ToStream(array1), ToStream(array2)), FastIndexedSeq(a1id, a2id), irOp(e1, e2), ArrayZipBehavior.AssertSameLength))
      }
    }
    registerIR1("sum", TArray(tnum("T")), tv("T"))((_, a) => sum(a))
    registerIR1("product", TArray(tnum("T")), tv("T"))((_, a) => product(a))
    // min/max/nanmin/nanmax share one fold shape: the accumulator starts as NA
    // with a "first element" flag, then combines via the named binary op.
    def makeMinMaxOp(op: String): Seq[IR] => IR = {
      { case Seq(a) =>
        val t = coerce[TArray](a.typ).elementType
        val value = genUID()
        val first = genUID()
        val acc = genUID()
        StreamFold2(ToStream(a),
          FastIndexedSeq((acc, NA(t)), (first, True())),
          value,
          FastIndexedSeq(
            If(Ref(first, TBoolean), Ref(value, t), invoke(op, t, Ref(acc, t), Ref(value, t))),
            False()
          ),
          Ref(acc, t))
      }
    }
    registerIR("min", Array(TArray(tnum("T"))), tv("T"), inline = true)((_, a) => makeMinMaxOp("min")(a))
    registerIR("nanmin", Array(TArray(tnum("T"))), tv("T"), inline = true)((_, a) => makeMinMaxOp("nanmin")(a))
    registerIR("max", Array(TArray(tnum("T"))), tv("T"), inline = true)((_, a) => makeMinMaxOp("max")(a))
    registerIR("nanmax", Array(TArray(tnum("T"))), tv("T"), inline = true)((_, a) => makeMinMaxOp("nanmax")(a))
    registerIR("mean", Array(TArray(tnum("T"))), TFloat64, inline = true)((_, a) => mean(a))
    // Median: filter out missing elements, sort, then pick the middle element
    // (odd count) or the mean of the two middle elements (even count).
    registerIR1("median", TArray(tnum("T")), tv("T")) { (_, array) =>
      val t = array.typ.asInstanceOf[TArray].elementType
      val v = Ref(genUID(), t)
      val a = Ref(genUID(), TArray(t))
      val size = Ref(genUID(), TInt32)
      val lastIdx = size - 1
      val midIdx = lastIdx.floorDiv(2)
      def ref(i: IR) = ArrayRef(a, i)
      def div(a: IR, b: IR): IR = ApplyBinaryPrimOp(BinaryOp.defaultDivideOp(t), a, b)
      Let(a.name, ArraySort(StreamFilter(ToStream(array), v.name, !IsNA(v))),
        If(IsNA(a),
          NA(t),
          Let(size.name,
            ArrayLen(a),
            If(size.ceq(0),
              NA(t),
              If(invoke("mod", TInt32, size, 2).cne(0),
                ref(midIdx), // odd number of non-missing elements
                div(ref(midIdx) + ref(midIdx + 1), Cast(2, t)))))))
    }
    // Shared fold for argmin/argmax: tracks (current extremum, its index) in a
    // struct accumulator; missing elements are skipped.
    def argF(a: IR, op: (Type) => ComparisonOp[Boolean]): IR = {
      val t = coerce[TArray](a.typ).elementType
      val tAccum = TStruct("m" -> t, "midx" -> TInt32)
      val accum = genUID()
      val value = genUID()
      val m = genUID()
      val idx = genUID()
      def updateAccum(min: IR, midx: IR): IR =
        MakeStruct(FastSeq("m" -> min, "midx" -> midx))
      val body =
        Let(value, ArrayRef(a, Ref(idx, TInt32)),
          Let(m, GetField(Ref(accum, tAccum), "m"),
            If(IsNA(Ref(value, t)),
              Ref(accum, tAccum),
              If(IsNA(Ref(m, t)),
                updateAccum(Ref(value, t), Ref(idx, TInt32)),
                If(ApplyComparisonOp(op(t), Ref(value, t), Ref(m, t)),
                  updateAccum(Ref(value, t), Ref(idx, TInt32)),
                  Ref(accum, tAccum))))))
      GetField(StreamFold(
        StreamRange(I32(0), ArrayLen(a), I32(1)),
        NA(tAccum),
        accum,
        idx,
        body
      ), "midx")
    }
    registerIR1("argmin", TArray(tv("T")), TInt32)((_, a) => argF(a, LT(_)))
    registerIR1("argmax", TArray(tv("T")), TInt32)((_, a) => argF(a, GT(_)))
    // Like argF, but additionally counts ties for the extremum and returns a
    // missing index unless the extremum is unique (count == 1).
    def uniqueIndex(a: IR, op: (Type) => ComparisonOp[Boolean]): IR = {
      val t = coerce[TArray](a.typ).elementType
      val tAccum = TStruct("m" -> t, "midx" -> TInt32, "count" -> TInt32)
      val accum = genUID()
      val value = genUID()
      val m = genUID()
      val idx = genUID()
      val result = genUID()
      def updateAccum(m: IR, midx: IR, count: IR): IR =
        MakeStruct(FastSeq("m" -> m, "midx" -> midx, "count" -> count))
      val body =
        Let(value, ArrayRef(a, Ref(idx, TInt32)),
          Let(m, GetField(Ref(accum, tAccum), "m"),
            If(IsNA(Ref(value, t)),
              Ref(accum, tAccum),
              If(IsNA(Ref(m, t)),
                updateAccum(Ref(value, t), Ref(idx, TInt32), I32(1)),
                If(ApplyComparisonOp(op(t), Ref(value, t), Ref(m, t)),
                  updateAccum(Ref(value, t), Ref(idx, TInt32), I32(1)),
                  If(ApplyComparisonOp(EQ(t), Ref(value, t), Ref(m, t)),
                    updateAccum(
                      Ref(value, t),
                      Ref(idx, TInt32),
                      ApplyBinaryPrimOp(Add(), GetField(Ref(accum, tAccum), "count"), I32(1))),
                    Ref(accum, tAccum)))))))
      Let(result, StreamFold(
        StreamRange(I32(0), ArrayLen(a), I32(1)),
        NA(tAccum),
        accum,
        idx,
        body
      ), If(ApplyComparisonOp(EQ(TInt32), GetField(Ref(result, tAccum), "count"), I32(1)),
        GetField(Ref(result, tAccum), "midx"),
        NA(TInt32)))
    }
    registerIR1("uniqueMinIndex", TArray(tv("T")), TInt32)((_, a) => uniqueIndex(a, LT(_)))
    registerIR1("uniqueMaxIndex", TArray(tv("T")), TInt32)((_, a) => uniqueIndex(a, GT(_)))
    // Python-style indexing: a negative index counts from the end of the array.
    registerIR3("indexArray", TArray(tv("T")), TInt32, TString, tv("T")) { (_, a, i, s) =>
      ArrayRef(
        a,
        If(ApplyComparisonOp(LT(TInt32), i, I32(0)),
          ApplyBinaryPrimOp(Add(), ArrayLen(a), i),
          i), s)
    }
    // a[i:] with Python semantics (negative start clamps at 0).
    registerIR2("sliceRight", TArray(tv("T")), TInt32, TArray(tv("T"))) { (_, a, i) =>
      val idx = genUID()
      ToArray(StreamMap(
        StreamRange(
          If(ApplyComparisonOp(LT(TInt32), i, I32(0)),
            UtilFunctions.intMax(
              ApplyBinaryPrimOp(Add(), ArrayLen(a), i),
              I32(0)),
            i),
          ArrayLen(a),
          I32(1)),
        idx,
        ArrayRef(a, Ref(idx, TInt32))))
    }
    // a[:i] with Python semantics (positive end clamps at the array length).
    registerIR2("sliceLeft", TArray(tv("T")), TInt32, TArray(tv("T"))) { (_, a, i) =>
      val idx = genUID()
      If(IsNA(a), a,
        ToArray(StreamMap(
          StreamRange(
            I32(0),
            If(ApplyComparisonOp(LT(TInt32), i, I32(0)),
              ApplyBinaryPrimOp(Add(), ArrayLen(a), i),
              UtilFunctions.intMin(i, ArrayLen(a))),
            I32(1)),
          idx,
          ArrayRef(a, Ref(idx, TInt32)))))
    }
    // a[i:j], combining the normalization rules of sliceRight and sliceLeft.
    registerIR3("slice", TArray(tv("T")), TInt32, TInt32, TArray(tv("T"))) { case(_, a, i, j) =>
      val idx = genUID()
      ToArray(StreamMap(
        StreamRange(
          If(ApplyComparisonOp(LT(TInt32), i, I32(0)),
            UtilFunctions.intMax(
              ApplyBinaryPrimOp(Add(), ArrayLen(a), i),
              I32(0)),
            i),
          If(ApplyComparisonOp(LT(TInt32), j, I32(0)),
            ApplyBinaryPrimOp(Add(), ArrayLen(a), j),
            UtilFunctions.intMin(j, ArrayLen(a))),
          I32(1)),
        idx,
        ArrayRef(a, Ref(idx, TInt32))))
    }
    registerIR1("flatten", TArray(TArray(tv("T"))), TArray(tv("T"))) { (_, a) =>
      val elt = Ref(genUID(), coerce[TArray](a.typ).elementType)
      ToArray(StreamFlatMap(ToStream(a), elt.name, ToStream(elt)))
    }
    // Pearson correlation of two Float64 arrays, emitted directly as JVM
    // bytecode: one pass accumulates n, sums, squared sums, and cross sum over
    // positions where both elements are defined; mismatched lengths are fatal.
    registerEmitCode2("corr", TArray(TFloat64), TArray(TFloat64), TFloat64, {
      (_: Type, _: PType, _: PType) => PFloat64()
    }) { case (r, rt, EmitCode(setup1, m1, v1), EmitCode(setup2, m2, v2)) =>
      val t1 = v1.pt.asInstanceOf[PArray]
      val t2 = v2.pt.asInstanceOf[PArray]
      val a1 = r.mb.newLocal[Long]()
      val a2 = r.mb.newLocal[Long]()
      val xSum = r.mb.newLocal[Double]()
      val ySum = r.mb.newLocal[Double]()
      val xSqSum = r.mb.newLocal[Double]()
      val ySqSum = r.mb.newLocal[Double]()
      val xySum = r.mb.newLocal[Double]()
      val n = r.mb.newLocal[Int]()
      val i = r.mb.newLocal[Int]()
      val l1 = r.mb.newLocal[Int]()
      val l2 = r.mb.newLocal[Int]()
      val x = r.mb.newLocal[Double]()
      val y = r.mb.newLocal[Double]()
      EmitCode(
        Code(
          setup1,
          setup2),
        m1 || m2 || Code(
          a1 := v1.tcode[Long],
          a2 := v2.tcode[Long],
          l1 := t1.loadLength(a1),
          l2 := t2.loadLength(a2),
          l1.cne(l2).mux(
            Code._fatal[Boolean](new CodeString("'corr': cannot compute correlation between arrays of different lengths: ")
              .concat(l1.toS)
              .concat(", ")
              .concat(l2.toS)),
            l1.ceq(0))),
        PCode(rt, Code(
          i := 0,
          n := 0,
          xSum := 0d,
          ySum := 0d,
          xSqSum := 0d,
          ySqSum := 0d,
          xySum := 0d,
          Code.whileLoop(i < l1,
            Code(
              (t1.isElementDefined(a1, i) && t2.isElementDefined(a2, i)).mux(
                Code(
                  x := Region.loadDouble(t1.loadElement(a1, i)),
                  xSum := xSum + x,
                  xSqSum := xSqSum + x * x,
                  y := Region.loadDouble(t2.loadElement(a2, i)),
                  ySum := ySum + y,
                  ySqSum := ySqSum + y * y,
                  xySum := xySum + x * y,
                  n := n + 1
                ),
                Code._empty
              ),
              i := i + 1
            )
          ),
          (n.toD * xySum - xSum * ySum) / Code.invokeScalaObject1[Double, Double](
            MathFunctions.mathPackageClass,
            "sqrt",
            (n.toD * xSqSum - xSum * xSum) * (n.toD * ySqSum - ySum * ySum))
        )
      ))
  }
}
}
| danking/hail | hail/src/main/scala/is/hail/expr/ir/functions/ArrayFunctions.scala | Scala | mit | 13,576 |
abstract class Monoid[a] { // minimal monoid interface used by the implicit-resolution test below
  def unit: a // identity element
}
object test { // negative compilation test: must be rejected by the compiler
  def sum[a](xs: List)(implicit m: Monoid[a]): a = // error expected here: `List` is used without its type argument
    if (xs.isEmpty) m.unit else xs.head
  sum(List(1,2,3))
}
| som-snytt/dotty | tests/untried/neg/t550.scala | Scala | apache-2.0 | 170 |
package com.itszuvalex.itszulib
import com.itszuvalex.itszulib.network.PacketHandler
import com.itszuvalex.itszulib.proxy.ProxyCommon
import com.itszuvalex.itszulib.testing.{BlockLocTrackerTest, BlockPortalTest, BlockTankTest, ItemPreviewable}
import cpw.mods.fml.common.Mod.EventHandler
import cpw.mods.fml.common.event.{FMLInitializationEvent, FMLInterModComms, FMLPostInitializationEvent, FMLPreInitializationEvent}
import cpw.mods.fml.common.network.NetworkRegistry
import cpw.mods.fml.common.registry.GameRegistry
import cpw.mods.fml.common.{Mod, SidedProxy}
import net.minecraft.creativetab.CreativeTabs
import org.apache.logging.log4j.LogManager
/**
* Created by Christopher on 4/5/2015.
*/
// Forge mod entry point for ItszuLib: wires up packet handling and the GUI
// handler during pre-init, and registers the library's test blocks/items
// during init.
@Mod(modid = ItszuLib.ID, name = ItszuLib.ID, version = ItszuLib.VERSION, modLanguage = "scala")
object ItszuLib {
  final val ID = "ItszuLib"
  final val VERSION = Version.FULL_VERSION
  final val logger = LogManager.getLogger(ID)
  // Side-specific proxy instance, injected by FML based on the physical side.
  @SidedProxy(clientSide = "com.itszuvalex.itszulib.proxy.ProxyClient",
              serverSide = "com.itszuvalex.itszulib.proxy.ProxyServer")
  var proxy: ProxyCommon = null
  @EventHandler def preInit(event: FMLPreInitializationEvent): Unit = {
    PacketHandler.init()
    //    PlayerUUIDTracker.init()
    //    PlayerUUIDTracker.setFile(new File())
    proxy.init()
    NetworkRegistry.INSTANCE.registerGuiHandler(this, proxy)
  }
  // Registers the test blocks and the previewable test item.
  @EventHandler def load(event: FMLInitializationEvent): Unit = {
    GameRegistry.registerBlock(new BlockPortalTest, "BlockPortalTest").setCreativeTab(CreativeTabs.tabBlock)
    GameRegistry.registerBlock(new BlockLocTrackerTest, "BlockLocTrackerTest").setCreativeTab(CreativeTabs.tabBlock)
    GameRegistry.registerBlock(new BlockTankTest, "BlockTankTest").setCreativeTab(CreativeTabs.tabBlock)
    val prev = new ItemPreviewable
    prev.setCreativeTab(CreativeTabs.tabDecorations)
    GameRegistry.registerItem(prev, "TilePreviewable")
  }
  @EventHandler def postInit(event: FMLPostInitializationEvent): Unit = {
  }
  // Forwards inter-mod messages to the InterModComms handler.
  @EventHandler def imcCallback(event: FMLInterModComms.IMCEvent) {
    InterModComms.imcCallback(event)
  }
}
| BlockWorker/ItszuLib | src/main/scala/com/itszuvalex/itszulib/ItszuLib.scala | Scala | gpl-2.0 | 2,118 |
package es.pirita.Iteratee
import play.api.libs.iteratee.{Enumeratee, Iteratee, Enumerator}
import scala.concurrent.{Future, Await}
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
/**
* Enumeratee Example
*
* @author Ignacio Navarro Martín
* @version 1.0
*/
object Enumeratee_9_Example extends App {
  // Source: the integers 1 through 9.
  val intSource: Enumerator[Int] = Enumerator.enumerate(List.range(1, 10))
  // Sink: folds every String chunk into a Float total, adding 0.1f per element.
  val floatSum: Iteratee[String, Float] =
    Iteratee.fold[String, Float](0f)((acc, el) => acc + el.toFloat + 0.1f)
  // Adapter: converts the Int stream into a String stream.
  val intToString: Enumeratee[Int, String] = Enumeratee.map[Int](_.toString)
  // Compose the source with the adapter, then run the sink over the result.
  val stringSource: Enumerator[String] = intSource &> intToString
  val futResult: Future[Float] = stringSource |>>> floatSum
  assert(Await.result(futResult, 10 seconds) == 45.899998f)
}
| pirita/IterateePresentation | src/main/scala/es/pirita/Iteratee/Enumeratee_9_Example.scala | Scala | apache-2.0 | 891 |
import scala.language.reflectiveCalls
object Test extends App {
  // `foo` has a structural type; the call below is dispatched reflectively
  // (hence the reflectiveCalls language import above).
  val foo = new {
    def apply(args : String*) = args foreach println
  }
  foo("var", "args")
}
| som-snytt/dotty | tests/disabled/reflect/run/t1141.scala | Scala | apache-2.0 | 166 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.aggregate
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.optimizer.NormalizeFloatingNumbers
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.streaming.{StateStoreRestoreExec, StateStoreSaveExec}
/**
* Utility functions used by the query planner to convert our plan to new aggregation code path.
*/
object AggUtils {
  /**
   * Instantiates the physical operator for one aggregation phase, choosing the
   * fastest implementation the aggregate buffer supports: hash aggregation
   * first, then (when enabled by the conf) object-hash aggregation, and
   * finally sort-based aggregation as the general fallback.
   */
  private def createAggregate(
      requiredChildDistributionExpressions: Option[Seq[Expression]] = None,
      groupingExpressions: Seq[NamedExpression] = Nil,
      aggregateExpressions: Seq[AggregateExpression] = Nil,
      aggregateAttributes: Seq[Attribute] = Nil,
      initialInputBufferOffset: Int = 0,
      resultExpressions: Seq[NamedExpression] = Nil,
      child: SparkPlan): SparkPlan = {
    // Hash aggregation requires that every aggregation buffer attribute is
    // supported by the unsafe-row-based hash map.
    val useHash = HashAggregateExec.supportsAggregate(
      aggregateExpressions.flatMap(_.aggregateFunction.aggBufferAttributes))
    if (useHash) {
      HashAggregateExec(
        requiredChildDistributionExpressions = requiredChildDistributionExpressions,
        groupingExpressions = groupingExpressions,
        aggregateExpressions = aggregateExpressions,
        aggregateAttributes = aggregateAttributes,
        initialInputBufferOffset = initialInputBufferOffset,
        resultExpressions = resultExpressions,
        child = child)
    } else {
      val objectHashEnabled = child.sqlContext.conf.useObjectHashAggregation
      val useObjectHash = ObjectHashAggregateExec.supportsAggregate(aggregateExpressions)
      if (objectHashEnabled && useObjectHash) {
        ObjectHashAggregateExec(
          requiredChildDistributionExpressions = requiredChildDistributionExpressions,
          groupingExpressions = groupingExpressions,
          aggregateExpressions = aggregateExpressions,
          aggregateAttributes = aggregateAttributes,
          initialInputBufferOffset = initialInputBufferOffset,
          resultExpressions = resultExpressions,
          child = child)
      } else {
        SortAggregateExec(
          requiredChildDistributionExpressions = requiredChildDistributionExpressions,
          groupingExpressions = groupingExpressions,
          aggregateExpressions = aggregateExpressions,
          aggregateAttributes = aggregateAttributes,
          initialInputBufferOffset = initialInputBufferOffset,
          resultExpressions = resultExpressions,
          child = child)
      }
    }
  }
  /**
   * Plans an aggregation with no DISTINCT aggregate functions as two physical
   * phases: a partial aggregation over the child's existing distribution,
   * followed by a final aggregation that requires the rows to be distributed
   * by the grouping attributes.
   */
  def planAggregateWithoutDistinct(
      groupingExpressions: Seq[NamedExpression],
      aggregateExpressions: Seq[AggregateExpression],
      resultExpressions: Seq[NamedExpression],
      child: SparkPlan): Seq[SparkPlan] = {
    // (The choice of physical operator is made inside createAggregate.)
    // 1. Create an Aggregate Operator for partial aggregations.
    val groupingAttributes = groupingExpressions.map(_.toAttribute)
    val partialAggregateExpressions = aggregateExpressions.map(_.copy(mode = Partial))
    val partialAggregateAttributes =
      partialAggregateExpressions.flatMap(_.aggregateFunction.aggBufferAttributes)
    val partialResultExpressions =
      groupingAttributes ++
        partialAggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes)
    val partialAggregate = createAggregate(
        requiredChildDistributionExpressions = None,
        groupingExpressions = groupingExpressions,
        aggregateExpressions = partialAggregateExpressions,
        aggregateAttributes = partialAggregateAttributes,
        initialInputBufferOffset = 0,
        resultExpressions = partialResultExpressions,
        child = child)
    // 2. Create an Aggregate Operator for final aggregations.
    val finalAggregateExpressions = aggregateExpressions.map(_.copy(mode = Final))
    // The attributes of the final aggregation buffer, which is presented as input to the result
    // projection:
    val finalAggregateAttributes = finalAggregateExpressions.map(_.resultAttribute)
    val finalAggregate = createAggregate(
        requiredChildDistributionExpressions = Some(groupingAttributes),
        groupingExpressions = groupingAttributes,
        aggregateExpressions = finalAggregateExpressions,
        aggregateAttributes = finalAggregateAttributes,
        initialInputBufferOffset = groupingExpressions.length,
        resultExpressions = resultExpressions,
        child = partialAggregate)
    finalAggregate :: Nil
  }
  /**
   * Plans an aggregation containing one family of DISTINCT aggregate functions
   * (all sharing the same distinct column expressions) using four phases:
   * partial aggregation grouped by (grouping keys ++ distinct columns), a
   * partial merge after redistributing on those same keys, a partial
   * aggregation of the rewritten distinct functions (whose inputs are now
   * de-duplicated), and a final aggregation producing the result projection.
   */
  def planAggregateWithOneDistinct(
      groupingExpressions: Seq[NamedExpression],
      functionsWithDistinct: Seq[AggregateExpression],
      functionsWithoutDistinct: Seq[AggregateExpression],
      resultExpressions: Seq[NamedExpression],
      child: SparkPlan): Seq[SparkPlan] = {
    // functionsWithDistinct is guaranteed to be non-empty. Even though it may contain more than one
    // DISTINCT aggregate function, all of those functions will have the same column expressions.
    // For example, it would be valid for functionsWithDistinct to be
    // [COUNT(DISTINCT foo), MAX(DISTINCT foo)], but [COUNT(DISTINCT bar), COUNT(DISTINCT foo)] is
    // disallowed because those two distinct aggregates have different column expressions.
    val distinctExpressions = functionsWithDistinct.head.aggregateFunction.children
    val namedDistinctExpressions = distinctExpressions.map {
      case ne: NamedExpression => ne
      case other => Alias(other, other.toString)()
    }
    val distinctAttributes = namedDistinctExpressions.map(_.toAttribute)
    val groupingAttributes = groupingExpressions.map(_.toAttribute)

    // 1. Create an Aggregate Operator for partial aggregations.
    val partialAggregate: SparkPlan = {
      val aggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = Partial))
      val aggregateAttributes = aggregateExpressions.map(_.resultAttribute)
      // We will group by the original grouping expression, plus an additional expression for the
      // DISTINCT column. For example, for AVG(DISTINCT value) GROUP BY key, the grouping
      // expressions will be [key, value].
      createAggregate(
        groupingExpressions = groupingExpressions ++ namedDistinctExpressions,
        aggregateExpressions = aggregateExpressions,
        aggregateAttributes = aggregateAttributes,
        resultExpressions = groupingAttributes ++ distinctAttributes ++
          aggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes),
        child = child)
    }

    // 2. Create an Aggregate Operator for partial merge aggregations.
    val partialMergeAggregate: SparkPlan = {
      val aggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = PartialMerge))
      val aggregateAttributes = aggregateExpressions.map(_.resultAttribute)
      createAggregate(
        requiredChildDistributionExpressions =
          Some(groupingAttributes ++ distinctAttributes),
        groupingExpressions = groupingAttributes ++ distinctAttributes,
        aggregateExpressions = aggregateExpressions,
        aggregateAttributes = aggregateAttributes,
        initialInputBufferOffset = (groupingAttributes ++ distinctAttributes).length,
        resultExpressions = groupingAttributes ++ distinctAttributes ++
          aggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes),
        child = partialAggregate)
    }

    // 3. Create an Aggregate operator for partial aggregation (for distinct)
    val distinctColumnAttributeLookup = distinctExpressions.zip(distinctAttributes).toMap
    val rewrittenDistinctFunctions = functionsWithDistinct.map {
      // Children of an AggregateFunction with DISTINCT keyword has already
      // been evaluated. At here, we need to replace original children
      // to AttributeReferences.
      case agg @ AggregateExpression(aggregateFunction, mode, true, _) =>
        aggregateFunction.transformDown(distinctColumnAttributeLookup)
          .asInstanceOf[AggregateFunction]
      case agg =>
        throw new IllegalArgumentException(
          "Non-distinct aggregate is found in functionsWithDistinct " +
            s"at planAggregateWithOneDistinct: $agg")
    }

    val partialDistinctAggregate: SparkPlan = {
      val mergeAggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = PartialMerge))
      // The attributes of the final aggregation buffer, which is presented as input to the result
      // projection:
      val mergeAggregateAttributes = mergeAggregateExpressions.map(_.resultAttribute)
      val (distinctAggregateExpressions, distinctAggregateAttributes) =
        rewrittenDistinctFunctions.zipWithIndex.map { case (func, i) =>
          // We rewrite the aggregate function to a non-distinct aggregation because
          // its input will have distinct arguments.
          // We just keep the isDistinct setting to true, so when users look at the query plan,
          // they still can see distinct aggregations.
          val expr = AggregateExpression(func, Partial, isDistinct = true)
          // Use original AggregationFunction to lookup attributes, which is used to build
          // aggregateFunctionToAttribute
          val attr = functionsWithDistinct(i).resultAttribute
          (expr, attr)
        }.unzip

      val partialAggregateResult = groupingAttributes ++
          mergeAggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes) ++
          distinctAggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes)
      createAggregate(
        groupingExpressions = groupingAttributes,
        aggregateExpressions = mergeAggregateExpressions ++ distinctAggregateExpressions,
        aggregateAttributes = mergeAggregateAttributes ++ distinctAggregateAttributes,
        initialInputBufferOffset = (groupingAttributes ++ distinctAttributes).length,
        resultExpressions = partialAggregateResult,
        child = partialMergeAggregate)
    }

    // 4. Create an Aggregate Operator for the final aggregation.
    val finalAndCompleteAggregate: SparkPlan = {
      val finalAggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = Final))
      // The attributes of the final aggregation buffer, which is presented as input to the result
      // projection:
      val finalAggregateAttributes = finalAggregateExpressions.map(_.resultAttribute)

      val (distinctAggregateExpressions, distinctAggregateAttributes) =
        rewrittenDistinctFunctions.zipWithIndex.map { case (func, i) =>
          // We rewrite the aggregate function to a non-distinct aggregation because
          // its input will have distinct arguments.
          // We just keep the isDistinct setting to true, so when users look at the query plan,
          // they still can see distinct aggregations.
          val expr = AggregateExpression(func, Final, isDistinct = true)
          // Use original AggregationFunction to lookup attributes, which is used to build
          // aggregateFunctionToAttribute
          val attr = functionsWithDistinct(i).resultAttribute
          (expr, attr)
        }.unzip

      createAggregate(
        requiredChildDistributionExpressions = Some(groupingAttributes),
        groupingExpressions = groupingAttributes,
        aggregateExpressions = finalAggregateExpressions ++ distinctAggregateExpressions,
        aggregateAttributes = finalAggregateAttributes ++ distinctAggregateAttributes,
        initialInputBufferOffset = groupingAttributes.length,
        resultExpressions = resultExpressions,
        child = partialDistinctAggregate)
    }

    finalAndCompleteAggregate :: Nil
  }
  /**
   * Plans a streaming aggregation using the following progression:
   *  - Partial Aggregation
   *  - Shuffle
   *  - Partial Merge (now there is at most 1 tuple per group)
   *  - StateStoreRestore (now there is 1 tuple from this batch + optionally one from the previous)
   *  - PartialMerge (now there is at most 1 tuple per group)
   *  - StateStoreSave (saves the tuple for the next batch)
   *  - Complete (output the current result of the aggregation)
   *
   * Note: only non-distinct aggregate functions are supported here; distinct
   * aggregates are rejected earlier in the streaming planner.
   */
  def planStreamingAggregation(
      groupingExpressions: Seq[NamedExpression],
      functionsWithoutDistinct: Seq[AggregateExpression],
      resultExpressions: Seq[NamedExpression],
      stateFormatVersion: Int,
      child: SparkPlan): Seq[SparkPlan] = {

    val groupingAttributes = groupingExpressions.map(_.toAttribute)

    // Stage 1: partial aggregation before the shuffle. Output is the grouping
    // columns plus the raw aggregation buffers (inputAggBufferAttributes).
    val partialAggregate: SparkPlan = {
      val aggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = Partial))
      val aggregateAttributes = aggregateExpressions.map(_.resultAttribute)
      createAggregate(
        groupingExpressions = groupingExpressions,
        aggregateExpressions = aggregateExpressions,
        aggregateAttributes = aggregateAttributes,
        resultExpressions = groupingAttributes ++
          aggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes),
        child = child)
    }

    // Stage 2: after the shuffle (requiredChildDistributionExpressions forces
    // it), merge partial buffers so each group has at most one row per batch.
    val partialMerged1: SparkPlan = {
      val aggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = PartialMerge))
      val aggregateAttributes = aggregateExpressions.map(_.resultAttribute)
      createAggregate(
        requiredChildDistributionExpressions =
          Some(groupingAttributes),
        groupingExpressions = groupingAttributes,
        aggregateExpressions = aggregateExpressions,
        aggregateAttributes = aggregateAttributes,
        initialInputBufferOffset = groupingAttributes.length,
        resultExpressions = groupingAttributes ++
          aggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes),
        child = partialAggregate)
    }

    // Stage 3: restore the aggregation state persisted by the previous
    // micro-batch for the groups present in this batch.
    val restored = StateStoreRestoreExec(groupingAttributes, None, stateFormatVersion,
      partialMerged1)

    // Stage 4: merge this batch's buffers with the restored state (again
    // leaving at most one row per group).
    val partialMerged2: SparkPlan = {
      val aggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = PartialMerge))
      val aggregateAttributes = aggregateExpressions.map(_.resultAttribute)
      createAggregate(
        requiredChildDistributionExpressions =
          Some(groupingAttributes),
        groupingExpressions = groupingAttributes,
        aggregateExpressions = aggregateExpressions,
        aggregateAttributes = aggregateAttributes,
        initialInputBufferOffset = groupingAttributes.length,
        resultExpressions = groupingAttributes ++
          aggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes),
        child = restored)
    }
    // Note: stateId and returnAllStates are filled in later with preparation rules
    // in IncrementalExecution.
    val saved =
      StateStoreSaveExec(
        groupingAttributes,
        stateInfo = None,
        outputMode = None,
        eventTimeWatermark = None,
        stateFormatVersion = stateFormatVersion,
        partialMerged2)

    // Stage 5: final aggregation over the saved state, producing the
    // user-visible result expressions.
    val finalAndCompleteAggregate: SparkPlan = {
      val finalAggregateExpressions = functionsWithoutDistinct.map(_.copy(mode = Final))
      // The attributes of the final aggregation buffer, which is presented as input to the result
      // projection:
      val finalAggregateAttributes = finalAggregateExpressions.map(_.resultAttribute)

      createAggregate(
        requiredChildDistributionExpressions = Some(groupingAttributes),
        groupingExpressions = groupingAttributes,
        aggregateExpressions = finalAggregateExpressions,
        aggregateAttributes = finalAggregateAttributes,
        initialInputBufferOffset = groupingAttributes.length,
        resultExpressions = resultExpressions,
        child = saved)
    }

    finalAndCompleteAggregate :: Nil
  }
}
| aosagie/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggUtils.scala | Scala | apache-2.0 | 16,429 |
package org.scalafmt.dynamic
import scala.meta._
object PositionSyntax {

  /** Renders a compiler-style diagnostic: `file:line:col: severity: message`
    * followed by the offending source line(s) and a caret, or simply
    * `severity: message` when no position is available.
    */
  def formatMessage(pos: Position, severity: String, message: String): String =
    pos match {
      case Position.None =>
        s"$severity: $message"
      case _ =>
        // Separators reproduce the historical formatting exactly: severity is
        // preceded by a space only when non-empty; the message separator
        // depends on whether severity/message are empty or start on a new line.
        val severitySep = if (severity.isEmpty) "" else " "
        val messageSep =
          if (message.isEmpty) ""
          else if (severity.isEmpty) " "
          else if (message.startsWith("\n")) ":"
          else ": "
        pos.lineInput + severitySep + severity + messageSep + message + pos.rangeText
    }

  implicit class XtensionPositionsScalafix(private val pos: Position)
      extends AnyVal {

    /** True when `other` lies entirely within this position's range. */
    def contains(other: Position): Boolean =
      other.start >= pos.start && other.end <= pos.end

    def formatMessage(severity: String, message: String): String =
      PositionSyntax.formatMessage(pos, severity, message)

    /** Returns a formatted string of this position including
      * filename/line/caret (1-based line and column, trailing colon).
      */
    def lineInput: String =
      List(
        pos.input.syntax,
        (pos.startLine + 1).toString,
        (pos.startColumn + 1).toString
      ).mkString("", ":", ":")

    /** The range rendered as 1-based `line:col..line:col`. */
    def rangeNumber: String = {
      val from = s"${pos.startLine + 1}:${pos.startColumn + 1}"
      val to = s"${pos.endLine + 1}:${pos.endColumn + 1}"
      s"$from..$to"
    }

    /** Source excerpt for this position: empty when there is no position, a
      * single line plus caret for single-line ranges, a `> `-prefixed listing
      * for multi-line ranges.
      */
    def rangeText: String =
      pos match {
        case Position.None                     => ""
        case _ if pos.startLine == pos.endLine => lineTextAndCaret
        case _                                 => multilines
      }

    def lineTextAndCaret: String =
      "\n" + lineContent + "\n" + pos.lineCaret

    /** Every line spanned by the range, each prefixed with `\n> ` and clipped
      * to the range's columns on the first and last line.
      */
    def multilines: String =
      (pos.startLine to pos.endLine).map { line =>
        val fromColumn = if (line == pos.startLine) pos.startColumn else 0
        val toColumn = if (line == pos.endLine) pos.endColumn else Int.MaxValue
        "\n> " + lineContent(line, startColumn = fromColumn, endColumn = toColumn).text
      }.mkString

    /** Leading spaces up to the start column, then one caret per character of
      * a single-line range (a lone caret for empty or multi-line ranges).
      */
    def lineCaret: String =
      pos match {
        case Position.None => ""
        case _ =>
          val width = pos.end - pos.start
          val caret =
            if (pos.startLine == pos.endLine && width > 0) "^" * width
            else "^"
          (" " * pos.startColumn) + caret
      }

    // Builds a single-line sub-position of this position's input.
    private def lineContent(
        line: Int,
        startColumn: Int = 0,
        endColumn: Int = Int.MaxValue
    ): Position =
      Position.Range(
        pos.input,
        startLine = line,
        startColumn = startColumn,
        endLine = line,
        endColumn = endColumn
      )

    // Text of the range's first line (empty when there is no position).
    private def lineContent: String =
      pos match {
        case range: Position.Range => lineContent(range.startLine).text
        case Position.None         => ""
      }
  }
}
| scalameta/scalafmt | scalafmt-dynamic/src/test/scala/org/scalafmt/dynamic/PositionSyntax.scala | Scala | apache-2.0 | 3,087 |
/*
* Copyright 2014 Kate von Roeder (katevonroder at gmail dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.itsdamiya.legendary.cache
import play.api.libs.ws.{ WS, WSResponse }
import scala.concurrent.Future
import play.Logger
import play.api.Play.current
import scala.concurrent.duration.Duration
import play.api.libs.json.{ JsValue, Json }
import com.itsdamiya.legendary.utils.DefaultWebServices
import scala.concurrent.ExecutionContext.Implicits.global
import play.api.mvc._
object CacheableExternalWS extends Results with DefaultWebServices {

  /** Serves the JSON cached under `cacheKey` when present; otherwise performs
    * a GET on `url` (with the default headers), caches the transformed
    * response body for `timeToLive` seconds, and answers via `responder`.
    *
    * @param cacheKey          key under which the transformed body is cached
    * @param timeToLive        cache TTL in seconds
    * @param url               external URL to fetch on a cache miss
    * @param resultTransformer turns the raw response body into the cached JSON
    * @param responder         builds the HTTP result from a fresh response
    */
  def apply(cacheKey: String, timeToLive: Int, url: String)(resultTransformer: String => JsValue)(responder: WSResponse => SimpleResult): Future[SimpleResult] =
    Cache.getAs[JsValue](cacheKey).fold {
      // Cache miss: fetch from the external service, cache, then respond.
      Logger.debug(s"Cache miss for $cacheKey")
      WS.url(url).withDefaultHeaders().get().map { response =>
        Cache.set(cacheKey, resultTransformer(response.body), 0, timeToLive)
        responder(response)
      }
    } { cachedJson =>
      // Cache hit: answer immediately without touching the network.
      Logger.debug(s"Cache hit for $cacheKey")
      Future.successful(Ok(cachedJson))
    }

  /** Convenience overload taking a [[scala.concurrent.duration.Duration]],
    * parsing the body as JSON and replying with `Ok(response.json)`.
    */
  def apply(cacheKey: String, timeToLive: Duration, url: String): Future[SimpleResult] =
    apply(cacheKey, timeToLive.toSeconds.toInt, url)(body => Json.parse(body))(response => Ok(response.json))
}
| Damiya/legendary | Legendary-Core/app/com/itsdamiya/legendary/cache/CacheableExternalWS.scala | Scala | apache-2.0 | 1,912 |
package com.despegar.soffheap.snapshot
import com.despegar.soffheap.concurrent.CronThreadPoolExecutor
import java.util.concurrent.ThreadFactory
import java.util.concurrent.Executors
import com.despegar.soffheap.CronExpression
import scala.collection.JavaConverters._
class Loader[Key,Value](snapshot: SoffHeapSnapshot[Key, Value], dataSource: DataSource[Key,Value], diskPersistor: Option[DiskPersistor]) {

  // Single-threaded cron scheduler for periodic reloads; daemon threads so
  // the pool never keeps the JVM alive on shutdown.
  val cronExpressionPool = new CronThreadPoolExecutor(1, new ThreadFactory() {
    override def newThread(runnable: Runnable) = {
      val reloadThread = Executors.defaultThreadFactory().newThread(runnable)
      reloadThread.setName(s"SoffHeapSnapshotReloader-${snapshot.getName}")
      reloadThread.setDaemon(true)
      reloadThread
    }
  })

  /** Fetches the data (from disk when requested and available, otherwise from
    * the data source), swaps it into the snapshot, and persists it to disk
    * when a persistor is configured.
    */
  def load(shouldCheckDisk: Boolean = true) = {
    val loadedValues = getValues(shouldCheckDisk)
    snapshot.reload(loadedValues.asScala.toMap)
    diskPersistor.foreach(_.persist(loadedValues))
  }

  /** Re-fetches from the data source, bypassing any on-disk copy. */
  def reload() = load(false)

  // Prefer the on-disk copy only when disk checking is requested and the
  // configured persistor actually has data; otherwise hit the data source.
  private def getValues(shouldCheckDisk: Boolean): java.util.Map[Key, Value] =
    diskPersistor match {
      case Some(persistor) if shouldCheckDisk && persistor.hasData =>
        persistor.loadFromDisk.asInstanceOf[java.util.Map[Key, Value]]
      case _ =>
        dataSource.get()
    }

  /** Schedules periodic [[reload]]s when a cron expression is provided;
    * does nothing for `None`.
    */
  def scheduleReloadAt(cronExpression: Option[String]) = {
    cronExpression.foreach { expression =>
      val reloadTask = new Runnable() {
        override def run() = reload()
      }
      cronExpressionPool.schedule(reloadTask, new CronExpression(expression))
    }
  }
}
package org.scaladebugger.api.profiles.traits.info
import com.sun.jdi._
import java.util.NoSuchElementException
import scala.annotation.tailrec
import scala.util.Try
/**
* Represents the interface that needs to be implemented to provide
* the ability to grab various information for a specific debug profile.
*/
trait GrabInfoProfile {
/**
* Retrieves a object profile for the given JDI object reference.
*
* @param objectReference The JDI object reference with which to wrap in
* a object info profile
* @return Success containing the object profile, otherwise a failure
*/
def tryObject(objectReference: ObjectReference): Try[ObjectInfo] =
Try(`object`(objectReference))
/**
* Retrieves a object profile for the given JDI object reference.
*
* object
*
* @param objectReference The JDI object reference with which to wrap in
* a object info profile
* @return The new object info profile
*/
def `object`(objectReference: ObjectReference): ObjectInfo =
`object`(objectReference)
  /**
   * Retrieves all threads contained in the remote JVM.
   *
   * Exception-safe variant of [[threads]]: any error raised while
   * enumerating threads is captured in the returned [[scala.util.Try]].
   *
   * @return Success containing the collection of thread info profiles,
   *         otherwise a failure
   */
  def tryThreads: Try[Seq[ThreadInfo]] = Try(threads)

  /**
   * Retrieves all threads contained in the remote JVM.
   *
   * @return The collection of thread info profiles
   */
  def threads: Seq[ThreadInfo]

  /**
   * Retrieves a thread profile for the given JDI thread reference.
   *
   * Exception-safe variant of [[thread]].
   *
   * @param threadReference The JDI thread reference with which to wrap in
   *                        a thread info profile
   * @return Success containing the thread profile, otherwise a failure
   */
  def tryThread(threadReference: ThreadReference): Try[ThreadInfo] =
    Try(thread(threadReference))

  /**
   * Retrieves a thread profile for the given JDI thread reference.
   *
   * @param threadReference The JDI thread reference with which to wrap in
   *                        a thread info profile
   * @return The new thread info profile
   */
  def thread(threadReference: ThreadReference): ThreadInfo
/**
* Retrieves a thread profile for the thread reference whose name matches
* the provided name.
*
* @param name The name of the thread
* @return Success containing the thread profile if found, otherwise
* a failure
*/
def tryThread(name: String): Try[ThreadInfo] =
Try(thread(name))
/**
* Retrieves a thread profile for the thread reference whose name matches
* the provided name.
*
* @param name The name of the thread
* @return The profile of the matching thread, or throws an exception
*/
def thread(name: String): ThreadInfo = {
val t = threadOption(name)
if (t.isEmpty)
throw new NoSuchElementException(s"No thread named $name found!")
t.get
}
/**
* Retrieves a thread profile for the thread reference whose name matches
* the provided name.
*
* @param name The name of the thread
* @return Some profile of the matching thread, or None
*/
def threadOption(name: String): Option[ThreadInfo] = {
threads.find(_.name == name)
}
/**
* Retrieves a thread profile for the thread reference whose name matches
* the provided name and whose thread group has the specified name.
*
* @param threadName The name of the thread
* @param threadGroupName The name of the thread group
* @return Success containing the thread profile if found, otherwise
* a failure
*/
def tryThread(
threadName: String,
threadGroupName: String
): Try[ThreadInfo] = Try(thread(
threadGroupName = threadGroupName,
threadName = threadName
))
/**
* Retrieves a thread profile for the thread reference whose name matches
* the provided name and whose thread group has the specified name.
*
* @param threadName The name of the thread
* @param threadGroupName The name of the thread group
* @return The profile of the matching thread, or throws an exception
*/
def thread(
threadName: String,
threadGroupName: String
): ThreadInfo = {
val t = threadOption(
threadGroupName = threadGroupName,
threadName = threadName
)
if (t.isEmpty) throw new NoSuchElementException(
s"No thread named $threadName with thread group $threadGroupName found!"
)
t.get
}
/**
* Retrieves a thread profile for the thread reference whose name matches
* the provided name and whose thread group has the specified name.
*
* @param threadName The name of the thread
* @param threadGroupName The name of the thread group
* @return Some profile of the matching thread, or None
*/
def threadOption(
threadName: String,
threadGroupName: String
): Option[ThreadInfo] = {
threads.find(t =>
t.name == threadName && t.threadGroup.name == threadGroupName
)
}
/**
* Retrieves a thread profile for the thread reference whose unique id
* matches the provided id.
*
* @param threadId The id of the thread
* @return Success containing the thread profile if found, otherwise
* a failure
*/
def tryThread(threadId: Long): Try[ThreadInfo] =
Try(thread(threadId))
/**
* Retrieves a thread profile for the thread reference whose unique id
* matches the provided id.
*
* @param threadId The id of the thread
* @return The profile of the matching thread, or throws an exception
*/
def thread(threadId: Long): ThreadInfo = {
val t = threadOption(threadId)
if (t.isEmpty)
throw new NoSuchElementException(s"No thread with $threadId found!")
t.get
}
/**
* Retrieves a thread profile for the thread reference whose unique id
* matches the provided id.
*
* @param threadId The id of the thread
* @return Some profile of the matching thread, or None
*/
def threadOption(threadId: Long): Option[ThreadInfo] = {
threads.find(_.uniqueId == threadId)
}
  /**
   * Retrieves a thread group profile for the given JDI thread group
   * reference.
   *
   * Exception-safe variant of [[threadGroup]].
   *
   * @param threadGroupReference The JDI thread group reference with which to
   *                             wrap in a thread group info profile
   * @return Success containing the thread group profile if found, otherwise
   *         a failure
   */
  def tryThreadGroup(
    threadGroupReference: ThreadGroupReference
  ): Try[ThreadGroupInfo] = Try(threadGroup(threadGroupReference))

  /**
   * Retrieves a thread group profile for the given JDI thread group
   * reference.
   *
   * @param threadGroupReference The JDI thread group reference with which to
   *                             wrap in a thread group info profile
   * @return The profile of the matching thread group, or throws an exception
   */
  def threadGroup(
    threadGroupReference: ThreadGroupReference
  ): ThreadGroupInfo
/**
* Retrieves a thread group profile for the thread group reference whose
* unique id matches the provided id.
*
* @param threadGroupId The id of the thread group
* @return Success containing the thread group profile if found, otherwise
* a failure
*/
def tryThreadGroup(threadGroupId: Long): Try[ThreadGroupInfo] =
Try(threadGroup(threadGroupId))
/**
* Retrieves a thread group profile for the thread group reference whose
* unique id matches the provided id.
*
* @param threadGroupId The id of the thread group
* @return The profile of the matching thread group, or throws an exception
*/
def threadGroup(threadGroupId: Long): ThreadGroupInfo = {
val tg = threadGroupOption(threadGroupId)
if (tg.isEmpty) throw new NoSuchElementException(
s"No thread group with $threadGroupId found!")
tg.get
}
/**
* Retrieves a thread group profile for the thread group reference whose
* unique id matches the provided id.
*
* @param threadGroupId The id of the thread group
* @return Some profile of the matching thread group, or None
*/
def threadGroupOption(
threadGroupId: Long
): Option[ThreadGroupInfo] = {
findThreadGroupByPredicate(threadGroups, _.uniqueId == threadGroupId)
}
/**
* Retrieves a thread group profile for the thread group reference whose
* name matches the provided name.
*
* @param name The name of the thread group
* @return Success containing the thread group profile if found, otherwise
* a failure
*/
def tryThreadGroup(name: String): Try[ThreadGroupInfo] =
Try(threadGroup(name))
/**
* Retrieves a thread group profile for the thread group reference whose
* name matches the provided name.
*
* @param name The name of the thread group
* @return The profile of the matching thread group, or throws an exception
*/
def threadGroup(name: String): ThreadGroupInfo = {
val tg = threadGroupOption(name)
if (tg.isEmpty) throw new NoSuchElementException(
s"No thread group named $name found!")
tg.get
}
/**
* Retrieves a thread group profile for the thread group reference whose
* name matches the provided name.
*
* @param name The name of the thread group
* @return Some profile of the matching thread group, or None
*/
def threadGroupOption(
name: String
): Option[ThreadGroupInfo] = {
findThreadGroupByPredicate(threadGroups, _.name == name)
}
/**
* Recursively searches a collection of thread groups (and their subgroups)
* for a thread group that satisfies the predicate.
*
* @param threadGroups The initial collection of thread groups to search
* @param predicate The predicate used to find a matching thread group
* @return Some thread group if found, otherwise None
*/
@tailrec private def findThreadGroupByPredicate(
threadGroups: Seq[ThreadGroupInfo],
predicate: ThreadGroupInfo => Boolean
): Option[ThreadGroupInfo] = {
if (threadGroups.nonEmpty) {
val tg = threadGroups.find(predicate)
if (tg.nonEmpty) {
tg
} else {
findThreadGroupByPredicate(
threadGroups.flatMap(_.threadGroups),
predicate
)
}
} else {
None
}
}
  /**
   * Retrieves all thread groups contained in the remote JVM.
   *
   * Exception-safe variant of [[threadGroups]].
   *
   * @return Success containing the collection of thread group info profiles,
   *         otherwise a failure
   */
  def tryThreadGroups: Try[Seq[ThreadGroupInfo]] = Try(threadGroups)

  /**
   * Retrieves all thread groups contained in the remote JVM.
   *
   * @return The collection of thread group info profiles
   */
  def threadGroups: Seq[ThreadGroupInfo]

  /**
   * Retrieves all classes contained in the remote JVM in the form of
   * reference type information.
   *
   * Exception-safe variant of [[classes]].
   *
   * @return Success containing the collection of reference type info profiles,
   *         otherwise a failure
   */
  def tryClasses: Try[Seq[ReferenceTypeInfo]] = Try(classes)

  /**
   * Retrieves all classes contained in the remote JVM in the form of
   * reference type information.
   *
   * @return The collection of reference type info profiles
   */
  def classes: Seq[ReferenceTypeInfo]

  /**
   * Retrieves a reference type profile for the given JDI reference type.
   *
   * @return The reference type info profile wrapping the JDI instance
   */
  def `class`(referenceType: ReferenceType): ReferenceTypeInfo
/**
* Retrieves reference information for the class with the specified name.
*
* @param name The fully-qualified name of the class
* @return Success containing the reference type info profile for the class,
* otherwise a failure
*/
def tryClass(name: String): Try[ReferenceTypeInfo] = Try(`class`(name))
/**
* Retrieves reference information for the class with the specified name.
*
* @param name The fully-qualified name of the class
* @return The reference type info profile for the class
*/
def `class`(name: String): ReferenceTypeInfo = {
val c = classOption(name)
if (c.isEmpty)
throw new NoSuchElementException(s"Class with name '$name' not found!")
c.get
}
/**
* Retrieves reference information for the class with the specified name.
*
* @return Some reference type info profile for the class if found,
* otherwise None
*/
def classOption(name: String): Option[ReferenceTypeInfo] = {
classes.find(_.name == name)
}
  /**
   * Retrieves a field profile for the given JDI field.
   *
   * Exception-safe variant of [[field]].
   *
   * @param referenceType The reference type to associate with the field
   * @param field The JDI field with which to wrap in a variable info profile
   * @return Success containing the variable profile representing the field,
   *         otherwise a failure
   */
  def tryField(
    referenceType: ReferenceType,
    field: Field
  ): Try[FieldVariableInfo] = Try(this.field(referenceType, field))

  /**
   * Retrieves a field profile for the given JDI field.
   *
   * @param referenceType The reference type to associate with the field
   * @param field The JDI field with which to wrap in a variable info profile
   * @return The variable profile representing the field
   */
  def field(
    referenceType: ReferenceType,
    field: Field
  ): FieldVariableInfo

  /**
   * Retrieves a field profile for the given JDI field.
   *
   * Exception-safe variant of [[field]].
   *
   * @param referenceTypeInfo The information about the reference type to
   *                          associate with the field
   * @param field The JDI field with which to wrap in a variable info profile
   * @return Success containing the variable profile representing the field,
   *         otherwise a failure
   */
  def tryField(
    referenceTypeInfo: ReferenceTypeInfo,
    field: Field
  ): Try[FieldVariableInfo] = Try(this.field(referenceTypeInfo, field))

  /**
   * Retrieves a field profile for the given JDI field.
   *
   * Convenience overload that unwraps the profile to its underlying JDI
   * reference type and delegates to the JDI-based overload.
   *
   * @param referenceTypeInfo The information about the reference type to
   *                          associate with the field
   * @param field The JDI field with which to wrap in a variable info profile
   * @return The variable profile representing the field
   */
  def field(
    referenceTypeInfo: ReferenceTypeInfo,
    field: Field
  ): FieldVariableInfo =
    this.field(referenceTypeInfo.toJdiInstance, field)

  /**
   * Retrieves a field profile for the given JDI field.
   *
   * Exception-safe variant of [[field]].
   *
   * @param objectReference The object reference to associate with the field
   * @param field The JDI field with which to wrap in a variable info profile
   * @return Success containing the variable profile representing the field,
   *         otherwise a failure
   */
  def tryField(
    objectReference: ObjectReference,
    field: Field
  ): Try[FieldVariableInfo] = Try(this.field(objectReference, field))

  /**
   * Retrieves a field profile for the given JDI field.
   *
   * @param objectReference The object reference to associate with the field
   * @param field The JDI field with which to wrap in a variable info profile
   * @return The variable profile representing the field
   */
  def field(
    objectReference: ObjectReference,
    field: Field
  ): FieldVariableInfo

  /**
   * Retrieves a field profile for the given JDI field.
   *
   * Exception-safe variant of [[field]].
   *
   * @param objectInfo The information about the object to associate with
   *                   the field
   * @param field The JDI field with which to wrap in a variable info profile
   * @return Success containing the variable profile representing the field,
   *         otherwise a failure
   */
  def tryField(
    objectInfo: ObjectInfo,
    field: Field
  ): Try[FieldVariableInfo] = Try(this.field(objectInfo, field))

  /**
   * Retrieves a field profile for the given JDI field.
   *
   * Convenience overload that unwraps the profile to its underlying JDI
   * object reference and delegates to the JDI-based overload.
   *
   * @param objectInfo The information about the object to associate with
   *                   the field
   * @param field The JDI field with which to wrap in a variable info profile
   * @return The variable profile representing the field
   */
  def field(
    objectInfo: ObjectInfo,
    field: Field
  ): FieldVariableInfo = this.field(objectInfo.toJdiInstance, field)
  /**
   * Retrieves a local variable profile for the given JDI local variable.
   *
   * Exception-safe variant of [[localVariable]].
   *
   * @param stackFrame The stack frame to associate with the
   *                   local variable
   * @param localVariable The JDI local variable with which to wrap in a
   *                      variable info profile
   * @return Success containing the variable profile representing the
   *         local variable, otherwise a failure
   */
  def tryLocalVariable(
    stackFrame: StackFrame,
    localVariable: LocalVariable
  ): Try[VariableInfo] = Try(this.localVariable(
    stackFrame, localVariable
  ))

  /**
   * Retrieves a local variable profile for the given JDI local variable.
   *
   * @param stackFrame The stack frame to associate with the
   *                   local variable
   * @param localVariable The JDI local variable with which to wrap in a
   *                      variable info profile
   * @return The variable profile representing the local variable
   */
  def localVariable(
    stackFrame: StackFrame,
    localVariable: LocalVariable
  ): VariableInfo

  /**
   * Retrieves a local variable profile for the given JDI local variable.
   *
   * Exception-safe variant of [[localVariable]].
   *
   * @param stackFrameInfo The information about the stack frame to
   *                       associate with the local variable
   * @param localVariable The JDI local variable with which to wrap in a
   *                      variable info profile
   * @return Success containing the variable profile representing the
   *         local variable, otherwise a failure
   */
  def tryLocalVariable(
    stackFrameInfo: FrameInfo,
    localVariable: LocalVariable
  ): Try[VariableInfo] = Try(this.localVariable(
    stackFrameInfo, localVariable
  ))

  /**
   * Retrieves a local variable profile for the given JDI local variable.
   *
   * Convenience overload that unwraps the profile to its underlying JDI
   * stack frame and delegates to the JDI-based overload.
   *
   * @param stackFrameInfo The information about the stack frame to
   *                       associate with the local variable
   * @param localVariable The JDI local variable with which to wrap in a
   *                      variable info profile
   * @return The variable profile representing the local variable
   */
  def localVariable(
    stackFrameInfo: FrameInfo,
    localVariable: LocalVariable
  ): VariableInfo = this.localVariable(
    stackFrameInfo.toJdiInstance,
    localVariable
  )
  /**
   * Retrieves a location profile for the given JDI location.
   *
   * @param location The JDI location with which to wrap in a location
   *                 info profile
   * @return The new location info profile
   */
  def location(location: Location): LocationInfo

  /**
   * Retrieves a method profile for the given JDI method.
   *
   * @param method The JDI method with which to wrap in a method info profile
   * @return The new method info profile
   */
  def method(method: Method): MethodInfo

  /**
   * Retrieves a stack frame profile for the given JDI stack frame.
   *
   * @param stackFrame The JDI stack frame with which to wrap in a
   *                   frame info profile
   * @return The new frame info profile
   */
  def stackFrame(stackFrame: StackFrame): FrameInfo

  /**
   * Retrieves a type info profile for the given JDI type info.
   *
   * @param _type The JDI type with which to wrap in a type info profile
   * @return The new type info profile
   */
  def `type`(_type: Type): TypeInfo

  /**
   * Retrieves a value info profile for the given JDI value info.
   *
   * @param value The JDI value with which to wrap in a value info profile
   * @return The new value info profile
   */
  def value(value: Value): ValueInfo
}
| ensime/scala-debugger | scala-debugger-api/src/main/scala/org/scaladebugger/api/profiles/traits/info/GrabInfoProfile.scala | Scala | apache-2.0 | 19,779 |
package com.themillhousegroup.mondrian
import reactivemongo.api._
import scala.concurrent.Future
import play.api.libs.json.{ JsValue, JsObject, Json }
import play.api.libs.concurrent.Execution.Implicits._
import scala.language.existentials
import reactivemongo.play.json.collection.JSONCollection
import reactivemongo.play.json._
import play.modules.reactivemongo.ReactiveMongoApi
import play.api.Logger
abstract class MongoService(collectionName: String) {

  // Abstract ReactiveMongo entry point, supplied by the concrete service.
  val reactiveMongoApi: ReactiveMongoApi

  private val logger = Logger(classOf[MongoService])

  implicit val defaultContext = play.api.libs.concurrent.Execution.defaultContext

  val readPreference = ReadPreference.nearest

  // Resolved on each access so the collection handle follows the live DB.
  protected def theCollection: JSONCollection =
    reactiveMongoApi.db.collection[JSONCollection](collectionName)

  // Empty selector matching every document.
  protected val all = Json.obj()

  /** Builds a query over the collection, optionally with a projection. */
  protected def findWhere(jsQuery: JsValue, jsProjection: Option[JsValue] = None) =
    jsProjection match {
      case Some(projection) =>
        theCollection.find(jsQuery.as[JsObject], projection.as[JsObject])
      case None =>
        theCollection.find(jsQuery.as[JsObject])
    }

  protected def findAll = findWhere(all)

  protected def findAllWithProjection(jsProjection: JsValue) =
    findWhere(all, Some(jsProjection))

  def countWhere(jsQuery: JsValue): Future[Int] =
    theCollection.count(Some(jsQuery.as[JsObject]))

  def countAll: Future[Int] = theCollection.count(None)

  // Wraps a plain id string in MongoDB's extended-JSON ObjectId form.
  protected def idOf(id: String): JsValue = Json.obj("$oid" -> id)

  protected def idSelector(id: String): JsObject = Json.obj("_id" -> idOf(id))

  def deleteWhere(jsQuery: JsValue): Future[Boolean] =
    theCollection.remove(jsQuery.as[JsObject]).map(_.ok)

  /**
   * @param jsQuery the query that selects the objects to be deleted
   * @return the overall success of the command, and the number that were actually deleted
   */
  def deleteWhereAndCount(jsQuery: JsValue): Future[(Boolean, Int)] =
    theCollection.remove(jsQuery.as[JsObject]).map { writeResult =>
      (writeResult.ok, writeResult.n)
    }

  /**
   * @param id the identifier (in simple String form) of the object to be deleted
   * @return true iff the operation succeeded AND exactly one object was deleted
   */
  def deleteById(id: String): Future[Boolean] =
    deleteWhereAndCount(idSelector(id)).map {
      case (ok, deletedCount) =>
        val deletedExactlyOne = ok && deletedCount == 1
        logger.trace(s"Deletion status: OK: $ok Number actually deleted: $deletedCount; So returning $deletedExactlyOne")
        deletedExactlyOne
    }
}
| themillhousegroup/mondrian | src/main/scala/com/themillhousegroup/mondrian/MongoService.scala | Scala | mit | 2,427 |
/*
* JBoss, Home of Professional Open Source
* Copyright 2010 Red Hat Inc. and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.server.hotrod
import org.infinispan.manager.EmbeddedCacheManager
import test.HotRodTestingUtil._
import org.infinispan.server.hotrod.OperationStatus._
import org.testng.annotations.Test
import org.testng.Assert._
import org.infinispan.test.AbstractCacheTest._
import org.infinispan.configuration.cache.{CacheMode, ConfigurationBuilder}
/**
* Tests Hot Rod instances that are behind a proxy.
*
* @author Galder Zamarreño
* @since 4.1
*/
@Test(groups = Array("functional"), testName = "server.hotrod.HotRodProxyTest")
class HotRodProxyTest extends HotRodMultiNodeTest {

   private val proxyHost1 = "1.2.3.4"
   private val proxyHost2 = "2.3.4.5"
   private val proxyPort1 = 8123
   private val proxyPort2 = 9123

   override protected def cacheName: String = "hotRodProxy"

   override protected def createCacheConfig: ConfigurationBuilder = {
      val config = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, false)
      config.clustering().stateTransfer().fetchInMemoryState(true)
      config
   }

   override protected def protocolVersion = 10

   override protected def startTestHotRodServer(cacheManager: EmbeddedCacheManager) =
      startHotRodServer(cacheManager, proxyHost1, proxyPort1)

   override protected def startTestHotRodServer(cacheManager: EmbeddedCacheManager, port: Int) =
      startHotRodServer(cacheManager, port, proxyHost2, proxyPort2)

   def testTopologyWithProxiesReturned() {
      val resp = clients.head.ping(2, 0)
      assertStatus(resp, Success)
      val topoResp = resp.asTopologyAwareResponse
      assertEquals(topoResp.topologyId, currentServerTopologyId)
      assertEquals(topoResp.members.size, 2)
      // FIX: the previous version computed `exists` for each member and
      // discarded the Boolean, so a member missing from the server list was
      // never reported. Compute the expected addresses once and actually
      // assert membership for every topology member.
      val serverAddresses = servers.map(_.getAddress)
      topoResp.members.foreach { member =>
         assertTrue(serverAddresses.exists(_ == member))
      }
   }
}
| nmldiegues/stibt | infinispan/server/hotrod/src/test/scala/org/infinispan/server/hotrod/HotRodProxyTest.scala | Scala | apache-2.0 | 2,830 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2006-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala
package util.parsing.combinator
import java.util.regex.Pattern
import scala.util.matching.Regex
import scala.util.parsing.input._
import scala.collection.immutable.PagedSeq
import scala.language.implicitConversions
/** The ''most important'' differences between `RegexParsers` and
* [[scala.util.parsing.combinator.Parsers]] are:
*
* - `Elem` is defined to be [[scala.Char]]
* - There's an implicit conversion from [[java.lang.String]] to `Parser[String]`,
* so that string literals can be used as parser combinators.
* - There's an implicit conversion from [[scala.util.matching.Regex]] to `Parser[String]`,
* so that regex expressions can be used as parser combinators.
* - The parsing methods call the method `skipWhitespace` (defaults to `true`) and, if true,
* skip any whitespace before each parser is called.
* - Protected val `whiteSpace` returns a regex that identifies whitespace.
*
* For example, this creates a very simple calculator receiving `String` input:
*
* {{{
* object Calculator extends RegexParsers {
* def number: Parser[Double] = """\\d+(\\.\\d*)?""".r ^^ { _.toDouble }
* def factor: Parser[Double] = number | "(" ~> expr <~ ")"
* def term : Parser[Double] = factor ~ rep( "*" ~ factor | "/" ~ factor) ^^ {
* case number ~ list => (number /: list) {
* case (x, "*" ~ y) => x * y
* case (x, "/" ~ y) => x / y
* }
* }
* def expr : Parser[Double] = term ~ rep("+" ~ log(term)("Plus term") | "-" ~ log(term)("Minus term")) ^^ {
* case number ~ list => list.foldLeft(number) { // same as before, using alternate name for /:
* case (x, "+" ~ y) => x + y
* case (x, "-" ~ y) => x - y
* }
* }
*
* def apply(input: String): Double = parseAll(expr, input) match {
* case Success(result, _) => result
* case failure : NoSuccess => scala.sys.error(failure.msg)
* }
* }
* }}}
*/
trait RegexParsers extends Parsers {
  // Parsers in this trait consume individual characters.
  type Elem = Char
  /** Regex identifying the whitespace skipped before each parser.
   *  Override with an empty-matching regex (e.g. `"".r`) to disable skipping. */
  protected val whiteSpace = """\\s+""".r
  /** Whether whitespace is skipped; true unless `whiteSpace` is the empty regex. */
  def skipWhitespace = whiteSpace.toString.length > 0
  /** Method called to handle whitespace before parsers.
   *
   * It checks `skipWhitespace` and, if true, skips anything
   * matching `whiteSpace` starting from the current offset.
   *
   * @param source The input being parsed.
   * @param offset The offset into `source` from which to match.
   * @return The offset to be used for the next parser.
   */
  protected def handleWhiteSpace(source: java.lang.CharSequence, offset: Int): Int =
    if (skipWhitespace)
      (whiteSpace findPrefixMatchOf (new SubSequence(source, offset))) match {
        case Some(matched) => offset + matched.end
        case None => offset
      }
    else
      offset
  /** A parser that matches a literal string (after skipping whitespace). */
  implicit def literal(s: String): Parser[String] = new Parser[String] {
    def apply(in: Input) = {
      val source = in.source
      val offset = in.offset
      val start = handleWhiteSpace(source, offset)
      // Compare `s` character-by-character against the input from `start`.
      var i = 0
      var j = start
      while (i < s.length && j < source.length && s.charAt(i) == source.charAt(j)) {
        i += 1
        j += 1
      }
      if (i == s.length)
        // Whole literal matched: succeed and advance past it.
        Success(source.subSequence(start, j).toString, in.drop(j - offset))
      else {
        // Mismatch: report the first offending character (or end of input).
        val found = if (start == source.length()) "end of source" else "`"+source.charAt(start)+"'"
        Failure("`"+s+"' expected but "+found+" found", in.drop(start - offset))
      }
    }
  }
  /** A parser that matches a prefix of the input against `r` (after skipping whitespace). */
  implicit def regex(r: Regex): Parser[String] = new Parser[String] {
    def apply(in: Input) = {
      val source = in.source
      val offset = in.offset
      val start = handleWhiteSpace(source, offset)
      (r findPrefixMatchOf (new SubSequence(source, start))) match {
        case Some(matched) =>
          Success(source.subSequence(start, start + matched.end).toString,
                  in.drop(start + matched.end - offset))
        case None =>
          val found = if (start == source.length()) "end of source" else "`"+source.charAt(start)+"'"
          Failure("string matching regex `"+r+"' expected but "+found+" found", in.drop(start - offset))
      }
    }
  }
  /** `positioned` decorates a parser's result with the start position of the input it consumed.
   * If whitespace is being skipped, then it is skipped before the start position is recorded.
   *
   * @param p a `Parser` whose result conforms to `Positional`.
   * @return A parser that has the same behaviour as `p`, but which marks its result with the
   *         start position of the input it consumed after whitespace has been skipped, if it
   *         didn't already have a position.
   */
  override def positioned[T <: Positional](p: => Parser[T]): Parser[T] = {
    val pp = super.positioned(p)
    new Parser[T] {
      def apply(in: Input) = {
        val offset = in.offset
        val start = handleWhiteSpace(in.source, offset)
        pp(in.drop (start - offset))
      }
    }
  }
  /**
   * A parser generator delimiting whole phrases (i.e. programs).
   *
   * `phrase(p)` succeeds if `p` succeeds and no input is left over after `p`.
   *
   * @param p the parser that must consume all input for the resulting parser
   *          to succeed.
   *
   * @return  a parser that has the same result as `p`, but that only succeeds
   *          if `p` consumed all the input.
   */
  override def phrase[T](p: Parser[T]): Parser[T] =
    // The trailing empty regex lets trailing whitespace be skipped before
    // checking that the input is exhausted.
    super.phrase(p <~ "".r)
  /** Parse some prefix of reader `in` with parser `p`. */
  def parse[T](p: Parser[T], in: Reader[Char]): ParseResult[T] =
    p(in)
  /** Parse some prefix of character sequence `in` with parser `p`. */
  def parse[T](p: Parser[T], in: java.lang.CharSequence): ParseResult[T] =
    p(new CharSequenceReader(in))
  /** Parse some prefix of reader `in` with parser `p`. */
  def parse[T](p: Parser[T], in: java.io.Reader): ParseResult[T] =
    p(new PagedSeqReader(PagedSeq.fromReader(in)))
  /** Parse all of reader `in` with parser `p`. */
  def parseAll[T](p: Parser[T], in: Reader[Char]): ParseResult[T] =
    parse(phrase(p), in)
  /** Parse all of reader `in` with parser `p`. */
  def parseAll[T](p: Parser[T], in: java.io.Reader): ParseResult[T] =
    parse(phrase(p), in)
  /** Parse all of character sequence `in` with parser `p`. */
  def parseAll[T](p: Parser[T], in: java.lang.CharSequence): ParseResult[T] =
    parse(phrase(p), in)
}
| KhanSuleyman/scala-parser-combinators | src/main/scala/scala/util/parsing/combinator/RegexParsers.scala | Scala | bsd-3-clause | 7,000 |
package coursier.publish.fileset
import java.time.Instant
import coursier.publish.Content
import coursier.core.{ModuleName, Organization}
import coursier.publish.Pom.{Developer, License}
import coursier.util.Task
import scala.collection.compat._
/** An immutable set of files to publish, held as (path, content) pairs. */
final case class FileSet(elements: Seq[(Path, Content)]) {
  /** Merges `other` into this set. On a path clash the entry from `other`
    * wins, and stale checksums/signatures of the clashing path are dropped
    * first via `removeAll`. */
  def ++(other: FileSet): FileSet = {
    // complexity possibly not too optimal… (removeAll iterates on all elements)
    val cleanedUp = other.elements.map(_._1).foldLeft(this)(_.removeAll(_))
    FileSet(cleanedUp.elements ++ other.elements)
  }
  /** Drops every element whose file name ends in `.extension`. */
  def filterOutExtension(extension: String): FileSet = {
    val suffix = "." + extension
    FileSet(elements.filter(_._1.elements.lastOption.forall(!_.endsWith(suffix))))
  }
  def isEmpty: Boolean =
    elements.isEmpty
  /** Removes anything looking like a checksum or signature related to `path` */
  def removeAll(path: Path): FileSet = {
    // e.g. "foo.jar." matches "foo.jar.sha1", "foo.jar.asc", …
    val prefix = path.repr + "."
    val (remove, keep) = elements.partition {
      case (p, _) =>
        p == path || p.repr.startsWith(prefix)
    }
    if (remove.isEmpty)
      this
    else
      FileSet(keep)
  }
  /** Adds or replaces the entry at `path` (delegates to `++`). */
  def update(path: Path, content: Content): FileSet =
    ++(FileSet(Seq(path -> content)))
  /** Rewrites POM / maven-metadata content across the whole file set,
    * optionally changing organization, name, version and other POM fields.
    * Passing `None` for a field leaves the existing value untouched.
    * Fails the returned `Task` if the transformed groups cannot be merged back. */
  def updateMetadata(
    org: Option[Organization],
    name: Option[ModuleName],
    version: Option[String],
    licenses: Option[Seq[License]],
    developers: Option[Seq[Developer]],
    homePage: Option[String],
    gitDomainPath: Option[(String, String)],
    distMgmtRepo: Option[(String, String, String)],
    now: Instant
  ): Task[FileSet] = {
    val split = Group.split(this)
    // Step 1: rename organization / module name if requested.
    val adjustOrgName =
      if (org.isEmpty && name.isEmpty)
        Task.point(split)
      else {
        // Map from old (org, name) to new (org, name) for every group.
        val map = split.map {
          case m: Group.Module =>
            (m.organization, m.name) -> (org.getOrElse(m.organization), name.getOrElse(m.name))
          case m: Group.MavenMetadata =>
            (m.organization, m.name) -> (org.getOrElse(m.organization), name.getOrElse(m.name))
        }.toMap
        Task.gather.gather {
          split.map { m =>
            m.transform(map, now)
          }
        }
      }
    // Step 2: rewrite versions if requested (runs after the renaming above).
    val adjustVersion: Task[Seq[Group]] =
      version match {
        case Some(ver) =>
          adjustOrgName.flatMap { groups =>
            val map = groups
              .collect {
                case m: Group.Module => (m.organization, m.name) -> (m.version -> ver)
              }
              .toMap
            Task.sync.gather {
              groups.map { group =>
                group.transformVersion(map, now)
              }
            }
          }
        case None =>
          adjustOrgName
      }
    // Step 3: rewrite the remaining metadata fields in each group, then merge
    // the groups back into a single FileSet.
    adjustVersion.flatMap { l =>
      Task.gather.gather {
        l.map {
          case m: Group.Module =>
            m.updateMetadata(
              org,
              name,
              version,
              licenses,
              developers,
              homePage,
              gitDomainPath,
              distMgmtRepo,
              now
            )
          case m: Group.MavenMetadata =>
            m.updateContent(
              org,
              name,
              version,
              version.filter(!_.endsWith("SNAPSHOT")),
              version.toSeq,
              now
            )
        }
      }
    }.flatMap { groups =>
      Group.merge(groups) match {
        case Left(e) => Task.fail(new Exception(e))
        case Right(fs) => Task.point(fs)
      }
    }
  }
  /** Topologically sorts the file set so that modules are listed before the
    * modules that depend on them (maven-metadata entries follow their module).
    * Throws inside the returned `Task` if the dependency graph has a cycle. */
  def order: Task[FileSet] = {
    val split = Group.split(this)
    // Lazily emits modules whose (remaining) dependency list is empty, then
    // recurses on the rest with the emitted modules removed.
    def order(m: Map[Group.Module, Seq[coursier.core.Module]]): Stream[Group.Module] =
      if (m.isEmpty)
        Stream.empty
      else {
        val (now, later) = m.partition(_._2.isEmpty)
        if (now.isEmpty)
          // FIXME Report that properly
          throw new Exception(s"Found cycle in input modules\\n$m")
        val prefix = now
          .keys
          .toVector
          .sortBy(_.module.toString) // sort to make output deterministic
          .toStream
        val done = now.keySet.map(_.module)
        val later0 = later.mapValues(_.filterNot(done)).iterator.toMap
        prefix #::: order(later0)
      }
    val sortedModulesTask = Task.gather
      .gather {
        split.collect {
          case m: Group.Module =>
            m.dependenciesOpt.map((m, _))
        }
      }
      .map { l =>
        val m = l.toMap
        val current = m.keySet.map(_.module)
        // Only dependencies that live inside this file set matter for ordering.
        val interDependencies = m.mapValues(_.filter(current)).iterator.toMap
        order(interDependencies).toVector
      }
    val mavenMetadataMap = split
      .collect {
        case m: Group.MavenMetadata =>
          m.module -> m
      }
      .toMap // shouldn't discard values… assert it?
    sortedModulesTask.map { sortedModules =>
      val modules = sortedModules.map(_.module).toSet
      // Metadata entries with no matching module are appended at the end.
      val unknownMavenMetadata = mavenMetadataMap
        .view
        .filterKeys(!modules(_))
        .map(_._2)
        .toVector
        .sortBy(_.module.toString) // sort to make output deterministic
      val modulesWithMavenMetadata = sortedModules.flatMap { m =>
        m +: mavenMetadataMap.get(m.module).toSeq
      }
      val sortedGroups = (modulesWithMavenMetadata ++ unknownMavenMetadata)
        .map(_.ordered)
      Group.mergeUnsafe(sortedGroups)
    }
  }
}
object FileSet {
  /** A file set containing no elements at all. */
  val empty: FileSet = FileSet(Seq.empty)
}
| alexarchambault/coursier | modules/publish/src/main/scala/coursier/publish/fileset/FileSet.scala | Scala | apache-2.0 | 5,418 |
package io.gatling.amqp.data
// Mirrors the parameters of the RabbitMQ Java client call:
//   basicConsume(String queue, boolean autoAck, String consumerTag, boolean noLocal,
//                boolean exclusive, Map<String, Object> arguments, Consumer callback)
// Only `queue` and `autoAck` are modelled here; the remaining parameters are
// left to the code that actually performs the call.
/** Request to start consuming messages from `queue`.
  *
  * @param queue   name of the AMQP queue to consume from
  * @param autoAck whether the broker should consider messages acknowledged
  *                as soon as they are delivered
  */
case class ConsumeRequest(
  queue: String,
  autoAck: Boolean
) extends AmqpRequest
object ConsumeRequest {
  // Intentionally empty companion (placeholder for future factory helpers).
}
| maiha/gatling-amqp | src/main/scala/io/gatling/amqp/data/ConsumeRequest.scala | Scala | mit | 354 |
import stainless.annotation._
import stainless.collection._
import stainless.lang._
import stainless.lang.Option._
import stainless.lang.StaticChecks._
import stainless.proof.check
/** Stainless-verified model of fork/join task parallelism.
  *
  * `@ghost` members exist only for the verifier: each task declares the heap
  * references it may read (`readSet`) and write (`writeSet`), and `parallel`
  * is only callable when the two tasks' footprints do not interfere.
  */
object TaskParallelExample {
  /** A task with a declared heap footprint; `writeSet` must be a subset of `readSet`. */
  @mutable abstract class Task {
    @ghost def readSet: Set[AnyHeapRef]
    // `???` marks the spec as abstract; the `ensuring` clause is the contract.
    @ghost def writeSet: Set[AnyHeapRef] = { ??? : Set[AnyHeapRef] } ensuring (_.subsetOf(readSet))
    // Implementations may only read from readSet and write to writeSet.
    def run(): Unit = {
      reads(readSet)
      modifies(writeSet)
      ??? : Unit
    }
  }
  /** Runs two tasks "in parallel"; the precondition requires that neither
    * task writes anything the other reads, so sequential execution here is
    * observationally equivalent to a parallel one. */
  def parallel(task1: Task, task2: Task): Unit = {
    reads(task1.readSet ++ task2.readSet)
    modifies(task1.writeSet ++ task2.writeSet)
    require(
      (task1.writeSet & task2.readSet).isEmpty &&
      (task2.writeSet & task1.readSet).isEmpty
    )
    task1.run()
    task2.run()
    // task1 and task2 join before this function returns
  }
  /** A mutable heap cell holding a single Int. */
  case class IntBox(var value: Int) extends AnyHeapRef
  /** Task that increments `box`; reads and writes exactly that one cell. */
  case class IncTask(box: IntBox) extends Task {
    @ghost override def readSet: Set[AnyHeapRef] = Set[AnyHeapRef](box)
    @ghost override def writeSet: Set[AnyHeapRef] = Set[AnyHeapRef](box)
    @opaque
    override def run(): Unit = {
      reads(readSet)
      modifies(writeSet)
      // Mask to the low 30 bits before incrementing so the add cannot overflow.
      box.value = (box.value & ((1 << 30) - 1)) + 1
    }
  }
  /** Increments two distinct boxes in parallel; `box1 != box2` guarantees
    * the disjoint-footprint precondition of `parallel`. */
  def parallelInc(box1: IntBox, box2: IntBox): Unit = {
    reads(Set(box1, box2))
    modifies(Set(box1, box2))
    require(box1 != box2)
    val task1 = IncTask(box1)
    val task2 = IncTask(box2)
    parallel(task1, task2)
  }
}
| epfl-lara/stainless | frontends/benchmarks/full-imperative/valid/TaskParallel.scala | Scala | apache-2.0 | 1,471 |
package org.fedoraproject.mobile.util
import java.security.MessageDigest
object Hashing {
  /** Returns the lowercase hexadecimal MD5 digest of `s`.
    *
    * The string is encoded as UTF-8 before hashing so the result is identical
    * on every platform. (The previous implementation used `s.getBytes`, which
    * relies on the JVM's default charset and could yield different digests for
    * non-ASCII input depending on the host configuration.)
    *
    * Note: MD5 is cryptographically broken; suitable only for non-security
    * uses such as Gravatar-style lookups or cache keys.
    */
  def md5(s: String): String = {
    import java.nio.charset.StandardCharsets
    MessageDigest
      .getInstance("MD5")
      .digest(s.getBytes(StandardCharsets.UTF_8))
      .map("%02x".format(_))
      .mkString
  }
}
| fedora-infra/mobile | src/main/scala/util/Hashing.scala | Scala | mpl-2.0 | 240 |
package org.vaadin.addons.vaactor
import Forwarder._
import TestComponent._
import TestServlet._
import TestUI._
import akka.actor.{ ActorIdentity, ActorRef, Identify }
/** Browser-driven integration spec for Vaactor.
  *
  * NOTE(review): the tests are order-dependent — the first test assigns the
  * shared `forwarder` var, which every later test uses. The `Thread.sleep`
  * calls paper over Vaadin transport timing; flakiness here likely means the
  * delay is too short.
  */
class VaactorSpec extends WebBrowserSpec {
  // Remote forwarder actor, resolved by the first test and reused afterwards.
  var forwarder: ActorRef = _
  "remote ActorSystem should be found" in {
    // Resolve the forwarder via actor selection + Identify handshake.
    VaactorServlet.system.actorSelection(ForwarderPath) ! Identify("")
    val id = expectMsgType[ActorIdentity]
    id.ref should not be None
    id.ref.get.path.toString shouldBe ForwarderPath
    forwarder = id.ref.get // store for later use
    forwarder ! Register(TestActorName)
  }
  "remote Vaactor Actor should" - {
    "be created" in {
      forwarder ! Lookup(VaactorActorName)
      val reg = expectMsgType[Registered]
      reg.name shouldBe VaactorActorName
      // The proxy actor lives under the UI actor's path on the remote system.
      reg.actor.path.toString should startWith(RemoteSystemPath + "/user/ui/ui-UiActor-")
      reg.actor.path.toString should include("-VaactorProxyActor-")
    }
  }
  "remote Vaactor should" - {
    "set SessionState on ButtonClick" in {
      val testState = "$Hurzi"
      forwarder ! Lookup(SessionActorName)
      val reg = expectMsgType[Registered]
      // Session must start out empty before the click updates it.
      reg.actor ! VaactorSession.RequestSessionState
      expectMsgType[SessionState] shouldBe EmptySessionState
      textField(CompTextName).value = testState
      click on CompButtonName
      Thread.sleep(100) // maybe Click needs some time
      reg.actor ! VaactorSession.RequestSessionState
      expectMsgType[SessionState] shouldBe SessionState(testState)
    }
    "respond with Text-Content on RequestText(sender)" in {
      val testContent = "$Quaxi"
      forwarder ! Lookup(VaactorActorName)
      val reg = expectMsgType[Registered]
      textField(CompTextName).value = testContent
      click on CompButtonName // seems to trigger Vaadin transport
      Thread.sleep(100) // maybe Click needs some time
      reg.actor ! RequestText(self)
      expectMsgType[ReplyText] shouldBe ReplyText(testContent)
      lastSender shouldBe reg.actor
    }
    "respond with Text-Content on RequestText" in {
      val testContent = "$Murksi"
      forwarder ! Lookup(VaactorActorName)
      val reg = expectMsgType[Registered]
      textField(CompTextName).value = testContent
      click on CompButtonName // seems to trigger Vaadin transport
      Thread.sleep(100) // maybe Click needs some time
      reg.actor ! RequestText
      expectMsgType[ReplyText] shouldBe ReplyText(testContent)
      lastSender shouldBe reg.actor
    }
  }
  "remote VaactorComponentActor should" - {
    "be created and terminated" in {
      val testContent = "$Quaxi-dyn"
      // Dynamically add a component; its actor gets the NameSuffix variant names.
      click on AddComponentButtonName
      forwarder ! Lookup(VaactorActorName + NameSuffix)
      val reg = expectMsgType[Registered]
      textField(CompTextName + NameSuffix).value = testContent
      click on CompButtonName + NameSuffix // seems to trigger Vaadin transport
      reg.actor ! RequestText(self)
      expectMsgType[ReplyText] shouldBe ReplyText(testContent)
      lastSender shouldBe reg.actor
      // After removal the actor must be gone: no reply expected.
      click on RemoveComponentButtonName
      reg.actor ! RequestText(self)
      expectNoMessage(noMessageWait)
    }
    "subscribe and unsubscribe" in {
      forwarder ! Lookup(SessionActorName)
      val reg = expectMsgType[Registered]
      reg.actor ! Attach
      expectMsgType[Attach.type]
      // Adding a subscriber triggers another Attach; removing triggers Detach.
      click on AddSubscriberButtonName
      expectMsgType[Attach.type]
      click on RemoveSubscriberButtonName
      expectMsgType[Detach.type]
    }
  }
}
| otto-ringhofer/vaactor | test/src/test/scala/org/vaadin/addons/vaactor/VaactorSpec.scala | Scala | apache-2.0 | 3,463 |
/**
* Created by yangguo on 15/12/3.
*/
object StartHe {
  /** Placeholder stub: ignores its argument and always yields 12. */
  def toHexString(str: String): Int = 12
}
| guoyang2011/flashbird | src/test/scala-2.10/StartHe.scala | Scala | apache-2.0 | 99 |
import scala.language.experimental.macros
import scala.reflect.macros.blackbox.Context
/** Macro bundle that derives a `CsvFormat` instance for a case class. */
class CsvMacros(val c: Context) {
  import c.universe._
  /** Materialises `CsvFormat[A]` by delegating each case-class field to the
    * implicit `CsvFormat` of its type and concatenating the results with `++`.
    *
    * NOTE(review): `reduceLeft` throws at expansion time for a case class with
    * zero fields; also assumes an implicit `CsvFormat` exists for every field
    * type (a missing one surfaces as a compile error at the use site).
    */
  def csvFormatMacro[A: c.WeakTypeTag] = {
    val tpe = c.weakTypeOf[A]
    // One sub-expression per case accessor: format the field via its implicit.
    val subseqs = tpe.decls collect {
      case method: MethodSymbol if method.isCaseAccessor =>
        q"implicitly[CsvFormat[${method.returnType}]].apply(value.${method.name})"
    }
    // Concatenate the per-field fragments left-to-right.
    val appended =
      subseqs.reduceLeft((a, b) => q"$a ++ $b")
    q"""
      new CsvFormat[$tpe] {
        def apply(value: $tpe) = $appended
      }
    """
  }
}
| dbathily/essential-macros | csv/lib/src/main/scala/CsvMacros.scala | Scala | apache-2.0 | 572 |
/*
* This file is part of Sloth, an SMT solver for strings.
* Copyright (C) 2017 Philipp Ruemmer, Petr Janku
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
package strsolver
import ap.terfor.Term
/** Helpers for turning the solver's accepted words into printable strings. */
object StringTheoryUtil {
  // Counter backing getVariable; starts at -1 so the first variable is 0.
  // NOTE(review): plain var with no synchronization — not thread-safe.
  private var _numOfVariables: Int = -1;
  /** Resets the variable counter to `v`. */
  def setVariables(v: Int): Unit = _numOfVariables = v
  /** Increments the counter and returns the fresh variable index. */
  def getVariable: Int = {
    _numOfVariables += 1; _numOfVariables
  }
  // Currently unused (only referenced from commented-out code below).
  private def createUnicodeChar(num: Int): String = {
    "\\\\" + new String(Character.toChars(num))
  }
  // Rewrites escaped hex byte sequences into the corresponding character.
  // NOTE(review): appears unused within this object; verify callers before removing.
  private def getString(str: String) =
    """\\\\\\\\\\\\\\\\\\\\x\\\\([0-9a-f])\\\\([0-9a-f])""".r.replaceAllIn(str, { m =>
      "\\\\" + Integer.parseInt(m.group(1) + m.group(2), 16).toChar
    })
  // Converts one accepted word (list of character codes) into a string,
  // escaping everything outside a small set of safe regex characters.
  private def createString(list: List[Either[Int, Term]]): String = list match {
    case Nil => ""
    case x :: xs => (x match {
      case Left(c) if (c > 39 && c < 44) ||
                      (c > 90 && c < 95) ||
                      c == 45 ||
                      c == 46 ||
                      c == 63 ||
                      c == 124 => c.toChar
      case Left(c) => "\\\\" + c.toChar//createUnicodeChar(c)
      case Right(s) => throw new Exception("Don't know how to handle " + s + " in createString")
    }) + createString(xs)
  }
  /** Builds a string from the first accepted word of `it` (empty if none),
    * stripping a surrounding `\/ … \/` delimiter pair when present. */
  def createString(it: Iterator[List[Either[Int, Term]]]): String = {
    if (it.hasNext) {
      val str = createString(it.next())
      """^\\\\/(?s)(.*)\\\\/$""".r.findFirstMatchIn(str) match {
        case None =>
          str
        case Some(s) =>
          s.group(1)
      }
    } else
      ""
  }
} | uuverifiers/sloth | src/main/scala/StringTheoryUtil.scala | Scala | gpl-3.0 | 2,157 |
// Databricks notebook source exported at Sun, 19 Jun 2016 02:24:27 UTC
// MAGIC %md
// MAGIC
// MAGIC # [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/)
// MAGIC
// MAGIC
// MAGIC ### prepared by [Raazesh Sainudiin](https://nz.linkedin.com/in/raazesh-sainudiin-45955845) and [Sivanand Sivaram](https://www.linkedin.com/in/sivanand)
// MAGIC
// MAGIC *supported by* [](https://databricks.com/)
// MAGIC and
// MAGIC [](https://www.awseducate.com/microsite/CommunitiesEngageHome)
// COMMAND ----------
// MAGIC %md
// MAGIC The [html source url](https://raw.githubusercontent.com/raazesh-sainudiin/scalable-data-science/master/db/xtraResources/LinearAlgebra/LAlgCheatSheet.html) of this databricks HOMEWORK notebook and its recorded Uji :
// MAGIC
// MAGIC [](https://www.youtube.com/v/y6F-e6m1m2s?rel=0&autoplay=1&modestbranding=1&start=1547&end=1673)
// COMMAND ----------
// MAGIC %md
// MAGIC The [html source url](https://raw.githubusercontent.com/raazesh-sainudiin/scalable-data-science/master/db/week5/09_LinearAlgebraIntro/017_LAlgIntro.html) of the context/parent databricks notebook (for this databricks HOMEWORK notebook) and its recorded Uji :
// MAGIC
// MAGIC [](https://www.youtube.com/v/y6F-e6m1m2s?rel=0&autoplay=1&modestbranding=1&start=0&end=2634)
// COMMAND ----------
// MAGIC %md
// MAGIC This is from
// MAGIC * [https://github.com/scalanlp/breeze/wiki/Linear-Algebra-Cheat-Sheet](https://github.com/scalanlp/breeze/wiki/Linear-Algebra-Cheat-Sheet)
// COMMAND ----------
// MAGIC %md
// MAGIC ## Core Concepts
// MAGIC
// MAGIC Compared to other numerical computing environments, Breeze matrices
// MAGIC default to column major ordering, like Matlab, but indexing is 0-based,
// MAGIC like Numpy. Breeze has as its core concepts matrices and column vectors.
// MAGIC Row vectors are normally stored as matrices with a single row. This
// MAGIC allows for greater type safety with the downside that conversion of row
// MAGIC vectors to column vectors is performed using a transpose-slice
// MAGIC (`a.t(::,0)`) instead of a simple transpose (`a.t`).
// MAGIC
// MAGIC [[UFunc|Universal Functions]]s are very important in Breeze. Once you get a feel for the syntax (i.e. what's in this section), it might be worthwhile to read the first half of the UFunc wiki page. (You can skip the last half that involves implementing your own UFuncs...until you're ready to contribute to Breeze!)
// COMMAND ----------
// MAGIC %md
// MAGIC ## Quick Reference
// MAGIC
// MAGIC The following table assumes that Numpy is used with `from numpy import *` and Breeze with:
// COMMAND ----------
import breeze.linalg._
import breeze.numerics._
// COMMAND ----------
// MAGIC %md
// MAGIC ### Creation
// MAGIC
// MAGIC | Operation | Breeze | Matlab | Numpy |R
// MAGIC | ------------------------------- | --------------------------------------------- | ----------------- | ----------------|-------------------
// MAGIC | Zeroed matrix | `DenseMatrix.zeros[Double](n,m)` | `zeros(n,m)` | `zeros((n,m))` |`mat.or.vec(n, m)`
// MAGIC | Zeroed vector | `DenseVector.zeros[Double](n)` | `zeros(n,1)` | `zeros(n)` |`mat.or.vec(n, 1)`
// MAGIC | Vector of ones | `DenseVector.ones[Double](n)` | `ones(n,1)` | `ones(n)` |`mat.or.vec(n, 1) + 1`
// MAGIC | Vector of particular number | `DenseVector.fill(n){5.0}` | `ones(n,1) * 5` | `ones(n) * 5` |`(mat.or.vec(5, 1) + 1) * 5`
// MAGIC | range given stepsize | `DenseVector.range(start,stop,step)` or `Vector.rangeD(start,stop,step)` | ||`seq(start,stop,step)`
// MAGIC | n element range | `linspace(start,stop,numvals)` | `linspace(0,20,15)` | ||
// MAGIC | Identity matrix | `DenseMatrix.eye[Double](n)` | `eye(n)` | `eye(n)` |`identity(n)`
// MAGIC | Diagonal matrix | `diag(DenseVector(1.0,2.0,3.0))` | `diag([1 2 3])` | `diag((1,2,3))` |`diag(c(1,2,3))`
// MAGIC | Matrix inline creation | `DenseMatrix((1.0,2.0), (3.0,4.0))` | `[1 2; 3 4]` | `array([ [1,2], [3,4] ])` |`matrix(c(1,2,3,4), nrow = 2, ncol = 2)`
// MAGIC | Column vector inline creation | `DenseVector(1,2,3,4)` | `[1 2 3 4]` | `array([1,2,3,4])`|`c(1,2,3,4)`
// MAGIC | Row vector inline creation | `DenseVector(1,2,3,4).t` | `[1 2 3 4]'` | `array([1,2,3]).reshape(-1,1)` |`t(c(1,2,3,4))`
// MAGIC | Vector from function | `DenseVector.tabulate(3){i => 2*i}` | | ||
// MAGIC | Matrix from function | `DenseMatrix.tabulate(3, 2){case (i, j) => i+j}` | | ||
// MAGIC | Vector creation from array | `new DenseVector(Array(1, 2, 3, 4))` | | ||
// MAGIC | Matrix creation from array | `new DenseMatrix(2, 3, Array(11, 12, 13, 21, 22, 23))` | | ||
// MAGIC | Vector of random elements from 0 to 1 | `DenseVector.rand(4)` | | |`runif(4)` (requires stats library)
// MAGIC | Matrix of random elements from 0 to 1 | `DenseMatrix.rand(2, 3)` | | |`matrix(runif(6),2)` (requires stats library)
// COMMAND ----------
DenseMatrix.zeros[Double](2,3)
// COMMAND ----------
// MAGIC %py
// MAGIC import numpy as np
// COMMAND ----------
// MAGIC %py
// MAGIC np.zeros((2,3))
// COMMAND ----------
// MAGIC %r
// MAGIC mat.or.vec(2,3)
// COMMAND ----------
// MAGIC %md
// MAGIC #### Reading and writing Matrices
// MAGIC
// MAGIC Currently, Breeze supports IO for Matrices in two ways: Java serialization and csv. The latter comes from two functions: `breeze.linalg.csvread` and `breeze.linalg.csvwrite`. `csvread` takes a File, and optionally parameters for how the CSV file is delimited (e.g. if it is actually a tsv file, you can set tabs as the field delimiter.) and returns a [DenseMatrix](Data-Structures#densematrix). Similarly, `csvwrite` takes a File and a DenseMatrix, and writes the contents of a matrix to a file.
// MAGIC
// MAGIC ### Indexing and Slicing
// MAGIC
// MAGIC |Operation |Breeze |Matlab |Numpy |R
// MAGIC |--------------------------|-------------------------------------------------|-------------|-------------|-----------
// MAGIC |Basic Indexing |`a(0,1)` |`a(1,2)` |`a[0,1]` |`a[1,2]`
// MAGIC |Extract subset of vector |`a(1 to 4)` or `a(1 until 5)` or `a.slice(1,5)` |`a(2:5)` |`a[1:5]` |`a[2:5]`
// MAGIC |(negative steps) |`a(5 to 0 by -1)` |`a(6:-1:1)` |`a[5:0:-1]` |
// MAGIC |(tail) |`a(1 to -1)` |`a(2:end)` |`a[1:]` |`a[2:length(a)]` or ` tail(a,n=length(a)-1)`
// MAGIC |(last element) |`a( -1 )` |`a(end)` |`a[-1]` |`tail(a, n=1)`
// MAGIC |Extract column of matrix |`a(::, 2)` |`a(:,3)` |`a[:,2]` |`a[,2]`
// COMMAND ----------
val matrix = DenseMatrix.rand(2, 3)
// COMMAND ----------
val two_one = matrix(1, 0) // Remember the index starts from zero
// COMMAND ----------
// MAGIC %md
// MAGIC ### Other Manipulation
// MAGIC
// MAGIC |Operation |Breeze |Matlab |Numpy |R
// MAGIC |--------------------------------|-----------------------------------------|--------------------|-------------------------------|-------------
// MAGIC |Reshaping |`a.reshape(3, 2)` |`reshape(a, 3, 2)` |`a.reshape(3,2)` |`matrix(a,nrow=3,byrow=T)`
// MAGIC |Flatten matrix |`a.toDenseVector` (Makes copy) |`a(:)` |`a.flatten()` |`as.vector(a)`
// MAGIC |Copy lower triangle |`lowerTriangular(a)` |`tril(a)` |`tril(a)` |`a[upper.tri(a)] <- 0`
// MAGIC |Copy upper triangle |`upperTriangular(a)` |`triu(a)` |`triu(a)` |`a[lower.tri(a)] <- 0`
// MAGIC |Copy (note, no parens!!) |`a.copy` | |`np.copy(a)` |
// MAGIC |Create view of matrix diagonal |`diag(a)` |NA |`diagonal(a)` (Numpy >= 1.9) |
// MAGIC |Vector Assignment to subset |`a(1 to 4) := 5.0` |`a(2:5) = 5` |`a[1:4] = 5` |`a[2:5] = 5`
// MAGIC |Vector Assignment to subset |`a(1 to 4) := DenseVector(1.0,2.0,3.0)` |`a(2:5) = [1 2 3]` |`a[1:4] = array([1,2,3])` |`a[2:5] = c(1,2,3)`
// MAGIC |Matrix Assignment to subset |`a(1 to 3,1 to 3) := 5.0` |`a(2:4,2:4) = 5` |`a[1:3,1:3] = 5` |`a[2:4,2:4] = 5`
// MAGIC |Matrix Assignment to column |`a(::, 2) := 5.0` |`a(:,3) = 5` |`a[:,2] = 5` |`a[,3] = 5`
// MAGIC |Matrix vertical concatenate |`DenseMatrix.vertcat(a,b)` |`[a ; b]` |`vstack((a,b))` |`rbind(a, b)`
// MAGIC |Matrix horizontal concatenate |`DenseMatrix.horzcat(d,e)` |`[d , e]` |`hstack((d,e))` |`cbind(d, e)`
// MAGIC |Vector concatenate |`DenseVector.vertcat(a,b)` |`[a b]` |`concatenate((a,b))` |`c(a, b)`
// MAGIC
// MAGIC ### Operations
// MAGIC
// MAGIC |Operation |Breeze |Matlab |Numpy |R
// MAGIC |------------------------------------|-------------|-------------------------------------------------------|-------------|---------------------
// MAGIC |Elementwise addition |`a + b` |`a + b` |`a + b` |`a + b`
// MAGIC |Shaped/Matrix multiplication |`a * b` |`a * b` |`dot(a, b)` |`a %*% b`
// MAGIC |Elementwise multiplication |`a :* b` |`a .* b` |`a * b` |`a * b`
// MAGIC |Elementwise division |`a :/ b` |`a ./ b` |`a / b` |`a / b`
// MAGIC |Elementwise comparison |`a :< b` |`a < b` (gives matrix of 1/0 instead of true/false) |`a < b` |`a < b`
// MAGIC |Elementwise equals |`a :== b` |`a == b` (gives matrix of 1/0 instead of true/false) |`a == b` |`a == b`
// MAGIC |Inplace addition |`a :+= 1.0` |`a += 1` |`a += 1` |`a = a + 1`
// MAGIC |Inplace elementwise multiplication |`a :*= 2.0` |`a *= 2` |`a *= 2` |`a = a * 2`
// MAGIC |Vector dot product |`a dot b`, `a.t * b`<sup>†</sup> |`dot(a,b)` |`dot(a,b)` |`crossprod(a,b)`
// MAGIC |Elementwise max |`max(a)` |`max(a)` |`a.max()` |`max(a)`
// MAGIC |Elementwise argmax |`argmax(a)` |`[v i] = max(a); i` |`a.argmax()` |`which.max(a)`
// MAGIC
// MAGIC
// MAGIC ### Sum
// MAGIC
// MAGIC |Operation |Breeze |Matlab |Numpy |R
// MAGIC |----------------------------------------------|--------------------------------------|----------------|--------------|----------
// MAGIC |Elementwise sum |`sum(a)` |`sum(sum(a))` |`a.sum()` |`sum(a)`
// MAGIC |Sum down each column (giving a row vector) |`sum(a, Axis._0)` or `sum(a(::, *))` |`sum(a)` |`sum(a,0)` |`apply(a,2,sum)`
// MAGIC |Sum across each row (giving a column vector) |`sum(a, Axis._1)` or `sum(a(*, ::))` |`sum(a')` |`sum(a,1)` |`apply(a,1,sum)`
// MAGIC |Trace (sum of diagonal elements) |`trace(a)` |`trace(a)` |`a.trace()` |`sum(diag(a))`
// MAGIC |Cumulative sum |`accumulate(a)` |`cumsum(a)` |`a.cumsum()` |`apply(a,2,cumsum)`
// MAGIC
// MAGIC ### Boolean Operators
// MAGIC
// MAGIC |Operation |Breeze |Matlab |Numpy |R
// MAGIC |----------------------------------------------|--------------------------------------|------------------|------------|--------
// MAGIC |Elementwise and |`a :& b` |`a && b` |`a & b` |`a & b`
// MAGIC |Elementwise or |`a :| b` |`a || b` |`a | b` |`a | b`
// MAGIC |Elementwise not |`!a` |`~a` |`~a` |`!a`
// MAGIC |True if any element is nonzero |`any(a)` |`any(a)` |any(a) |
// MAGIC |True if all elements are nonzero |`all(a)` |`all(a)` |all(a) |
// MAGIC
// MAGIC ### Linear Algebra Functions
// MAGIC
// MAGIC |Operation |Breeze |Matlab |Numpy |R
// MAGIC |----------------------------------------------|-------------------------------------------------------------|------------------|--------------------|-----------------
// MAGIC |Linear solve |`a \\ b` |`a \\ b` |`linalg.solve(a,b)` |`solve(a,b)`
// MAGIC |Transpose |`a.t` |`a'` |`a.conj.transpose()`|`t(a)`
// MAGIC |Determinant |`det(a)` |`det(a)` |`linalg.det(a)` |`det(a)`
// MAGIC |Inverse |`inv(a)` |`inv(a)` |`linalg.inv(a)` |`solve(a)`
// MAGIC |Moore-Penrose Pseudoinverse |`pinv(a)` |`pinv(a)` |`linalg.pinv(a)` |
// MAGIC |Vector Frobenius Norm |`norm(a)` |`norm(a)` |`norm(a)` |
// MAGIC |Eigenvalues (Symmetric) |`eigSym(a)` |`[v,l] = eig(a)` |`linalg.eig(a)[0]` |
// MAGIC |Eigenvalues |`val (er, ei, _) = eig(a)` (separate real & imaginary part) |`eig(a)` |`linalg.eig(a)[0]` |`eigen(a)$values`
// MAGIC |Eigenvectors |`eig(a)._3` |`[v,l] = eig(a)` |`linalg.eig(a)[1]` |`eigen(a)$vectors`
// MAGIC |Singular Value Decomposition |`val svd.SVD(u,s,v) = svd(a)` |`svd(a)` |`linalg.svd(a)` |`svd(a)$d`
// MAGIC |Rank |`rank(a)` |`rank(a)` |`rank(a)` |`rank(a)`
// MAGIC |Vector length |`a.length` |`size(a)` |`a.size` |`length(a)`
// MAGIC |Matrix rows |`a.rows` |`size(a,1)` |`a.shape[0]` |`nrow(a)`
// MAGIC |Matrix columns |`a.cols` |`size(a,2)` |`a.shape[1]` |`ncol(a)`
// MAGIC
// MAGIC ### Rounding and Signs
// MAGIC
// MAGIC |Operation |Breeze |Matlab |Numpy |R
// MAGIC |------------------------------------|-------------|----------------|-------------|--------------
// MAGIC |Round |`round(a)` |`round(a)` |`around(a)` |`round(a)`
// MAGIC |Ceiling |`ceil(a)` |`ceil(a)` |`ceil(a)` |`ceiling(a)`
// MAGIC |Floor |`floor(a)` |`floor(a)` |`floor(a)` |`floor(a)`
// MAGIC |Sign |`signum(a)` |`sign(a)` |`sign(a)` |`sign(a)`
// MAGIC |Absolute Value |`abs(a)` |`abs(a)` |`abs(a)` |`abs(a)`
// MAGIC
// MAGIC ### Constants
// MAGIC
// MAGIC |Operation |Breeze |Matlab |Numpy |R
// MAGIC |--------------|----------------|----------|----------|-----------
// MAGIC |Not a Number |`NaN` or `nan` |`NaN` |`nan` |`NA`
// MAGIC |Infinity |`Inf` or `inf` |`Inf` |`inf` |`Inf`
// MAGIC |Pi |`Constants.Pi` |`pi` |`math.pi` |`pi`
// MAGIC |e |`Constants.E` |`exp(1)` |`math.e` |`exp(1)`
// MAGIC
// MAGIC
// MAGIC ## Complex numbers
// MAGIC
// MAGIC If you make use of complex numbers, you will want to include a
// MAGIC `breeze.math._` import. This declares a `i` variable, and provides
// MAGIC implicit conversions from Scala’s basic types to complex types.
// MAGIC
// MAGIC |Operation |Breeze |Matlab |Numpy |R
// MAGIC |-------------------|-----------------------------|-----------|-----------------------------|------------
// MAGIC |Imaginary unit |`i` |`i` |`z = 1j` |`1i`
// MAGIC |Complex numbers |`3 + 4 * i` or `Complex(3,4)`|`3 + 4i` |`z = 3 + 4j` |`3 + 4i`
// MAGIC |Absolute Value |`abs(z)` or `z.abs` |`abs(z)` |`abs(z)` |`abs(z)`
// MAGIC |Real Component |`z.real` |`real(z)` |`z.real` |`Re(z)`
// MAGIC |Imaginary Component|`z.imag` |`imag(z)` |`z.imag()` |`Im(z)`
// MAGIC |Imaginary Conjugate|`z.conjugate` |`conj(z)` |`z.conj()` or `z.conjugate()`|`Conj(z)`
// MAGIC
// MAGIC ## Numeric functions
// MAGIC
// MAGIC Breeze contains a fairly comprehensive set of special functions under
// MAGIC the `breeze.numerics._` import. These functions can be applied to single
// MAGIC elements, vectors or matrices of Doubles. This includes versions of the
// MAGIC special functions from `scala.math` that can be applied to vectors and
// MAGIC matrices. Any function acting on a basic numeric type can be “vectorized”,
// MAGIC to a [[UFunc|Universal Functions]] function, which can act elementwise on vectors and matrices:
// MAGIC ```scala
// MAGIC val v = DenseVector(1.0,2.0,3.0)
// MAGIC exp(v) // == DenseVector(2.7182818284590455, 7.38905609893065, 20.085536923187668)
// MAGIC ```
// MAGIC
// MAGIC UFuncs can also be used in-place on Vectors and Matrices:
// MAGIC ```scala
// MAGIC val v = DenseVector(1.0,2.0,3.0)
// MAGIC exp.inPlace(v) // == DenseVector(2.7182818284590455, 7.38905609893065, 20.085536923187668)
// MAGIC ```
// MAGIC
// MAGIC See [[Universal Functions]] for more information.
// MAGIC
// MAGIC Here is a (non-exhaustive) list of UFuncs in Breeze:
// MAGIC
// MAGIC ### Trigonometry
// MAGIC * `sin`, `sinh`, `asin`, `asinh`
// MAGIC * `cos`, `cosh`, `acos`, `acosh`
// MAGIC * `tan`, `tanh`, `atan`, `atanh`
// MAGIC * `atan2`
// MAGIC * `sinc(x) == sin(x)/x`
// MAGIC * `sincpi(x) == sinc(x * Pi)`
// MAGIC
// MAGIC ### Logarithm, Roots, and Exponentials
// MAGIC * `log`, `exp`, `log10`
// MAGIC * `log1p`, `expm1`
// MAGIC * `sqrt`, `cbrt`
// MAGIC * `pow`
// MAGIC
// MAGIC ### Gamma Function and its cousins
// MAGIC
// MAGIC The [gamma function](http://en.wikipedia.org/wiki/Gamma_function) is the extension of the factorial function to the reals.
// MAGIC Numpy needs `from scipy.special import *` for this and subsequent sections.
// MAGIC
// MAGIC |Operation |Breeze |Matlab |Numpy |R
// MAGIC |------------------------------------|--------------------|------------------------|------------------------|----------------
// MAGIC |Gamma function |`exp(lgamma(a))` |`gamma(a)` |`gamma(a)` |`gamma(a)`
// MAGIC |log Gamma function |`lgamma(a)` |`gammaln(a)` |`gammaln(a)` |`lgamma(a)`
// MAGIC |Incomplete gamma function |`gammp(a, x)` |`gammainc(a, x)` |`gammainc(a, x)` |`pgamma(a, x)` (requires stats library)
// MAGIC |Upper incomplete gamma function |`gammq(a, x)` |`gammainc(a, x, tail)` |`gammaincc(a, x)` |`pgamma(x, a, lower = FALSE) * gamma(a)` (requires stats library)
// MAGIC |derivative of lgamma |`digamma(a)` |`psi(a)` |`polygamma(0, a)` |`digamma(a)`
// MAGIC |derivative of digamma               |`trigamma(a)`       |`psi(1, a)`             |`polygamma(1, a)`       |`trigamma(a)`
// MAGIC |nth derivative of digamma | na |`psi(n, a)` |`polygamma(n, a)` |`psigamma(a, deriv = n)`
// MAGIC |Log [Beta function](http://en.wikipedia.org/wiki/Beta_function)| lbeta(a,b) |`betaln(a, b)` |`betaln(a,b)`|`lbeta(a, b)`
// MAGIC |Generalized Log [Beta function](http://en.wikipedia.org/wiki/Beta_function)| lbeta(a) | na|na |
// MAGIC
// MAGIC ### Error Function
// MAGIC
// MAGIC The [error function](http://en.wikipedia.org/wiki/Error_function)...
// MAGIC
// MAGIC |Operation |Breeze |Matlab |Numpy |R
// MAGIC |------------------------------------|-----------------|----------------|---------------------|-------------
// MAGIC | error function |`erf(a)` |`erf(a)` |`erf(a)` |`2 * pnorm(a * sqrt(2)) - 1`
// MAGIC | 1 - erf(a) |`erfc(a)` |`erfc(a)` |`erfc(a)` |`2 * pnorm(a * sqrt(2), lower = FALSE)`
// MAGIC | inverse error function |`erfinv(a)` |`erfinv(a)` |`erfinv(a)` |`qnorm((1 + a) / 2) / sqrt(2)`
// MAGIC | inverse erfc |`erfcinv(a)` |`erfcinv(a)` |`erfcinv(a)` |`qnorm(a / 2, lower = FALSE) / sqrt(2)`
// MAGIC
// MAGIC ### Other functions
// MAGIC
// MAGIC |Operation |Breeze |Matlab |Numpy |R
// MAGIC |------------------------------------|-----------------|----------------|---------------------|------------
// MAGIC | logistic sigmoid |`sigmoid(a)` | na | `expit(a)` |`sigmoid(a)` (requires pracma library)
// MAGIC | Indicator function |`I(a)` | not needed | `where(cond, 1, 0)` |`0 + (a > 0)`
// MAGIC | Polynomial evaluation              |`polyval(coef,x)`| |                     |
// MAGIC
// MAGIC ### Map and Reduce
// MAGIC
// MAGIC For most simple mapping tasks, one can simply use vectorized, or universal functions.
// MAGIC Given a vector `v`, we can simply take the log of each element of a vector with `log(v)`.
// MAGIC Sometimes, however, we want to apply a somewhat idiosyncratic function to each element of a vector.
// MAGIC For this, we can use the map function:
// MAGIC
// MAGIC ```scala
// MAGIC val v = DenseVector(1.0,2.0,3.0)
// MAGIC v.map( xi => foobar(xi) )
// MAGIC ```
// MAGIC
// MAGIC Breeze provides a number of built in reduction functions such as sum, mean.
// MAGIC You can implement a custom reduction using the higher order function `reduce`.
// MAGIC For instance, we can sum the first 9 integers as follows:
// MAGIC
// MAGIC ```scala
// MAGIC val v = linspace(0,9,10)
// MAGIC val s = v.reduce( _ + _ )
// MAGIC ```
// MAGIC
// MAGIC ## Broadcasting
// MAGIC
// MAGIC Sometimes we want to apply an operation to every row or column of a
// MAGIC matrix, as a unit. For instance, you might want to compute the mean of
// MAGIC each row, or add a vector to every column. Adapting a matrix so that
// MAGIC operations can be applied columnwise or rowwise is called
// MAGIC **broadcasting**. Languages like R and numpy automatically and
// MAGIC implicitly do broadcasting, meaning they won’t stop you if you
// MAGIC accidentally add a matrix and a vector. In Breeze, you have to signal
// MAGIC your intent using the broadcasting operator `*`. The `*` is meant to
// MAGIC evoke “foreach” visually. Here are some examples:
// MAGIC
// MAGIC ```scala
// MAGIC val dm = DenseMatrix((1.0,2.0,3.0),
// MAGIC (4.0,5.0,6.0))
// MAGIC
// MAGIC val res = dm(::, *) + DenseVector(3.0, 4.0)
// MAGIC assert(res === DenseMatrix((4.0, 5.0, 6.0), (8.0, 9.0, 10.0)))
// MAGIC
// MAGIC res(::, *) := DenseVector(3.0, 4.0)
// MAGIC assert(res === DenseMatrix((3.0, 3.0, 3.0), (4.0, 4.0, 4.0)))
// MAGIC
// MAGIC val m = DenseMatrix((1.0, 3.0), (4.0, 4.0))
// MAGIC // unbroadcasted sums all elements
// MAGIC assert(sum(m) === 12.0)
// MAGIC assert(mean(m) === 3.0)
// MAGIC
// MAGIC assert(sum(m(*, ::)) === DenseVector(4.0, 8.0))
// MAGIC assert(sum(m(::, *)) === DenseMatrix((5.0, 7.0)))
// MAGIC
// MAGIC assert(mean(m(*, ::)) === DenseVector(2.0, 4.0))
// MAGIC assert(mean(m(::, *)) === DenseMatrix((2.5, 3.5)))
// MAGIC
// MAGIC
// MAGIC ```
// MAGIC
// MAGIC The UFunc trait is similar to numpy’s ufunc. See [[Universal Functions]] for more information on Breeze UFuncs.
// MAGIC
// MAGIC ## Casting and type safety
// MAGIC
// MAGIC Compared to Numpy and Matlab, Breeze requires you to be more explicit about the types of your variables. When you create a new vector for example, you must specify a type (such as in `DenseVector.zeros[Double](n)`) in cases where a type can not be inferred automatically. Automatic inference will occur when you create a vector by passing its initial values in (`DenseVector`). A common mistake is using integers for initialisation (e.g. `DenseVector`), which would give a matrix of integers instead of doubles. Both Numpy and Matlab would default to doubles instead.
// MAGIC
// MAGIC Breeze will not convert integers to doubles for you in most expressions. Simple operations like `a :+
// MAGIC 3` when `a` is a `DenseVector[Double]` will not compile. Breeze provides a convert function, which can be used to explicitly cast. You can also use `v.mapValues(_.toDouble)`.
// MAGIC
// MAGIC ### Casting
// MAGIC
// MAGIC | Operation | Breeze | Matlab | Numpy | R
// MAGIC | ------------------------------- | --------------------------------------------- | ------------------- | ----------------|--------------------
// MAGIC | Convert to Int | `convert(a, Int)` | `int(a)` | `a.astype(int)` |`as.integer(a)`
// MAGIC
// MAGIC ## Performance
// MAGIC
// MAGIC Breeze uses [netlib-java](https://github.com/fommil/netlib-java/) for
// MAGIC its core linear algebra routines. This includes all the cubic time
// MAGIC operations, matrix-matrix and matrix-vector multiplication. Special
// MAGIC efforts are taken to ensure that arrays are not copied.
// MAGIC
// MAGIC Netlib-java will attempt to load system optimised BLAS/LAPACK if they
// MAGIC are installed, falling back to the reference natives, falling back to
// MAGIC pure Java. Set your logger settings to `ALL` for the
// MAGIC `com.github.fommil.netlib` package to check the status, and to
// MAGIC `com.github.fommil.jniloader` for a more detailed breakdown. Read the
// MAGIC netlib-java project page for more details.
// MAGIC
// MAGIC Currently vectors and matrices over types other than `Double`, `Float`
// MAGIC and `Int` are boxed, so they will typically be a lot slower. If you find
// MAGIC yourself needing other AnyVal types like `Long` or `Short`, please ask
// MAGIC on the list about possibly adding support for them.
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC # [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/)
// MAGIC
// MAGIC
// MAGIC ### prepared by [Raazesh Sainudiin](https://nz.linkedin.com/in/raazesh-sainudiin-45955845) and [Sivanand Sivaram](https://www.linkedin.com/in/sivanand)
// MAGIC
// MAGIC *supported by* [](https://databricks.com/)
// MAGIC and
// MAGIC [](https://www.awseducate.com/microsite/CommunitiesEngageHome) | lamastex/scalable-data-science | db/xtraResources/LinearAlgebra/LAlgCheatSheet.scala | Scala | unlicense | 30,754 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.bagel
import java.io.{InputStream, OutputStream, DataInputStream, DataOutputStream}
import java.nio.ByteBuffer
import scala.collection.mutable.ArrayBuffer
import scala.xml.{XML, NodeSeq}
import org.apache.spark._
import org.apache.spark.serializer.{DeserializationStream, SerializationStream, SerializerInstance}
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
/**
 * Standalone (non-Bagel) Wikipedia PageRank over a tab-separated article dump.
 *
 * Usage: WikipediaPageRankStandalone <inputFile> <threshold> <numIterations> <host> <usePartitioner>
 *   - inputFile: TSV dump where field 1 is the title and field 3 the XML body
 *   - threshold: only articles with rank >= threshold are printed
 *   - numIterations: number of PageRank iterations
 *   - host: Spark master URL
 *   - usePartitioner: whether to hash-partition the link table up front
 */
object WikipediaPageRankStandalone {
  def main(args: Array[String]) {
    if (args.length < 5) {
      System.err.println("Usage: WikipediaPageRankStandalone <inputFile> <threshold> <numIterations> <host> <usePartitioner>")
      System.exit(-1)
    }
    val sparkConf = new SparkConf()
    // Bug fix: the serializer lives in this package (org.apache.spark.examples.bagel).
    // The previous value "spark.bagel.examples.WPRSerializer" named a class that does
    // not exist, causing a ClassNotFoundException when Spark instantiated the serializer.
    sparkConf.set("spark.serializer", "org.apache.spark.examples.bagel.WPRSerializer")
    val inputFile = args(0)
    val threshold = args(1).toDouble
    val numIterations = args(2).toInt
    val host = args(3)
    val usePartitioner = args(4).toBoolean
    sparkConf.setMaster(host).setAppName("WikipediaPageRankStandalone")
    val sc = new SparkContext(sparkConf)
    val input = sc.textFile(inputFile)
    val partitioner = new HashPartitioner(sc.defaultParallelism)
    // Parse every line into (title, outgoing-link titles); optionally co-locate by key
    // so the iterative join in pageRank() avoids a shuffle per iteration.
    val links =
      if (usePartitioner)
        input.map(parseArticle _).partitionBy(partitioner).cache()
      else
        input.map(parseArticle _).cache()
    val n = links.count()
    val defaultRank = 1.0 / n
    val a = 0.15 // damping term: rank = a/n + (1-a) * sum(contributions)
    // Do the computation
    val startTime = System.currentTimeMillis
    val ranks =
      pageRank(links, numIterations, defaultRank, a, n, partitioner, usePartitioner, sc.defaultParallelism)
    // Print the result
    System.err.println("Articles with PageRank >= "+threshold+":")
    val top =
      (ranks
       .filter { case (id, rank) => rank >= threshold }
       .map { case (id, rank) => "%s\\t%s\\n".format(id, rank) }
       .collect().mkString)
    println(top)
    val time = (System.currentTimeMillis - startTime) / 1000.0
    println("Completed %d iterations in %f seconds: %f seconds per iteration"
      .format(numIterations, time, time / numIterations))
    System.exit(0)
  }

  /**
   * Parses one TSV dump line into (articleTitle, outgoingLinkTargets).
   * A body of "\\N" (SQL NULL) or malformed XML yields an empty link list.
   */
  def parseArticle(line: String): (String, Array[String]) = {
    val fields = line.split("\\t")
    val (title, body) = (fields(1), fields(3).replace("\\\\n", "\\n"))
    // new String(...) detaches the id from the large backing line (pre-JDK7u6
    // substrings shared the parent char[], retaining the whole dump line in memory).
    val id = new String(title)
    val links =
      if (body == "\\\\N")
        NodeSeq.Empty
      else
        try {
          XML.loadString(body) \\\\ "link" \\ "target"
        } catch {
          case e: org.xml.sax.SAXParseException =>
            System.err.println("Article \\""+title+"\\" has malformed XML in body:\\n"+body)
            NodeSeq.Empty
        }
    val outEdges = links.map(link => new String(link.text)).toArray
    (id, outEdges)
  }

  /**
   * Iterative PageRank: each iteration distributes every page's rank evenly over its
   * out-links, then applies the damping formula a/n + (1-a)*sum. Pages that appear
   * only as link targets (no entry in `links`) receive `defaultRank` contributions.
   */
  def pageRank(
    links: RDD[(String, Array[String])],
    numIterations: Int,
    defaultRank: Double,
    a: Double,
    n: Long,
    partitioner: Partitioner,
    usePartitioner: Boolean,
    numPartitions: Int
  ): RDD[(String, Double)] = {
    var ranks = links.mapValues { edges => defaultRank }
    for (i <- 1 to numIterations) {
      val contribs = links.groupWith(ranks).flatMap {
        case (id, (linksWrapper, rankWrapper)) =>
          if (linksWrapper.length > 0) {
            if (rankWrapper.length > 0) {
              linksWrapper(0).map(dest => (dest, rankWrapper(0) / linksWrapper(0).size))
            } else {
              // Page has links but no rank yet (first sighting): contribute the default.
              linksWrapper(0).map(dest => (dest, defaultRank / linksWrapper(0).size))
            }
          } else {
            // Dangling target with no out-links contributes nothing.
            Array[(String, Double)]()
          }
      }
      // combineByKey sums contributions per target using the supplied partitioner,
      // keeping the data co-located across iterations.
      ranks = (contribs.combineByKey((x: Double) => x,
        (x: Double, y: Double) => x + y,
        (x: Double, y: Double) => x + y,
        partitioner)
        .mapValues(sum => a/n + (1-a)*sum))
    }
    ranks
  }
}
/**
 * Spark Serializer SPI entry point for the Wikipedia PageRank example.
 * Spark instantiates this via the "spark.serializer" config key and asks it
 * for per-thread SerializerInstance objects.
 */
class WPRSerializer extends org.apache.spark.serializer.Serializer {
  // Instances are stateless, so a fresh one per request is cheap.
  def newInstance(): SerializerInstance = new WPRSerializerInstance()
}
/**
 * SerializerInstance for the PageRank example. Only the streaming forms are
 * implemented; single-object (de)serialization is intentionally unsupported,
 * matching how Spark's shuffle path uses this serializer.
 */
class WPRSerializerInstance extends SerializerInstance {
  // Single place for the "not implemented" contract of the non-stream methods.
  private def unsupported: Nothing = throw new UnsupportedOperationException()

  def serialize[T](t: T): ByteBuffer = unsupported

  def deserialize[T](bytes: ByteBuffer): T = unsupported

  def deserialize[T](bytes: ByteBuffer, loader: ClassLoader): T = unsupported

  def serializeStream(s: OutputStream): SerializationStream =
    new WPRSerializationStream(s)

  def deserializeStream(s: InputStream): DeserializationStream =
    new WPRDeserializationStream(s)
}
/**
 * Writes PageRank records in a compact tagged binary format:
 *   tag 0: (id, links)  -> int 0, UTF id, int linkCount, UTF per link
 *   tag 1: (id, rank)   -> int 1, UTF id, double rank   (rank inside a wrapper buffer)
 *   tag 2: (id, rank)   -> int 2, UTF id, double rank   (bare pair, no wrapper)
 * Must stay in sync with WPRDeserializationStream below.
 */
class WPRSerializationStream(os: OutputStream) extends SerializationStream {
  val dos = new DataOutputStream(os)

  // NOTE(review): this match is not exhaustive — any object shape other than the
  // three cases below throws a MatchError at runtime. The ArrayBuffer[_] match is
  // type-erased; only the first element's runtime type is inspected.
  def writeObject[T](t: T): SerializationStream = t match {
    case (id: String, wrapper: ArrayBuffer[_]) => wrapper(0) match {
      case links: Array[String] => {
        dos.writeInt(0) // links
        dos.writeUTF(id)
        dos.writeInt(links.length)
        for (link <- links) {
          dos.writeUTF(link)
        }
        this
      }
      case rank: Double => {
        dos.writeInt(1) // rank
        dos.writeUTF(id)
        dos.writeDouble(rank)
        this
      }
    }
    case (id: String, rank: Double) => {
      dos.writeInt(2) // rank without wrapper
      dos.writeUTF(id)
      dos.writeDouble(rank)
      this
    }
  }

  def flush() { dos.flush() }
  def close() { dos.close() }
}
/**
 * Reads the tagged binary format produced by WPRSerializationStream:
 * tag 0 -> (id, ArrayBuffer(Array[String] links)), tag 1 -> (id, ArrayBuffer(rank)),
 * tag 2 -> (id, rank). Any other tag falls through the match and throws MatchError.
 */
class WPRDeserializationStream(is: InputStream) extends DeserializationStream {
  val dis = new DataInputStream(is)

  // The unchecked asInstanceOf[T] casts mirror Spark's DeserializationStream
  // contract: the caller is responsible for expecting the right element type.
  def readObject[T](): T = {
    val typeId = dis.readInt()
    typeId match {
      case 0 => {
        val id = dis.readUTF()
        val numLinks = dis.readInt()
        val links = new Array[String](numLinks)
        for (i <- 0 until numLinks) {
          val link = dis.readUTF()
          links(i) = link
        }
        (id, ArrayBuffer(links)).asInstanceOf[T]
      }
      case 1 => {
        val id = dis.readUTF()
        val rank = dis.readDouble()
        (id, ArrayBuffer(rank)).asInstanceOf[T]
      }
      case 2 => {
        val id = dis.readUTF()
        val rank = dis.readDouble()
        (id, rank).asInstanceOf[T]
      }
    }
  }
  def close() { dis.close() }
}
| cloudera/spark | examples/src/main/scala/org/apache/spark/examples/bagel/WikipediaPageRankStandalone.scala | Scala | apache-2.0 | 7,037 |
package views.html
package account
import play.api.data.Form
import lila.api.Context
import lila.app.templating.Environment._
import lila.app.ui.ScalatagsTemplate._
import lila.security.EmailConfirm.Help._
import controllers.routes
/**
 * Renders the "Help with email confirmation" page: a username lookup form plus,
 * once submitted, a status-specific reply (user not found, email re-sent,
 * already confirmed, account closed, or registered without an email).
 */
object emailConfirmHelp {

  private val title = "Help with email confirmation"

  // `form` is the username lookup form; `status` is None before submission and
  // Some(result) after the controller has resolved the username.
  def apply(form: Form[_], status: Option[Status])(implicit ctx: Context) =
    views.html.base.layout(
      title = title,
      moreCss = cssTag("email-confirm")
    )(
      frag(
        main(cls := "page-small box box-pad email-confirm-help")(
          h1(title),
          p("You signed up, but didn't receive your confirmation email?"),
          // GET form so the resulting URL (with ?username=...) is shareable/bookmarkable.
          st.form(cls := "form3", action := routes.Account.emailConfirmHelp, method := "get")(
            form3.split(
              form3.group(
                form("username"),
                trans.username(),
                help = raw("What username did you create?").some
              ) { f =>
                // Client-side validation against the same regex used at signup.
                form3.input(f)(pattern := lila.user.User.newUsernameRegex.regex)
              },
              div(cls := "form-group")(
                form3.submit(trans.apply())
              )
            )
          ),
          // One reply branch per EmailConfirm.Help.Status variant.
          div(cls := "replies")(
            status map {
              case NoSuchUser(name) =>
                frag(
                  p("We couldn't find any user by this name: ", strong(name), "."),
                  p(
                    "You can use it to ",
                    a(href := routes.Auth.signup)("create a new account"),
                    "."
                  )
                )
              case EmailSent(name, email) =>
                frag(
                  // email.conceal masks part of the address to avoid leaking it on screen.
                  p("We have sent an email to ", email.conceal, "."),
                  p(
                    "It can take some time to arrive.",
                    br,
                    strong("Wait 5 minutes and refresh your email inbox.")
                  ),
                  p("Also check your spam folder, it might end up there. If so, mark it as NOT spam."),
                  p("If everything else fails, then send us this email:"),
                  hr,
                  p(i(s"Hello, please confirm my account: $name")),
                  hr,
                  p(
                    "Copy and paste the above text and send it to ",
                    a(href := s"mailto:$contactEmailInClear?subject=Confirm account $name")(
                      contactEmailInClear
                    )
                  ),
                  p("We will come back to you shortly to help you complete your signup.")
                )
              case Confirmed(name) =>
                frag(
                  p("The user ", strong(name), " is successfully confirmed."),
                  p("You can ", a(href := routes.Auth.login)("login right now as ", name), "."),
                  p("You do not need a confirmation email.")
                )
              case Closed(name) =>
                p("The account ", strong(name), " is closed.")
              case NoEmail(name) =>
                p("The account ", strong(name), " was registered without an email.")
            }
          )
        )
      )
    )
}
| luanlv/lila | app/views/account/emailConfirmHelp.scala | Scala | mit | 3,181 |
package com.larry.da.jobs.userdigest
import org.apache.spark.{HashPartitioner, SparkContext}
import scala.collection.mutable.ArrayBuffer
import com.google.common.hash.Hashing.md5
/**
* Created by larry on 13/1/16.
*/
/**
 * Ad-hoc Spark job that joins real-time tag logs against two id-mapping
 * dictionaries (channel ids and agsids) and aggregates the tags per resolved
 * user id. Looks REPL-driven: `sc` must be assigned by the caller before tt().
 */
object RealTimeTag {

  // Injected externally (e.g. from a spark-shell session) before tt() is called.
  var sc: SparkContext = _

  /**
   * Runs the join/aggregation pipeline for the 2016-01-12 partition and forces
   * execution with a final count. Paths and dates are hard-coded; this reads
   * like an exploratory job rather than a scheduled one.
   * NOTE(review): lines with fewer/more than 5 tab-separated fields will throw
   * a MatchError in the first map — confirm the input is always well-formed.
   */
  def tt(): Unit = {
    // Parse (chn, dspid, agsid, tag); chn encodes the send type, and for
    // chn == 3 long channel ids are replaced by their MD5 hash.
    val log = sc.textFile("/user/tracking/userdigest/log/hourly/*realtime_tags*2016-01-12*gz").map(x=>{
      val Array(sendType,channid,agsid,tag,time) = x.split("\\t")
      val chn = sendType match {
        case "0" => 2
        case "1" => 3
        case "3" => 4
        case _ => 1
      }
      val dspid = if(chn == 3 && channid.length > 32) md5.hashString(channid,com.larry.da.jobs.idmap.Config.chaset_utf8).toString else channid
      (chn,dspid,agsid,tag)
    }).filter(x=>x._2 != "" || x._3 != "")

    // NOTE(review): removed a dead `val line = sc.textFile("/tmp/mapred/userdigest/rttag/2016-01-12")
    // .filter(_.contains("FU_2@160112"))` here — it was never referenced, and being a
    // lazy RDD it never executed anything.

    // Dictionary: (idType, channelId) -> aguid
    val chnDic = sc.textFile("/user/dauser/aguid/idmapHistory/channel/2016-01-12").map(x=>{ val Array(cid,uid,idType) = x.split("\\t").take(3); ((idType.toInt,cid),uid) })
    // Dictionary: agsid -> aguid
    val agsidDic = sc.textFile("/user/dauser/aguid/idmapHistory/agsid/2016-01-12").map(x=>{ val Array(cid,uid) = x.split("\\t").take(2); (cid,uid) })

    // Resolve records that carry an agsid; unresolved ones keep uid = "".
    val agsidMapped = log.filter(_._3 != "").map(x=>{
      val (chn,channid,agsid,tag) = x;
      (agsid,(chn,channid,tag))
    }).leftOuterJoin(agsidDic).map(x=>{
      val (agsid,((chn,channid,tag),aguid)) =x
      val uid = aguid match {case Some(u:String)=> u; case _ => ""}
      ((chn,channid),(uid,agsid,tag))
    })

    // Records with no agsid, plus agsid records that failed to resolve, fall
    // back to the channel-id dictionary.
    val chlData = log.filter(_._3 == "").map(x=>{
      val (chn,channid,agsid,tag) = x;
      ((chn,channid),("",agsid,tag))
    }).union(
      agsidMapped.filter(_._2._1 == "")
    )
    val chlMapped = chlData.join(chnDic).map(x=>{
      val ((chn,channid),((xx,agsid,tag),uid))= x;
      (uid,tag)
    })

    // Union both resolution paths and collect all tags per user id.
    val res = agsidMapped.map(x=>{
      val ((chn,channid),(uid,agsid,tag)) = x;
      (uid,tag)
    }).union(chlMapped).aggregateByKey(new ArrayBuffer[String]())(
      (a,b) => a += b,
      (a,b) => a ++= b
    ).mapValues(_.mkString("|~|"))

    // Action that actually triggers the job; the count itself is discarded.
    res.count
  }
}
| larry88/spark_da | src/main/scala/com/larry/da/jobs/userdigest/RealTimeTag.scala | Scala | gpl-2.0 | 2,153 |
/**
* Copyright 2012 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda.basic
import java.util.Date
import org.joda.time.DateTime
import com.netflix.edda.Record
import org.slf4j.LoggerFactory
import org.scalatest.FunSuite
import org.scalatest.matchers.MustMatchers
class BasicRecordMatcherTest extends FunSuite with MustMatchers {
  // Shared fixtures: a logger plus readable aliases for the two match outcomes.
  val logger = LoggerFactory.getLogger(getClass)
  val aMatch = true
  val aMisMatch = false

  // Boolean record values match both Boolean and String ("true"/"false") expectations.
  test("boolean") {
    val matcher = new BasicRecordMatcher
    val record = Record("id", true).toMap
    matcher.doesMatch(Map("data" -> true), record) must be(aMatch)
    matcher.doesMatch(Map("data" -> false), record) must be(aMisMatch)
    matcher.doesMatch(Map("data" -> "true"), record) must be(aMatch)
    matcher.doesMatch(Map("data" -> "false"), record) must be(aMisMatch)
  }

  // java.util.Date, Joda DateTime and epoch-millis Long are interchangeable in comparisons.
  test("time") {
    val matcher = new BasicRecordMatcher
    val record = Record("id", new Date(0)).toMap
    matcher.doesMatch(Map("data" -> new Date(0)), record) must be(aMatch)
    matcher.doesMatch(Map("data" -> new DateTime(0)), record) must be(aMatch)
    matcher.doesMatch(Map("data" -> 0L), record) must be(aMatch)
    matcher.doesMatch(Map("data" -> new Date(1)), record) must be(aMisMatch)
    matcher.doesMatch(Map("data" -> new DateTime(1)), record) must be(aMisMatch)
    matcher.doesMatch(Map("data" -> 1L), record) must be(aMisMatch)
    val dtRecord = Record("id", new DateTime(0)).toMap
    matcher.doesMatch(Map("data" -> new Date(0)), dtRecord) must be(aMatch)
    matcher.doesMatch(Map("data" -> new DateTime(0)), dtRecord) must be(aMatch)
    matcher.doesMatch(Map("data" -> 0L), dtRecord) must be(aMatch)
    matcher.doesMatch(Map("data" -> new Date(1)), dtRecord) must be(aMisMatch)
    matcher.doesMatch(Map("data" -> new DateTime(1)), dtRecord) must be(aMisMatch)
    matcher.doesMatch(Map("data" -> 1L), dtRecord) must be(aMisMatch)
  }
  // String record values coerce to the expectation's type before comparison,
  // and non-String record values are stringified when compared to a String.
  test("string") {
    val matcher = new BasicRecordMatcher
    val record = Record("id", "true").toMap
    // case (found: String, expected: Boolean) => found.toBoolean.compareTo(expected)
    matcher.doesMatch(Map("data" -> true), record) must be(aMatch)
    matcher.doesMatch(Map("data" -> false), record) must be(aMisMatch)
    matcher.doesMatch(Map("data" -> "true"), record) must be(aMatch)
    matcher.doesMatch(Map("data" -> "false"), record) must be(aMisMatch)
    val numRecord = Record("id", "10").toMap
    // case (found: String, expected) => found.compareTo(expected.toString)
    matcher.doesMatch(Map("data" -> 10), numRecord) must be(aMatch)
    matcher.doesMatch(Map("data" -> 10L), numRecord) must be(aMatch)
    matcher.doesMatch(Map("data" -> "10"), numRecord) must be(aMatch)
    // Comparison is exact: trailing whitespace breaks the match.
    matcher.doesMatch(Map("data" -> "10 "), numRecord) must be(aMisMatch)
    matcher.doesMatch(Map("data" -> 11), numRecord) must be(aMisMatch)
    matcher.doesMatch(Map("data" -> 11L), numRecord) must be(aMisMatch)
    val charRecord = Record("id", "C").toMap
    matcher.doesMatch(Map("data" -> 'C'), charRecord) must be(aMatch)
    // case (found: String, expected: Byte) => found.compareTo(expected.toChar.toString)
    matcher.doesMatch(Map("data" -> 'C'.toByte), charRecord) must be(aMatch)
    // case (found, expected: String) => found.toString.compareTo(expected)
    matcher.doesMatch(Map("data" -> "10.0"), Record("id", 10.0F).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> "10.0"), Record("id", 10.0D).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> "10"), Record("id", 10).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> "10"), Record("id", 10L).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> "10"), Record("id", 10.toShort).toMap) must be(aMatch)
  }
  // The following five tests verify numeric widening: any numeric expectation
  // (Double/Float/Long/Int/Short/Byte) matches a numerically-equal record value.
  test("double") {
    val matcher = new BasicRecordMatcher
    matcher.doesMatch(Map("data" -> 1.2345D), Record("id", 1.2345D).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 0.012345e2D), Record("id", 1.2345D).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1.2345F), Record("id", 1.2345D).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 0.012345e2F), Record("id", 1.2345D).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1L), Record("id", 1.0D).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1), Record("id", 1.0D).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1.toShort), Record("id", 1.0D).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1.toByte), Record("id", 1.0D).toMap) must be(aMatch)
  }
  test("float") {
    val matcher = new BasicRecordMatcher
    matcher.doesMatch(Map("data" -> 1.2345D), Record("id", 1.2345F).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 0.012345e2D), Record("id", 1.2345F).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1.2345F), Record("id", 1.2345F).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 0.012345e2F), Record("id", 1.2345F).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1L), Record("id", 1.0F).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1), Record("id", 1.0F).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1.toShort), Record("id", 1.0F).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1.toByte), Record("id", 1.0F).toMap) must be(aMatch)
  }
  test("long") {
    val matcher = new BasicRecordMatcher
    matcher.doesMatch(Map("data" -> 1D), Record("id", 1L).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1F), Record("id", 1L).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1L), Record("id", 1L).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1), Record("id", 1L).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1.toShort), Record("id", 1L).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1.toByte), Record("id", 1L).toMap) must be(aMatch)
  }
  test("int") {
    val matcher = new BasicRecordMatcher
    matcher.doesMatch(Map("data" -> 1D), Record("id", 1).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1F), Record("id", 1).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1L), Record("id", 1).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1), Record("id", 1).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1.toShort), Record("id", 1).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1.toByte), Record("id", 1).toMap) must be(aMatch)
  }
  test("short") {
    val matcher = new BasicRecordMatcher
    matcher.doesMatch(Map("data" -> 1D), Record("id", 1.toShort).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1F), Record("id", 1.toShort).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1L), Record("id", 1.toShort).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1), Record("id", 1.toShort).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1.toShort), Record("id", 1.toShort).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1.toByte), Record("id", 1.toShort).toMap) must be(aMatch)
  }
  // Chars compare by their numeric value when the expectation is numeric
  // (e.g. '1' matches the Int 1 expectation below).
  test("char") {
    val matcher = new BasicRecordMatcher
    matcher.doesMatch(Map("data" -> 1L), Record("id", '1').toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1), Record("id", '1').toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 1.toShort), Record("id", '1').toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> '1'), Record("id", '1').toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> '1'.toByte), Record("id", '1').toMap) must be(aMatch)
  }
  // Bytes compare by their raw numeric value ('1'.toByte == 49).
  test("byte") {
    val matcher = new BasicRecordMatcher
    matcher.doesMatch(Map("data" -> 49L), Record("id", '1'.toByte).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 49), Record("id", '1'.toByte).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 49.toShort), Record("id", '1'.toByte).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> '1'), Record("id", '1'.toByte).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> '1'.toByte), Record("id", '1'.toByte).toMap) must be(aMatch)
  }
  // Null expectations distinguish "explicitly null", "non-null", and "key absent".
  test("null") {
    val matcher = new BasicRecordMatcher
    matcher.doesMatch(Map("data" -> null), Record("id", null).toMap) must be(aMatch)
    // key is explicitly set to null
    matcher.doesMatch(Map("data.key" -> null), Record("id", Map("key" -> null)).toMap) must be(aMatch)
    // key is explicitly non null
    matcher.doesMatch(Map("data.key" -> null), Record("id", Map("key" -> true)).toMap) must be(aMisMatch)
    // key is completly missing
    matcher.doesMatch(Map("data.key" -> null), Record("id", Map("bar" -> null)).toMap) must be(aMisMatch)
  }
  // Dotted expectation keys ("data.a", "data.a.x") navigate into nested maps.
  test("map") {
    val matcher = new BasicRecordMatcher
    matcher.doesMatch(Map("data.a" -> 1), Record("id", Map("a" -> 1, "b" -> 2, "c" -> 3)).toMap) must be(aMatch)
    matcher.doesMatch(Map("data.b" -> 2), Record("id", Map("a" -> 1, "b" -> 2, "c" -> 3)).toMap) must be(aMatch)
    matcher.doesMatch(Map("data.c" -> 3), Record("id", Map("a" -> 1, "b" -> 2, "c" -> 3)).toMap) must be(aMatch)
    matcher.doesMatch(Map("data.d" -> 1), Record("id", Map("a" -> 1, "b" -> 2, "c" -> 3)).toMap) must be(aMisMatch)
    matcher.doesMatch(Map("data.a.x" -> 1), Record("id", Map("a" -> Map("x" -> 1, "y" -> 2))).toMap) must be(aMatch)
  }
  // A scalar expectation matches if it is an element of the record's list; a
  // list expectation matches if it is an (order-insensitive) subset.
  test("list") {
    val matcher = new BasicRecordMatcher
    matcher.doesMatch(Map("data" -> 1), Record("id", List(1, 2, 3)).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 2), Record("id", List(1, 2, 3)).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 3), Record("id", List(1, 2, 3)).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> 4), Record("id", List(1, 2, 3)).toMap) must be(aMisMatch)
    // when 2 lists are compared we want to check if the expected result is a subset of the record value
    matcher.doesMatch(Map("data" -> List(1)), Record("id", List(1, 2, 3)).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> List(1, 2)), Record("id", List(1, 2, 3)).toMap) must be(aMatch)
    // ordering is irrelevent
    matcher.doesMatch(Map("data" -> List(2, 1)), Record("id", List(1, 2, 3)).toMap) must be(aMatch)
    matcher.doesMatch(Map("data" -> List(1, 2, 3)), Record("id", List(1, 2, 3)).toMap) must be(aMatch)
    // 4 is not in list
    matcher.doesMatch(Map("data" -> List(1, 2, 3, 4)), Record("id", List(1, 2, 3)).toMap) must be(aMisMatch)
    // again 4 is not in list
    matcher.doesMatch(Map("data" -> List(4)), Record("id", List(1, 2, 3)).toMap) must be(aMisMatch)
  }
// $or: matches when at least one sub-query matches.
test("$or") {
  val matcher = new BasicRecordMatcher
  matcher.doesMatch(Map("$or" -> Seq(Map("data" -> 3), Map("data" -> 4))), Record("id", 4).toMap) must be(aMatch)
  matcher.doesMatch(Map("$or" -> Seq(Map("data" -> 3), Map("data" -> 5))), Record("id", 4).toMap) must be(aMisMatch)
}
// $and: matches only when every sub-query matches.  Note that 4 and "4" both
// match the same record, so equality appears to coerce across numeric/string
// types -- TODO confirm against the matcher implementation.
test("$and") {
  val matcher = new BasicRecordMatcher
  matcher.doesMatch(Map("$and" -> Seq(Map("data" -> 4), Map("data" -> "4"))), Record("id", 4).toMap) must be(aMatch)
  matcher.doesMatch(Map("$and" -> Seq(Map("data" -> 4), Map("data" -> "5"))), Record("id", 4).toMap) must be(aMisMatch)
}
// $eq: explicit equality operator; lenient about Int vs String ("4" == 4 here).
test("$eq") {
  val matcher = new BasicRecordMatcher
  matcher.doesMatch(Map("data" -> Map("$eq" -> 4)), Record("id", 4).toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$eq" -> "4")), Record("id", 4).toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$eq" -> "foo")), Record("id", 4).toMap) must be(aMisMatch)
}
// $ne: negation of $eq, with the same lenient cross-type comparison.
test("$ne") {
  val matcher = new BasicRecordMatcher
  matcher.doesMatch(Map("data" -> Map("$ne" -> 4)), Record("id", 4).toMap) must be(aMisMatch)
  matcher.doesMatch(Map("data" -> Map("$ne" -> "4")), Record("id", 4).toMap) must be(aMisMatch)
  matcher.doesMatch(Map("data" -> Map("$ne" -> "foo")), Record("id", 4).toMap) must be(aMatch)
}
// $gt: strictly greater-than; works on numbers and on strings (lexicographic).
// Note 4 > "4" is expected to mismatch while $gte "4" matches below -- the
// cross-type ordering rules are asymmetric; verify against the matcher.
test("$gt") {
  val matcher = new BasicRecordMatcher
  matcher.doesMatch(Map("data" -> Map("$gt" -> 3)), Record("id", 4).toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$gt" -> "4")), Record("id", 4).toMap) must be(aMisMatch)
  matcher.doesMatch(Map("data" -> Map("$gt" -> 4)), Record("id", 4).toMap) must be(aMisMatch)
  matcher.doesMatch(Map("data" -> Map("$gt" -> "a")), Record("id", "b").toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$gt" -> "c")), Record("id", "b").toMap) must be(aMisMatch)
}
// $gte: greater-than-or-equal; equality is cross-type lenient ("4" >= 4 matches).
test("$gte") {
  val matcher = new BasicRecordMatcher
  matcher.doesMatch(Map("data" -> Map("$gte" -> 3)), Record("id", 4).toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$gte" -> "4")), Record("id", 4).toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$gte" -> 4)), Record("id", 4).toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$gte" -> "a")), Record("id", "b").toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$gte" -> "b")), Record("id", "b").toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$gte" -> "c")), Record("id", "b").toMap) must be(aMisMatch)
}
// $lt: strictly less-than, mirroring $gt.
test("$lt") {
  val matcher = new BasicRecordMatcher
  matcher.doesMatch(Map("data" -> Map("$lt" -> 5)), Record("id", 4).toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$lt" -> "4")), Record("id", 4).toMap) must be(aMisMatch)
  matcher.doesMatch(Map("data" -> Map("$lt" -> 4)), Record("id", 4).toMap) must be(aMisMatch)
  matcher.doesMatch(Map("data" -> Map("$lt" -> "c")), Record("id", "b").toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$lt" -> "a")), Record("id", "b").toMap) must be(aMisMatch)
}
// $lte: less-than-or-equal, mirroring $gte.
test("$lte") {
  val matcher = new BasicRecordMatcher
  matcher.doesMatch(Map("data" -> Map("$lte" -> 5)), Record("id", 4).toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$lte" -> "4")), Record("id", 4).toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$lte" -> 4)), Record("id", 4).toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$lte" -> "c")), Record("id", "b").toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$lte" -> "b")), Record("id", "b").toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$lte" -> "a")), Record("id", "b").toMap) must be(aMisMatch)
}
// $exists: tests key presence, not truthiness -- a key mapped to null or
// false still "exists" (see "foo" -> null and "baz" -> false below).
test("$exists") {
  val matcher = new BasicRecordMatcher
  matcher.doesMatch(Map("data.foo" -> Map("$exists" -> true)), Record("id", Map("foo" -> null, "baz" -> false)).toMap) must be(aMatch)
  matcher.doesMatch(Map("data.foo" -> Map("$exists" -> false)), Record("id", Map("foo" -> null, "baz" -> false)).toMap) must be(aMisMatch)
  matcher.doesMatch(Map("data.bar" -> Map("$exists" -> true)), Record("id", Map("foo" -> null, "baz" -> false)).toMap) must be(aMisMatch)
  matcher.doesMatch(Map("data.bar" -> Map("$exists" -> false)), Record("id", Map("foo" -> null, "baz" -> false)).toMap) must be(aMatch)
  matcher.doesMatch(Map("data.baz" -> Map("$exists" -> true)), Record("id", Map("foo" -> null, "baz" -> false)).toMap) must be(aMatch)
  matcher.doesMatch(Map("data.baz" -> Map("$exists" -> false)), Record("id", Map("foo" -> null, "baz" -> false)).toMap) must be(aMisMatch)
}
// $in: matches when the record value is a member of the given sequence.
test("$in") {
  val matcher = new BasicRecordMatcher
  matcher.doesMatch(Map("data" -> Map("$in" -> Seq(3, 4, 5))), Record("id", 4).toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$in" -> Seq(3, 5))), Record("id", 4).toMap) must be(aMisMatch)
}
// $nin: negation of $in.
test("$nin") {
  val matcher = new BasicRecordMatcher
  matcher.doesMatch(Map("data" -> Map("$nin" -> Seq(3, 4, 5))), Record("id", 4).toMap) must be(aMisMatch)
  matcher.doesMatch(Map("data" -> Map("$nin" -> Seq(3, 5))), Record("id", 4).toMap) must be(aMatch)
}
// $regex: "^test" matching the longer string "testing 123" shows the pattern
// is applied find-style (substring match), not as a full-string match.
// Case-sensitive by default; inline flags like (?i) are honored.
test("$regex") {
  val matcher = new BasicRecordMatcher
  matcher.doesMatch(Map("data" -> Map("$regex" -> "^test")), Record("id", "testing 123").toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$regex" -> "123$")), Record("id", "testing 123").toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$regex" -> "testing.123")), Record("id", "testing 123").toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$regex" -> "^[a-z]+ \\\\d+$")), Record("id", "testing 123").toMap) must be(aMatch)
  matcher.doesMatch(Map("data" -> Map("$regex" -> "^Test")), Record("id", "testing 123").toMap) must be(aMisMatch)
  matcher.doesMatch(Map("data" -> Map("$regex" -> "^(?i)TESTING")), Record("id", "testing 123").toMap) must be(aMatch)
}
}
| gitlon/edda | src/test/scala/com/netflix/edda/basic/BasicRecordMatcherTest.scala | Scala | apache-2.0 | 16,906 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.services.policies.nodeconfig
import java.io.File
import java.io.PrintWriter
import org.joda.time.DateTime
import com.normation.cfclerk.domain.Cf3PolicyDraft
import com.normation.inventory.domain.NodeId
import com.normation.rudder.domain.policies.RuleId
import com.normation.rudder.domain.policies.RuleWithCf3PolicyDraft
import com.normation.rudder.exceptions.TechniqueException
import com.normation.rudder.repository.FullActiveTechniqueCategory
import com.normation.rudder.services.policies.TemplateWriter
import com.normation.utils.Control._
import net.liftweb.common._
import net.liftweb.json.NoTypeHints
import net.liftweb.json.Serialization
import net.liftweb.json.Serialization.writePretty
import com.normation.rudder.domain.reports.NodeConfigId
/**
* A class implementing the logic about node configuration change.
* Extracted from NodeConfigurationServiceImpl to make it testable.
*/
class DetectChangeInNodeConfiguration extends Loggable {

  /**
   * Compute the set of rule IDs whose promises must be (re)generated for the
   * given node, by diffing the cached node configuration (if any) against the
   * target one.
   *
   * Ordering of the checks: any change in node inventory info, system
   * variables, or global parameters invalidates ALL rules; otherwise only the
   * rules whose policy drafts were added, deleted, or modified are returned,
   * plus rules whose underlying technique was (re-)accepted after the cache
   * was last written.
   *
   * With no cache available, every rule of the target configuration is
   * considered changed.
   */
  def detectChangeInNode(currentOpt: Option[NodeConfigurationCache], targetConfig: NodeConfiguration, directiveLib: FullActiveTechniqueCategory) : Set[RuleId] = {

    /*
     * Check if a policy draft (Cf3PolicyDraft) has a technique updated more recently
     * than the given date.
     *
     * If no date is passed, we consider that it's "now" and so that
     * nothing was updated.
     */
    def wasUpdatedSince(draft: Cf3PolicyDraft, optDate: Option[DateTime], directiveLib: FullActiveTechniqueCategory): Boolean = {
      //it's updated if any of the technique in the draft acceptation date is more recent than the given one
      optDate match {
        case None => false
        case Some(date) =>
          directiveLib.allTechniques.get(draft.technique.id) match {
            case None => //technique not available: consider it's an update
              true
            case Some((_, None)) => //no acceptation date available: consider it's an update
              true
            case Some((_, Some(d))) =>
              d.isAfter(date)
          }
      }
    }

    logger.trace(s"Checking changes in node '${targetConfig.nodeInfo.id.value}'")
    val changedRuleIds = currentOpt match {
      case None =>
        //what do we do if we don't have a cache for the node ? All the target rules are "changes" ?
        logger.trace("`-> No node configuration cache availabe for that node")
        targetConfig.policyDrafts.map( _.ruleId ).toSet
      case Some(current) =>
        val target = NodeConfigurationCache(targetConfig)
        val allRuleIds = (current.policyCache.map( _.ruleId ) ++ target.policyCache.map( _.ruleId )).toSet

        // First case : a change in the minimalnodeconfig is a change of all CRs
        if (current.nodeInfoCache != target.nodeInfoCache) {
          logger.trace(s"`-> there was a change in the node inventory information")
          allRuleIds
        // Second case : a change in the system variable is a change of all CRs
        } else if(current.nodeContextCache != target.nodeContextCache) {
          logger.trace(s"`-> there was a change in the system variables of the node")
          allRuleIds
        // Third case : a change in the parameters is a change of all CRs
        } else if(current.parameterCache != target.parameterCache) {
          logger.trace(s"`-> there was a change in the parameters of the node")
          allRuleIds
        } else {
          //check for different policy draft.
          val currentDrafts = current.policyCache.map( x => (x.draftId, x) ).toMap
          val targetDrafts = target.policyCache.map( x => (x.draftId, x) ).toMap
          //draftid in one and not the other are new,
          //for the one in both, check both ruleId and cacheValue
          ((currentDrafts.keySet ++ targetDrafts.keySet).map(id => (currentDrafts.get(id), targetDrafts.get(id))).flatMap {
            case (None, None) => //should not happen
              Set[RuleId]()
            case (Some(PolicyCache(ruleId, _, _)), None) =>
              logger.trace(s"`-> rule with ID '${ruleId.value}' was deleted")
              Set(ruleId)
            case (None, Some(PolicyCache(ruleId, _, _))) =>
              logger.trace(s"`-> rule with ID '${ruleId.value}' was added")
              Set(ruleId)
            case (Some(PolicyCache(r0, d0, c0)), Some(PolicyCache(r1, d1, c1))) =>
              //d0 and d1 are equals by construction, but keep them for future-proofing
              if(d0 == d1) {
                if(
                  //check that the rule is the same
                  r0 == r1
                  //and that the policy draft is the same (it's cache value, actually)
                  && c0 == c1
                ) {
                  Set[RuleId]() //no modification
                } else {
                  logger.trace(s"`-> there was a change in the promise with draft ID '${d0.value}'")
                  Set(r0,r1)
                }
              } else Set[RuleId]()
          }) ++ {
            //we also have to add all Rule ID for a draft whose technique has been accepted since last cache generation
            //(because we need to write template again)
            val ids = (targetConfig.policyDrafts.collect {
              case r:RuleWithCf3PolicyDraft if(wasUpdatedSince(r.cf3PolicyDraft, current.writtenDate, directiveLib)) => r.ruleId
            }).toSet
            if(ids.nonEmpty) {
              logger.trace(s"`-> there was a change in the applied techniques (technique was updated) for rules ID [${ids.mkString(", ")}]")
            }
            ids
          }
        }
    }
    logger.trace(s"`-> change rules for node ${targetConfig.nodeInfo.id.value}: [${changedRuleIds.map( _.value).mkString(", ")}]")
    changedRuleIds
  }
}
/**
* Implementation of the Node Configuration service
* It manages the NodeConfiguration content (the cache of the deployed conf)
*
* That implementation is not thread safe at all, and all call to its
* methods should be made in the context of an actor
* (deployment service and it's actor model is a good example)
*
*/
class NodeConfigurationServiceImpl(
    policyTranslator : TemplateWriter
  , repository : NodeConfigurationCacheRepository
  , logNodeConfig : NodeConfigurationLogger
) extends NodeConfigurationService with Loggable {

  // Change-detection logic is delegated to a dedicated, testable class.
  private[this] val detect = new DetectChangeInNodeConfiguration()

  //delegate to repository for nodeconfig persistence
  def deleteNodeConfigurations(nodeIds:Set[NodeId]) : Box[Set[NodeId]] = repository.deleteNodeConfigurations(nodeIds)
  def deleteAllNodeConfigurations() : Box[Unit] = repository.deleteAllNodeConfigurations
  def onlyKeepNodeConfiguration(nodeIds:Set[NodeId]) : Box[Set[NodeId]] = repository.onlyKeepNodeConfiguration(nodeIds)
  def cacheNodeConfiguration(nodeConfigurations: Set[NodeConfiguration]): Box[Set[NodeId]] = repository.save(nodeConfigurations.map(x => NodeConfigurationCache(x)))
  def getNodeConfigurationCache(): Box[Map[NodeId, NodeConfigurationCache]] = repository.getAll

  /**
   * Deduplicate each node configuration's policy drafts and enforce that at
   * most one directive is applied per mono-instance ("unique") technique.
   * Returns the sanitized configurations indexed by node id.
   */
  def sanitize(targets : Seq[NodeConfiguration]) : Box[Map[NodeId, NodeConfiguration]] = {

    /**
     * Sanitize directive to the node configuration, returning a new node configuration with
     * updated directives.
     *
     * That method check that:
     * - the directive added is not already in the NodeConfiguration (why ? perhaps a note to dev is better ?)
     * - there is at most one directive for each "unique" technique
     */
    def sanitizeOne(nodeConfig: NodeConfiguration) : Box[NodeConfiguration] = {

      //first of all: be sure to keep only one draft for a given draft id
      val deduplicateDraft = nodeConfig.policyDrafts.groupBy(_.draftId).map { case (draftId, set) =>
        val main = set.head
        //compare policy draft
        //Following parameter are not relevant in that comparison (we compare directive, not rule, here:)
        if(set.size > 1) {
          logger.error(s"The directive '${set.head.directiveId.value}' on rule '${set.head.ruleId.value}' was added several times on node '${nodeConfig.nodeInfo.id.value}' WITH DIFFERENT PARAMETERS VALUE. It's a bug, please report it. Taking one set of parameter at random for the promise generation.")
          import net.liftweb.json._
          implicit val formats = Serialization.formats(NoTypeHints)
          // JSON-diff the duplicates against the kept one so the log shows exactly what differs.
          def r(j:JValue) = if(j == JNothing) "{}" else pretty(render(j))
          val jmain = Extraction.decompose(main)
          logger.error("First directivedraft: " + pretty(render(jmain)))
          set.tail.foreach{ x =>
            val diff = jmain.diff(Extraction.decompose(x))
            logger.error(s"Diff with other draft: \\nadded:${r(diff.added)} \\nchanged:${r(diff.changed)} \\ndeleted:${r(diff.deleted)}")
          }
        }
        main
      }

      //now, we have to case to process:
      // - directives based on "unique" technique: we must keep only one. And to attempt to get a little stability in
      //   our generated promises, for a given technique, we will try to always choose the same directive
      //   (in case of ambiguity)
      // - other: just add them all!
      val (otherDrafts, uniqueTechniqueBasedDrafts) = deduplicateDraft.partition(_.cf3PolicyDraft.technique.isMultiInstance)

      //sort unique based draft by technique, and then check priority on each groups
      val keptUniqueDraft = uniqueTechniqueBasedDrafts.groupBy(_.cf3PolicyDraft.technique.id).map { case (techniqueId, setDraft) =>
        val withSameTechnique = setDraft.toSeq.sortBy( _.cf3PolicyDraft.priority )
        //we know that the size is at least one, so keep the head, and log discard tails
        //two part here: discart less priorized directive,
        //and for same priority, take one at random (the first sorted by rule id, to keep some consistency)
        //and add a big warning
        val priority = withSameTechnique.head.cf3PolicyDraft.priority
        val lesserPriority = withSameTechnique.dropWhile( _.cf3PolicyDraft.priority == priority)
        //keep the directive with the first (alpha-num) ID - as good as other comparison.
        val samePriority = withSameTechnique.takeWhile( _.cf3PolicyDraft.priority == priority).sortBy(_.directiveId.value)
        val keep = samePriority.head
        //only one log for all discared draft
        if(samePriority.size > 1) {
          logger.warn(s"Unicity check: NON STABLE POLICY ON NODE '${nodeConfig.nodeInfo.hostname}' for mono-instance (unique) technique '${keep.cf3PolicyDraft.technique.id}'. Several directives with same priority '${keep.cf3PolicyDraft.priority}' are applied. Keeping (ruleId@@directiveId) '${keep.draftId.value}', discarding: ${samePriority.tail.map(_.draftId.value).mkString("'", "', ", "'")}")
        }
        logger.trace(s"Unicity check: on node '${nodeConfig.nodeInfo.id.value}' for mono-instance (unique) technique '${keep.cf3PolicyDraft.technique.id}': keeping (ruleId@@directiveId) '${keep.draftId.value}', discarding less priorize: ${lesserPriority.map(_.draftId.value).mkString("'", "', ", "'")}")
        // Record the discarded drafts as "overridden by" the kept one.
        keep.copy(overrides = (samePriority.tail.map( _.draftId) ++ lesserPriority.map( _.draftId )).toSet)
      }

      Full(nodeConfig.copy(policyDrafts = (otherDrafts.toSet ++ keptUniqueDraft)))
    }

    for {
      sanitized <- sequence(targets) { sanitizeOne(_) }
    } yield {
      sanitized.map(c => (c.nodeInfo.id, c)).toMap
    }
  }

  /**
   * Return the ids of the node configurations whose computed cache differs
   * from the stored one (or has no stored cache) -- i.e. the nodes whose
   * promises actually need to be rewritten.
   */
  def selectUpdatedNodeConfiguration(nodeConfigurations: Map[NodeId, NodeConfiguration], cache: Map[NodeId, NodeConfigurationCache]): Set[NodeId] = {
    val newConfigCache = nodeConfigurations.map{ case (_, conf) => NodeConfigurationCache(conf) }

    val (updatedConfig, notUpdatedConfig) = newConfigCache.toSeq.partition{ p =>
      cache.get(p.id) match {
        case None => true
        case Some(e) => e != p
      }
    }

    if(notUpdatedConfig.size > 0) {
      logger.debug(s"Not updating non-modified node configuration: [${notUpdatedConfig.map( _.id.value).mkString(", ")}]")
    }

    if(updatedConfig.size == 0) {
      logger.info("No node configuration was updated, no promises to write")
      Set()
    } else {
      val nodeToKeep = updatedConfig.map( _.id ).toSet
      logger.info(s"Configuration of following nodes were updated, their promises are going to be written: [${updatedConfig.map(_.id.value).mkString(", ")}]")
      nodeConfigurations.keySet.intersect(nodeToKeep)
    }
  }

  /**
   * Write templates for node configuration that changed since the last write.
   *
   */
  def writeTemplate(rootNodeId: NodeId, nodesToWrite: Set[NodeId], allNodeConfigs: Map[NodeId, NodeConfiguration], versions: Map[NodeId, NodeConfigId]) : Box[Seq[NodeConfiguration]] = {
    val nodeConfigsToWrite = allNodeConfigs.filterKeys(nodesToWrite.contains(_))
    //debug - but don't fails for debugging !
    logNodeConfig.log(nodeConfigsToWrite.values.toSeq) match {
      case eb:EmptyBox =>
        val e = eb ?~! "Error when trying to write node configurations for debugging"
        logger.error(e)
        e.rootExceptionCause.foreach { ex =>
          logger.error("Root exception cause was:", ex)
        }
      case _ => //nothing to do
    }
    // NOTE(review): reloadCFEnginePromises() is invoked even when the write
    // failed (result is an EmptyBox) -- confirm this is intentional.
    val result = policyTranslator.writePromisesForMachines(nodesToWrite, rootNodeId, allNodeConfigs, versions).map(_ => nodeConfigsToWrite.values.toSeq )
    policyTranslator.reloadCFEnginePromises()
    result
  }

  // Aggregate change detection over several nodes; see DetectChangeInNodeConfiguration.
  override def detectChangeInNodes(nodes : Seq[NodeConfiguration], cache: Map[NodeId, NodeConfigurationCache], directiveLib: FullActiveTechniqueCategory) : Set[RuleId] = {
    nodes.flatMap{ x =>
      detectChangeInNode(cache.get(x.nodeInfo.id), x, directiveLib)
    }.toSet
  }

  override def detectChangeInNode(currentOpt: Option[NodeConfigurationCache], targetConfig: NodeConfiguration, directiveLib: FullActiveTechniqueCategory) : Set[RuleId] =
    detect.detectChangeInNode(currentOpt, targetConfig, directiveLib)
}
| Kegeruneku/rudder | rudder-core/src/main/scala/com/normation/rudder/services/policies/nodeconfig/NodeConfigurationServiceImpl.scala | Scala | agpl-3.0 | 15,620 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.testsuite.compiler
import org.junit.Test
import org.junit.Assert._
import org.junit.Assume._
import org.scalajs.testsuite.utils.Platform._
/* General note on the way these tests are written:
* We leverage the constant folding applied by the Scala compiler to write
* sound tests. We always perform the same operation, on the same operands,
* once in a way constant folding understands, and once in a way it doesn't.
* Since constant folding is performed on the JVM, we know it has the right
* semantics.
*/
class IntJSTest {
  // final val without type ascription to make sure these are constant-folded
  final val MinVal = Int.MinValue
  final val MaxVal = Int.MaxValue
  final val AlmostMinVal = Int.MinValue + 43
  final val AlmostMaxVal = Int.MaxValue - 36

  /**
   * Checks `Int` remainder (`%`) semantics under Scala.js: each `expected`
   * argument is a literal expression that scalac constant-folds at compile
   * time (i.e. with JVM semantics), while `a % b` inside `test` is evaluated
   * at runtime by the JS backend -- the two must agree.
   */
  @Test def remainder(): Unit = {
    def test(a: Int, b: Int, expected: Int): Unit =
      assertEquals(expected, a % b)

    // Ordinary and sign-mixed cases (result takes the sign of the dividend).
    test(654, 56, 654 % 56)
    test(0, 25, 0 % 25)
    test(-36, 13, -36 % 13)
    test(-55, -6, -55 % -6)
    // Boundary values: MinValue % -1 is the classic overflow-adjacent case.
    test(MinVal, 1, MinVal % 1)
    test(MinVal, -1, MinVal % -1)
    test(MaxVal, 1, MaxVal % 1)
    test(MaxVal, -1, MaxVal % -1)
    test(MaxVal, MinVal, MaxVal % MinVal)
    test(MaxVal, MaxVal, MaxVal % MaxVal)
    test(MinVal, MaxVal, MinVal % MaxVal)
    test(MinVal, MinVal, MinVal % MinVal)
    // Near-boundary dividends with assorted small divisors.
    test(AlmostMaxVal, 2, AlmostMaxVal % 2)
    test(AlmostMaxVal, 5, AlmostMaxVal % 5)
    test(AlmostMaxVal, -7, AlmostMaxVal % -7)
    test(AlmostMaxVal, -14, AlmostMaxVal % -14)
    test(AlmostMinVal, 100, AlmostMinVal % 100)
    test(AlmostMaxVal, -123, AlmostMaxVal % -123)
  }
}
| scala-js/scala-js | test-suite/js/src/test/scala/org/scalajs/testsuite/compiler/IntJSTest.scala | Scala | apache-2.0 | 1,886 |
package ru.arigativa.akka.streams
import akka.stream._
import akka.stream.stage.{AsyncCallback, GraphStageLogic, GraphStageWithMaterializedValue, OutHandler}
import akka.util.ByteString
import org.postgresql.copy.CopyOut
import scala.concurrent.{ExecutionContext, Future, Promise, blocking}
import scala.util.{Failure, Success, Try}
/**
 * Source stage that streams the output of a PostgreSQL COPY ... TO STDOUT
 * query as ByteString chunks; the materialized Future completes with the
 * number of bytes (not rows) copied.
 */
private[streams] class PgCopySourceStage(
  query: String,
  settings: PgCopySourceSettings
) extends GraphStageWithMaterializedValue[SourceShape[ByteString], Future[Long]] {

  private val out = Outlet[ByteString]("PgCopySource.out")

  def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[Long]) = {
    // Materialized value: completed with the total number of bytes read from the COPY stream.
    val completePromise = Promise[Long]()
    val connectionProvider = settings.connectionProvider
    val stageLogic = new GraphStageLogic(shape) with OutHandler {
      // COPY handle, lazily created on the first pull; null until then.
      private var copyOut: CopyOut = _
      // Whether a connection is currently held, so it is released exactly once.
      private var connIsAcq: Boolean = false
      private var bytesCopied: Long = 0

      override def onPull(): Unit = {
        Try {
          // First pull: acquire a connection and start the COPY OUT operation.
          // NOTE(review): acquire().get suggests the provider returns a Try --
          // a failed acquire throws here and is routed to fail() below.
          if (copyOut == null && !connIsAcq) {
            connIsAcq = true
            val conn = connectionProvider.acquire().get
            copyOut = conn.getCopyAPI.copyOut(query)
          }
          // readFromCopy() yields null once the COPY stream is exhausted.
          Option(copyOut.readFromCopy())
            .map { bytes =>
              bytesCopied += bytes.length
              ByteString(bytes)
            }
        } match {
          case Success(Some(elem)) => push(out, elem)
          case Success(None) => success(bytesCopied)
          case Failure(ex) => fail(ex)
        }
      }

      override def onDownstreamFinish(): Unit = {
        // Downstream cancelled: abort any in-flight COPY and complete with
        // whatever byte count was accumulated so far.
        if (copyOut != null && copyOut.isActive) {
          copyOut.cancelCopy()
        }
        success(bytesCopied)
      }

      // Release the connection (at most once) and complete the stage/promise.
      private def success(bytesCopied: Long): Unit = {
        if (connIsAcq) {
          connIsAcq = false
          connectionProvider.release(None)
        }
        completePromise.trySuccess(bytesCopied)
        completeStage()
      }

      // Release the connection with the failure cause and fail the stage/promise.
      private def fail(ex: Throwable): Unit = {
        if (connIsAcq) {
          connIsAcq = false
          connectionProvider.release(Some(ex))
        }
        completePromise.tryFailure(ex)
        failStage(ex)
      }

      setHandler(out, this)
    }
    stageLogic -> completePromise.future
  }

  override def shape: SourceShape[ByteString] = SourceShape.of(out)
}
| klpx/akka-streams-postgresql-copy | src/main/scala/ru/arigativa/akka/streams/PgCopySourceStage.scala | Scala | apache-2.0 | 2,574 |
package odfi.server
import org.odfi.indesign.core.module.IndesignModule
import org.odfi.tcl.module.TCLModule
import org.odfi.indesign.core.harvest.Harvest
import org.odfi.indesign.core.brain.Brain
import org.odfi.indesign.core.heart.HearthUtilTrait
import com.idyria.osi.tea.io.TeaIOUtils
import java.net.URL
import org.odfi.indesign.core.heart.Heart
import java.util.concurrent.TimeUnit
import java.io.File
import org.odfi.indesign.ide.module.maven.utils.Version
import com.idyria.osi.ooxoo.core.buffers.structural.AnyXList
import odfi.server.manager.modules.run.RunConfiguration
import org.odfi.eda.h2dl.H2DLModule
import org.odfi.indesign.core.harvest.fs.FileSystemHarvester
import org.odfi.indesign.core.module.HarvesterModule
import org.odfi.indesign.ide.module.maven.MavenModule
import org.odfi.indesign.core.harvest.fs.HarvestedFile
import org.odfi.indesign.ide.module.maven.MavenProjectHarvester
import org.odfi.wsb.fwapp.SiteApp
import odfi.server.manager.modules.run.SiteAppClass
object ODFIManagerModule extends IndesignModule with HearthUtilTrait with HarvesterModule {

  // Config data
  //-----------------
  // Register RunConfiguration so it can be carried in the module's custom config content.
  AnyXList(classOf[RunConfiguration])

  // Versions
  //------------------------
  // Latest ODFI version seen online; None until the first successful check.
  var latestOnlineVersion: Option[Version] = None

  // Periodic task that fetches the published version file and records it.
  val versionTask = createHearthTask {
    //-- Get Version Text
    var onlineVersion = new String(TeaIOUtils.swallow(new URL("https://www.opendesignflow.org/cd/org.odfi/win32/odfi-version.ini").openStream()))
    println("Found ONline version: " + onlineVersion)
    latestOnlineVersion = Some(Version(onlineVersion))
  }
  // Re-check the online version every 2 hours.
  versionTask.timeUnit = TimeUnit.HOURS
  versionTask.scheduleEvery = Some(2)

  // True when an online version was detected and is strictly newer than the local one.
  def isOnlineNewer = this.latestOnlineVersion match {
    case Some(v) if (v > ODFI.version.get) => true
    case _ => false
  }

  /**
   * Download the installer matching the detected online version into `f`.
   * Fails via sys.error when no online version has been detected yet.
   */
  def saveOnlineInstallertoFile(f: File) = {
    latestOnlineVersion match {
      case Some(v) =>
        TeaIOUtils.writeToFile(f, new URL(s"http://www.idyria.com/access/osi/files/builds/odfi/win32/odfi-installer-${v.toString}.exe").openStream())
      case None =>
        sys.error("Cannot save ODFI installer because online version was not detected")
    }
  }

  // Harvesting
  //------------
  override def doHarvest = {
    // NOTE(review): `conf` is unused below (run-tool harvesting is commented out) --
    // confirm whether the config fetch is still needed for its side effects.
    val conf = this.config.get
    // Run Tool
    //----------------
    //-- Load maven runs in FS harvester
    /*runConfiguration.mavenRuns.foreach {
      mavenRun =>
        // Make sure it is in search path
        val f = HarvestedFile(new File(mavenRun.path))
        FileSystemHarvester.addPath(f)
        // look up site classes
        mavenRun.cleanDerivedResourcesOfType[SiteAppClass[_]]
        MavenProjectHarvester.getMavenProjectAtLocation(mavenRun.path) match {
          case Some(p) =>
            val siteApps = p.discoverType[SiteApp]
            mavenRun.addDerivedResources(siteApps.map(new SiteAppClass(_)))
          case None =>
        }
    }*/
  }

  // Run tool
  //---------------
  // All RunConfiguration entries stored in the module's custom config content.
  def getRunConfigurations = {
    this.config.get.custom.content.getAllOfType[RunConfiguration]
  }

  /**
   * This method does not save the config back to its source, make sure to sync the config!!
   */
  def saveRunConfiguration(rc:RunConfiguration) = this.config.get.custom.content += rc

  // Lifecycle
  //--------------
  this.onLoad {
    //println(s"Loading ODFIDModule")
    //-- Load TCL
    // TCL is optional: a failure to load it is logged but does not abort module load.
    try {
      this.requireModule(TCLModule)
    } catch {
      case e: Throwable =>
        e.printStackTrace()
    }
    //-- Load H2DL
    requireModule(H2DLModule)
    //-- Load maven support
    requireModule(MavenModule)
  }

  this.onInit {
    //-- Add ODFI Harvester
    Harvest.addHarvester(ODFIHarvester)
  }

  this.onStart {
    Heart.pump(versionTask)
    Harvest.run
  }
}
package monocle.std
import monocle.MonocleSuite
import monocle.law.discipline.function._
import scala.collection.immutable.SortedMap
import cats.kernel.instances.sortedMap._
import scala.annotation.nowarn
class SortedMapSpec extends MonocleSuite {
  // Law-checks the optics instances (At, Each, Empty, Index, FilterIndex)
  // provided for scala.collection.immutable.SortedMap.
  checkAll("at SortedMap", AtTests[SortedMap[Int, String], Int, Option[String]])
  checkAll("each SortedMap", EachTests[SortedMap[Int, String], String])
  // Empty instance is deprecated upstream, hence the @nowarn on this check.
  checkAll("empty SortedMap", EmptyTests[SortedMap[Int, String]]): @nowarn
  checkAll("index SortedMap", IndexTests[SortedMap[Int, String], Int, String])
  checkAll("filterIndex SortedMap", FilterIndexTests[SortedMap[Int, Char], Int, Char])
}
| julien-truffaut/Monocle | test/shared/src/test/scala/monocle/std/SortedMapSpec.scala | Scala | mit | 648 |
package naming
import eu.inn.binders.naming.{PascalCaseBuilder, SnakeCaseParser}
import org.scalatest.{FlatSpec, Matchers}
class TestPascalCaseBuilder extends FlatSpec with Matchers {

  // Round-trip: parse a snake_case identifier and rebuild it in PascalCase.
  "PascalCaseBuilder " should " build stringLikeThis" in {
    val snakeParser = new SnakeCaseParser()
    val pascalBuilder = new PascalCaseBuilder()

    snakeParser.parse("string_like_this", pascalBuilder)

    assert(pascalBuilder.toString == "StringLikeThis")
  }
}
package com.wlangiewicz.xbot
import com.codahale.metrics.MetricRegistry
import com.typesafe.config.{Config, ConfigFactory}
import com.wlangiewicz.xbot.domain.{Order, MyOrder}
import com.wlangiewicz.xbot.exceptions.{ExchangeException, BreakingException}
import com.wlangiewicz.xbot.helpers.FakeBitBayHttpClient
import com.wlangiewicz.xbot.providers.{BitBayProvider, ExchangeProvider}
import com.wlangiewicz.xbot.test.UnitSpec
import com.wlangiewicz.xbot.util.SimpleSpreadOfferRateEstimator
class BotTest extends UnitSpec {
  var bot: Bot = _
  var conf: Config = _
  // NOTE(review): this field is never assigned -- the `val exchange` declared
  // inside `before` shadows it, so the field stays at its default (null).
  // Confirm whether it is still needed or should be removed/assigned.
  var exchange: ExchangeProvider = _

  // Builds a fresh Bot against a fake BitBay HTTP client before each test.
  before {
    conf = ConfigFactory.load("application-test")
    val exchange = new BitBayProvider(
      new FakeBitBayHttpClient, "http://localhost-trading/ddd", "http://localhost-trading/ddd", "localhost-trading", "http://localhost-trading/ddd", "LTC", "PLN", 10, new MetricRegistry())
    val estimator = new SimpleSpreadOfferRateEstimator(0.005, 0.01, 1)
    bot = new Bot(exchange, estimator, new MetricRegistry())
  }

  // Smoke test: a full loop over both sides must not throw.
  "A Bot" should "run" in {
    bot.loop("both")
  }

  // highestNotMyBid must exclude the bot's own orders; with nothing left, it throws.
  it should "throw NoSuchElementException when my orders == orders when testing bids" in {
    val myOrders = List(MyOrder("6649190","LTC",1416737109,"PLN","bid","active",875797219,10710999988L,10710999988L,1223000000))
    val notMyOrders = List(Order(1223000000,875797219,"bid"))
    intercept[NoSuchElementException] {
      bot.highestNotMyBid(notMyOrders, myOrders).rate
    }
  }

  it should "select highest bid when I don't have any orders active" in {
    val myOrders = List()
    val notMyOrders = List(Order(1223000000,875797219,"bid"), Order(1222000000,875797219,"bid"))
    assert(1223000000 === bot.highestNotMyBid(notMyOrders, myOrders).rate)
  }

  it should "select highest bid when my bid is not the highest" in {
    val myOrders = List(MyOrder("6649190","LTC",1416737109,"PLN","bid","active",875797219,10710999988L,10710999988L,1221000000))
    val notMyOrders = List(Order(1223000000,875797219,"bid"), Order(1222000000,875797219,"bid"))
    assert(1223000000 === bot.highestNotMyBid(notMyOrders, myOrders).rate)
  }

  // When the top bid matches one of my own, the next-best bid is returned.
  it should "select highest bid when my bid matches highest not my bid" in {
    val myOrders = List(MyOrder("6649190","LTC",1416737109,"PLN","bid","active",875797219,10710999988L,10710999988L,1223000000))
    val notMyOrders = List(Order(1223000000,875797219,"bid"), Order(1222000000,875797219,"bid"))
    assert(1222000000 === bot.highestNotMyBid(notMyOrders, myOrders).rate)
  }

  it should "select highest bid when my bid is the highest" in {
    val myOrders = List(MyOrder("6649190","LTC",1416737109,"PLN","bid","active",875797219,10710999988L,10710999988L,1224000000))
    val notMyOrders = List(Order(1223000000,875797219,"bid"), Order(1222000000,875797219,"bid"))
    assert(1223000000 === bot.highestNotMyBid(notMyOrders, myOrders).rate)
  }

  // Mirror cases for the ask side: lowestNotMyAsk excludes my own asks.
  it should "throw NoSuchElementException when my orders == orders when testing asks" in {
    val myOrders = List(MyOrder("6649190","LTC",1416737109,"PLN","ask","active",875797219,10710999988L,10710999988L,1223000000))
    val notMyOrders = List(Order(1223000000,875797219,"ask"))
    intercept[NoSuchElementException] {
      bot.lowestNotMyAsk(notMyOrders, myOrders).rate
    }
  }

  it should "select lowest ask when I don't have any orders active" in {
    val myOrders = List()
    val notMyOrders = List(Order(1223000000,875797219,"ask"), Order(1222000000,875797219,"ask"))
    assert(1222000000 === bot.lowestNotMyAsk(notMyOrders, myOrders).rate)
  }

  it should "select lowest ask when my ask is not the lowest" in {
    val myOrders = List(MyOrder("6649190","LTC",1416737109,"PLN","ask","active",875797219,10710999988L,10710999988L,1224000000))
    val notMyOrders = List(Order(1223000000,875797219,"ask"), Order(1222000000,875797219,"ask"))
    assert(1222000000 === bot.lowestNotMyAsk(notMyOrders, myOrders).rate)
  }

  it should "select lowest when my ask matches lowest not my ask" in {
    val myOrders = List(MyOrder("6649190","LTC",1416737109,"PLN","ask","active",875797219,10710999988L,10710999988L,1222000000))
    val notMyOrders = List(Order(1223000000,875797219,"ask"), Order(1222000000,875797219,"ask"))
    assert(1223000000 === bot.lowestNotMyAsk(notMyOrders, myOrders).rate)
  }

  it should "select lowest ask when my ask is the lowest" in {
    val myOrders = List(MyOrder("6649190","LTC",1416737109,"PLN","ask","active",875797219,10710999988L,10710999988L,1224000000))
    val notMyOrders = List(Order(1223000000,875797219,"ask"), Order(1222000000,875797219,"ask"))
    assert(1222000000 === bot.lowestNotMyAsk(notMyOrders, myOrders).rate)
  }
}
/*
* Copyright 2015 Philip L. McMahon
*
* Philip L. McMahon licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package rascql.postgresql.stream
import java.nio.charset.Charset
import akka.stream._
import akka.stream.stage._
import akka.util.ByteString
import rascql.postgresql.protocol.{FrontendMessage, StartupMessage}
/**
 * Reads the requested `client_encoding` from the initial [[StartupMessage]]
 * and uses this to encode the initial and all future messages.
 *
 * @author Philip L. McMahon
 */
private[stream] class EncoderStage(charset: Charset)
  extends GraphStage[FlowShape[FrontendMessage, ByteString]] {
  // Startup parameter pinning the server-side client_encoding to the charset
  // this stage actually encodes with.
  private val `client_encoding` = "client_encoding" -> charset.displayName()
  val in: Inlet[FrontendMessage] = Inlet("encoder.in")
  val out: Outlet[ByteString] = Outlet("encoder.out")
  val shape = FlowShape(in, out)
  def createLogic(attr: Attributes) = new GraphStageLogic(shape) {
    // Initial state: the very first message must be a StartupMessage.
    val starting: InHandler = new InHandler {
      def onPush() = grab(in) match {
        case StartupMessage(user, params) =>
          // Overwrite any caller-supplied client_encoding with this stage's
          // charset so the declared parameter always matches the encoding
          // actually used, then switch to the steady-state handler.
          push(out, StartupMessage(user, params + `client_encoding`).encode(charset))
          setHandler(in, started)
        case msg =>
          // Any other message before startup is a protocol violation.
          failStage(UnexpectedFrontendMessage(msg))
      }
    }
    // Steady state: encode every subsequent message with the same charset.
    val started: InHandler = new InHandler {
      def onPush() = push(out, grab(in).encode(charset))
    }
    setHandler(in, starting)
    setHandler(out, new OutHandler {
      def onPull() = pull(in)
    })
  }
}
| rascql/rascql | src/main/scala/rascql/postgresql/stream/EncoderStage.scala | Scala | apache-2.0 | 2,164 |
package com.arcusys.valamis.certificate.storage
import com.arcusys.valamis.certificate.model.goal.AssignmentGoal
import com.arcusys.valamis.model.PeriodTypes
/**
 * Persistence operations for [[AssignmentGoal]] records attached to
 * certificates.
 */
trait AssignmentGoalStorage {
  /**
   * Persists a new assignment goal for the given certificate.
   *
   * @param certificateId    owning certificate
   * @param assignmentId     assignment the goal refers to
   * @param periodValue      amount of [[periodType]] units allotted
   * @param periodType       unit of the completion period
   * @param arrangementIndex display/ordering position of the goal
   * @param isOptional       whether the goal is optional (default: required)
   * @param groupId          optional goal-group membership
   * @return the created goal
   */
  def create(certificateId: Long,
             assignmentId: Long,
             periodValue: Int,
             periodType: PeriodTypes.Value,
             arrangementIndex: Int,
             isOptional: Boolean = false,
             groupId: Option[Long] = None): AssignmentGoal
  /** All goals referring to the given assignment, across certificates. */
  def getByAssignmentId(assignmentId: Long): Seq[AssignmentGoal]
  /** The goal for this (certificate, assignment) pair, if any. */
  def get(certificateId: Long, assignmentId: Long): Option[AssignmentGoal]
  /** The goal with the given id, if any. */
  def getBy(goalId: Long): Option[AssignmentGoal]
  /** All goals belonging to the given certificate. */
  def getByCertificateId(certificateId: Long): Seq[AssignmentGoal]
} | igor-borisov/valamis | valamis-certificate/src/main/scala/com/arcusys/valamis/certificate/storage/AssignmentGoalStorage.scala | Scala | gpl-3.0 | 728 |
package com.benoj.janus.behavior
import com.benoj.janus.PersistentLoggingActor
import com.benoj.janus.behavior.Created.{Create, `Exist?`}
import com.benoj.janus.common.NotFound
/** Protocol messages for the [[Created]] behaviour. */
object Created {
  /** Command: create the entity with the given id. */
  case class Create(id: String)
  /** Query: has this entity been created yet? */
  case object `Exist?`
  /** Reply/event: the entity with the given id has been created. */
  case class Created(id: String)
}
/**
 * Mixin gating all commands behind an initial [[Created.Create]]: before
 * creation every message is answered with [[NotFound]]; after creation the
 * actor handles `postCreation`, existence queries, and the regular commands.
 */
trait Created extends CommandProcess {
  self: PersistentLoggingActor with JanusEventProcessing =>
  // Handles existence queries once the entity has been created.
  private def exists: Receive = {
    // NOTE(review): this evaluates the companion object `Created` and discards
    // the value — no reply is sent to the asker. Probably meant to be
    // `sender() ! ...`; confirm intended behaviour.
    case `Exist?` => Created
  }
  // Behaviour installed after successful creation (provided by the concrete actor).
  def postCreation: Receive
  override def processCommand: Receive = createdProcessCommand orElse super.processCommand
  def createdProcessCommand: Receive = {
    case Create(id) =>
      log.info(s"Creating ${this.getClass.getName}")
      // Switch to the post-creation behaviour chain; Create is handled only once.
      context.become(postCreation orElse exists orElse processCommand)
      log.info(s"Sender: ${sender()}")
      sender() ! Created.Created(id)
    case msg@_ =>
      // Any message arriving before creation is rejected.
      log.info(s"Attempting to send message $msg with non existing actor ${this.self.path.name}")
      sender() ! NotFound
  }
}
| benoj/janus | core/src/main/scala/com/benoj/janus/behavior/Created.scala | Scala | mit | 994 |
package org.igye.logic
import org.igye.logic.LogicalOperationsOnPredicate.predicateToLogicalOperationsOnPredicate
import org.junit.{Assert, Test}
/**
 * Unit tests for [[PredicateStorage]]: truth-value storage and lookup,
 * including negation handling and contradiction detection.
 */
class PredicateStorageTest {
    // Simple named predicates shared by the tests.
    val A = StringPredicate("A")
    val B = StringPredicate("B")
    val C = StringPredicate("C")
    val D = StringPredicate("D")
    val E = StringPredicate("E")
    /** Lookups must honour save/saveTrue/saveFalse through any depth of negation. */
    @Test
    def getTrueOrFalse(): Unit = {
        val storage = new PredicateStorage
        storage.save(A)
        Assert.assertTrue(storage.getTrueOrFalse(A).get)
        storage.saveTrue(!(A&B))
        Assert.assertFalse(storage.getTrueOrFalse(A&B).get)
        storage.saveFalse(C)
        Assert.assertTrue(storage.getTrueOrFalse(!C).get)
        storage.save(C is D)
        Assert.assertFalse(storage.getTrueOrFalse(!(C is D)).get)
        storage.saveTrue(!(!(!(E))))
        Assert.assertFalse(storage.getTrueOrFalse(E).get)
        Assert.assertTrue(storage.getTrueOrFalse(!(!(!(E)))).get)
        Assert.assertFalse(storage.getTrueOrFalse(!(!(!(!(E))))).get)
    }
    /** Saving a statement contradicting a stored one must raise IllegalArgumentException. */
    @Test
    def saveIsStmtThrowsException(): Unit = {
        val storage = new PredicateStorage
        // Not expected to throw, so kept outside the try block.
        storage.saveTrue((A is B) & !(C is D))
        try {
            storage.saveFalse((A is B) & !(C is D))
            // Reached only if no exception was thrown. Fix: the original caught
            // Throwable, which also swallowed the AssertionError raised here and
            // re-reported it with a misleading message; catching only
            // IllegalArgumentException lets this failure (and any unexpected
            // exception) propagate to JUnit unchanged.
            Assert.fail("exception should be thrown")
        } catch {
            case ex: IllegalArgumentException => Assert.assertEquals(
                "statement [(A is B) & !(C is D) is false] contradicts to [(A is B) & !(C is D) is true]",
                ex.getMessage
            )
        }
    }
}
| Igorocky/logicproc | src/test/scala/org/igye/logic/PredicateStorageTest.scala | Scala | mit | 1,663 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import scala.collection.mutable.Map
import org.apache.spark.executor.ExecutorMetrics
import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
import org.apache.spark.storage.BlockManagerId
import org.apache.spark.util.AccumulatorV2
/**
* Low-level task scheduler interface, currently implemented exclusively by
* [[org.apache.spark.scheduler.TaskSchedulerImpl]].
* This interface allows plugging in different task schedulers. Each TaskScheduler schedules tasks
* for a single SparkContext. These schedulers get sets of tasks submitted to them from the
* DAGScheduler for each stage, and are responsible for sending the tasks to the cluster, running
* them, retrying if there are failures, and mitigating stragglers. They return events to the
* DAGScheduler.
*/
private[spark] trait TaskScheduler {
  // Fallback application id, used when no cluster manager supplies one;
  // unique-ish via the creation timestamp.
  private val appId = "spark-application-" + System.currentTimeMillis
  /** Root of the schedulable (pool/task-set) tree. */
  def rootPool: Pool
  /** Scheduling policy applied at the root pool. */
  def schedulingMode: SchedulingMode
  /** Start background threads / connect to the cluster; called once. */
  def start(): Unit
  // Invoked after system has successfully initialized (typically in spark context).
  // Yarn uses this to bootstrap allocation of resources based on preferred locations,
  // wait for slave registrations, etc.
  def postStartHook(): Unit = { }
  // Disconnect from the cluster.
  def stop(): Unit
  // Submit a sequence of tasks to run.
  def submitTasks(taskSet: TaskSet): Unit
  // Kill all the tasks in a stage and fail the stage and all the jobs that depend on the stage.
  // Throw UnsupportedOperationException if the backend doesn't support kill tasks.
  def cancelTasks(stageId: Int, interruptThread: Boolean): Unit
  /**
   * Kills a task attempt.
   * Throw UnsupportedOperationException if the backend doesn't support kill a task.
   *
   * @return Whether the task was successfully killed.
   */
  def killTaskAttempt(taskId: Long, interruptThread: Boolean, reason: String): Boolean
  // Kill all the running task attempts in a stage.
  // Throw UnsupportedOperationException if the backend doesn't support kill tasks.
  def killAllTaskAttempts(stageId: Int, interruptThread: Boolean, reason: String): Unit
  // Notify the corresponding `TaskSetManager`s of the stage, that a partition has already completed
  // and they can skip running tasks for it.
  def notifyPartitionCompletion(stageId: Int, partitionId: Int): Unit
  // Set the DAG scheduler for upcalls. This is guaranteed to be set before submitTasks is called.
  def setDAGScheduler(dagScheduler: DAGScheduler): Unit
  // Get the default level of parallelism to use in the cluster, as a hint for sizing jobs.
  def defaultParallelism(): Int
  /**
   * Update metrics for in-progress tasks and executor metrics, and let the master know that the
   * BlockManager is still alive. Return true if the driver knows about the given block manager.
   * Otherwise, return false, indicating that the block manager should re-register.
   */
  def executorHeartbeatReceived(
      execId: String,
      accumUpdates: Array[(Long, Seq[AccumulatorV2[_, _]])],
      blockManagerId: BlockManagerId,
      executorUpdates: Map[(Int, Int), ExecutorMetrics]): Boolean
  /**
   * Get an application ID associated with the job.
   *
   * @return An application ID
   */
  def applicationId(): String = appId
  /**
   * Process a decommissioning executor.
   */
  def executorDecommission(executorId: String): Unit
  /**
   * Process a lost executor
   */
  def executorLost(executorId: String, reason: ExecutorLossReason): Unit
  /**
   * Process a removed worker
   */
  def workerRemoved(workerId: String, host: String, message: String): Unit
  /**
   * Get an application's attempt ID associated with the job.
   *
   * @return An application's Attempt ID
   */
  def applicationAttemptId(): Option[String]
}
| goldmedal/spark | core/src/main/scala/org/apache/spark/scheduler/TaskScheduler.scala | Scala | apache-2.0 | 4,589 |
/*
* Copyright (C) 2013 Alcatel-Lucent.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Licensed to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package molecule
package io.impl
//case class ParlSignal[A](results: List[Either[Signal, A]]) extends Signal
/** Signal carrying the final result `r` of a process that is shutting down. */
// `val` removed: case-class constructor parameters are public vals by default,
// so the explicit modifier was redundant (interface unchanged).
private[io] case class Shutdown[R](r: R) extends Signal
/** Marker signal indicating a handover. */
private[io] case object Handover extends Signal
| molecule-labs/molecule | molecule-io/src/main/scala/molecule/io/impl/Signals.scala | Scala | apache-2.0 | 940 |
package org.killbill.billing.client.util
import spray.json._
/**
 * Spray-JSON helper utilities.
 *
 * Created by jgomez on 29/10/2015.
 */
object JsonHelper {
  /**
   * Builds a [[JsonFormat]] for the values of the given [[Enumeration]]:
   * values are written as their name (a JsString) and read back with
   * `enu.withName`; any non-string JSON value raises DeserializationException.
   * (Unknown names propagate the exception thrown by `withName`.)
   */
  def jsonEnum[T <: Enumeration](enu: T) = new JsonFormat[T#Value] {
    def write(obj: T#Value) = JsString(obj.toString)
    def read(json: JsValue) = json match {
      case JsString(txt) => enu.withName(txt)
      case something => throw new DeserializationException(s"Expected a value from enum $enu instead of $something")
    }
  }
}
| jgomez-vp/killbill-client-scala | src/main/scala/org/killbill/billing/client/util/JsonHelper.scala | Scala | apache-2.0 | 468 |
package org.jetbrains.plugins.hocon.editor
import com.intellij.codeInsight.editorActions.enter.EnterHandlerDelegate.Result
import com.intellij.codeInsight.editorActions.enter.{EnterBetweenBracesHandler, EnterHandlerDelegate}
import com.intellij.openapi.actionSystem.DataContext
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.editor.actionSystem.EditorActionHandler
import com.intellij.openapi.util.Ref
import com.intellij.psi.PsiFile
import org.jetbrains.plugins.hocon.lang.HoconLanguage
/**
 * Like [[com.intellij.json.formatter.JsonEnterBetweenBracesHandler]]
 *
 * Applies the platform's enter-between-braces behaviour only to HOCON files;
 * for any other language the delegate reports Continue so the next handler
 * in the chain runs.
 *
 * @author ghik
 */
class HoconEnterBetweenBracesHandler extends EnterBetweenBracesHandler {
  override def preprocessEnter(file: PsiFile, editor: Editor, caretOffsetRef: Ref[Integer],
                               caretAdvance: Ref[Integer], dataContext: DataContext,
                               originalHandler: EditorActionHandler): Result =
    // Infix call to Language#is — guard on the file's language being HOCON.
    if (file.getLanguage is HoconLanguage)
      super.preprocessEnter(file, editor, caretOffsetRef, caretAdvance, dataContext, originalHandler)
    else
      EnterHandlerDelegate.Result.Continue
  // The brace pairs this handler reacts to: {} and [].
  override def isBracePair(c1: Char, c2: Char): Boolean =
    c1 == '{' && c2 == '}' || c1 == '[' && c2 == ']'
}
| triplequote/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/hocon/editor/HoconEnterBetweenBracesHandler.scala | Scala | apache-2.0 | 1,254 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.index.s3
import org.locationtech.geomesa.index.api.ShardStrategy.ZShardStrategy
import org.locationtech.geomesa.index.api.{GeoMesaFeatureIndex, IndexKeySpace}
import org.locationtech.geomesa.index.geotools.GeoMesaDataStore
import org.locationtech.geomesa.index.index.ConfiguredIndex
import org.locationtech.geomesa.index.strategies.SpatioTemporalFilterStrategy
import org.locationtech.geomesa.utils.index.IndexMode.IndexMode
import org.opengis.feature.simple.SimpleFeatureType
/**
 * Spatio-temporal ("s3") feature index keyed on a geometry attribute and a
 * date attribute. Key generation is delegated to [[S3IndexKeySpace]] with a
 * Z-based shard strategy; there is no tiered (secondary) key space.
 */
class S3Index protected (
    ds: GeoMesaDataStore[_],
    sft: SimpleFeatureType,
    version: Int,
    geom: String,
    dtg: String,
    mode: IndexMode
  ) extends GeoMesaFeatureIndex[S3IndexValues, S3IndexKey](ds, sft, S3Index.name, version, Seq(geom, dtg), mode)
    with SpatioTemporalFilterStrategy[S3IndexValues, S3IndexKey] {
  // Convenience constructor pinned to the current index version.
  def this(ds: GeoMesaDataStore[_], sft: SimpleFeatureType, geomField: String, dtgField: String, mode: IndexMode) =
    this(ds, sft, S3Index.version, geomField, dtgField, mode)
  override val keySpace: IndexKeySpace[S3IndexValues, S3IndexKey] =
    new S3IndexKeySpace(sft, ZShardStrategy(sft), geom, dtg)
  override def tieredKeySpace: Option[IndexKeySpace[_, _]] = None
}
/** Companion: registration metadata for the "s3" index. */
object S3Index extends ConfiguredIndex {
  override val name = "s3"
  override val version = 1
  // Available only for attribute combinations accepted by S3IndexKeySpace.
  override def supports(sft: SimpleFeatureType, attributes: Seq[String]): Boolean =
    S3IndexKeySpace.supports(sft, attributes)
  // No default attribute combinations: this index is never created implicitly.
  override def defaults(sft: SimpleFeatureType): Seq[Seq[String]] = Seq.empty
}
| aheyne/geomesa | geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/index/s3/S3Index.scala | Scala | apache-2.0 | 2,008 |
/*
* Copyright 2015 the original author or authors.
* @https://github.com/scouter-project/scouter
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scouter.server.netio.service.handle;
import java.util.ArrayList
import java.util.HashMap
import java.util.HashSet
import scouter.lang.CounterKey
import scouter.lang.DigestKey
import scouter.lang.TimeTypeEnum
import scouter.lang.constants.StatusConstants
import scouter.lang.pack.MapPack
import scouter.lang.pack.StatusPack
import scouter.lang.value.DoubleValue
import scouter.lang.value.FloatValue
import scouter.lang.value.MapValue
import scouter.io.DataInputX
import scouter.io.DataOutputX
import scouter.net.TcpFlag
import scouter.server.core.AgentManager
import scouter.server.core.cache.CounterCache
import scouter.server.core.cache.StatusCache
import scouter.server.db.DailyCounterRD
import scouter.server.db.StatusRD
import scouter.server.netio.AgentCall
import scouter.server.netio.service.anotation.ServiceHandler
import scouter.util.ArrayUtil
import scouter.util.CastUtil
import scouter.util.DateUtil
import scouter.net.RequestCmd
import scouter.server.Logger
import scouter.util.{CastUtil, DateUtil, IntKeyMap, StringUtil}
import scouter.server.util.TimedSeries
import scouter.server.db.{ObjectRD, RealtimeCounterRD}
import scouter.lang.counters.CounterConstants
import scala.collection.JavaConversions._
import scouter.util.HashUtil
import scouter.util.DataUtil
/**
 * Server-side service handlers for CUBRID database monitoring requests.
 * Each @ServiceHandler method decodes its parameters from `din` and streams
 * result packs back on `dout`, prefixing each pack with TcpFlag.HasNEXT.
 */
class CubridService {
    /**
     * Sums the realtime DML counters (selects/updates/inserts/deletes) over
     * all requested agents and writes a single MapValue with the totals.
     */
    @ServiceHandler(RequestCmd.CUBRID_DB_REALTIME_DML)
    def realtimeDML(din: DataInputX, dout: DataOutputX, login: Boolean): Unit = {
        val param = din.readMapPack();
        val objHashLv = param.getList("objHash");
        if (objHashLv.size() == 0) {
            return;
        }
        var dbname = "";
        var select = 0L;
        var update = 0L;
        var insert = 0L;
        var delete = 0L;
        for (i <- 0 to objHashLv.size()-1) {
            val objHash = objHashLv.getInt(i);
            val key1 = new CounterKey(objHash, "num_query_selects", TimeTypeEnum.REALTIME);
            val v1 = CounterCache.get(key1);
            if (v1 != null) {
                select += CastUtil.clong(v1);
            }
            val key2 = new CounterKey(objHash, "num_query_updates", TimeTypeEnum.REALTIME);
            val v2 = CounterCache.get(key2);
            if (v2 != null) {
                update += CastUtil.clong(v2);
            }
            val key3 = new CounterKey(objHash, "num_query_inserts", TimeTypeEnum.REALTIME);
            val v3 = CounterCache.get(key3);
            if (v3 != null) {
                insert += CastUtil.clong(v3);
            }
            val key4 = new CounterKey(objHash, "num_query_deletes", TimeTypeEnum.REALTIME);
            val v4 = CounterCache.get(key4);
            if (v4 != null) {
                delete += CastUtil.clong(v4);
            }
            val key5 = new CounterKey(objHash, "db_num", TimeTypeEnum.REALTIME);
            val v5 = CounterCache.get(key5);
            if (v5 != null) {
                // NOTE(review): String += Long concatenates the digits of each
                // agent's db_num into one string (e.g. "12" + "34" -> "1234");
                // looks unintended — confirm expected "dbname" content.
                dbname += CastUtil.clong(v5);
            }
        }
        val value = new MapValue();
        value.put("dbname", dbname);
        value.put("select", select);
        value.put("update", update);
        value.put("insert", insert);
        value.put("delete", delete);
        dout.writeByte(TcpFlag.HasNEXT);
        dout.writeValue(value);
    }
    /**
     * Streams the cached status pack identified by `key` for every requested
     * agent. The "time"/"date" values are computed but unused here.
     */
    @ServiceHandler(RequestCmd.CUBRID_ACTIVE_DB_LIST)
    def realtimeActiveDBList(din: DataInputX, dout: DataOutputX, login: Boolean): Unit = {
        val param = din.readPack().asInstanceOf[MapPack];
        val objHashLv = param.getList("objHash");
        val key = param.getText("key");
        val time = param.getLong("time");
        val date = DateUtil.yyyymmdd(time);
        for (i <- 0 to objHashLv.size()-1) {
            val objHash = objHashLv.getInt(i);
            val status = StatusCache.get(objHash, key);
            if(status != null) {
                dout.writeByte(TcpFlag.HasNEXT);
                dout.writePack(status);
            }
        }
    }
    /**
     * Streams the first persisted status pack matching `key` and one of the
     * requested agents within [stime, etime] of `date`.
     */
    @ServiceHandler(RequestCmd.CUBRID_DB_REALTIME_STATUS)
    def realtimeDMLStatus(din: DataInputX, dout: DataOutputX, login: Boolean): Unit = {
        val param = din.readPack().asInstanceOf[MapPack];
        val objHashLv = param.getList("objHash");
        val key = param.getText("key");
        val date = param.getText("date");
        val etime = param.getLong("etime");
        val stime = param.getLong("stime");
        val time = param.getLong("time");
        val objArray = new ArrayList[Int]();
        for (i <- 0 to objHashLv.size() - 1) {
            // NOTE(review): `p` is created and never used.
            val p = new MapPack();
            val objHash = objHashLv.getInt(i);
            objArray.add(objHash);
        }
        val handler = (time: Long, data: Array[Byte]) => {
            val pk = new DataInputX(data).readPack().asInstanceOf[StatusPack];
            if (objArray.contains(pk.objHash) && pk.key == key) {
                dout.writeByte(TcpFlag.HasNEXT);
                dout.writePack(pk);
                // NOTE(review): `return` inside a lambda is a nonlocal return —
                // it aborts the whole service method after the FIRST matching
                // pack (via NonLocalReturnControl). Confirm this is intended.
                return
            }
        }
        // NOTE(review): the handler already filters by objArray, so re-running
        // the same scan once per requested object repeats identical work.
        for (i <- 0 to objArray.size() - 1) {
            StatusRD.readFromEndTime(date, stime, etime, handler);
        }
    }
    /**
     * Same pattern as realtimeDMLStatus: streams the first persisted status
     * pack matching `key` for the requested agents in the given time range.
     */
    @ServiceHandler(RequestCmd.CUBRID_DB_SERVER_INFO)
    def DbServerInfo(din: DataInputX, dout: DataOutputX, login: Boolean): Unit = {
        val param = din.readPack().asInstanceOf[MapPack];
        val objHashLv = param.getList("objHash");
        val key = param.getText("key");
        val date = param.getText("date");
        val etime = param.getLong("etime");
        val stime = param.getLong("stime");
        val time = param.getLong("time");
        val objArray = new ArrayList[Int]();
        for (i <- 0 to objHashLv.size() - 1) {
            // NOTE(review): `p` is created and never used.
            val p = new MapPack();
            val objHash = objHashLv.getInt(i);
            objArray.add(objHash);
        }
        val handler = (time: Long, data: Array[Byte]) => {
            val pk = new DataInputX(data).readPack().asInstanceOf[StatusPack];
            if (objArray.contains(pk.objHash) && pk.key == key) {
                dout.writeByte(TcpFlag.HasNEXT);
                dout.writePack(pk);
                // NOTE(review): nonlocal return — see realtimeDMLStatus.
                return
            }
        }
        // NOTE(review): repeated identical scan, once per requested object.
        for (i <- 0 to objHashLv.size() - 1) {
            StatusRD.readFromEndTime(date, stime, etime, handler);
        }
    }
    /**
     * Reads counter values for [stime, etime] from the realtime counter store
     * and answers one MapPack (time/value series) per requested agent.
     * `objectName` is read but unused.
     */
    @ServiceHandler(RequestCmd.CUBRID_DB_PERIOD_MULTI_DATA)
    def realtimeCouterTest(din: DataInputX, dout: DataOutputX, login: Boolean): Unit = {
        val param = din.readPack().asInstanceOf[MapPack];
        val objHashLv = param.getList("objHash");
        val counter = param.getText("counter");
        val stime = param.getLong("stime");
        val etime = param.getLong("etime");
        val date = DateUtil.yyyymmdd(stime);
        val objectName = param.getText("objName");
        val mapPackMap = new IntKeyMap[MapPack]();
        // One result pack (with empty time/value lists) per requested agent.
        for (i <- 0 to objHashLv.size() - 1) {
            val objHash = objHashLv.getInt(i);
            val mapPack = new MapPack();
            mapPack.put("objHash", objHash);
            val timeLv = mapPack.newList("time");
            val valueLv = mapPack.newList("value");
            mapPackMap.put(objHash, mapPack);
        }
        // Appends (time, value) to the pack of the agent each record belongs to.
        val handler = (mapValue: MapValue) => {
            if (mapValue != null) {
                val objHash = mapValue.getInt(CounterConstants.COMMON_OBJHASH)
                val time = mapValue.getLong(CounterConstants.COMMON_TIME)
                val value = mapValue.get(counter)
                if(value != null) {
                    val curMapPack = mapPackMap.get(objHash)
                    if(curMapPack != null) {
                        curMapPack.getList("time").add(time)
                        curMapPack.getList("value").add(value)
                    }
                }
            }
        }
        RealtimeCounterRD.readBulk(date, stime, etime, handler)
        for (i <- 0 to objHashLv.size() - 1) {
            dout.writeByte(TcpFlag.HasNEXT);
            var mpack = mapPackMap.get(objHashLv.getInt(i))
            dout.writePack(mpack)
            dout.flush()
        }
    }
    /**
     * Sums the realtime value of a single named counter across all requested
     * agents and writes one MapValue { counter -> total }.
     */
    @ServiceHandler(RequestCmd.CUBRID_DB_REALTIME_MULTI_DATA)
    def realtimeMultiData(din: DataInputX, dout: DataOutputX, login: Boolean): Unit = {
        val param = din.readMapPack();
        val objHashLv = param.getList("objHash");
        val counter = param.getText("counter");
        if (objHashLv.size() == 0) {
            return;
        }
        var counterData = 0L;
        for (i <- 0 to objHashLv.size()-1) {
            val objHash = objHashLv.getInt(i);
            val key1 = new CounterKey(objHash, counter, TimeTypeEnum.REALTIME);
            val v1 = CounterCache.get(key1);
            if (v1 != null) {
                counterData += CastUtil.clong(v1);
            }
        }
        val value = new MapValue();
        value.put(counter, counterData);
        dout.writeByte(TcpFlag.HasNEXT);
        dout.writeValue(value);
    }
    /**
     * Streams 5-minute daily-counter series for each day in [sDate, eDate]
     * and each requested agent; one MapPack per (day, agent). Per-agent read
     * failures are logged and skipped so the remaining data still streams.
     */
    @ServiceHandler(RequestCmd.CUBRID_DB_LONG_PERIOD_MULTI_DATA)
    def getPastLongDateAll(din: DataInputX, dout: DataOutputX, login: Boolean) {
        val param = din.readMapPack();
        val counter = param.getText("counter");
        val sDate = param.getText("sDate");
        val eDate = param.getText("eDate");
        val objHashLv = param.getList("objHash");
        var stime = DateUtil.yyyymmdd(sDate);
        var etime = DateUtil.yyyymmdd(eDate) + DateUtil.MILLIS_PER_DAY;
        var date = stime;
        while (date <= (etime - DateUtil.MILLIS_PER_DAY)) {
            val d = DateUtil.yyyymmdd(date);
            for (i <- 0 to ArrayUtil.len(objHashLv) - 1) {
                val objHash = objHashLv.getInt(i);
                try {
                    val mpack = new MapPack();
                    mpack.put("objHash", objHash);
                    val timeLv = mpack.newList("time");
                    val valueLv = mpack.newList("value");
                    val v = DailyCounterRD.getValues(d, new CounterKey(objHash, counter, TimeTypeEnum.FIVE_MIN));
                    // Each slot j is the 5-minute bucket starting at date + 5j minutes.
                    for (j <- 0 to ArrayUtil.len(v) - 1) {
                        val time = date + DateUtil.MILLIS_PER_MINUTE * 5 * j;
                        timeLv.add(time);
                        valueLv.add(v(j));
                    }
                    dout.writeByte(TcpFlag.HasNEXT);
                    dout.writePack(mpack);
                    dout.flush();
                } catch {
                    // NOTE(review): catching Throwable and printing to stdout —
                    // consider NonFatal + the server Logger instead.
                    case e: Throwable =>
                        val op = AgentManager.getAgent(objHash);
                        println(op.objName + " invalid data : " + e.getMessage())
                        e.printStackTrace()
                }
            }
            date += DateUtil.MILLIS_PER_DAY;
        };
    }
    /** Resolves an agent's display name for the given date. */
    def getObjName(date: String, objHash: Int): String = {
        return ObjectRD.getObjName(date, objHash);
    }
    /**
     * Same pattern as realtimeDMLStatus/DbServerInfo, for long-transaction
     * status packs.
     */
    @ServiceHandler(RequestCmd.CUBRID_DB_LONG_TRANSACTION_DATA)
    def DbLongTransactionInfo(din: DataInputX, dout: DataOutputX, login: Boolean): Unit = {
        val param = din.readPack().asInstanceOf[MapPack];
        val objHashLv = param.getList("objHash");
        val key = param.getText("key");
        val date = param.getText("date");
        val etime = param.getLong("etime");
        val stime = param.getLong("stime");
        val time = param.getLong("time");
        val objArray = new ArrayList[Int]();
        for (i <- 0 to objHashLv.size() - 1) {
            // NOTE(review): `p` is created and never used.
            val p = new MapPack();
            val objHash = objHashLv.getInt(i);
            objArray.add(objHash);
        }
        val handler = (time: Long, data: Array[Byte]) => {
            val pk = new DataInputX(data).readPack().asInstanceOf[StatusPack];
            if (objArray.contains(pk.objHash) && pk.key == key) {
                dout.writeByte(TcpFlag.HasNEXT);
                dout.writePack(pk);
                // NOTE(review): nonlocal return — see realtimeDMLStatus.
                return
            }
        }
        // NOTE(review): repeated identical scan, once per requested object.
        for (i <- 0 to objHashLv.size() - 1) {
            StatusRD.readFromEndTime(date, stime, etime, handler);
        }
    }
    /**
     * Forwards a GET-alert-configure request to the target agent and relays
     * its reply. (Name says "set" but the command is GET — kept for
     * compatibility with existing callers.)
     */
    @ServiceHandler(RequestCmd.CUBRID_GET_ALERT_CONFIGURE)
    def setConfigureAgent(din: DataInputX, dout: DataOutputX, login: Boolean) {
        val param = din.readPack().asInstanceOf[MapPack];
        val objHash = param.getInt("objHash");
        val o = AgentManager.getAgent(objHash);
        val p = AgentCall.call(o, RequestCmd.CUBRID_GET_ALERT_CONFIGURE, param);
        if (p != null) {
            dout.writeByte(TcpFlag.HasNEXT);
            dout.writePack(p);
        }
    }
    /**
     * Forwards a SET-alert-configure request to the target agent and relays
     * its reply. (Name suggests a listing — kept for compatibility.)
     */
    @ServiceHandler(RequestCmd.CUBRID_SET_ALERT_CONFIGURE)
    def listConfigureWas(din: DataInputX, dout: DataOutputX, login: Boolean) {
        val param = din.readPack().asInstanceOf[MapPack];
        val objHash = param.getInt("objHash");
        val o = AgentManager.getAgent(objHash);
        val p = AgentCall.call(o, RequestCmd.CUBRID_SET_ALERT_CONFIGURE, param);
        if (p != null) {
            dout.writeByte(TcpFlag.HasNEXT);
            dout.writePack(p);
        }
    }
}
| scouter-project/scouter | scouter.server/src/main/scala/scouter/server/netio/service/handle/CubridService.scala | Scala | apache-2.0 | 12,934 |
package com.sksamuel.scapegoat.inspections.inference
import scala.reflect.internal.Flags
import com.sksamuel.scapegoat.{Inspection, InspectionContext, Inspector, Levels}
/**
 * @author
 *   Stephen Samuel
 */
class MethodReturningAny
    extends Inspection(
      text = "Method returning Any",
      defaultLevel = Levels.Warning,
      description = "Checks for functions that are defined or inferred to return Any.",
      explanation = "Method returns Any. Consider using a more specialized type."
    ) {
  def inspector(context: InspectionContext): Inspector =
    new Inspector(context) {
      override def postTyperTraverser: context.Traverser =
        new context.Traverser {
          import context.global._
          override def inspect(tree: Tree): Unit = {
            tree match {
              // Skip compiler-generated members and field accessors: their
              // types are not something the user wrote.
              case DefDef(mods, _, _, _, _, _) if mods.isSynthetic =>
              case DefDef(mods, _, _, _, _, _) if mods.hasFlag(Flags.SetterFlags) =>
              case DefDef(mods, _, _, _, _, _) if mods.hasFlag(Flags.GetterFlags) =>
              case DefDef(mods, _, _, _, _, _) if mods.hasFlag(Flags.ACCESSOR) =>
              // ignore overridden methods as the parent will receive the warning
              case DefDef(mods, _, _, _, _, _) if mods.isOverride =>
              // Warn when the (declared or inferred) result type is exactly
              // Any or AnyRef; the reported snippet is capped at 300 chars.
              case DefDef(_, _, _, _, tpt, _) if tpt.tpe =:= typeOf[Any] || tpt.tpe =:= typeOf[AnyRef] =>
                context.warn(tree.pos, self, tree.toString.take(300))
              case _ => continue(tree)
            }
          }
        }
    }
}
| sksamuel/scapegoat | src/main/scala/com/sksamuel/scapegoat/inspections/inference/MethodReturningAny.scala | Scala | apache-2.0 | 1,555 |
package com.github.gigurra.glasciia
import com.badlogic.gdx.scenes.scene2d.Actor
import scala.collection.mutable
import scala.language.implicitConversions
/**
 * A minimal multicast signal: callbacks ("slots") registered via [[connect]]
 * (or its alias [[foreach]]) are invoked, in registration order, every time a
 * value is emitted through [[apply]]. Derived signals ([[map]], [[flatMap]],
 * [[filter]], [[unit]]) are themselves signals fed by this one.
 */
class Signal[T] {
  // Registered callbacks, invoked in connection order. Kept under its
  // original name/type: it is `protected`, i.e. part of the subclass API.
  protected val slots = new mutable.ArrayBuffer[T => Unit](initialSize = 2)
  /** Emits `t` to every connected slot. */
  def apply(t: T): Unit = {
    for (slot <- slots) slot(t)
  }
  /** Alias for [[connect]]. */
  def foreach(slot: T => Unit): this.type = connect(slot)
  /** Registers `slot` to receive every emitted value; returns this signal. */
  def connect(slot: T => Unit): this.type = {
    slots.append(slot)
    this
  }
  /** Alias for the by-name [[connect]]. */
  def foreach(slot: => Unit): this.type = connect(slot)
  /** Registers a by-name block that ignores the emitted value. */
  def connect(slot: => Unit): this.type = {
    val ignoringSlot: T => Unit = _ => slot
    slots.append(ignoringSlot)
    this
  }
  /** A new signal emitting `f` applied to each value this signal emits. */
  def map[R](f: T => R): Signal[R] = {
    val mapped = new Signal[R]
    connect((value: T) => mapped(f(value)))
    mapped
  }
  /**
   * A new signal that re-emits values from every signal produced by `f`
   * (one inner subscription is created per emission of this signal).
   */
  def flatMap[R](f: T => Signal[R]): Signal[R] = {
    val flattened = new Signal[R]
    connect { (value: T) =>
      f(value).connect((inner: R) => flattened(inner))
    }
    flattened
  }
  /** A new signal that only re-emits values satisfying `f`. */
  def filter(f: T => Boolean): Signal[T] = {
    val filtered = new Signal[T]
    connect { (value: T) =>
      if (f(value)) filtered(value)
    }
    filtered
  }
  /** This signal with its payload discarded. */
  def unit: Signal[Unit] = {
    map(_ => ())
  }
}
object Signal {
  /** Creates a fresh, empty signal. */
  def apply[T]: Signal[T] = new Signal[T]
  // Allows writing the bare companion `Signal` where a Signal[Unit] is
  // expected; note each conversion creates a NEW signal.
  implicit def toSignal(s: Signal.type): Signal[Unit] = Signal.apply[Unit]
  /**
   * "Binding" view of a Signal[T => Unit]: the payload is a value-capturing
   * callback. A producer registered with [[bind]] supplies the value; [[get]]
   * retrieves it by emitting a capture function through the signal.
   */
  implicit class BindingImpl[T](signal: Signal[T => Unit]) {
    /** The currently bound value, if any producer is connected. */
    def option: Option[T] = {
      var out: Option[T] = None
      // Emit a capture function; any bound producer will call it with its value.
      signal.apply(v => out = Some(v))
      out
    }
    /** The bound value; throws NoSuchElementException when nothing is bound. */
    def get: T = {
      option.getOrElse(throw new NoSuchElementException(s"No data bound to Getter/Signal"))
    }
    /** Binds the (by-name) producer `f` as the value source of this binding. */
    def bind(f: => T): Signal[T => Unit] = {
      signal.connect(_(f))
      signal
    }
    def apply(): T = get
    // Polls the bound value via the actor's action loop, firing `action`
    // whenever it changes (see ActionOnChange).
    def onChange(action: T => Unit)(implicit actor: Actor): Signal[T => Unit] = {
      actor.addAction(ActionOnChange(get)(action))
      signal
    }
  }
  // Implicitly unwraps a binding to its value; throws if unbound (see get).
  implicit def binding2Value[T](signal: Signal[T => Unit]): T = signal.get
}
/** Factory for binding signals (signals whose payload is a value-capturing callback). */
object Binding {
  /** Creates an empty binding signal for values of type `T`. */
  def apply[T]: Signal[T => Unit] = new Signal[T => Unit]
}
| GiGurra/glasciia | glasciia-core/src/main/scala/com/github/gigurra/glasciia/Signal.scala | Scala | mit | 1,925 |
import collection.mutable.UnrolledBuffer
// Run-test: builds an UnrolledBuffer from 1..50 (Scala 3 vararg splice `*`),
// concatenates it with itself and prints the result.
// NOTE(review): presumably a regression test for scala/bug#5867
// (UnrolledBuffer concatenation); the printed output is typically compared
// against an accompanying .check file by the test runner — confirm.
object Test {
  def main(args: Array[String]): Unit = {
    val buf = UnrolledBuffer(1 to 50*)
    val dub = buf ++ buf
    println(dub)
  }
}
| dotty-staging/dotty | tests/run/t5867.scala | Scala | apache-2.0 | 190 |
package mf2
import java.util.Random
import scala.annotation.tailrec
import org.jblas.DoubleMatrix
import utilities.SparseMatrix
import utilities.Vector
import OptimizerType._
import CoordinateDescent._
/**
 * Per-partition matrix-factorization model state: row/column factor matrices
 * (factor-major: factors*(k)(i)), optional per-entry precisions (VB),
 * optional lag/dual variables (ADMM-style consensus), and per-factor
 * regularization strengths gamma*. `numIter` records the iterations run by
 * the last call to [[train]].
 */
class LocalModel (val factorsR: Array[Array[Float]], val factorsC: Array[Array[Float]],
    val precsR: Array[Array[Float]], val precsC: Array[Array[Float]],
    val lagsR: Array[Array[Float]], val lagsC: Array[Array[Float]],
    val gammaR: Array[Float], val gammaC: Array[Float], var numIter: Int)
  extends Serializable {
  /** Squared reconstruction error of `data` under the current factors. */
  def getSE(data: SparseMatrix): Double = data.getSE(factorsR, factorsC, true)
  /** Root-mean-squared error; the observation count is data.col_idx.length. */
  def getRMSE(data: SparseMatrix): Double = {
    math.sqrt(data.getSE(factorsR, factorsC, true)/data.col_idx.length)
  }
  /** Per-global-row-id statistics for the row factors (see LocalModel.getStats). */
  def getStatsR(map: Array[Int], multicore: Boolean)
    : Array[(Int, (Vector, Vector))] = {
    LocalModel.getStats(factorsR, lagsR, gammaR, map, multicore)
  }
  /** Row statistics variant that weights by observation counts from `ptr`. */
  def getStatsR(map: Array[Int], ptr: Array[Int], multicore: Boolean)
    : Array[(Int, (Vector, Vector))] = {
    LocalModel.getStats(factorsR, lagsR, gammaR, map, ptr, multicore)
  }
  /** Per-global-column-id statistics for the column factors. */
  def getStatsC(map: Array[Int], multicore: Boolean)
    : Array[(Int, (Vector, Vector))] = {
    LocalModel.getStats(factorsC, lagsC, gammaC, map, multicore)
  }
  /** Column statistics variant that weights by observation counts from `ptr`. */
  def getStatsC(map: Array[Int], ptr: Array[Int], multicore: Boolean)
    : Array[(Int, (Vector, Vector))] = {
    LocalModel.getStats(factorsC, lagsC, gammaC, map, ptr, multicore)
  }
  /** Refreshes the row lag variables against new global priors. */
  def updateLagPriorsR(priorsR: Array[Array[Float]], multicore: Boolean): LocalModel = {
    LocalModel.updateLagPriors(factorsR, lagsR, priorsR, multicore)
    this
  }
  /** Refreshes the column lag variables against new global priors. */
  def updateLagPriorsC(priorsC: Array[Array[Float]], multicore: Boolean): LocalModel = {
    LocalModel.updateLagPriors(factorsC, lagsC, priorsC, multicore)
    this
  }
  /**
   * Runs coordinate-descent training (CD or CDPP) in place and records the
   * number of iterations performed. Unsupported optimizers log an error and
   * leave numIter = 0. With emBayes, the gamma hyper-parameters are then
   * re-estimated (weighted by observation counts when weightedReg is set) —
   * presumably an empirical-Bayes update; see updateGamma for details.
   */
  def train(data: SparseMatrix, optType: OptimizerType, maxIter: Int, stopCrt: Float,
    isVB: Boolean, emBayes: Boolean, weightedReg: Boolean, multicore: Boolean,
    priorsR: Array[Array[Float]], priorsC: Array[Array[Float]])
    : LocalModel = {
    numIter = optType match {
      case CD => runCD(data, maxIter, stopCrt, isVB, weightedReg, multicore,
        priorsR, factorsR, precsR, gammaR, priorsC, factorsC, precsC, gammaC)
      case CDPP => runCDPP(data, maxIter, 1, stopCrt, isVB, weightedReg, multicore,
        priorsR, factorsR, precsR, gammaR, priorsC, factorsC, precsC, gammaC)
      case _ => {System.err.println("only supports CD and CDPP"); 0}
    }
    if (emBayes) {
      if (weightedReg) updateGamma(factorsR, precsR, priorsR, data.row_ptr, gammaR)
      else updateGamma(factorsR, precsR, priorsR, gammaR)
      if (weightedReg) updateGamma(factorsC, precsC, priorsC, data.col_ptr, gammaC)
      else updateGamma(factorsC, precsC, priorsC, gammaC)
    }
    this
  }
}
object LocalModel {
def apply (numFactors: Int, rowMap: Array[Int], colMap: Array[Int],
rowPtr: Array[Int], colPtr: Array[Int], gamma_r_init: Float, gamma_c_init: Float,
ecR: Boolean, ecC: Boolean, isVB: Boolean, weightedReg: Boolean)
: LocalModel = {
def hash(x: Int): Int = {
val r = x ^ (x >>> 20) ^ (x >>> 12)
r ^ (r >>> 7) ^ (r >>> 4)
}
val numRows = rowMap.length
val numCols = colMap.length
val factorsR = Array.ofDim[Float](numFactors, numRows)
var r = 0
val rand = new Random()
while (r < numRows) {
var k = 0
rand.setSeed(rowMap(r))
while (k < numFactors) {
factorsR(k)(r) = 0.1f*(rand.nextFloat-0.5f)
k += 1
}
r += 1
}
val factorsC = Array.ofDim[Float](numFactors, numCols)
val gammaR = Array.fill(numFactors)(gamma_r_init)
val gammaC = Array.fill(numFactors)(gamma_c_init)
val precsR =
if (isVB) {
if (weightedReg) {
val numObs = getNumObs(rowPtr)
Array.tabulate(numFactors, numRows)((k, r) => gammaR(k)*numObs(r))
}
else Array.tabulate(numFactors, numRows)((k, r) => gammaR(k))
}
else null
val precsC =
if (isVB) {
if (weightedReg) {
val numObs = getNumObs(colPtr)
Array.tabulate(numFactors, numCols)((k, c) => gammaC(k)*numObs(c))
}
else Array.tabulate(numFactors, numCols)((k, c) => gammaC(k))
}
else null
val lagsR =
if (ecR) Array.ofDim[Float](numFactors, numRows)
else null
val lagsC =
if (ecC) Array.ofDim[Float](numFactors, numCols)
else null
new LocalModel(factorsR, factorsC, precsR, precsC, lagsR, lagsC, gammaR, gammaC, 0)
}
def toGlobal(factors: Array[Array[Float]], map: Array[Int])
: Array[(Int, Array[Float])] = {
val length = factors(0).length
val results = Array.ofDim[(Int, Array[Float])](length);
var i = 0
while (i < length) {
results(i) = (map(i), factors.map(array => array(i)))
i += 1
}
results
// transformed.sortBy(pair => pair._1)
}
def binarySearch(arr: Array[Int], start: Int, end: Int, target: Int) : Int = {
val pos =
if (start > end) -1
else{
val mid = (start + end)/2
if (arr(mid) > target) binarySearch(arr, start, mid-1, target)
else if (arr(mid) == target) mid
else binarySearch(arr, mid+1, end, target)
}
pos
}
  /** Computes per-row sufficient statistics for aggregating local factors
    * into global ones: for each local row r the numerator is the
    * (lag-corrected, when lags are present) factor vector scaled by gamma,
    * and the denominator is gamma itself.
    *
    * NOTE(review): the parallel and serial branches are duplicated verbatim
    * here and in the weighted overload below -- candidates for consolidation.
    *
    * @param factors numFactors x length factor matrix
    * @param lags lag matrix of the same shape, or null when unused
    * @param gamma per-factor precision weights
    * @param map global index for each local row
    * @param multicore process rows in parallel when true
    * @return (global id, (numerator, denominator)) per local row
    */
  def getStats(factors: Array[Array[Float]], lags: Array[Array[Float]],
      gamma: Array[Float], map: Array[Int], multicore: Boolean)
    : Array[(Int, (Vector, Vector))] = {
    val length = factors(0).length
    val numFactors = factors.length
    // Equality-constraint mode is signalled by a non-null lag matrix.
    val isEC = lags != null
    val statsR = new Array[(Int, (Vector, Vector))](length)
    if (multicore) {
      // Rows are independent and each writes a distinct slot of statsR, so
      // the parallel range is safe.
      for (r <- (0 until length).par) {
        val nume = new Array[Float](numFactors)
        val deno = new Array[Float](numFactors)
        var k = 0
        while (k < numFactors) {
          val ga = gamma(k)
          nume(k) =
            if (isEC) (factors(k)(r)+lags(k)(r))*ga
            else factors(k)(r)*ga
          deno(k) = ga
          k += 1
        }
        statsR(r) = (map(r), (Vector(nume), Vector(deno)))
      }
    }
    else {
      var r = 0
      while (r < length) {
        val nume = new Array[Float](numFactors)
        val deno = new Array[Float](numFactors)
        var k = 0
        while (k < numFactors) {
          val ga = gamma(k)
          nume(k) =
            if (isEC) (factors(k)(r)+lags(k)(r))*ga
            else factors(k)(r)*ga
          deno(k) = ga
          k += 1
        }
        statsR(r) = (map(r), (Vector(nume), Vector(deno)))
        r += 1
      }
    }
    statsR
  } // end of getStats
  /** Weighted variant of getStats: identical to the overload above except
    * that each row's gamma is additionally scaled by the row's observation
    * count, derived as ptr(r+1) - ptr(r) from the CSR/CSC pointer array.
    *
    * @param factors numFactors x length factor matrix
    * @param lags lag matrix of the same shape, or null when unused
    * @param gamma per-factor precision weights
    * @param map global index for each local row
    * @param ptr pointer array (length = length + 1) supplying counts
    * @param multicore process rows in parallel when true
    * @return (global id, (numerator, denominator)) per local row
    */
  def getStats(factors: Array[Array[Float]], lags: Array[Array[Float]],
      gamma: Array[Float], map: Array[Int], ptr: Array[Int], multicore: Boolean)
    : Array[(Int, (Vector, Vector))] = {
    val length = factors(0).length
    val numFactors = factors.length
    val isEC = lags != null
    val statsR = new Array[(Int, (Vector, Vector))](length)
    if (multicore) {
      for (r <- (0 until length).par) {
        val nume = new Array[Float](numFactors)
        val deno = new Array[Float](numFactors)
        // Observation count for this row/column.
        val weight = ptr(r+1)-ptr(r)
        var k = 0
        while (k < numFactors) {
          val ga = gamma(k)*weight
          nume(k) =
            if (isEC) (factors(k)(r)+lags(k)(r))*ga
            else factors(k)(r)*ga
          deno(k) = ga
          k += 1
        }
        statsR(r) = (map(r), (Vector(nume), Vector(deno)))
      }
    }
    else {
      var r = 0
      while (r < length) {
        val nume = new Array[Float](numFactors)
        val deno = new Array[Float](numFactors)
        val weight = ptr(r+1)-ptr(r)
        var k = 0
        while (k < numFactors) {
          val ga = gamma(k)*weight
          nume(k) =
            if (isEC) (factors(k)(r)+lags(k)(r))*ga
            else factors(k)(r)*ga
          deno(k) = ga
          k += 1
        }
        statsR(r) = (map(r), (Vector(nume), Vector(deno)))
        r += 1
      }
    }
    statsR
  } // end of getStats
  /** Updates the scaled Lagrangian multipliers in place: for every row with
    * a non-null prior, the lag accumulates the factor/prior residual and the
    * prior is then shifted by the new lag. Rows whose prior entry is null
    * are skipped.
    *
    * @param factorsR numFactors x numRows factor matrix
    * @param lagsR lag matrix of the same shape (mutated)
    * @param priorsR per-row prior vectors, factor-minor (mutated)
    * @param multicore process rows in parallel when true
    */
  def updateLagPriors(factorsR: Array[Array[Float]], lagsR: Array[Array[Float]],
      priorsR: Array[Array[Float]], multicore: Boolean) = {
    //update the scaled Lagrangian multipliers
    val numFactors = lagsR.length
    val numRows = priorsR.length
    if (multicore) {
      // Rows are independent; each parallel task touches only its own row.
      for (r <- (0 until numRows).par) {
        if (priorsR(r) != null) {
          var k = 0
          while (k < numFactors) {
            // Note the transposed layouts: lagsR/factorsR are (k)(r) while
            // priorsR is (r)(k).
            lagsR(k)(r) += factorsR(k)(r) - priorsR(r)(k)
            priorsR(r)(k) -= lagsR(k)(r)
            k += 1
          }
        }
      }
    }
    else {
      var r = 0
      while (r < numRows) {
        if (priorsR(r) != null) {
          var k = 0
          while (k < numFactors) {
            lagsR(k)(r) += factorsR(k)(r) - priorsR(r)(k)
            priorsR(r)(k) -= lagsR(k)(r)
            k += 1
          }
        }
        r += 1
      }
    }
  } //end of updateLagPriors
def getNumObs(ptr: Array[Int]): Array[Int] = {
val length = ptr.length - 1
val numObs = new Array[Int](length)
var l = 0
while (l < length) {
numObs(l) = ptr(l+1) - ptr(l)
l += 1
}
numObs
}
} | XianXing/bdl | src/main/scala/bdl/mf2/LocalModel.scala | Scala | apache-2.0 | 9,146 |
package archery
import scala.collection.mutable.{ArrayBuffer, PriorityQueue}
import scala.math.{min, max}
/**
* Some useful constants that we don't want to hardcode.
*/
object Constants {
// $COVERAGE-OFF$
@inline final val MaxEntries = 50
// $COVERAGE-ON$
}
import Constants._
/**
* Abstract data type that has a geom element.
*
* This generalizes Node[A] (the nodes of the tree) and Entry[A] (the
* values being put in the tree). It functions like a structural type
* (but isn't one, because structural types are slow).
*/
sealed abstract class HasGeom {
def geom: Geom
}
/**
* Abstract data type for nodes of the tree.
*
* There are two types of Node: Branch and Leaf. Confusingly, leaves
* don't actaully hold values, but rather a leaf contains a sequence
* of entries. This design is commmon to RTree implementations and it
* seemed like a good idea to keep the nomenclature the same.
*/
sealed abstract class Node[A] extends HasGeom { self =>
def box: Box
def geom: Geom = box
def children: Vector[HasGeom]
/**
* Put all the entries this node contains (directly or indirectly)
* into a vector. Obviously this could be quite large in the case of
* a root node, so it should not be used for traversals.
*/
def entries: Vector[Entry[A]] = {
val buf = ArrayBuffer.empty[Entry[A]]
def recur(node: Node[A]): Unit = node match {
case Leaf(children, _) =>
buf ++= children
case Branch(children, _) =>
children.foreach(recur)
}
recur(this)
buf.toVector
}
/**
* Returns an iterator over all the entires this node contains
* (directly or indirectly). Since nodes are immutable there is no
* concern over concurrent updates while using the iterator.
*/
def iterator: Iterator[Entry[A]] = this match {
case Leaf(children, _) =>
children.iterator
case Branch(children, _) =>
children.iterator.flatMap(_.iterator)
}
/**
* Method to pretty-print an r-tree.
*
* This method should only be called on small-ish trees! It will
* print one line for every branch, leaf, and entry, so for a tree
* with thousands of entries this will result in a very large
* string!
*/
def pretty: String = {
def prettyRecur(node: Node[A], i: Int, sb: StringBuilder): Unit = {
val pad = " " * i
val a = node.box.area
node match {
case lf @ Leaf(children, box) =>
val pad2 = " " * (i + 1)
sb.append(s"$pad leaf $a $box:\n")
children.foreach { case Entry(pt, value) =>
sb.append(s"$pad2 entry $pt: $value\n")
}
case Branch(children, box) =>
sb.append(s"$pad branch $a $box:\n")
children.foreach(c => prettyRecur(c, i + 1, sb))
}
}
val sb = new StringBuilder
prettyRecur(this, 0, sb)
sb.toString
}
/**
* Insert a new Entry into the tree.
*
* Since this node is immutable, the method will return a
* replacement. There are two possible situations:
*
* 1. We can replace this node with a new node. This is the common
* case.
*
* 2. This node was already "full", so we can't just replace it with
* a single node. Instead, we will split this node into
* (presumably) two new nodes, and return a vector of them.
*
* The reason we are using vector here is that it simplifies the
* implementation, and also because eventually we may support bulk
* insertion, where more than two nodes might be returned.
*/
def insert(entry: Entry[A]): Either[Vector[Node[A]], Node[A]] = {
this match {
case Leaf(children, box) =>
val cs = children :+ entry
if (cs.length <= MaxEntries) {
Right(Leaf(cs, box.expand(entry.geom)))
} else {
Left(Node.splitLeaf(cs))
}
case Branch(children, box) =>
assert(children.length > 0)
// here we need to find the "best" child to put the entry
// into. we define that as the child that needs to add the
// least amount of area to its own bounding box to accomodate
// the new entry.
//
// the results are "node", the node to add to, and "n", the
// position of that node in our vector.
val pt = entry.geom
var node = children(0)
var n = 0
var area = node.box.expandArea(pt)
var i = 1
while (i < children.length) {
val curr = children(i)
val a = curr.box.expandArea(pt)
if (a < area) {
area = a
n = i
node = curr
}
i += 1
}
// now we perform the actual insertion into the node. as
// stated above, that node will either return a single new
// node (Right) or a vector of new nodes (Left).
node.insert(entry) match {
case Left(rs) =>
val cs = children.take(n) ++ children.drop(n + 1) ++ rs
if (cs.length <= MaxEntries) {
val b = rs.foldLeft(box)(_ expand _.box)
Right(Branch(cs, b))
} else {
Left(Node.splitBranch(cs))
}
case Right(r) =>
val cs = children.updated(n, r)
if (cs.length <= MaxEntries) {
Right(Branch(children.updated(n, r), box.expand(r.box)))
} else {
Left(Node.splitBranch(cs))
}
}
}
}
/**
* Determine if we need to try contracting our bounding box based on
* the loss of 'geom'. If so, use the by-name parameter 'regen' to
* recalculate. Since regen is by-name, it won't be evaluated unless
* we need it.
*/
def contract(gone: Geom, regen: => Box): Box =
if (box.wraps(gone)) box else regen
/**
* Remove this entry from the tree.
*
* The implementations for Leaf and Branch are somewhat involved, so
* they are defined in each subclass.
*
* The return value can be understood as follows:
*
* 1. None: the entry was not found in this node. This is the most
* common case.
*
* 2. Some((es, None)): the entry was found, and this node was
* removed (meaning after removal it had too few other
* children). The 'es' vector are entries that need to be readded
* to the RTree.
*
* 3. Some((es, Some(node))): the entry was found, and this node
* should be replaced by 'node'. Like above, the 'es' vector
* contains entries that should be readded.
*
* Because adding entries may require rebalancing the tree, we defer
* the insertions until after the removal is complete and then readd
* them in RTree. While 'es' will usually be quite small, it's
* possible that in some cases it may be very large.
*/
def remove(entry: Entry[A]): Option[(Joined[Entry[A]], Option[Node[A]])]
/**
* Search for all entries contained in the search space.
*
* Points on the boundary of the search space will be included.
*/
def search(space: Box, f: Entry[A] => Boolean): Seq[Entry[A]] =
genericSearch(space, space.contains, f)
/**
* Search for all entries intersecting the search space.
*
* Points on the boundary of the search space will be included.
*/
def searchIntersection(space: Box, f: Entry[A] => Boolean): Seq[Entry[A]] =
genericSearch(space, space.intersects, f)
/**
* Search for all entries given a search space, spatial checking
* function, and criteria function.
*
* This method abstracts search and searchIntersection, where the
* `check` function is either space.contains or space.intersects,
* respectively.
*/
def genericSearch(space: Box, check: Geom => Boolean, f: Entry[A] => Boolean): Seq[Entry[A]] =
if (!space.isFinite) Nil else {
val buf = ArrayBuffer.empty[Entry[A]]
def recur(node: Node[A]): Unit = node match {
case Leaf(children, box) =>
children.foreach { c =>
if (check(c.geom) && f(c)) buf.append(c)
}
case Branch(children, box) =>
children.foreach { c =>
if (space.intersects(box)) recur(c)
}
}
if (space.intersects(box)) recur(this)
buf
}
/**
* Combine the results of a search(space) into a single result.
*/
def foldSearch[B](space: Box, init: B)(f: (B, Entry[A]) => B): B =
searchIterator(space, _ => true).foldLeft(init)(f)
/**
* Return an iterator over the results of a search.
*
* This produces the same elements as search(space, f).iterator(),
* without having to build an entire vector at once.
*/
def searchIterator(space: Box, f: Entry[A] => Boolean): Iterator[Entry[A]] =
if (children.isEmpty || !box.intersects(space)) {
Iterator.empty
} else {
this match {
case Leaf(cs, _) =>
cs.iterator.filter(c => space.contains(c.geom) && f(c))
case Branch(cs, _) =>
cs.iterator.flatMap(c => c.searchIterator(space, f))
}
}
/**
* Find the closest entry to `pt` that is within `d0`.
*
* This method will either return Some((distance, entry)) or None.
*/
  def nearest(pt: Point, d0: Double): Option[(Double, Entry[A])] = {
    // Best distance found so far; only entries strictly closer than this
    // (initially the caller-supplied bound d0) are considered.
    var dist: Double = d0
    var result: Option[(Double, Entry[A])] = None
    this match {
      case Leaf(children, box) =>
        // Linear scan of the leaf's entries, keeping the closest one.
        children.foreach { entry =>
          val d = entry.geom.distance(pt)
          if (d < dist) {
            dist = d
            result = Some((d, entry))
          }
        }
      case Branch(children, box) =>
        // Visit children in order of box distance so we can stop as soon as
        // a child's box is no closer than the best entry already found.
        val cs = children.map(node => (node.box.distance(pt), node)).sortBy(_._1)
        cs.foreach { case (d, node) =>
          // Early exit: all remaining children are at least this far away.
          if (d >= dist) return result //scalastyle:ignore
          node.nearest(pt, dist) match {
            // Note: this inner `d` (best distance within the child)
            // deliberately shadows the box distance bound above.
            case some @ Some((d, _)) =>
              dist = d
              result = some
            case None =>
          }
        }
    }
    result
  }
/**
* Find the closest `k` entries to `pt` that are within `d0`, and
* add them to the given priority queue `pq`.
*
* This method returns the distance of the farthest entry that is
* still included.
*/
  def nearestK(pt: Point, k: Int, d0: Double, pq: PriorityQueue[(Double, Entry[A])]): Double = {
    // Current pruning radius: entries at or beyond this distance are ignored.
    var dist: Double = d0
    this match {
      case Leaf(children, box) =>
        children.foreach { entry =>
          val d = entry.geom.distance(pt)
          if (d < dist) {
            pq += ((d, entry))
            if (pq.size > k) {
              // pq is expected to be a max-heap on distance (the default
              // tuple Ordering), so dequeue evicts the farthest candidate
              // and the new head is the farthest of the k kept.
              pq.dequeue
              dist = pq.head._1
            }
          }
        }
      case Branch(children, box) =>
        // Children sorted by box distance so the traversal can stop once a
        // box is no closer than the current k-th best distance.
        val cs = children.map(node => (node.box.distance(pt), node)).sortBy(_._1)
        cs.foreach { case (d, node) =>
          if (d >= dist) return dist //scalastyle:ignore
          dist = node.nearestK(pt, k, dist, pq)
        }
    }
    dist
  }
/**
* Count the number of entries contained within `space`.
*/
def count(space: Box): Int =
if (!space.isFinite) 0 else {
def recur(node: Node[A]): Int = node match {
case Leaf(children, box) =>
var n = 0
var i = 0
while (i < children.length) {
if (space.contains(children(i).geom)) n += 1
i += 1
}
n
case Branch(children, box) =>
var n = 0
var i = 0
while (i < children.length) {
val c = children(i)
if (space.intersects(c.box)) n += recur(c)
i += 1
}
n
}
if (space.intersects(box)) recur(this) else 0
}
/**
* Determine if entry is contained in the tree.
*
* This method depends upon reasonable equality for A. It can only
* match an Entry(pt, x) if entry.value == x.value.
*/
def contains(entry: Entry[A]): Boolean =
searchIterator(entry.geom.toBox, _ == entry).nonEmpty
/**
* Transform each entry's value using the given `f`, returning a new
* node.
*/
def map[B](f: A => B): Node[B] = this match {
case Leaf(cs, box) =>
Leaf(cs.map(e => Entry(e.geom, f(e.value))), box)
case Branch(cs, box) =>
Branch(cs.map(_.map(f)), box)
}
}
/** Internal tree node: holds child nodes and the box covering all of them. */
case class Branch[A](children: Vector[Node[A]], box: Box) extends Node[A] {

  /** Removes `entry` from this subtree; see the contract on Node#remove. */
  def remove(entry: Entry[A]): Option[(Joined[Entry[A]], Option[Node[A]])] = {
    // Try each child in turn until one reports that it contained the entry.
    def loop(i: Int): Option[(Joined[Entry[A]], Option[Node[A]])] =
      if (i < children.length) {
        val child = children(i)
        child.remove(entry) match {
          case None =>
            loop(i + 1)
          case Some((es, None)) =>
            // The child disappeared entirely.
            if (children.length == 1) {
              // This branch is now empty too: propagate removal upward.
              Some((es, None))
            } else if (children.length == 2) {
              // A branch with a single remaining child is dissolved; its
              // surviving entries are handed back for re-insertion.
              Some((Joined.wrap(children(1 - i).entries) ++ es, None))
            } else {
              // Drop the vanished child and possibly shrink the box.
              val cs = children.take(i) ++ children.drop(i + 1)
              val b = contract(child.geom, cs.foldLeft(Box.empty)(_ expand _.geom))
              Some((es, Some(Branch(cs, b))))
            }
          case Some((es, Some(node))) =>
            // The child was replaced: swap it in and recompute the box only
            // if the removed geometry could have touched our border.
            val cs = children.updated(i, node)
            val b = contract(child.geom, cs.foldLeft(Box.empty)(_ expand _.geom))
            Some((es, Some(Branch(cs, b))))
        }
      } else {
        None
      }
    // Fast reject: the entry cannot be below us if our box doesn't contain it.
    if (!box.contains(entry.geom)) None else loop(0)
  }
}
/** Terminal tree node: holds entries and the box covering all of them. */
case class Leaf[A](children: Vector[Entry[A]], box: Box) extends Node[A] {

  /** Removes `entry` from this leaf; see the contract on Node#remove.
    * Relies on Entry equality, i.e. on reasonable equality for A. */
  def remove(entry: Entry[A]): Option[(Joined[Entry[A]], Option[Node[A]])] = {
    // Fast reject before the linear scan.
    if (!box.contains(entry.geom)) return None //scalastyle:ignore
    val i = children.indexOf(entry)
    if (i < 0) {
      None
    } else if (children.length == 1) {
      // Last entry removed: the leaf itself disappears.
      Some((Joined.empty[Entry[A]], None))
    } else if (children.length == 2) {
      // The leaf dissolves; the surviving entry is handed back for re-insertion.
      Some((Joined(children(1 - i)), None))
    } else {
      // Drop the entry and shrink the box only if the removed geometry could
      // have been on the border.
      val cs = children.take(i) ++ children.drop(i + 1)
      val b = contract(entry.geom, cs.foldLeft(Box.empty)(_ expand _.geom))
      Some((Joined.empty[Entry[A]], Some(Leaf(cs, b))))
    }
  }
}
/**
* Represents a point with a value.
*
* We frequently use value.== so it's important that A have a
* reasonable equality definition. Otherwise things like remove and
* contains may not work very well.
*/
case class Entry[A](geom: Geom, value: A) extends HasGeom
object Node {
def empty[A]: Node[A] = Leaf(Vector.empty, Box.empty)
/**
* Splits the children of a leaf node.
*
* See splitter for more information.
*/
def splitLeaf[A](children: Vector[Entry[A]]): Vector[Leaf[A]] = {
val ((es1, box1), (es2, box2)) = splitter(children)
Vector(Leaf(es1, box1), Leaf(es2, box2))
}
/**
* Splits the children of a branch node.
*
* See splitter for more information.
*/
def splitBranch[A](children: Vector[Node[A]]): Vector[Branch[A]] = {
val ((ns1, box1), (ns2, box2)) = splitter(children)
Vector(Branch(ns1, box1), Branch(ns2, box2))
}
/**
* Splits a collection of members into two new collections, grouped
* according to the rtree algorithm.
*
* The results (a vector and a bounding box) will be used to create
* new nodes.
*
* The goal is to minimize the area and overlap of the pairs'
* bounding boxes. We are using a linear seeding strategy since it
* is simple and has worked well for us in the past.
*/
def splitter[M <: HasGeom](children: Vector[M]): ((Vector[M], Box), (Vector[M], Box)) = {
val buf = ArrayBuffer(children: _*)
val (seed1, seed2) = pickSeeds(buf)
var box1: Box = seed1.geom.toBox
var box2: Box = seed2.geom.toBox
val nodes1 = ArrayBuffer(seed1)
val nodes2 = ArrayBuffer(seed2)
def add1(node: M): Unit = { nodes1 += node; box1 = box1.expand(node.geom) }
def add2(node: M): Unit = { nodes2 += node; box2 = box2.expand(node.geom) }
while (buf.nonEmpty) {
if (nodes1.length >= 2 && nodes2.length + buf.length <= 2) {
// We should put the remaining buffer all in one bucket.
nodes2 ++= buf
box2 = buf.foldLeft(box2)(_ expand _.geom)
buf.clear()
} else if (nodes2.length >= 2 && nodes1.length + buf.length <= 2) {
// We should put the remaining buffer all in the other bucket.
nodes1 ++= buf
box1 = buf.foldLeft(box1)(_ expand _.geom)
buf.clear()
} else {
// We want to find the bucket whose bounding box requires the
// smallest increase to contain this member. If both are the
// same, we look for the bucket with the smallest area. If
// those are the same, we flip a coin.
val node = buf.remove(buf.length - 1)
val e1 = box1.expandArea(node.geom)
val e2 = box2.expandArea(node.geom)
if (e1 < e2) {
add1(node)
} else if (e2 < e1) {
add2(node)
} else {
val b1 = box1.expand(node.geom)
val b2 = box2.expand(node.geom)
val a1 = b1.area
val a2 = b2.area
if (a1 < a2) {
add1(node)
} else if (a2 < a1) {
add2(node)
} else if (Math.random() > 0.5) {
add1(node)
} else {
add2(node)
}
}
}
}
((nodes1.toVector, box1), (nodes2.toVector, box2))
}
/**
* Given a collection of members, we want to find the two that have
* the greatest distance from each other in some dimension. This is
* the "linear" strategy.
*
* Other strategies (like finding the greatest distance in both
* dimensions) might give better seeds but would be slower. This
* seems to work OK for now.
*/
def pickSeeds[M <: HasGeom](nodes: ArrayBuffer[M]): (M, M) = {
// find the two geometries that have the most space between them
// in this particular dimension. the sequence is (lower, upper) points
def handleDimension(pairs: IndexedSeq[(Float, Float)]): (Float, Int, Int) = {
val (a0, b0) = pairs(0)
var amin = a0 // min lower coord
var amax = a0 // max lower coord
var bmin = b0 // min upper coord
var bmax = b0 // max upper coord
var left = 0
var right = 0
var i = 1
while (i < pairs.length) {
val (a, b) = pairs(i)
if (a < amin) { amin = a }
if (a > amax) { amax = a; right = i }
if (b > bmax) { bmax = b }
if (b < bmin) { bmin = b; left = i }
i += 1
}
if (left != right) ((bmin - amax) / (bmax - amin), left, right) else (0.0F, 0, 1)
}
// get back the maximum distance in each dimension, and the coords
val (w1, i1, j1) = handleDimension(nodes.map(n => (n.geom.x, n.geom.x2)))
val (w2, i2, j2) = handleDimension(nodes.map(n => (n.geom.y, n.geom.y2)))
// figure out which dimension "won"
val (i, j) = if (w1 > w2) (i1, j1) else (i2, j2)
// remove these nodes and return them
// make sure to remove the larger index first.
val (a, b) = if (i > j) (i, j) else (j, i)
val node1 = nodes.remove(a)
val node2 = nodes.remove(b)
(node1, node2)
}
}
| arunma/archery | core/src/main/scala/archery/Node.scala | Scala | mit | 19,023 |
package com.mohiva.play.silhouette
import scala.collection.immutable.Map
/** Compatibility aliases used when building against Scala versions before
  * 2.13 (this variant lives in the "2.13-" source directory).
  */
private[silhouette] object ScalaCompat {
  /** Mirrors the 2.13+ map-transform API shape on pre-2.13 Maps. */
  implicit class MapOps[K, V](val map: Map[K, V]) extends AnyVal {
    // NOTE(review): pre-2.13 mapValues returns a lazy view, so `f` is
    // re-applied on each value access -- acceptable here since callers get
    // the same semantics the 2.12 stdlib always had.
    def transformValues[W](f: V => W): Map[K, W] = map.mapValues(f)
  }
  // Version-stable alias for the converters object.
  val JavaConverters = scala.collection.JavaConverters
}
| mohiva/play-silhouette | silhouette/app-2.13-/com/mohiva/play/silhouette/ScalaCompat.scala | Scala | apache-2.0 | 314 |
package chapter16
/**
 * 16.4 Basic list operations
 *
 * Every list operation can be expressed with the following three:
 * - head returns the first element of the list
 * - tail returns the list of every element except the first
 * - isEmpty returns true when the list is empty
 * All of these operations are methods of the List class.
 */
object c16_i04 extends App {
  // Applying head or tail to an empty list throws an exception.
  // Nil.head // head of empty list
  /*
   * A simple way to sort a list of numbers in ascending order is
   * insertion sort.
   */
  // Call counter, used only to trace recursion depth in the printed output.
  var cnt = 0
  // Insertion sort with tracing: recursively sort the tail, then insert
  // the head into the sorted result.
  def isort(xs: List[Int]): List[Int] = {
    cnt+=1
    println("isort",cnt,xs)
    if (xs.isEmpty) Nil
    else insert(cnt,xs.head, isort(xs.tail))
  }
  // Inserts x into the already-sorted list xs, printing each comparison.
  def insert(cnt: Int, x: Int, xs: List[Int]): List[Int] = {
    println(cnt,x,xs)
    if (xs.isEmpty || x <= xs.head) {print(" < " + x + " >< "+xs+" >");x :: xs}
    else {print(" << " + xs.head+" >><< "+x+" >>");xs.head :: insert(cnt,x, xs.tail)}
  }
  val list = List(5,3,4,2,7,8,2,9,1)
  println("=>"+isort(list))
}
package es.weso.main
import org.rogach.scallop._
import org.rogach.scallop.exceptions._
import es.weso.shacl._
/** Command-line option parser for the ShExperiments entry point, built on
  * Scallop. Required inputs are the data file and the schema; a validation
  * processor can be chosen from the set advertised by Processors.
  *
  * @param arguments raw command-line arguments
  * @param onError handler invoked when Scallop reports a parse error
  */
class MainOpts(
    arguments: Array[String],
    onError: (Throwable, Scallop) => Nothing) extends ScallopConf(arguments) {
  banner("""| ShExperiments
    | Options:
    |""".stripMargin)
  footer("Enjoy!")
  // Path (or IRI) of the data to validate. Required.
  val data = opt[String]("data",
    short = 'd',
    required = true,
    descr = "Data to validate")
  /* val dataFormat = opt[String]("data-format",
    noshort = true,
    required = false,
    default = Some("TURTLE"),
    descr = "Data to validate") */
  // Schema used for validation. Required.
  val schema = opt[String]("schema",
    short = 'x',
    required = true,
    descr = "Schema")
  // Validation processor; must be one of the names Processors knows about.
  val processor = opt[String]("processor",
    short = 'p',
    default = Some(Processors.default),
    required = true,
    descr = "Processor. Possible processors: " + Processors.toString,
    validate = (x => Processors.available(x))
  )
  /* val schemaFormat = opt[String]("schema-format",
    noshort = true,
    required = false,
    default = Some("TURTLE"),
    descr = "Data to validate") */
  // --time / --no-time toggle controlling whether timing info is displayed.
  val time = toggle("time",
    prefix = "no-",
    default = Some(false),
    descrYes = "show time",
    descrNo = "don't show time",
    short = 't')
  // Delegate Scallop failures to the handler supplied by the caller.
  override protected def onError(e: Throwable) = onError(e, builder)
}
| labra/shexperiments | src/main/scala/es/weso/main/MainOpts.scala | Scala | mit | 1,345 |
package com.norbitltd.spoiwo.model
import com.norbitltd.spoiwo.model.enums.CellBorderStyle
object CellBorders {

  /** Null-tolerant convenience factory: each argument defaults to null and is
    * wrapped with Option(...), which maps null to None, so callers only name
    * the sides they care about.
    */
  def apply(leftStyle: CellBorderStyle = null, leftColor: Color = null,
            topStyle: CellBorderStyle = null, topColor: Color = null,
            rightStyle: CellBorderStyle = null, rightColor: Color = null,
            bottomStyle: CellBorderStyle = null, bottomColor: Color = null): CellBorders =
    CellBorders(
      Option(leftStyle), Option(leftColor),
      Option(topStyle), Option(topColor),
      Option(rightStyle), Option(rightColor),
      Option(bottomStyle), Option(bottomColor)
    )
}
/** Immutable description of a cell's four borders; each side has an optional
  * style and an optional color. The with* methods return updated copies.
  */
case class CellBorders(leftStyle: Option[CellBorderStyle], leftColor: Option[Color],
                       topStyle: Option[CellBorderStyle], topColor: Option[Color],
                       rightStyle: Option[CellBorderStyle], rightColor: Option[Color],
                       bottomStyle: Option[CellBorderStyle], bottomColor: Option[Color]) {

  def withLeftStyle(leftStyle: CellBorderStyle) =
    copy(leftStyle = Option(leftStyle))

  def withLeftColor(leftColor: Color) =
    copy(leftColor = Option(leftColor))

  def withTopStyle(topStyle: CellBorderStyle) =
    copy(topStyle = Option(topStyle))

  def withTopColor(topColor: Color) =
    copy(topColor = Option(topColor))

  def withRightStyle(rightStyle: CellBorderStyle) =
    copy(rightStyle = Option(rightStyle))

  def withRightColor(rightColor: Color) =
    copy(rightColor = Option(rightColor))

  def withBottomStyle(bottomStyle: CellBorderStyle) =
    copy(bottomStyle = Option(bottomStyle))

  def withBottomColor(bottomColor: Color) =
    copy(bottomColor = Option(bottomColor))

  /** Applies the same style to all four sides. */
  def withStyle(style: CellBorderStyle) = {
    val styleOption = Option(style)
    copy(leftStyle = styleOption, topStyle = styleOption, rightStyle = styleOption, bottomStyle = styleOption)
  }

  /** Applies the same color to all four sides. */
  def withColor(color: Color) = {
    val colorOption = Option(color)
    copy(leftColor = colorOption, topColor = colorOption, rightColor = colorOption, bottomColor = colorOption)
  }

  /** Lists only the attributes that are actually set.
    *
    * Fix: the previous version concatenated the raw List of Options (so the
    * output contained "List(Some(...), None, ...)") and never emitted the
    * closing bracket; the set values are now flattened and comma-joined.
    */
  override def toString = "Cell Borders[" + List(
    leftStyle.map("left style" + _),
    leftColor.map("left color" + _),
    topStyle.map("top style" + _),
    topColor.map("top color" + _),
    rightStyle.map("right style" + _),
    rightColor.map("right color" + _),
    bottomStyle.map("bottom style" + _),
    bottomColor.map("bottom color" + _)
  ).flatten.mkString(", ") + "]"
}
| intracer/spoiwo | src/main/scala/com/norbitltd/spoiwo/model/CellBorders.scala | Scala | mit | 2,421 |
package bootstrap
import javax.inject.Inject
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.util.{Random, Try}
import com.google.inject.AbstractModule
import models.PhonebookEntry
import play.{Configuration, Environment}
import services.Phonebook
/** Module used to fill phonebook with random entries once on application start. */
/** Guice module used to fill the phonebook with random entries once on
  * application start (skipped under test, where fixtures are used instead).
  *
  * NOTE(review): the `configuration` parameter is currently unused -- Play's
  * module loader supplies it, but nothing here reads it.
  */
class PhonebookDatabaseModule(environment: Environment,
                              configuration: Configuration) extends AbstractModule {

  override def configure(): Unit = {
    // Random entries are unwieldy for testing, so tests will create their own module with fixed DB values.
    if (!environment.isTest) {
      // Eager singleton: forces RandomEntriesDBFiller's constructor (and thus
      // the DB population) to run at startup rather than on first injection.
      bind(classOf[RandomEntriesDBFiller]).asEagerSingleton()
    }
  }
}
/** Populates DB with a set of random entries. */
/** Populates DB with a set of random entries when the phonebook is empty. */
private[bootstrap] class RandomEntriesDBFiller @Inject()(phonebook: Phonebook)
                                                        (implicit executionContext: ExecutionContext) {

  // Number of random entries inserted into an empty phonebook.
  val randomEntriesCount = 20

  // NOTE(review): blocking without a timeout inside a constructor -- this
  // intentionally stalls application startup until seeding completes, but an
  // unresponsive DB would hang boot forever.
  Await.result(phonebook.count().map(entriesCount =>
    if (entriesCount == 0) {
      populatePhonebook(randomEntriesCount)
    }), Duration.Inf)

  // NOTE(review): "Bogdan " carries a trailing space -- likely unintended,
  // it leaks into generated full names.
  lazy val names = List(
    "Anatoliy", "Artemiy", "Bogdan ", "Bratislav", "Eduard", "Fyodor", "Gennadiy", "Gerasim", "Gleb",
    "Ignatiy", "Igor", "Ilya", "Jaromir", "Konstantin", "Lazar", "Leontiy", "Makar", "Nikita", "Panteley",
    "Timofey", "Vasiliy", "Viktor", "Vitaliy", "Vladimir", "Vyacheslav", "Yaroslav", "Yefrem", "Yegor"
  )

  lazy val surnames = List("Aleyev", "Beriya", "Bukov", "Bulgakov", "Gagarin", "Savvin", "Madulin", "Lipov",
    "Kandinsky", "Klimov", "Kurpatov", "Ipatyev", "Konnikov", "Zimin", "Tabakov", "Varushkin",
    "Nosachyov", "Preobrazhensky", "Sokolov", "Skorobogatov", "Kudryashov", "Strekalov", "Schastlivtsev", "Russkikh",
    "Shubin", "Kutuzov", "Kustov", "Vinokurov", "Khantsev", "Golubev")

  /**
    * Generates random phonebook entries and inserts them into DB.
    *
    * @param generatedEntriesCount Number of entries to generate.
    * @return Affected rows count.
    */
  def populatePhonebook(generatedEntriesCount: Int): Future[Try[Int]] = {
    require(generatedEntriesCount >= 0)
    val generatedEntries = for (_ <- 0 until generatedEntriesCount) yield randomPhonebookEntry()
    phonebook.insert(generatedEntries)
  }

  /**
    * Generates a single random phonebook entry.
    *
    * @return Random phonebook entry.
    */
  def randomPhonebookEntry(): PhonebookEntry = {
    // Uniform integer in [ra.head, ra.end) -- assumes an exclusive (`until`)
    // range; an inclusive (`to`) range would never yield its last value.
    def randNumber(ra: Range): Int = ra.head + Random.nextInt(ra.end - ra.head)
    def randElem[A](xs: List[A]): A = xs.apply(Random.nextInt(xs.size))
    // Russian-style mobile number: +7 XXX XXX-XX-XX.
    def randPhoneNumber(): String = {
      s"+7 ${randNumber(100 until 1000)} " +
        s"${randNumber(100 until 1000)}-${randNumber(10 until 100)}-${randNumber(10 until 100)}"
    }
    // "FirstName Surname" drawn independently from the two pools.
    def randName(): String = List(names, surnames).map(randElem).mkString(" ")

    PhonebookEntry(randName(), randPhoneNumber())
  }
}
package org.scaladebugger.api.lowlevel.requests.filters
import org.scaladebugger.api.lowlevel.requests.JDIRequestProcessor
import org.scaladebugger.api.lowlevel.requests.filters.processors.ClassExclusionFilterProcessor
/**
* Represents a filter used to limit requests to any class not specified
* by this filter. Requests are checked by verifying the class containing the
* current method being invoked.
*
* @note Only used by AccessWatchpointRequest, ClassPrepareRequest,
* ClassUnloadRequest, ExceptionRequest, MethodEntryRequest,
* MethodExitRequest, ModificationWatchpointRequest,
* MonitorContendedEnteredRequest, MonitorContendedEnterRequest,
* MonitorWaitedRequest, MonitorWaitRequest, and StepRequest.
*
* @param classPattern Classes whose names do not match this pattern will be
* excluded, can only take normal characters and wildcard
* "*", meaning "*.Foo" or "java.*"
*/
case class ClassExclusionFilter(classPattern: String) extends JDIRequestFilter {
  /**
   * Creates a new JDI request processor based on this filter.
   *
   * @return The new JDI request processor instance
   */
  override def toProcessor: JDIRequestFilterProcessor =
    // The processor captures this filter instance, including its pattern.
    new ClassExclusionFilterProcessor(this)
}
| chipsenkbeil/scala-debugger | scala-debugger-api/src/main/scala/org/scaladebugger/api/lowlevel/requests/filters/ClassExclusionFilter.scala | Scala | apache-2.0 | 1,278 |
package views.html
import play.templates._
import play.templates.TemplateMagic._
import play.api.templates._
import play.api.templates.PlayMagic._
import models._
import controllers._
import java.lang._
import java.util._
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import play.api.i18n._
import play.core.j.PlayMagicForJava._
import play.mvc._
import play.data._
import play.api.data.Field
import play.mvc.Http.Context.Implicit._
import views.html._
/**/
// Machine-generated Play 2 template (compiled from mail_detail.scala.html;
// see the "-- GENERATED --" trailer below). Renders the detail page of a
// single mail message. Do not edit by hand — regenerate from the .scala.html.
object mail_detail extends BaseScalaTemplate[play.api.templates.HtmlFormat.Appendable,Format[play.api.templates.HtmlFormat.Appendable]](play.api.templates.HtmlFormat) with play.api.templates.Template3[PMessage,String,String,play.api.templates.HtmlFormat.Appendable] {
/**/
// Renders the page. `mail` is the message being displayed; `username` and
// `userId` identify the logged-in viewer (`userId` is parsed to Long for the
// shared `fixed.header` fragment). NOTE(review): no comments can be placed
// inside the body below — its lines belong to triple-quoted HTML literals.
def apply/*1.2*/(mail: PMessage, username: String, userId: String):play.api.templates.HtmlFormat.Appendable = {
    _display_ {
Seq[Any](format.raw/*1.52*/("""
<html>
<head>
	<link href='"""),_display_(Seq[Any](/*5.22*/routes/*5.28*/.Assets.at("css/bootstrap.min.css"))),format.raw/*5.63*/("""' rel="stylesheet"/>
	<link href='"""),_display_(Seq[Any](/*6.22*/routes/*6.28*/.Assets.at("css/header.css"))),format.raw/*6.56*/("""' rel="stylesheet"/>
	<script src='"""),_display_(Seq[Any](/*7.23*/routes/*7.29*/.Assets.at("js/jquery-1.1.js"))),format.raw/*7.59*/("""'></script>
</head>
<body>
	<div id="header">"""),_display_(Seq[Any](/*10.27*/fixed/*10.32*/.header(username, Long.parseLong(userId), Long.parseLong(userId)))),format.raw/*10.97*/("""</div>
	<div class="container">
		<h1>Mail Detail</h1>
		<p>From: """),_display_(Seq[Any](/*13.23*/mail/*13.27*/.getFromUserMail)),format.raw/*13.43*/("""</p>
		<p>To: """),_display_(Seq[Any](/*14.21*/mail/*14.25*/.getToUserMail)),format.raw/*14.39*/("""</p>
		<p>Title: """),_display_(Seq[Any](/*15.24*/mail/*15.28*/.getMailTitle)),format.raw/*15.41*/("""</p>
		<p>Content: """),_display_(Seq[Any](/*16.26*/mail/*16.30*/.getMailContent)),format.raw/*16.45*/("""</p>
		<p>Date: """),_display_(Seq[Any](/*17.23*/mail/*17.27*/.getMailDate)),format.raw/*17.39*/("""</p>
		<h2>Reply</h2>
		"""),_display_(Seq[Any](/*20.14*/fixed/*20.19*/.write_message(mail.getFromUserMail))),format.raw/*20.55*/("""
	</div>
</body>
</html>
"""))}
    }
// Entry points required by the Play template runtime:
// render — plain application entry used by generated routing/controller code.
    def render(mail:PMessage,username:String,userId:String): play.api.templates.HtmlFormat.Appendable = apply(mail,username,userId)
// f — the template exposed as a curried function value.
    def f:((PMessage,String,String) => play.api.templates.HtmlFormat.Appendable) = (mail,username,userId) => apply(mail,username,userId)
// ref — self-reference used by the template engine.
    def ref: this.type = this
}
/*
-- GENERATED --
DATE: Thu Apr 07 14:52:40 PDT 2016
SOURCE: /home/dimitris/CMU/SA&D/Project/ApacheCMDA-Frontend/app/views/mail_detail.scala.html
HASH: 7110c46759fc0d4b0c517b9f7790e0eec0228825
MATRIX: 796->1|940->51|1016->92|1030->98|1086->133|1163->175|1177->181|1226->209|1304->252|1318->258|1369->288|1466->349|1480->354|1567->419|1697->513|1710->517|1748->533|1809->558|1822->562|1858->576|1922->604|1935->608|1970->621|2036->651|2049->655|2086->670|2149->697|2162->701|2196->713|2278->759|2292->764|2350->800
LINES: 26->1|29->1|33->5|33->5|33->5|34->6|34->6|34->6|35->7|35->7|35->7|38->10|38->10|38->10|41->13|41->13|41->13|42->14|42->14|42->14|43->15|43->15|43->15|44->16|44->16|44->16|45->17|45->17|45->17|48->20|48->20|48->20
-- GENERATED --
*/
| dsarlis/SAD-Spring-2016-Project-Team4 | ApacheCMDA-Frontend/target/scala-2.10/src_managed/main/views/html/mail_detail.template.scala | Scala | mit | 3,662 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.text
import java.nio.charset.{Charset, StandardCharsets}
import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, CompressionCodecs}
/**
* Options for the Text data source.
*/
private[text] class TextOptions(@transient private val parameters: CaseInsensitiveMap[String])
  extends Serializable {
  import TextOptions._
  // Convenience constructor: wraps a plain Map so option names are matched
  // case-insensitively.
  def this(parameters: Map[String, String]) = this(CaseInsensitiveMap(parameters))
  /**
   * Compression codec to use.
   */
  val compressionCodec = parameters.get(COMPRESSION).map(CompressionCodecs.getCodecClassName)
  /**
   * wholetext - If true, read a file as a single row and not split by "\\n".
   */
  val wholeText = parameters.getOrElse(WHOLETEXT, "false").toBoolean
  // Optional charset name used when decoding/encoding the text and the line
  // separator. When absent, UTF-8 is assumed (see lineSeparatorInRead below).
  val encoding: Option[String] = parameters.get(ENCODING)
  // User-supplied record separator; rejected when empty because an empty
  // separator would make record splitting ill-defined.
  val lineSeparator: Option[String] = parameters.get(LINE_SEPARATOR).map { lineSep =>
    require(lineSep.nonEmpty, s"'$LINE_SEPARATOR' cannot be an empty string.")
    lineSep
  }
  // Note that the option 'lineSep' uses a different default value in read and write.
  // On read: no default — None means "auto-detect / platform behavior" upstream;
  // the separator bytes are produced in the configured encoding (UTF-8 if unset).
  val lineSeparatorInRead: Option[Array[Byte]] = lineSeparator.map { lineSep =>
    lineSep.getBytes(encoding.map(Charset.forName(_)).getOrElse(StandardCharsets.UTF_8))
  }
  // On write: defaults to "\n" (UTF-8) when no separator was configured.
  val lineSeparatorInWrite: Array[Byte] =
    lineSeparatorInRead.getOrElse("\\n".getBytes(StandardCharsets.UTF_8))
}
/**
 * Option-key constants understood by the Text data source; referenced by
 * [[TextOptions]] when looking values up in the (case-insensitive) option map.
 */
private[datasources] object TextOptions {
  val COMPRESSION: String = "compression"
  val WHOLETEXT: String = "wholetext"
  val ENCODING: String = "encoding"
  val LINE_SEPARATOR: String = "lineSep"
}
| ahnqirage/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/text/TextOptions.scala | Scala | apache-2.0 | 2,354 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datastax.killrweather
/** Verifies that the configured data-load directory actually contains data files. */
class WeatherSettingsSpec extends AbstractSpec with TestFileHelper {

  val settings = new WeatherSettings()

  "WeatherSettings" must {
    "get the expected configurations" in {
      // fileFeed comes from TestFileHelper; the path/extension come from config.
      fileFeed(settings.DataLoadPath, settings.DataFileExtension).nonEmpty should be (true)
    }
  }
}
| chbatey/killrweather | killrweather-app/src/test/scala/com/datastax/killrweather/WeatherSettingsSpec.scala | Scala | apache-2.0 | 1,119 |
package org.jetbrains.plugins.scala.testingSupport.scalatest.scala2_10.scalatest2_2_1
import com.intellij.psi.search.ProjectScope
import org.jetbrains.plugins.scala.testingSupport.util.scalatest.ScalaTestFailureLocationFilter
/**
* @author Roman.Shein
* @since 31.01.2015.
*/
class GoToFailureLocationTest extends Scalatest2_10_2_2_1_Base {

  /**
   * Adds a deliberately failing ScalaTest spec to the test project, then checks
   * that the failure-location console filter turns the reported location line
   * into a clickable hyperlink.
   */
  def testFailureLocationHyperlink(): Unit = {
    addFileToProject("FailureLocationTest.scala",
      """
        |import org.scalatest._
        |
        |class FailureLocationTest extends FlatSpec with GivenWhenThen {
        |
        |  "failed test" should "fail" in {
        |    fail
        |  }
        |}
        |
      """.stripMargin
    )
    val locationFilter = new ScalaTestFailureLocationFilter(ProjectScope.getProjectScope(getProject))
    val errorLine = "ScalaTestFailureLocation: FailureLocationTest at (FailureLocationTest.scala:6)"
    val filterResult = locationFilter.applyFilter(errorLine, errorLine.length)
    assert(filterResult != null)
    assert(filterResult.getFirstHyperlinkInfo != null)
  }
}
| advancedxy/intellij-scala | test/org/jetbrains/plugins/scala/testingSupport/scalatest/scala2_10/scalatest2_2_1/GoToFailureLocationTest.scala | Scala | apache-2.0 | 1,188 |
package messages.parser
import messages.parser.MessageConstants._
class MessageTranslator {

  /** Turns a parsed [[Operation]] into a message ready for delivery. */
  def translate(operation: Operation): TranslatedMessage = operation match {
    case Send(content, recipients) => evaluateSend(content, recipients)
  }

  // Assembles body, attachments and recipient list for a Send operation.
  private def evaluateSend(content: Seq[Content], to: To): TranslatedMessage = {
    val (body, files) = evaluateContent(content)
    TranslatedMessage(body, files, evaluateRecipients(to))
  }

  private def evaluateRecipients(to: To): Seq[String] =
    to.recipients.map(_.value)

  // Splits the content entries by operator: message parts are joined into one
  // body separated by blank lines; attachment entries become a path list.
  private def evaluateContent(content: Seq[Content]): (String, Seq[String]) = {
    val messageParts = content.collect { case Content(text, MessageOperator) => text.value }
    val attachments = content.collect { case Content(path, AttachmentOperator) => path.value }
    (messageParts.mkString("\n\n"), attachments)
  }
}
/** Translation result: message body, attachment paths, and recipient addresses. */
case class TranslatedMessage(content: String, attachments: Seq[String], recipients: Seq[String])
| lymr/fun-chat | fun-chat-server/src/main/scala/messages/parser/MessageTranslator.scala | Scala | mit | 1,048 |
/*
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.summingbird.builder
import com.twitter.algebird.{ Monoid, Semigroup }
import com.twitter.bijection.Codec
import com.twitter.chill.IKryoRegistrar
import com.twitter.chill.java.IterableRegistrar
import com.twitter.storehaus.algebra.MergeableStore
import com.twitter.storehaus.ReadableStore
import com.twitter.summingbird._
import com.twitter.summingbird.batch.{ BatchID, Batcher }
import com.twitter.summingbird.scalding.{ Scalding, ScaldingEnv }
import com.twitter.summingbird.scalding.batch.BatchedStore
import com.twitter.summingbird.service.CompoundService
import com.twitter.summingbird.sink.{ CompoundSink, BatchedSinkFromOffline }
import com.twitter.summingbird.source.EventSource
import com.twitter.summingbird.store.CompoundStore
import com.twitter.summingbird.online._
import com.twitter.summingbird.storm.{
Storm,
StormEnv,
StormSink
}
import java.io.Serializable
import java.util.Date
/**
* The (deprecated) Summingbird builder API builds up a single
* MapReduce job using a SourceBuilder. After any number of calls to
* flatMap, leftJoin, filter, merge, etc, the user calls
* "groupAndSumTo", equivalent to "sumByKey" in the Producer
* API. This call converts the SourceBuilder into a CompletedBuilder
* and prevents and future flatMap operations.
*
* @author Oscar Boykin
* @author Sam Ritchie
* @author Ashu Singhal
*/
object SourceBuilder {
  // A producer graph that can target either Scalding (offline/batch) or
  // Storm (online), with each side optional.
  type PlatformPair = OptionalPlatform2[Scalding, Storm]
  type Node[T] = Producer[PlatformPair, T]
  // Monotonic counter ensuring every builder/node gets a unique name.
  private val nextId = new java.util.concurrent.atomic.AtomicLong(0)
  // Applies f to the Options stored under k, inserting default Options first
  // if the key is absent.
  def adjust[T](m: Map[T, Options], k: T)(f: Options => Options) =
    m.updated(k, f(m.getOrElse(k, Options())))
  // Builders combine with ++ (merge of their event streams), forming a semigroup.
  implicit def sg[T]: Semigroup[SourceBuilder[T]] =
    Semigroup.from(_ ++ _)
  // Fresh unique node name, e.g. "scala.Int_7".
  def nextName[T: Manifest]: String =
    "%s_%d".format(manifest[T], nextId.getAndIncrement)
  // Entry point: lifts an EventSource (offline scalding source and/or online
  // spout) plus a time extractor into a SourceBuilder over both platforms.
  def apply[T](eventSource: EventSource[T], timeOf: T => Date)(implicit mf: Manifest[T], eventCodec: Codec[T]) = {
    implicit val te = TimeExtractor[T](timeOf(_).getTime)
    val newID = nextName[T]
    val scaldingSource =
      eventSource.offline.map(s => Scalding.pipeFactory(s.scaldingSource(_)))
    val stormSource = eventSource.spout.map(Storm.toStormSource(_))
    new SourceBuilder[T](
      Source[PlatformPair, T]((scaldingSource, stormSource)),
      CompletedBuilder.injectionRegistrar[T](eventCodec),
      newID
    )
  }
}
/**
 * Deprecated builder wrapper around a Producer node targeting both Scalding
 * and Storm. Each transformation returns a new immutable builder; the chain is
 * finalized by one of the groupAndSumTo overloads.
 */
case class SourceBuilder[T: Manifest] private (
    @transient node: SourceBuilder.Node[T],
    @transient registrar: IKryoRegistrar,
    id: String,
    @transient opts: Map[String, Options] = Map.empty) extends Serializable {
  import SourceBuilder.{ adjust, Node, nextName }

  // Pure transformations; each wraps the underlying Producer node.
  def map[U: Manifest](fn: T => U): SourceBuilder[U] = copy(node = node.map(fn))
  def filter(fn: T => Boolean): SourceBuilder[T] = copy(node = node.filter(fn))
  def flatMap[U: Manifest](fn: T => TraversableOnce[U]): SourceBuilder[U] =
    copy(node = node.flatMap(fn))
  /**
   * This may be more efficient if you know you are not changing the values in
   * you flatMap.
   */
  def flatMapKeys[K1, K2, V](fn: K1 => TraversableOnce[K2])(implicit ev: T <:< (K1, V),
    key1Mf: Manifest[K1], key2Mf: Manifest[K2], valMf: Manifest[V]): SourceBuilder[(K2, V)] =
    copy(node = node.asInstanceOf[Node[(K1, V)]].flatMapKeys(fn))
  def flatMapBuilder[U: Manifest](newFlatMapper: FlatMapper[T, U]): SourceBuilder[U] =
    flatMap(newFlatMapper(_))

  // Writes the conversion of each event to the sink as a side effect, while the
  // returned builder continues with the ORIGINAL T values: the either/flatMap
  // merge below keeps only the Left (original) branch but forces the write
  // branch to remain part of the plan.
  def write[U](sink: CompoundSink[U])(conversion: T => TraversableOnce[U])(implicit batcher: Batcher, mf: Manifest[U]): SourceBuilder[T] = {
    val newNode =
      node.flatMap(conversion).write((
        sink.offline.map(new BatchedSinkFromOffline[U](batcher, _)),
        sink.online.map { supplier => new StormSink[U] { lazy val toFn = supplier() } }
      ))
    copy(
      node = node.either(newNode).flatMap[T] {
        case Left(t) => Some(t)
        case Right(u) => None
      }
    )
  }
  // Writes the events themselves (no conversion) to the sink.
  def write(sink: CompoundSink[T])(implicit batcher: Batcher): SourceBuilder[T] =
    copy(
      node = node.write((
        sink.offline.map(new BatchedSinkFromOffline[T](batcher, _)),
        sink.online.map { supplier => new StormSink[T] { lazy val toFn = supplier() } }
      ))
    )
  // Key-based lookup join against an external service (offline and/or online).
  def leftJoin[K, V, JoinedValue](service: CompoundService[K, JoinedValue])(implicit ev: T <:< (K, V), keyMf: Manifest[K], valMf: Manifest[V], joinedMf: Manifest[JoinedValue]): SourceBuilder[(K, (V, Option[JoinedValue]))] =
    copy(
      node = node.asInstanceOf[Node[(K, V)]].leftJoin((
        service.offline,
        service.online.map { fn: Function0[ReadableStore[K, JoinedValue]] =>
          ReadableServiceFactory(fn)
        }
      ))
    )
  /** Set's an Option on all nodes ABOVE this point */
  def set(opt: Any): SourceBuilder[T] = copy(opts = adjust(opts, id)(_.set(opt)))
  /**
   * Complete this builder instance with a BatchStore. At this point,
   * the Summingbird job can be executed on Hadoop.
   */
  def groupAndSumTo[K, V](store: BatchedStore[K, V])(
    implicit ev: T <:< (K, V),
    env: Env,
    keyMf: Manifest[K],
    valMf: Manifest[V],
    keyCodec: Codec[K],
    valCodec: Codec[V],
    batcher: Batcher,
    monoid: Monoid[V]): CompletedBuilder[_, K, V] =
    groupAndSumTo(CompoundStore.fromOffline(store))
  /**
   * Complete this builder instance with a MergeableStore. At this point,
   * the Summingbird job can be executed on Storm.
   */
  def groupAndSumTo[K, V](store: => MergeableStore[(K, BatchID), V])(
    implicit ev: T <:< (K, V),
    env: Env,
    keyMf: Manifest[K],
    valMf: Manifest[V],
    keyCodec: Codec[K],
    valCodec: Codec[V],
    batcher: Batcher,
    monoid: Monoid[V]): CompletedBuilder[_, K, V] =
    groupAndSumTo(CompoundStore.fromOnline(store))
  /**
   * Complete this builder instance with a CompoundStore. At this
   * point, the Summingbird job can be executed on Storm or Hadoop.
   * The concrete plan is selected by the runtime Env: Scalding plans use the
   * offline store (failing if only online sources/sinks were provided), Storm
   * plans the online one. As a side effect the CompletedBuilder is installed
   * on the Env before being returned.
   */
  def groupAndSumTo[K, V](store: CompoundStore[K, V])(
    implicit ev: T <:< (K, V),
    env: Env,
    keyMf: Manifest[K],
    valMf: Manifest[V],
    keyCodec: Codec[K],
    valCodec: Codec[V],
    batcher: Batcher,
    monoid: Monoid[V]): CompletedBuilder[_, K, V] = {
    val cb = env match {
      case scalding: ScaldingEnv =>
        val givenStore = store.offlineStore.getOrElse(sys.error("No offline store given in Scalding mode"))
        // Set the store to reset if needed
        val batchSetStore = scalding
          .initialBatch(batcher)
          .map { givenStore.withInitialBatch(_) }
          .getOrElse(givenStore)
        val newNode = OptionalUnzip2[Scalding, Storm]()(node)._1.map { p =>
          Producer.evToKeyed(p.name(id))
            .sumByKey(batchSetStore)
        }.getOrElse(sys.error("Scalding mode specified alongside some online-only Source, Service or Sink."))
        CompletedBuilder(newNode, registrar, batcher, keyCodec, valCodec, nextName[(K, V)], opts)
      case storm: StormEnv =>
        val supplier = store.onlineSupplier.getOrElse(sys.error("No online store given in Storm mode"))
        val givenStore = MergeableStoreFactory.from(supplier())
        val newNode = OptionalUnzip2[Scalding, Storm]()(node)._2.map { p =>
          Producer.evToKeyed(p.name(id))
            .sumByKey(givenStore)
        }.getOrElse(sys.error("Storm mode specified alongside some offline-only Source, Service or Sink."))
        CompletedBuilder(newNode, registrar, batcher, keyCodec, valCodec, nextName[(K, V)], opts)
      case _ => sys.error("Unknown environment: " + env)
    }
    env.builder = cb
    cb
  }
  // useful when you need to merge two different Event sources
  // Registrars and per-node options are combined; the merged node gets a
  // fresh unique id.
  def ++(other: SourceBuilder[T]): SourceBuilder[T] =
    copy(
      node = node.name(id).merge(other.node.name(other.id)),
      registrar = new IterableRegistrar(registrar, other.registrar),
      id = "merge_" + nextName[T],
      opts = opts ++ other.opts
    )
}
| twitter/summingbird | summingbird-builder/src/main/scala/com/twitter/summingbird/builder/SourceBuilder.scala | Scala | apache-2.0 | 8,433 |
package net.technowizardry
import java.io.InputStream
/**
 * InputStream decorator that echoes every byte it reads to stdout, for
 * debugging wire traffic. Delegates all reads to `inner` one byte at a time.
 */
class TracingInputStream(inner : InputStream) extends InputStream {
  /**
   * Reads a single byte from the wrapped stream and prints it (decoded via the
   * platform default charset, matching the original behavior for data bytes).
   *
   * Returns the byte as an Int in 0..255, or -1 at end of stream.
   *
   * Fix: the previous version printed unconditionally, so every read at EOF
   * emitted a spurious 0xFF character (`(-1).toByte`); the EOF sentinel is now
   * skipped. The non-idiomatic `return` was also removed.
   */
  override def read() : Int = {
    val t = inner.read()
    if (t != -1) print(new String(Array[Byte](t.toByte)))
    t
  }
}
package com.datastax.spark.connector.rdd
import org.apache.spark.metrics.InputMetricsUpdater
import com.datastax.driver.core.Session
import com.datastax.spark.connector._
import com.datastax.spark.connector.cql._
import com.datastax.spark.connector.rdd.reader._
import com.datastax.spark.connector.util.CqlWhereParser.{EqPredicate, InListPredicate, InPredicate, RangePredicate}
import com.datastax.spark.connector.util.{CountingIterator, CqlWhereParser}
import com.datastax.spark.connector.writer._
import com.datastax.spark.connector.util.Quote._
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partition, TaskContext}
import scala.reflect.ClassTag
/**
* An [[org.apache.spark.rdd.RDD RDD]] that will do a selecting join between `left` RDD and the specified
* Cassandra Table This will perform individual selects to retrieve the rows from Cassandra and will take
* advantage of RDDs that have been partitioned with the
* [[com.datastax.spark.connector.rdd.partitioner.ReplicaPartitioner]]
*
* @tparam L item type on the left side of the join (any RDD)
* @tparam R item type on the right side of the join (fetched from Cassandra)
*/
class CassandraJoinRDD[L, R] private[connector](
    left: RDD[L],
    val keyspaceName: String,
    val tableName: String,
    val connector: CassandraConnector,
    val columnNames: ColumnSelector = AllColumns,
    val joinColumns: ColumnSelector = PartitionKeyColumns,
    val where: CqlWhereClause = CqlWhereClause.empty,
    val limit: Option[Long] = None,
    val clusteringOrder: Option[ClusteringOrder] = None,
    val readConf: ReadConf = ReadConf(),
    manualRowReader: Option[RowReader[R]] = None,
    manualRowWriter: Option[RowWriter[L]] = None)(
  implicit
    val leftClassTag: ClassTag[L],
    val rightClassTag: ClassTag[R],
    @transient val rowWriterFactory: RowWriterFactory[L],
    @transient val rowReaderFactory: RowReaderFactory[R])
  extends CassandraRDD[(L, R)](left.sparkContext, left.dependencies)
  with CassandraTableRowReaderProvider[R] {

  override type Self = CassandraJoinRDD[L, R]

  override protected val classTag = rightClassTag

  // Prefer an explicitly supplied RowReader (set when this RDD is rebuilt from
  // a serialized template via applyToRDD); otherwise build one from the factory.
  override protected[connector] lazy val rowReader: RowReader[R] = manualRowReader match {
    case Some(rr) => rr
    case None => rowReaderFactory.rowReader (tableDef, columnNames.selectFrom (tableDef) )
  }

  // Rebuilds this RDD with some parameters replaced; used by the fluent
  // CassandraRDD API (where/limit/select/etc.).
  override protected def copy(
    columnNames: ColumnSelector = columnNames,
    where: CqlWhereClause = where,
    limit: Option[Long] = limit,
    clusteringOrder: Option[ClusteringOrder] = None,
    readConf: ReadConf = readConf,
    connector: CassandraConnector = connector): Self = {

    new CassandraJoinRDD[L, R](
      left = left,
      keyspaceName = keyspaceName,
      tableName = tableName,
      connector = connector,
      columnNames = columnNames,
      joinColumns = joinColumns,
      where = where,
      limit = limit,
      clusteringOrder = clusteringOrder,
      readConf = readConf)
  }

  // Resolves the join column selector into concrete column references.
  // Joining on AllColumns or on unnamed selections (CQL functions) is invalid.
  lazy val joinColumnNames: Seq[ColumnRef] = joinColumns match {
    case AllColumns => throw new IllegalArgumentException(
      "Unable to join against all columns in a Cassandra Table. Only primary key columns allowed.")
    case PartitionKeyColumns =>
      tableDef.partitionKey.map(col => col.columnName: ColumnRef)
    case SomeColumns(cs @ _*) =>
      checkColumnsExistence(cs)
      cs.map {
        case c: ColumnRef => c
        case _ => throw new IllegalArgumentException(
          "Unable to join against unnamed columns. No CQL Functions allowed.")
      }
  }

  // Server-side count: re-runs the join selecting only a row-count reference
  // and sums the per-key counts.
  override def cassandraCount(): Long = {
    columnNames match {
      case SomeColumns(_) =>
        logWarning("You are about to count rows but an explicit projection has been specified.")
      case _ =>
    }
    val counts =
      new CassandraJoinRDD[L, Long](
        left = left,
        connector = connector,
        keyspaceName = keyspaceName,
        tableName = tableName,
        columnNames = SomeColumns(RowCountRef),
        joinColumns = joinColumns,
        where = where,
        limit = limit,
        clusteringOrder = clusteringOrder,
        readConf= readConf)
    counts.map(_._2).reduce(_ + _)
  }

  /** This method will create the RowWriter required before the RDD is serialized.
    * This is called during getPartitions */
  protected def checkValidJoin(): Seq[ColumnRef] = {
    val partitionKeyColumnNames = tableDef.partitionKey.map(_.columnName).toSet
    val primaryKeyColumnNames = tableDef.primaryKey.map(_.columnName).toSet
    val colNames = joinColumnNames.map(_.columnName).toSet

    // Initialize RowWriter and Query to be used for accessing Cassandra
    rowWriter.columnNames
    singleKeyCqlQuery.length

    def checkSingleColumn(column: ColumnRef): Unit = {
      require(
        primaryKeyColumnNames.contains(column.columnName),
        s"Can't pushdown join on column $column because it is not part of the PRIMARY KEY")
    }

    // Make sure we have all of the clustering indexes between the 0th position and the max requested
    // in the join:
    val chosenClusteringColumns = tableDef.clusteringColumns
      .filter(cc => colNames.contains(cc.columnName))
    if (!tableDef.clusteringColumns.startsWith(chosenClusteringColumns)) {
      val maxCol = chosenClusteringColumns.last
      val maxIndex = maxCol.componentIndex.get
      val requiredColumns = tableDef.clusteringColumns.takeWhile(_.componentIndex.get <= maxIndex)
      val missingColumns = requiredColumns.toSet -- chosenClusteringColumns.toSet
      throw new IllegalArgumentException(
        s"Can't pushdown join on column $maxCol without also specifying [ $missingColumns ]")
    }
    val missingPartitionKeys = partitionKeyColumnNames -- colNames
    require(
      missingPartitionKeys.isEmpty,
      s"Can't join without the full partition key. Missing: [ $missingPartitionKeys ]")

    joinColumnNames.foreach(checkSingleColumn)
    joinColumnNames
  }

  // Writer that binds the join key columns of each left-side element into the
  // prepared statement; a manual writer wins (see applyToRDD).
  lazy val rowWriter = manualRowWriter match {
    case Some(rowWriter) => rowWriter
    case None => implicitly[RowWriterFactory[L]].rowWriter (tableDef, joinColumnNames.toIndexedSeq)
  }

  // Returns a copy of this RDD joining on a different column selector.
  def on(joinColumns: ColumnSelector): CassandraJoinRDD[L, R] = {
    new CassandraJoinRDD[L, R](
      left = left,
      connector = connector,
      keyspaceName = keyspaceName,
      tableName = tableName,
      columnNames = columnNames,
      joinColumns = joinColumns,
      where = where,
      limit = limit,
      clusteringOrder = clusteringOrder,
      readConf = readConf)
  }

  //We need to make sure we get selectedColumnRefs before serialization so that our RowReader is
  //built
  // The single-key SELECT used for every left-side element. Join columns become
  // named bind markers (:col); a column may not appear both as a join column
  // and in the user-supplied WHERE clause.
  lazy val singleKeyCqlQuery: (String) = {
    val whereClauses = where.predicates.flatMap(CqlWhereParser.parse)
    val joinColumns = joinColumnNames.map(_.columnName)
    val joinColumnPredicates = whereClauses.collect {
      case EqPredicate(c, _) if joinColumns.contains(c) => c
      case InPredicate(c) if joinColumns.contains(c) => c
      case InListPredicate(c, _) if joinColumns.contains(c) => c
      case RangePredicate(c, _, _) if joinColumns.contains(c) => c
    }.toSet

    require(
      joinColumnPredicates.isEmpty,
      s"""Columns specified in both the join on clause and the where clause.
         |Partition key columns are always part of the join clause.
         |Columns in both: ${joinColumnPredicates.mkString(", ")}""".stripMargin
    )

    logDebug("Generating Single Key Query Prepared Statement String")
    logDebug(s"SelectedColumns : $selectedColumnRefs -- JoinColumnNames : $joinColumnNames")
    val columns = selectedColumnRefs.map(_.cql).mkString(", ")
    val joinWhere = joinColumnNames.map(_.columnName).map(name => s"${quote(name)} = :$name")
    val limitClause = limit.map(limit => s"LIMIT $limit").getOrElse("")
    val orderBy = clusteringOrder.map(_.toCql(tableDef)).getOrElse("")
    val filter = (where.predicates ++ joinWhere).mkString(" AND ")
    val quotedKeyspaceName = quote(keyspaceName)
    val quotedTableName = quote(tableName)
    val query =
      s"SELECT $columns " +
        s"FROM $quotedKeyspaceName.$quotedTableName " +
        s"WHERE $filter $limitClause $orderBy"
    logDebug(s"Query : $query")
    query
  }

  /**
   * When computing a CassandraPartitionKeyRDD the data is selected via single CQL statements
   * from the specified C* Keyspace and Table. This will be preformed on whatever data is
   * available in the previous RDD in the chain.
   */
  override def compute(split: Partition, context: TaskContext): Iterator[(L, R)] = {
    val session = connector.openSession()
    val stmt = session.prepare(singleKeyCqlQuery).setConsistencyLevel(consistencyLevel)
    val bsb = new BoundStatementBuilder[L](rowWriter, stmt, where.values)
    val metricsUpdater = InputMetricsUpdater(context, readConf)
    val rowIterator = fetchIterator(session, bsb, left.iterator(split, context))
    val countingIterator = new CountingIterator(rowIterator, limit)

    // Session is closed (and metrics flushed) when the task finishes, not when
    // the iterator is exhausted. NOTE(review): the lambda parameter shadows
    // the method's `context` — same object, but worth renaming.
    context.addTaskCompletionListener { (context) =>
      val duration = metricsUpdater.finish() / 1000000000d
      logDebug(
        f"Fetched ${countingIterator.count} rows " +
          f"from $keyspaceName.$tableName " +
          f"for partition ${split.index} in $duration%.3f s.")
      session.close()
    }
    countingIterator
  }

  // One SELECT per left element; each result row is paired with the element
  // that produced it (so one left element can yield many output tuples).
  private def fetchIterator(
    session: Session,
    bsb: BoundStatementBuilder[L],
    lastIt: Iterator[L]): Iterator[(L, R)] = {
    val columnNamesArray = selectedColumnRefs.map(_.selectedAs).toArray
    for (leftSide <- lastIt;
         rightSide <- {
           val rs = session.execute(bsb.bind(leftSide))
           val iterator = new PrefetchingResultSetIterator(rs, fetchSize)
           iterator.map(rowReader.read(_, columnNamesArray))
         }) yield (leftSide, rightSide)
  }

  // Partitioning mirrors the left RDD; join validity is checked eagerly here
  // so bad joins fail at planning time rather than inside tasks.
  override protected def getPartitions: Array[Partition] = {
    verify()
    checkValidJoin()
    left.partitions
  }

  override def getPreferredLocations(split: Partition): Seq[String] = left.preferredLocations(split)

  override def toEmptyCassandraRDD: EmptyCassandraRDD[(L, R)] =
    new EmptyCassandraRDD[(L, R)](
      sc = left.sparkContext,
      keyspaceName = keyspaceName,
      tableName = tableName,
      columnNames = columnNames,
      where = where,
      limit = limit,
      clusteringOrder = clusteringOrder,
      readConf = readConf)

  /**
   * Turns this CassandraJoinRDD into a factory for converting other RDD's after being serialized
   * This method is for streaming operations as it allows us to Serialize a template JoinRDD
   * and the use that serializable template in the DStream closure. This gives us a fully serializable
   * joinWithCassandra operation
   */
  private[connector] def applyToRDD( left:RDD[L]): CassandraJoinRDD[L,R] = {
    new CassandraJoinRDD[L,R](
      left,
      keyspaceName,
      tableName,
      connector,
      columnNames,
      joinColumns,
      where,
      limit,
      clusteringOrder,
      readConf,
      Some(rowReader),
      Some(rowWriter)
    )
  }
}
/*
* Derived from https://github.com/spray/spray/blob/v1.1-M7/spray-http/src/main/scala/spray/http/parser/SimpleHeaders.scala
*
* Copyright (C) 2011-2012 spray.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
package parser
import cats.data.NonEmptyList
import java.net.InetAddress
import java.nio.charset.StandardCharsets
import org.http4s.headers._
import org.http4s.headers.ETag.EntityTag
import org.http4s.internal.parboiled2.Rule1
import org.http4s.syntax.string._
/**
* parser rules for all headers that can be parsed with one simple rule
*/
/**
 * parser rules for all headers that can be parsed with one simple rule
 *
 * Each method wraps the raw header value in a one-off Http4sHeaderParser
 * (parboiled2) and immediately runs it, returning ParseResult[H].
 */
private[parser] trait SimpleHeaders {

  // Allow: comma-separated method tokens. Tokens are assumed valid methods;
  // Method.fromString failing here would indicate a parser bug.
  def ALLOW(value: String): ParseResult[Allow] =
    new Http4sHeaderParser[Allow](value) {
      def entry = rule {
        zeroOrMore(Token).separatedBy(ListSep) ~ EOL ~> { ts: Seq[String] =>
          val ms = ts.map(
            Method
              .fromString(_)
              .toOption
              .getOrElse(sys.error("Impossible. Please file a bug report.")))
          Allow(ms.toSet)
        }
      }
    }.parse

  // Connection: one or more case-insensitive tokens.
  def CONNECTION(value: String): ParseResult[Connection] =
    new Http4sHeaderParser[Connection](value) {
      def entry = rule(
        oneOrMore(Token).separatedBy(ListSep) ~ EOL ~> { xs: Seq[String] =>
          Connection(xs.head.ci, xs.tail.map(_.ci): _*)
        }
      )
    }.parse

  // Content-Length: non-negative digits only.
  def CONTENT_LENGTH(value: String): ParseResult[`Content-Length`] =
    new Http4sHeaderParser[`Content-Length`](value) {
      def entry = rule {
        Digits ~ EOL ~> { s: String =>
          `Content-Length`.unsafeFromLong(s.toLong)
        }
      }
    }.parse

  def CONTENT_ENCODING(value: String): ParseResult[`Content-Encoding`] =
    new Http4sHeaderParser[`Content-Encoding`](value)
      with org.http4s.ContentCoding.ContentCodingParser {
      def entry = rule {
        EncodingRangeDecl ~ EOL ~> { c: ContentCoding =>
          `Content-Encoding`(c)
        }
      }
    }.parse

  // Content-Disposition: disposition type token plus ';'-separated parameters.
  def CONTENT_DISPOSITION(value: String): ParseResult[`Content-Disposition`] =
    new Http4sHeaderParser[`Content-Disposition`](value) {
      def entry = rule {
        Token ~ zeroOrMore(";" ~ OptWS ~ Parameter) ~ EOL ~> {
          (token: String, params: collection.Seq[(String, String)]) =>
            `Content-Disposition`(token, params.toMap)
        }
      }
    }.parse

  def DATE(value: String): ParseResult[Date] =
    new Http4sHeaderParser[Date](value) {
      def entry = rule {
        HttpDate ~ EOL ~> (Date(_))
      }
    }.parse

  // Expires: a proper HTTP date, plus lenient handling of bogus servers that
  // send a bare integer (0 → epoch-relative, negative → epoch).
  def EXPIRES(value: String): ParseResult[Expires] =
    new Http4sHeaderParser[Expires](value) {
      def entry = rule {
        HttpDate ~ EOL ~> (Expires(_)) | // Valid Expires header
          Digit1 ~ EOL ~> ((t: Int) =>
            Expires(org.http4s.HttpDate.unsafeFromEpochSecond(t.toLong / 1000L))) | // Used for bogus http servers returning 0
          NegDigit1 ~ EOL ~> Function
            .const(Expires(org.http4s.HttpDate.Epoch)) _ // Used for bogus http servers returning -1
      }
    }.parse

  // Retry-After: either an HTTP date or a delta-seconds integer.
  def RETRY_AFTER(value: String): ParseResult[`Retry-After`] =
    new Http4sHeaderParser[`Retry-After`](value) {
      def entry = rule {
        HttpDate ~ EOL ~> ((t: org.http4s.HttpDate) => `Retry-After`(t)) | // Date value
          Digits ~ EOL ~> ((t: String) => `Retry-After`.unsafeFromLong(t.toLong))
      }
    }.parse

  def AGE(value: String): ParseResult[Age] =
    new Http4sHeaderParser[Age](value) {
      def entry = rule {
        Digits ~ EOL ~> ((t: String) => Age.unsafeFromLong(t.toLong))
      }
    }.parse

  //  // Do not accept scoped IPv6 addresses as they should not appear in the Host header,
  //  // see also https://issues.apache.org/bugzilla/show_bug.cgi?id=35122 (WONTFIX in Apache 2 issue) and
  //  // https://bugzilla.mozilla.org/show_bug.cgi?id=464162 (FIXED in mozilla)
  def HOST(value: String): ParseResult[Host] =
    new Http4sHeaderParser[Host](value) with Rfc3986Parser {
      def charset = StandardCharsets.UTF_8

      def entry = rule {
        (Token | IpLiteral) ~ OptWS ~
          optional(":" ~ capture(oneOrMore(Digit)) ~> (_.toInt)) ~ EOL ~> (org.http4s.headers
          .Host(_: String, _: Option[Int]))
      }
    }.parse

  // Last-Event-Id: the raw remainder of the line becomes the event id.
  def LAST_EVENT_ID(value: String): ParseResult[`Last-Event-Id`] =
    new Http4sHeaderParser[`Last-Event-Id`](value) {
      def entry = rule {
        capture(zeroOrMore(ANY)) ~ EOL ~> { id: String =>
          `Last-Event-Id`(ServerSentEvent.EventId(id))
        }
      }
    }.parse

  def LAST_MODIFIED(value: String): ParseResult[`Last-Modified`] =
    new Http4sHeaderParser[`Last-Modified`](value) {
      def entry = rule {
        HttpDate ~ EOL ~> (`Last-Modified`(_))
      }
    }.parse

  def IF_MODIFIED_SINCE(value: String): ParseResult[`If-Modified-Since`] =
    new Http4sHeaderParser[`If-Modified-Since`](value) {
      def entry = rule {
        HttpDate ~ EOL ~> (`If-Modified-Since`(_))
      }
    }.parse

  def IF_UNMODIFIED_SINCE(value: String): ParseResult[`If-Unmodified-Since`] =
    new Http4sHeaderParser[`If-Unmodified-Since`](value) {
      def entry = rule {
        HttpDate ~ EOL ~> (`If-Unmodified-Since`(_))
      }
    }.parse

  def ETAG(value: String): ParseResult[ETag] =
    new Http4sHeaderParser[ETag](value) {
      def entry = rule { EntityTag ~> (ETag(_: ETag.EntityTag)) }
    }.parse

  // If-Match: either the wildcard "*" or a non-empty list of entity tags.
  def IF_MATCH(value: String): ParseResult[`If-Match`] =
    new Http4sHeaderParser[`If-Match`](value) {
      def entry = rule {
        "*" ~ push(`If-Match`.`*`) |
          oneOrMore(EntityTag).separatedBy(ListSep) ~> { tags: Seq[EntityTag] =>
            `If-Match`(Some(NonEmptyList.of(tags.head, tags.tail: _*)))
          }
      }
    }.parse

  def IF_NONE_MATCH(value: String): ParseResult[`If-None-Match`] =
    new Http4sHeaderParser[`If-None-Match`](value) {
      def entry = rule {
        "*" ~ push(`If-None-Match`.`*`) |
          oneOrMore(EntityTag).separatedBy(ListSep) ~> { tags: Seq[EntityTag] =>
            `If-None-Match`(Some(NonEmptyList.of(tags.head, tags.tail: _*)))
          }
      }
    }.parse

  // Transfer-Encoding delegates to TransferCoding's own list parser.
  def TRANSFER_ENCODING(value: String): ParseResult[`Transfer-Encoding`] =
    TransferCoding.parseList(value).map(`Transfer-Encoding`.apply)

  // User-Agent: a leading product token followed by further products/comments.
  // Comments are captured with their surrounding parentheses stripped.
  def USER_AGENT(value: String): ParseResult[`User-Agent`] =
    new Http4sHeaderParser[`User-Agent`](value) {
      def entry = rule {
        product ~ zeroOrMore(RWS ~ (product | comment)) ~> {
          (product: AgentProduct, tokens: collection.Seq[AgentToken]) =>
            (`User-Agent`(product, tokens.toList))
        }
      }

      def product: Rule1[AgentProduct] = rule {
        Token ~ optional("/" ~ Token) ~> (AgentProduct(_, _))
      }

      def comment: Rule1[AgentComment] = rule {
        capture(Comment) ~> { s: String =>
          AgentComment(s.substring(1, s.length - 1))
        }
      }

      def RWS = rule { oneOrMore(anyOf(" \\t")) }
    }.parse

  // X-Forwarded-For: list of IPv4/IPv6 addresses; the literal "unknown"
  // becomes None in the resulting sequence.
  def X_FORWARDED_FOR(value: String): ParseResult[`X-Forwarded-For`] =
    new Http4sHeaderParser[`X-Forwarded-For`](value) with IpParser {
      def entry = rule {
        oneOrMore(
          (capture(IpV4Address | IpV6Address) ~> { s: String =>
            Some(InetAddress.getByName(s))
          }) |
            ("unknown" ~ push(None))).separatedBy(ListSep) ~
          EOL ~> { xs: Seq[Option[InetAddress]] =>
          `X-Forwarded-For`(xs.head, xs.tail: _*)
        }
      }
    }.parse
}
| ChristopherDavenport/http4s | core/src/main/scala/org/http4s/parser/SimpleHeaders.scala | Scala | apache-2.0 | 7,890 |
package controllers.admin
import play.api.mvc._
import play.api.libs.json.Json
import play.api.libs.iteratee._
import play.api.libs.concurrent.Promise
import java.util.concurrent.TimeUnit
import play.api.libs.concurrent.Execution.Implicits._
import java.util.Date
import ch.qos.logback.classic.spi.ILoggingEvent
import java.text.SimpleDateFormat
import models.Client
import org.slf4j.LoggerFactory
import ch.qos.logback.classic.{Level, LoggerContext}
import scala.collection.JavaConversions._
/*
* Code inspired from https://github.com/playframework/Play20/tree/master/samples/scala/comet-live-monitoring
*/
object Monitor extends Controller {

  // Path of the application log file, read once; `.get` fails fast at class
  // initialization if the key is missing from the configuration.
  private val logFile = play.api.Play.current.configuration.getString("soapower.log.file").get

  /** Streams the raw log file back to the client as a file download. */
  def downloadLogFile = Action {
    Ok.sendFile(new java.io.File(logFile))
  }

  /** WebSocket pushing live monitoring data. Incoming client messages are
    * ignored; the outgoing stream interleaves the metric enumerators
    * (CPU, heap, total memory, live logs, request count) from [[Streams]].
    */
  def socket = WebSocket.using[String] {
    request =>
      val in = Iteratee.ignore[String]
      val out = Streams.getCPU >-
        Streams.getHeap >-
        Streams.getTotalMemory >-
        Streams.liveEnumerator >-
        Streams.getNbRequests
      (in, out)
  }

  /** Returns the configured log file path as JSON. */
  def logfile = Action {
    Ok(Json.toJson(logFile)).as(JSON)
  }

  /** Triggers a JVM garbage collection (best effort — gc() is only a hint). */
  def gc = Action {
    Runtime.getRuntime().gc()
    Ok("Done")
  }

  implicit val loggersFormat = Json.format[JsonLogger]

  /** Lists all logback loggers with their effective levels as JSON. */
  def loggers = Action {
    // Use explicit converters instead of the deprecated implicit
    // JavaConversions in scope at the top of the file: the original relied on
    // implicit wrapping both for `.toList` on the java.util.List returned by
    // getLoggerList and for `ListBuffer#add` (a java.util.List method only
    // reachable through an implicit conversion).
    import scala.collection.JavaConverters._
    val loggerContext: LoggerContext = LoggerFactory.getILoggerFactory().asInstanceOf[LoggerContext]
    val loggersList = loggerContext.getLoggerList.asScala.toList.map { logger =>
      new JsonLogger(logger.getName, logger.getEffectiveLevel().toString())
    }
    Ok(Json.toJson(loggersList)).as(JSON)
  }

  /** Sets the level of the named logger at runtime. */
  def changeLevel(loggerName: String, newLevel: String) = Action {
    val loggerContext: LoggerContext = LoggerFactory.getILoggerFactory().asInstanceOf[LoggerContext]
    loggerContext.getLogger(loggerName).setLevel(Level.valueOf(newLevel))
    Ok("Done")
  }
}
/** JSON-serializable view of a logback logger: its name and effective level. */
case class JsonLogger(name: String, level: String)
/** Shared sources for the monitoring WebSocket.
  *
  * Each metric enumerator periodically emits a `<value>:<tag>` string
  * (tags: memory, cpu, totalMemory, nbReq) — presumably dispatched by tag on
  * the client side; confirm against the consuming JavaScript.
  */
object Streams {

  // Refresh periods in milliseconds: fast for JVM metrics, slower for the
  // request counter.
  private val timeRefreshMillis = 1100
  private val timeRefreshMillisLong = 5000

  // NOTE(review): SimpleDateFormat is not thread-safe and pushLog may be
  // invoked from logback appender threads — confirm single-threaded use or
  // guard this instance.
  private val dateFormat = new SimpleDateFormat("HH:mm:ss.SSSS")

  // Used heap (total - free) in MiB, emitted every timeRefreshMillis.
  val getHeap = Enumerator.generateM({
    Promise.timeout(
      Some((Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) / (1024 * 1024) + ":memory"),
      timeRefreshMillis, TimeUnit.MILLISECONDS)
  })

  val cpu = new models.CPU()

  // CPU usage scaled by *1000 then /10.0, i.e. one decimal place — presumably
  // getCpuUsage() returns a 0..1 fraction turned into a percentage; confirm.
  val getCPU = Enumerator.generateM({
    Promise.timeout(Some((cpu.getCpuUsage() * 1000).round / 10.0 + ":cpu"), timeRefreshMillis, TimeUnit.MILLISECONDS)
  })

  // Total JVM memory in MiB.
  val getTotalMemory = Enumerator.generateM({
    Promise.timeout(
      Some(Runtime.getRuntime().totalMemory() / (1024 * 1024) + ":totalMemory")
      , timeRefreshMillis, TimeUnit.MILLISECONDS)
  })

  // Broadcast pair: pushLog feeds channelLogs; every open socket consumes
  // liveEnumerator.
  val (liveEnumerator, channelLogs) = Concurrent.broadcast[String]

  /** Renders a logback event as HTML and broadcasts it to connected sockets. */
  def pushLog(event: ILoggingEvent) {
    channelLogs.push(views.html.monitor.log.render(dateFormat.format(new Date(event.getTimeStamp)), event).toString)
  }

  // Request counter, refreshed on the slower period.
  val getNbRequests = Enumerator.generateM({
    Promise.timeout(
      Some(Client.getNbRequest + ":nbReq"), timeRefreshMillisLong, TimeUnit.MILLISECONDS)
  })
}
| soapower/soapower | app/controllers/admin/Monitor.scala | Scala | gpl-3.0 | 3,208 |
/*
* Copyright (c) 2017 joesan @ http://github.com/joesan
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.inland24.plantsim.core
import com.inland24.plantsim.models.PowerPlantSignal
import monix.reactive.OverflowStrategy.Unbounded
import monix.execution.{Ack, Scheduler}
import monix.reactive.observers.Subscriber
import monix.reactive.subjects.ConcurrentSubject
/** Thin forwarding wrapper around a [[ConcurrentSubject]] of power-plant
  * signals: every Observer / Observable operation is delegated to the wrapped
  * subject untouched.
  */
final class PowerPlantEventObservable private (
    underlying: ConcurrentSubject[PowerPlantSignal, PowerPlantSignal])
    extends ConcurrentSubject[PowerPlantSignal, PowerPlantSignal] {

  // Observable side: subscriptions go straight to the wrapped subject.
  override def unsafeSubscribeFn(s: Subscriber[PowerPlantSignal]) =
    underlying.unsafeSubscribeFn(s)

  override def size: Int = underlying.size

  // Observer side: forward signals and terminal events verbatim.
  override def onNext(elem: PowerPlantSignal): Ack = underlying.onNext(elem)

  override def onError(ex: Throwable): Unit = underlying.onError(ex)

  override def onComplete(): Unit = underlying.onComplete()
}
object PowerPlantEventObservable {

  /** Creates a publish-subject-backed observable running on the given
    * scheduler.
    *
    * Fix: the supplied scheduler `s` was previously shadowed by an
    * `implicit val s = Scheduler.Implicits.global`, so the caller's scheduler
    * was silently ignored; it is now actually used by the subject.
    */
  def apply(s: Scheduler): PowerPlantEventObservable = {
    implicit val scheduler: Scheduler = s
    new PowerPlantEventObservable(
      ConcurrentSubject.publish[PowerPlantSignal](Unbounded))
  }
}
| joesan/plant-simulator | app/com/inland24/plantsim/core/PowerPlantEventObservable.scala | Scala | apache-2.0 | 1,727 |
package dotty.tools.dotc.transform
import dotty.tools.dotc.ast.Trees._
import dotty.tools.dotc.ast.tpd
import dotty.tools.dotc.core.Contexts.Context
import dotty.tools.dotc.core.Decorators._
import dotty.tools.dotc.core.DenotTransformers.DenotTransformer
import dotty.tools.dotc.core.Denotations.SingleDenotation
import dotty.tools.dotc.core.Symbols._
import dotty.tools.dotc.core.Types._
import dotty.tools.dotc.core._
import dotty.tools.dotc.transform.TailRec._
import dotty.tools.dotc.transform.TreeTransforms.{TransformerInfo, MiniPhaseTransform}
/**
* A Tail Rec Transformer
*
* @author Erik Stenman, Iulian Dragos,
* ported to dotty by Dmitry Petrashko
* @version 1.1
*
* What it does:
* <p>
* Finds method calls in tail-position and replaces them with jumps.
* A call is in a tail-position if it is the last instruction to be
* executed in the body of a method. This is done by recursing over
* the trees that may contain calls in tail-position (trees that can't
* contain such calls are not transformed). However, they are not that
* many.
* </p>
* <p>
* Self-recursive calls in tail-position are replaced by jumps to a
* label at the beginning of the method. As the JVM provides no way to
* jump from a method to another one, non-recursive calls in
* tail-position are not optimized.
* </p>
* <p>
* A method call is self-recursive if it calls the current method and
* the method is final (otherwise, it could
* be a call to an overridden method in a subclass).
*
* Recursive calls on a different instance
* are optimized. Since 'this' is not a local variable it s added as
* a label parameter.
* </p>
* <p>
* This phase has been moved before pattern matching to catch more
* of the common cases of tail recursive functions. This means that
* more cases should be taken into account (like nested function, and
* pattern cases).
* </p>
* <p>
* If a method contains self-recursive calls, a label is added to at
* the beginning of its body and the calls are replaced by jumps to
* that label.
* </p>
* <p>
*
* In scalac, If the method had type parameters, the call must contain same
* parameters as type arguments. This is no longer case in dotc.
* In scalac, this is named tailCall but it does only provide optimization for
* self recursive functions, that's why it's renamed to tailrec
* </p>
*/
class TailRec extends MiniPhaseTransform with DenotTransformer with FullParameterization { thisTransform =>

  import dotty.tools.dotc.ast.tpd._

  override def transform(ref: SingleDenotation)(implicit ctx: Context): SingleDenotation = ref

  override def phaseName: String = "tailrec"

  override def treeTransformPhase = thisTransform // TODO Make sure tailrec runs at next phase.

  final val labelPrefix = "tailLabel"
  final val labelFlags = Flags.Synthetic | Flags.Label

  /** Creates the fresh label-method symbol that rewritten tail calls jump to.
    * Its type is the fully parameterized form of `method`'s type, so that
    * `this` (and, when abstracting over the class, its type parameters)
    * become explicit parameters of the label.
    */
  private def mkLabel(method: Symbol, abstractOverClass: Boolean)(implicit c: Context): TermSymbol = {
    val name = c.freshName(labelPrefix)
    c.newSymbol(method, name.toTermName, labelFlags, fullyParameterizedType(method.info, method.enclosingClass.asClass, abstractOverClass))
  }

  override def transformDefDef(tree: tpd.DefDef)(implicit ctx: Context, info: TransformerInfo): tpd.Tree = {
    val sym = tree.symbol
    tree match {
      case dd@DefDef(name, tparams, vparamss0, tpt, _)
        if (sym.isEffectivelyFinal) && !((sym is Flags.Accessor) || (dd.rhs eq EmptyTree) || (sym is Flags.Label)) =>
        // @tailrec turns "could not rewrite" from a silent no-op into an error.
        val mandatory = sym.hasAnnotation(defn.TailrecAnnotationClass)
        atGroupEnd { implicit ctx: Context =>

          cpy.DefDef(dd)(rhs = {

            val defIsTopLevel = sym.owner.isClass
            val origMeth = sym
            val label = mkLabel(sym, abstractOverClass = defIsTopLevel)
            val owner = ctx.owner.enclosingClass.asClass
            val thisTpe = owner.thisType.widen

            var rewrote = false

            // Note: this can be split in two separate transforms (in different
            // groups): the first would collect which rewritings should be
            // applied and the second would actually apply them. For now this
            // speculatively transforms the tree and throws away the result in
            // many cases.
            val rhsSemiTransformed = {
              val transformer = new TailRecElimination(origMeth, owner, thisTpe, mandatory, label, abstractOverClass = defIsTopLevel)
              val rhs = atGroupEnd(transformer.transform(dd.rhs)(_))
              rewrote = transformer.rewrote
              rhs
            }

            if (rewrote) {
              // Wrap the rewritten body in the label method and emit a single
              // initial call to it.
              val dummyDefDef = cpy.DefDef(tree)(rhs = rhsSemiTransformed)
              val res = fullyParameterizedDef(label, dummyDefDef, abstractOverClass = defIsTopLevel)
              val call = forwarder(label, dd, abstractOverClass = defIsTopLevel)
              Block(List(res), call)
            } else {
              if (mandatory)
                ctx.error("TailRec optimisation not applicable, method not tail recursive", dd.pos)
              dd.rhs
            }
          })
        }
      case d: DefDef if d.symbol.hasAnnotation(defn.TailrecAnnotationClass) =>
        ctx.error("TailRec optimisation not applicable, method is neither private nor final so can be overridden", d.pos)
        d
      case d if d.symbol.hasAnnotation(defn.TailrecAnnotationClass) =>
        ctx.error("TailRec optimisation not applicable, not a method", d.pos)
        d
      case _ => tree
    }
  }

  /** Walks one method body, rewriting self-recursive calls in tail position
    * into applications of `label`. `rewrote` records whether any call was
    * actually rewritten.
    */
  class TailRecElimination(method: Symbol, enclosingClass: Symbol, thisType: Type, isMandatory: Boolean, label: Symbol, abstractOverClass: Boolean) extends tpd.TreeMap {

    import dotty.tools.dotc.ast.tpd._

    var rewrote = false

    private val defaultReason = "it contains a recursive call not in tail position"

    // Whether the tree currently being visited is in tail position.
    private var ctx: TailContext = yesTailContext

    /** Rewrite this tree to contain no tail recursive calls */
    def transform(tree: Tree, nctx: TailContext)(implicit c: Context): Tree = {
      if (ctx == nctx) transform(tree)
      else {
        val saved = ctx
        ctx = nctx
        try transform(tree)
        finally this.ctx = saved
      }
    }

    def yesTailTransform(tree: Tree)(implicit c: Context): Tree =
      transform(tree, yesTailContext)

    def noTailTransform(tree: Tree)(implicit c: Context): Tree =
      transform(tree, noTailContext)

    def noTailTransforms[Tr <: Tree](trees: List[Tr])(implicit c: Context): List[Tr] =
      trees.map(noTailTransform).asInstanceOf[List[Tr]]

    override def transform(tree: Tree)(implicit c: Context): Tree = {
      /* A possibly polymorphic apply to be considered for tail call transformation. */
      def rewriteApply(tree: Tree, sym: Symbol): Tree = {
        // Peels TypeApply/Apply/Select layers off a call, accumulating the
        // value argument lists and type arguments down to the receiver.
        def receiverArgumentsAndSymbol(t: Tree, accArgs: List[List[Tree]] = Nil, accT: List[Tree] = Nil):
        (Tree, Tree, List[List[Tree]], List[Tree], Symbol) = t match {
          case TypeApply(fun, targs) if fun.symbol eq t.symbol => receiverArgumentsAndSymbol(fun, accArgs, targs)
          case Apply(fn, args) if fn.symbol == t.symbol => receiverArgumentsAndSymbol(fn, args :: accArgs, accT)
          case Select(qual, _) => (qual, t, accArgs, accT, t.symbol)
          case x: This => (x, x, accArgs, accT, x.symbol)
          case x: Ident if x.symbol eq method => (EmptyTree, x, accArgs, accT, x.symbol)
          case x => (x, x, accArgs, accT, x.symbol)
        }

        val (receiver, call, arguments, typeArguments, symbol) = receiverArgumentsAndSymbol(tree)
        val recv = noTailTransform(receiver)

        val targs = typeArguments.map(noTailTransform)
        val argumentss = arguments.map(noTailTransforms)

        val recvWiden = recv.tpe.widenDealias

        val receiverIsSame = enclosingClass.typeRef.widenDealias =:= recvWiden
        // Names are interned in dotc, so `eq` is a valid name comparison.
        // Fixed: this previously compared `method.name eq sym` — a Name
        // against a Symbol — which could never be true, so recursive calls
        // through a supertype were never reported.
        val receiverIsSuper = (method.name eq sym.name) && enclosingClass.typeRef.widen <:< recvWiden
        val receiverIsThis = recv.tpe =:= thisType
        val isRecursiveCall = (method eq sym)

        // Leave the call as a normal (transformed) application.
        def continue = {
          val method = noTailTransform(call)
          val methodWithTargs = if (targs.nonEmpty) TypeApply(method, targs) else method
          if (methodWithTargs.tpe.widen.isParameterless) methodWithTargs
          else argumentss.foldLeft(methodWithTargs) {
            // case (method, args) => Apply(method, args) // Dotty deviation no auto-detupling yet. Interesting that one can do it in Scala2!
            (method, args) => Apply(method, args)
          }
        }
        // Report (error only under @tailrec) and fall back to a normal call.
        def fail(reason: String) = {
          if (isMandatory) c.error(s"Cannot rewrite recursive call: $reason", tree.pos)
          else c.debuglog("Cannot rewrite recursive call at: " + tree.pos + " because: " + reason)
          continue
        }

        // Replace the call by an application of the label, passing the
        // receiver as the first (explicit) argument.
        def rewriteTailCall(recv: Tree): Tree = {
          c.debuglog("Rewriting tail recursive call:  " + tree.pos)
          rewrote = true
          val receiver = noTailTransform(recv)

          val callTargs: List[tpd.Tree] =
            if (abstractOverClass) {
              val classTypeArgs = recv.tpe.baseTypeWithArgs(enclosingClass).argInfos
              targs ::: classTypeArgs.map(x => ref(x.typeSymbol))
            } else targs

          val method = Apply(if (callTargs.nonEmpty) TypeApply(Ident(label.termRef), callTargs) else Ident(label.termRef),
            List(receiver))

          val res =
            if (method.tpe.widen.isParameterless) method
            else argumentss.foldLeft(method) {
              (met, ar) => Apply(met, ar) // Dotty deviation no auto-detupling yet.
            }
          res
        }

        if (isRecursiveCall) {
          if (ctx.tailPos) {
            if (recv eq EmptyTree) rewriteTailCall(This(enclosingClass.asClass))
            else if (receiverIsSame || receiverIsThis) rewriteTailCall(recv)
            else fail("it changes type of 'this' on a polymorphic recursive call")
          }
          else fail(defaultReason)
        } else {
          if (receiverIsSuper) fail("it contains a recursive call targeting a supertype")
          else continue
        }
      }

      def rewriteTry(tree: Try): Try = {
        if (tree.finalizer eq EmptyTree) {
          // SI-1672 Catches are in tail position when there is no finalizer
          tpd.cpy.Try(tree)(
            noTailTransform(tree.expr),
            transformSub(tree.cases),
            EmptyTree
          )
        }
        else {
          // A finalizer runs after the result is computed, so nothing inside
          // this try/catch is in tail position.
          tpd.cpy.Try(tree)(
            noTailTransform(tree.expr),
            noTailTransforms(tree.cases),
            noTailTransform(tree.finalizer)
          )
        }
      }

      val res: Tree = tree match {

        case Ident(_) =>
          val sym = tree.symbol
          if (sym == method && ctx.tailPos) rewriteApply(tree, sym)
          else tree

        case tree: Select =>
          val sym = tree.symbol
          if (sym == method && ctx.tailPos) rewriteApply(tree, sym)
          else tpd.cpy.Select(tree)(noTailTransform(tree.qualifier), tree.name)

        case Apply(fun, args) =>
          val meth = fun.symbol
          // || and && short-circuit: their right operand stays in tail position.
          if (meth == defn.Boolean_|| || meth == defn.Boolean_&&)
            tpd.cpy.Apply(tree)(fun, transform(args))
          else
            rewriteApply(tree, meth)

        case tree@Block(stats, expr) =>
          tpd.cpy.Block(tree)(
            noTailTransforms(stats),
            transform(expr)
          )

        case tree@If(cond, thenp, elsep) =>
          tpd.cpy.If(tree)(
            noTailTransform(cond),
            transform(thenp),
            transform(elsep)
          )

        case tree@CaseDef(_, _, body) =>
          cpy.CaseDef(tree)(body = transform(body))

        case tree@Match(selector, cases) =>
          tpd.cpy.Match(tree)(
            noTailTransform(selector),
            transformSub(cases)
          )

        case tree: Try =>
          rewriteTry(tree)

        case Alternative(_) | Bind(_, _) =>
          assert(false, "We should never have gotten inside a pattern")
          tree

        case ValDef(_, _, _) | EmptyTree | Super(_, _) | This(_) |
             Literal(_) | TypeTree(_) | DefDef(_, _, _, _, _) | TypeDef(_, _) =>
          tree

        case Return(expr, from) =>
          tpd.cpy.Return(tree)(noTailTransform(expr), from)

        case t: DefDef =>
          t // todo: could improve to handle DefDef's with a label flag calls to which are in tail position

        case _ =>
          super.transform(tree)
      }

      res
    }
  }

  /** If references to original `target` from fully parameterized method `derived` should be
    * rewired to some fully parameterized method, that method symbol,
    * otherwise NoSymbol.
    */
  override protected def rewiredTarget(target: Symbol, derived: Symbol)(implicit ctx: Context): Symbol = NoSymbol
}
object TailRec {

  /** Flags whether the tree currently being visited is in tail position. */
  final class TailContext(val tailPos: Boolean) extends AnyVal

  // Shared singleton contexts; TailContext is an AnyVal so these avoid boxing.
  final val noTailContext = new TailContext(false)
  final val yesTailContext = new TailContext(true)
}
| folone/dotty | src/dotty/tools/dotc/transform/TailRec.scala | Scala | bsd-3-clause | 13,474 |
package org.apache.spark.sql.sparkcv
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.sql.sources.{BaseRelation, Filter, InsertableRelation, PrunedFilteredScan}
import org.apache.spark.sql.types.StructType
/** Spark SQL relation for image data.
  *
  * NOTE(review): both operations are still placeholders — `insert` only logs
  * a TODO and `buildScan` ignores the requested columns and filters, always
  * returning an empty RDD.
  */
case class ImageRelation(sqlContext: SQLContext, parameters: Map[String, String], schema: StructType)
  extends BaseRelation
    with InsertableRelation
    with PrunedFilteredScan
    with Logging {

  // Stub: accepts the DataFrame but performs no write.
  override def insert(data: DataFrame, overwrite: Boolean): Unit = logInfo("[TODO] insert")

  // Stub: pruning/filtering arguments are unused; scan yields no rows.
  override def buildScan(requiredColumns: Array[String], filters: Array[Filter]): RDD[Row] = sqlContext.emptyDataFrame.rdd
}
| miguel0afd/sparkCV | src/main/scala/org/apache/spark/sql/sparkcv/ImageRelation.scala | Scala | apache-2.0 | 732 |
package org.bitcoins.core.protocol.script
import org.bitcoins.core.gen.ScriptGenerators
import org.scalacheck.{Prop, Properties}
/**
* Created by chris on 6/22/16.
*/
/** Property-based check that a multisig script signature survives a
  * round trip through its hex serialization.
  */
class MultiSignatureScriptSignatureSpec extends Properties("MultiSignatureScriptSigSpec") {

  property("Serialization symmetry") =
    Prop.forAll(ScriptGenerators.multiSignatureScriptSignature) { scriptSig =>
      // Re-parse the generated signature from its hex form and compare.
      val roundTripped = MultiSignatureScriptSignature(scriptSig.hex)
      roundTripped == scriptSig
    }
}
| SuredBits/bitcoin-s-sidechains | src/test/scala/org/bitcoins/core/protocol/script/MultiSignatureScriptSignatureSpec.scala | Scala | mit | 481 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.planning
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.trees.TreeNode
/**
* Given a [[LogicalPlan]], returns a list of `PhysicalPlan`s that can
* be used for execution. If this strategy does not apply to the given logical operation then an
* empty list should be returned.
*/
abstract class GenericStrategy[PhysicalPlan <: TreeNode[PhysicalPlan]] extends Logging {

  /**
   * Returns a placeholder for a physical plan that executes `plan`. This placeholder will be
   * filled in automatically by the QueryPlanner using the other execution strategies that are
   * available.
   */
  protected def planLater(plan: LogicalPlan): PhysicalPlan

  /** Plans `plan`; returns an empty list when this strategy does not apply. */
  def apply(plan: LogicalPlan): Seq[PhysicalPlan]
}
/**
* Abstract class for transforming [[LogicalPlan]]s into physical plans.
* Child classes are responsible for specifying a list of [[GenericStrategy]] objects that
* each of which can return a list of possible physical plan options.
* If a given strategy is unable to plan all of the remaining operators in the tree,
* it can call [[GenericStrategy#planLater planLater]], which returns a placeholder
* object that will be [[collectPlaceholders collected]] and filled in
* using other available strategies.
*
* TODO: RIGHT NOW ONLY ONE PLAN IS RETURNED EVER...
* PLAN SPACE EXPLORATION WILL BE IMPLEMENTED LATER.
*
* @tparam PhysicalPlan The type of physical plan produced by this [[QueryPlanner]]
*/
abstract class QueryPlanner[PhysicalPlan <: TreeNode[PhysicalPlan]] {
  /** A list of execution strategies that can be used by the planner */
  def strategies: Seq[GenericStrategy[PhysicalPlan]]

  /** Recursively plans `plan`, expanding any `planLater` placeholders the
    * strategies leave behind. The result is a lazy iterator: strategies are
    * only consulted as candidates are pulled.
    */
  def plan(plan: LogicalPlan): Iterator[PhysicalPlan] = {
    // Obviously a lot to do here still...

    // Collect physical plan candidates.
    val candidates = strategies.iterator.flatMap(_(plan))

    // The candidates may contain placeholders marked as [[planLater]],
    // so try to replace them by their child plans.
    val plans = candidates.flatMap { candidate =>
      val placeholders = collectPlaceholders(candidate)

      if (placeholders.isEmpty) {
        // Take the candidate as is because it does not contain placeholders.
        Iterator(candidate)
      } else {
        // Plan the logical plan marked as [[planLater]] and replace the placeholders.
        // Each placeholder multiplies the candidate set by the number of ways
        // its logical subtree can be planned (cross product via foldLeft).
        placeholders.iterator.foldLeft(Iterator(candidate)) {
          case (candidatesWithPlaceholders, (placeholder, logicalPlan)) =>
            // Plan the logical plan for the placeholder.
            val childPlans = this.plan(logicalPlan)

            candidatesWithPlaceholders.flatMap { candidateWithPlaceholders =>
              childPlans.map { childPlan =>
                // Replace the placeholder by the child plan
                // (reference equality, so only this exact node is swapped).
                candidateWithPlaceholders.transformUp {
                  case p if p.eq(placeholder) => childPlan
                }
              }
            }
        }
      }
    }

    val pruned = prunePlans(plans)
    assert(pruned.hasNext, s"No plan for $plan")
    pruned
  }

  /**
   * Collects placeholders marked using [[GenericStrategy#planLater planLater]]
   * by [[strategies]].
   */
  protected def collectPlaceholders(plan: PhysicalPlan): Seq[(PhysicalPlan, LogicalPlan)]

  /** Prunes bad plans to prevent combinatorial explosion. */
  protected def prunePlans(plans: Iterator[PhysicalPlan]): Iterator[PhysicalPlan]
}
| michalsenkyr/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/QueryPlanner.scala | Scala | apache-2.0 | 4,301 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.admin
import java.util.Properties
import joptsimple._
import kafka.common.{AdminCommandFailedException, Topic}
import kafka.consumer.Whitelist
import kafka.log.LogConfig
import kafka.server.ConfigType
import kafka.utils.ZkUtils._
import kafka.utils._
import org.I0Itec.zkclient.exception.ZkNodeExistsException
import org.apache.kafka.common.errors.TopicExistsException
import org.apache.kafka.common.security.JaasUtils
import org.apache.kafka.common.utils.Utils
import scala.collection.JavaConverters._
import scala.collection._
object TopicCommand extends Logging {
  /** CLI entry point: validates that exactly one action flag is present,
    * connects to ZooKeeper, dispatches to the matching action, and exits
    * with 0 on success or 1 on any failure.
    */
  def main(args: Array[String]): Unit = {

    val opts = new TopicCommandOptions(args)

    if(args.length == 0)
      CommandLineUtils.printUsageAndDie(opts.parser, "Create, delete, describe, or change a topic.")

    // should have exactly one action
    val actions = Seq(opts.createOpt, opts.listOpt, opts.alterOpt, opts.describeOpt, opts.deleteOpt).count(opts.options.has _)
    if(actions != 1)
      CommandLineUtils.printUsageAndDie(opts.parser, "Command must include exactly one action: --list, --describe, --create, --alter or --delete")

    opts.checkArgs()

    // 30000 ms timeouts — presumably ZK session and connection timeouts;
    // confirm against the ZkUtils.apply signature.
    val zkUtils = ZkUtils(opts.options.valueOf(opts.zkConnectOpt),
                          30000,
                          30000,
                          JaasUtils.isZkSecurityEnabled())
    var exitCode = 0
    try {
      if(opts.options.has(opts.createOpt))
        createTopic(zkUtils, opts)
      else if(opts.options.has(opts.alterOpt))
        alterTopic(zkUtils, opts)
      else if(opts.options.has(opts.listOpt))
        listTopics(zkUtils, opts)
      else if(opts.options.has(opts.describeOpt))
        describeTopic(zkUtils, opts)
      else if(opts.options.has(opts.deleteOpt))
        deleteTopic(zkUtils, opts)
    } catch {
      // Top-level CLI handler: catching Throwable is deliberate here so every
      // failure is reported and mapped to a non-zero exit code.
      case e: Throwable =>
        println("Error while executing topic command : " + e.getMessage)
        error(Utils.stackTrace(e))
        exitCode = 1
    } finally {
      // Always release the ZK connection, then terminate with the recorded code.
      zkUtils.close()
      System.exit(exitCode)
    }
  }
private def getTopics(zkUtils: ZkUtils, opts: TopicCommandOptions): Seq[String] = {
val allTopics = zkUtils.getAllTopics().sorted
if (opts.options.has(opts.topicOpt)) {
val topicsSpec = opts.options.valueOf(opts.topicOpt)
val topicsFilter = new Whitelist(topicsSpec)
allTopics.filter(topicsFilter.isTopicAllowed(_, excludeInternalTopics = false))
} else
allTopics
}
  /** Creates a topic, either from an explicit replica assignment or from
    * --partitions/--replication-factor. With --if-not-exists, an existing
    * topic is silently tolerated.
    */
  def createTopic(zkUtils: ZkUtils, opts: TopicCommandOptions) {
    val topic = opts.options.valueOf(opts.topicOpt)
    val configs = parseTopicConfigsToBeAdded(opts)
    val ifNotExists = opts.options.has(opts.ifNotExistsOpt)
    if (Topic.hasCollisionChars(topic))
      println("WARNING: Due to limitations in metric names, topics with a period ('.') or underscore ('_') could collide. To avoid issues it is best to use either, but not both.")
    try {
      if (opts.options.has(opts.replicaAssignmentOpt)) {
        // Manual mode: the user supplies the full partition -> replicas map.
        val assignment = parseReplicaAssignment(opts.options.valueOf(opts.replicaAssignmentOpt))
        AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, assignment, configs, update = false)
      } else {
        // Automatic mode: both counts are required; rack awareness can be disabled.
        CommandLineUtils.checkRequiredArgs(opts.parser, opts.options, opts.partitionsOpt, opts.replicationFactorOpt)
        val partitions = opts.options.valueOf(opts.partitionsOpt).intValue
        val replicas = opts.options.valueOf(opts.replicationFactorOpt).intValue
        val rackAwareMode = if (opts.options.has(opts.disableRackAware)) RackAwareMode.Disabled
                            else RackAwareMode.Enforced
        AdminUtils.createTopic(zkUtils, topic, partitions, replicas, configs, rackAwareMode)
      }
      println("Created topic \"%s\".".format(topic))
    } catch  {
      // Swallow "already exists" only when --if-not-exists was given.
      case e: TopicExistsException => if (!ifNotExists) throw e
    }
  }
  /** Alters matching topics: updates/deletes per-topic configs (deprecated
    * path — kafka-configs.sh is preferred) and/or increases the partition
    * count. With --if-exists, a non-matching --topic is not an error.
    */
  def alterTopic(zkUtils: ZkUtils, opts: TopicCommandOptions) {
    val topics = getTopics(zkUtils, opts)
    val ifExists = opts.options.has(opts.ifExistsOpt)
    if (topics.isEmpty && !ifExists) {
      throw new IllegalArgumentException("Topic %s does not exist on ZK path %s".format(opts.options.valueOf(opts.topicOpt),
          opts.options.valueOf(opts.zkConnectOpt)))
    }
    topics.foreach { topic =>
      val configs = AdminUtils.fetchEntityConfig(zkUtils, ConfigType.Topic, topic)
      if(opts.options.has(opts.configOpt) || opts.options.has(opts.deleteConfigOpt)) {
        println("WARNING: Altering topic configuration from this script has been deprecated and may be removed in future releases.")
        println("         Going forward, please use kafka-configs.sh for this functionality")
        val configsToBeAdded = parseTopicConfigsToBeAdded(opts)
        val configsToBeDeleted = parseTopicConfigsToBeDeleted(opts)
        // compile the final set of configs: existing + added - deleted
        configs.putAll(configsToBeAdded)
        configsToBeDeleted.foreach(config => configs.remove(config))
        AdminUtils.changeTopicConfig(zkUtils, topic, configs)
        println("Updated config for topic \"%s\".".format(topic))
      }
      if(opts.options.has(opts.partitionsOpt)) {
        // The internal offsets topic's partition count is fixed.
        if (topic == Topic.GroupMetadataTopicName) {
          throw new IllegalArgumentException("The number of partitions for the offsets topic cannot be changed.")
        }
        println("WARNING: If partitions are increased for a topic that has a key, the partition " +
          "logic or ordering of the messages will be affected")
        val nPartitions = opts.options.valueOf(opts.partitionsOpt).intValue
        val replicaAssignmentStr = opts.options.valueOf(opts.replicaAssignmentOpt)
        AdminUtils.addPartitions(zkUtils, topic, nPartitions, replicaAssignmentStr)
        println("Adding partitions succeeded!")
      }
    }
  }
def listTopics(zkUtils: ZkUtils, opts: TopicCommandOptions) {
val topics = getTopics(zkUtils, opts)
for(topic <- topics) {
if (zkUtils.pathExists(getDeleteTopicPath(topic))) {
println("%s - marked for deletion".format(topic))
} else {
println(topic)
}
}
}
  /** Marks matching topics for deletion by creating the corresponding ZK
    * node; actual deletion happens asynchronously (and only if the broker has
    * delete.topic.enable=true). Internal topics are refused.
    */
  def deleteTopic(zkUtils: ZkUtils, opts: TopicCommandOptions) {
    val topics = getTopics(zkUtils, opts)
    val ifExists = opts.options.has(opts.ifExistsOpt)
    if (topics.isEmpty && !ifExists) {
      throw new IllegalArgumentException("Topic %s does not exist on ZK path %s".format(opts.options.valueOf(opts.topicOpt),
          opts.options.valueOf(opts.zkConnectOpt)))
    }
    topics.foreach { topic =>
      try {
        if (Topic.isInternal(topic)) {
          throw new AdminOperationException("Topic %s is a kafka internal topic and is not allowed to be marked for deletion.".format(topic))
        } else {
          zkUtils.createPersistentPath(getDeleteTopicPath(topic))
          println("Topic %s is marked for deletion.".format(topic))
          println("Note: This will have no impact if delete.topic.enable is not set to true.")
        }
      } catch {
        // The delete marker already exists: a previous request is pending.
        case _: ZkNodeExistsException =>
          println("Topic %s is already marked for deletion.".format(topic))
        case e: AdminOperationException =>
          throw e
        // NOTE(review): catching Throwable traps fatal errors and drops the
        // original cause (it is not chained onto the new exception) —
        // consider NonFatal plus cause chaining.
        case _: Throwable =>
          throw new AdminOperationException("Error while deleting topic %s".format(topic))
      }
    }
  }
  /** Prints details for matching topics. The report flags narrow the output:
    * --under-replicated-partitions / --unavailable-partitions restrict the
    * partition lines, --topics-with-overrides restricts to the config header.
    */
  def describeTopic(zkUtils: ZkUtils, opts: TopicCommandOptions) {
    val topics = getTopics(zkUtils, opts)
    val reportUnderReplicatedPartitions = opts.options.has(opts.reportUnderReplicatedPartitionsOpt)
    val reportUnavailablePartitions = opts.options.has(opts.reportUnavailablePartitionsOpt)
    val reportOverriddenConfigs = opts.options.has(opts.topicsWithOverridesOpt)
    val liveBrokers = zkUtils.getAllBrokersInCluster().map(_.id).toSet
    for (topic <- topics) {
      zkUtils.getPartitionAssignmentForTopics(List(topic)).get(topic) match {
        case Some(topicPartitionAssignment) =>
          // Header only in default mode or with --topics-with-overrides;
          // partition lines are suppressed by --topics-with-overrides.
          val describeConfigs: Boolean = !reportUnavailablePartitions && !reportUnderReplicatedPartitions
          val describePartitions: Boolean = !reportOverriddenConfigs
          val sortedPartitions = topicPartitionAssignment.toList.sortWith((m1, m2) => m1._1 < m2._1)
          if (describeConfigs) {
            val configs = AdminUtils.fetchEntityConfig(zkUtils, ConfigType.Topic, topic).asScala
            if (!reportOverriddenConfigs || configs.nonEmpty) {
              val numPartitions = topicPartitionAssignment.size
              // Replication factor is taken from the first partition's replica list.
              val replicationFactor = topicPartitionAssignment.head._2.size
              println("Topic:%s\tPartitionCount:%d\tReplicationFactor:%d\tConfigs:%s"
                .format(topic, numPartitions, replicationFactor, configs.map(kv => kv._1 + "=" + kv._2).mkString(",")))
            }
          }
          if (describePartitions) {
            for ((partitionId, assignedReplicas) <- sortedPartitions) {
              val inSyncReplicas = zkUtils.getInSyncReplicasForPartition(topic, partitionId)
              val leader = zkUtils.getLeaderForPartition(topic, partitionId)
              // Show the line unless filtered out: under-replicated means the
              // ISR is smaller than the assignment; unavailable means the
              // leader is missing or not among the live brokers.
              if ((!reportUnderReplicatedPartitions && !reportUnavailablePartitions) ||
                  (reportUnderReplicatedPartitions && inSyncReplicas.size < assignedReplicas.size) ||
                  (reportUnavailablePartitions && (leader.isEmpty || !liveBrokers.contains(leader.get)))) {
                print("\tTopic: " + topic)
                print("\tPartition: " + partitionId)
                print("\tLeader: " + (if(leader.isDefined) leader.get else "none"))
                print("\tReplicas: " + assignedReplicas.mkString(","))
                println("\tIsr: " + inSyncReplicas.mkString(","))
              }
            }
          }
        case None =>
          println("Topic " + topic + " doesn't exist!")
      }
    }
  }
  /** Parses the --config key=val options into validated Properties.
    *
    * Note: splitting on `\s*=\s*` plus the length-2 requirement means values
    * may not themselves contain '=' and empty values ("key=") are rejected.
    */
  def parseTopicConfigsToBeAdded(opts: TopicCommandOptions): Properties = {
    val configsToBeAdded = opts.options.valuesOf(opts.configOpt).asScala.map(_.split("""\s*=\s*"""))
    require(configsToBeAdded.forall(config => config.length == 2),
      "Invalid topic config: all configs to be added must be in the format \"key=val\".")
    val props = new Properties
    configsToBeAdded.foreach(pair => props.setProperty(pair(0).trim, pair(1).trim))
    LogConfig.validate(props)
    // Warn when a per-topic message format version is given: the broker may
    // ignore it depending on inter.broker.protocol.version.
    if (props.containsKey(LogConfig.MessageFormatVersionProp)) {
      println(s"WARNING: The configuration ${LogConfig.MessageFormatVersionProp}=${props.getProperty(LogConfig.MessageFormatVersionProp)} is specified. " +
        s"This configuration will be ignored if the version is newer than the inter.broker.protocol.version specified in the broker.")
    }
    props
  }
def parseTopicConfigsToBeDeleted(opts: TopicCommandOptions): Seq[String] = {
if (opts.options.has(opts.deleteConfigOpt)) {
val configsToBeDeleted = opts.options.valuesOf(opts.deleteConfigOpt).asScala.map(_.trim())
val propsToBeDeleted = new Properties
configsToBeDeleted.foreach(propsToBeDeleted.setProperty(_, ""))
LogConfig.validateNames(propsToBeDeleted)
configsToBeDeleted
}
else
Seq.empty
}
def parseReplicaAssignment(replicaAssignmentList: String): Map[Int, List[Int]] = {
val partitionList = replicaAssignmentList.split(",")
val ret = new mutable.HashMap[Int, List[Int]]()
for (i <- 0 until partitionList.size) {
val brokerList = partitionList(i).split(":").map(s => s.trim().toInt)
val duplicateBrokers = CoreUtils.duplicates(brokerList)
if (duplicateBrokers.nonEmpty)
throw new AdminCommandFailedException("Partition replica lists may not contain duplicate entries: %s".format(duplicateBrokers.mkString(",")))
ret.put(i, brokerList.toList)
if (ret(i).size != ret(0).size)
throw new AdminOperationException("Partition " + i + " has different replication factor: " + brokerList)
}
ret.toMap
}
  /**
   * Declares and validates the command-line options accepted by the topic command.
   * Construction parses `args`; call [[checkArgs]] afterwards to enforce the
   * required/invalid-combination rules before acting on the options.
   */
  class TopicCommandOptions(args: Array[String]) {
    val parser = new OptionParser
    // --zookeeper is the only universally required option.
    val zkConnectOpt = parser.accepts("zookeeper", "REQUIRED: The connection string for the zookeeper connection in the form host:port. " +
      "Multiple URLS can be given to allow fail-over.")
      .withRequiredArg
      .describedAs("urls")
      .ofType(classOf[String])
    // Exactly one of the following action options is expected per invocation.
    val listOpt = parser.accepts("list", "List all available topics.")
    val createOpt = parser.accepts("create", "Create a new topic.")
    val deleteOpt = parser.accepts("delete", "Delete a topic")
    val alterOpt = parser.accepts("alter", "Alter the number of partitions, replica assignment, and/or configuration for the topic.")
    val describeOpt = parser.accepts("describe", "List details for the given topics.")
    val helpOpt = parser.accepts("help", "Print usage information.")
    val topicOpt = parser.accepts("topic", "The topic to be create, alter or describe. Can also accept a regular " +
      "expression except for --create option")
      .withRequiredArg
      .describedAs("topic")
      .ofType(classOf[String])
    val nl = System.getProperty("line.separator")
    val configOpt = parser.accepts("config", "A topic configuration override for the topic being created or altered." +
      "The following is a list of valid configurations: " + nl + LogConfig.configNames.map("\t" + _).mkString(nl) + nl +
      "See the Kafka documentation for full details on the topic configs.")
      .withRequiredArg
      .describedAs("name=value")
      .ofType(classOf[String])
    val deleteConfigOpt = parser.accepts("delete-config", "A topic configuration override to be removed for an existing topic (see the list of configurations under the --config option).")
      .withRequiredArg
      .describedAs("name")
      .ofType(classOf[String])
    val partitionsOpt = parser.accepts("partitions", "The number of partitions for the topic being created or " +
      "altered (WARNING: If partitions are increased for a topic that has a key, the partition logic or ordering of the messages will be affected")
      .withRequiredArg
      .describedAs("# of partitions")
      .ofType(classOf[java.lang.Integer])
    val replicationFactorOpt = parser.accepts("replication-factor", "The replication factor for each partition in the topic being created.")
      .withRequiredArg
      .describedAs("replication factor")
      .ofType(classOf[java.lang.Integer])
    val replicaAssignmentOpt = parser.accepts("replica-assignment", "A list of manual partition-to-broker assignments for the topic being created or altered.")
      .withRequiredArg
      .describedAs("broker_id_for_part1_replica1 : broker_id_for_part1_replica2 , " +
                   "broker_id_for_part2_replica1 : broker_id_for_part2_replica2 , ...")
      .ofType(classOf[String])
    // Filters applicable only to --describe output.
    val reportUnderReplicatedPartitionsOpt = parser.accepts("under-replicated-partitions",
                                                            "if set when describing topics, only show under replicated partitions")
    val reportUnavailablePartitionsOpt = parser.accepts("unavailable-partitions",
                                                        "if set when describing topics, only show partitions whose leader is not available")
    val topicsWithOverridesOpt = parser.accepts("topics-with-overrides",
                                                "if set when describing topics, only show topics that have overridden configs")
    val ifExistsOpt = parser.accepts("if-exists",
                                     "if set when altering or deleting topics, the action will only execute if the topic exists")
    val ifNotExistsOpt = parser.accepts("if-not-exists",
                                        "if set when creating topics, the action will only execute if the topic does not already exist")
    val disableRackAware = parser.accepts("disable-rack-aware", "Disable rack aware replica assignment")
    val forceOpt = parser.accepts("force", "Suppress console prompts")

    val options = parser.parse(args : _*)

    val allTopicLevelOpts: Set[OptionSpec[_]] = Set(alterOpt, createOpt, describeOpt, listOpt, deleteOpt)

    // Exits (via CommandLineUtils) with a usage message when a rule is violated.
    def checkArgs() {
      // check required args
      CommandLineUtils.checkRequiredArgs(parser, options, zkConnectOpt)
      if (!options.has(listOpt) && !options.has(describeOpt))
        CommandLineUtils.checkRequiredArgs(parser, options, topicOpt)

      // check invalid args
      CommandLineUtils.checkInvalidArgs(parser, options, configOpt, allTopicLevelOpts -- Set(alterOpt, createOpt))
      CommandLineUtils.checkInvalidArgs(parser, options, deleteConfigOpt, allTopicLevelOpts -- Set(alterOpt))
      CommandLineUtils.checkInvalidArgs(parser, options, partitionsOpt, allTopicLevelOpts -- Set(alterOpt, createOpt))
      CommandLineUtils.checkInvalidArgs(parser, options, replicationFactorOpt, allTopicLevelOpts -- Set(createOpt))
      CommandLineUtils.checkInvalidArgs(parser, options, replicaAssignmentOpt, allTopicLevelOpts -- Set(createOpt,alterOpt))
      // --replica-assignment fixes both partition count and replication factor, so the
      // explicit options are mutually exclusive with it on --create.
      if(options.has(createOpt))
        CommandLineUtils.checkInvalidArgs(parser, options, replicaAssignmentOpt, Set(partitionsOpt, replicationFactorOpt))
      // The three --describe filters are pairwise exclusive.
      CommandLineUtils.checkInvalidArgs(parser, options, reportUnderReplicatedPartitionsOpt,
        allTopicLevelOpts -- Set(describeOpt) + reportUnavailablePartitionsOpt + topicsWithOverridesOpt)
      CommandLineUtils.checkInvalidArgs(parser, options, reportUnavailablePartitionsOpt,
        allTopicLevelOpts -- Set(describeOpt) + reportUnderReplicatedPartitionsOpt + topicsWithOverridesOpt)
      CommandLineUtils.checkInvalidArgs(parser, options, topicsWithOverridesOpt,
        allTopicLevelOpts -- Set(describeOpt) + reportUnderReplicatedPartitionsOpt + reportUnavailablePartitionsOpt)
      CommandLineUtils.checkInvalidArgs(parser, options, ifExistsOpt, allTopicLevelOpts -- Set(alterOpt, deleteOpt))
      CommandLineUtils.checkInvalidArgs(parser, options, ifNotExistsOpt, allTopicLevelOpts -- Set(createOpt))
    }
  }
def askToProceed: Unit = {
println("Are you sure you want to continue? [y/n]")
if (!Console.readLine().equalsIgnoreCase("y")) {
println("Ending your session")
System.exit(0)
}
}
}
| eribeiro/kafka | core/src/main/scala/kafka/admin/TopicCommand.scala | Scala | apache-2.0 | 19,503 |
/*
* Copyright (c) 2013-2014 Telefónica Investigación y Desarrollo S.A.U.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.tid.cosmos.infinity.streams
import java.io.InputStream
/** State in which the underlying stream is open and actively being read.
  *
  * Every successful read advances `context.position` so the context always
  * reflects the current offset within the logical stream.
  */
private[streams] class ReadingStreamState(stream: InputStream) extends StreamState {

  /** Closes the current stream and records the target offset; the actual
    * repositioning is deferred to the seeking state. */
  override def seek(context: StreamContext, pos: Long): Unit = {
    stream.close()
    context.position = pos
    context.setState(SeekingStreamState)
  }

  override def read(context: StreamContext, b: Array[Byte], off: Int, length: Int): Int = {
    val count = stream.read(b, off, length)
    if (count >= 0) {
      context.position += count
    }
    count
  }

  override def read(context: StreamContext): Int = {
    val byteRead = stream.read()
    if (byteRead >= 0) {
      context.position += 1
    }
    byteRead
  }

  /** Closes the stream and moves the context to its terminal state. */
  override def close(context: StreamContext): Unit = {
    stream.close()
    context.setState(ClosedStreamState)
  }
}
| telefonicaid/fiware-cosmos-platform | infinity/driver/src/main/scala/es/tid/cosmos/infinity/streams/ReadingStreamState.scala | Scala | apache-2.0 | 1,510 |
package at.bioinform.webapp.components
import at.bioinform.webapp.Repositories
/** Component exposing the application's [[Repositories]].
  *
  * Implementors supply the concrete instance; presumably cake-pattern wiring —
  * confirm against the classes that mix this trait in.
  */
trait RepositoriesComponent {
  // Abstract accessor overridden with the concrete repositories bundle.
  def repositories: Repositories
}
| peri4n/bIO | subprojects/webapp/src/main/scala/at/bioinform/webapp/components/RepositoriesComponent.scala | Scala | apache-2.0 | 148 |
package api
import scala.concurrent.ExecutionContext
import spray.routing.Directives
import spray.http.MediaTypes.{ `application/json` }
import akka.actor.ActorRef
import akka.util.Timeout
import spray.httpx.SprayJsonSupport
import scala.concurrent.duration.Duration
import spray.routing.HttpService
import spray.routing.authentication.BasicAuth
import spray.routing.directives.CachingDirectives._
import spray.httpx.encoding._
import scala.reflect.ClassTag
import models._
import models.JsonProtocol._
import scala.concurrent.Future
/** Spray HTTP routes bridging to the source and markov actors.
  *
  * Exposes:
  *  - POST /api/corpus                 — forwards the posted [[Corpus]] to `source`
  *  - GET  /api/sentence/{w1}/{w2}     — asks `markov` for a sentence seeded by two words
  */
class SourceService(source: ActorRef, markov: ActorRef)(implicit executionContext: ExecutionContext)
  extends Directives with SprayJsonSupport {
  import actors.SourceActor._
  import actors.MarkovActor._
  import akka.pattern.ask
  import scala.concurrent.duration._
  // Upper bound on how long each actor `ask` may take before failing the request.
  implicit val timeout = Timeout(2.seconds)
  val route = rejectEmptyResponse {
    pathPrefix("api") {
      path("corpus") {
        post {
          entity(as[Corpus]) { corpus =>
            complete {
              // None from the actor becomes an empty response, rejected above.
              (source ask corpus).mapTo[Option[OK]]
            }
          }
        }
      } ~ path("sentence" / Segment / Segment) { (word1, word2) =>
        get {
          respondWithMediaType(`application/json`) {
            complete {
              // NOTE(review): mapTo[Future[String]] yields Future[Future[String]];
              // this only works if MarkovActor replies with a Future value rather
              // than a plain String — confirm the actor's reply type.
              (markov ask Get(List(word1, word2), 100)).mapTo[Future[String]]
            }
          }
        }
      }
    }
  }
}
| opyate/beingjohnmarkovic | src/main/scala/api/SourceService.scala | Scala | mit | 1,394 |
import scala.concurrent.ExecutionContextExecutor
package object services {
  // Default ExecutionContext for the services package, backed by the Scala.js
  // single-threaded event queue (there is no JVM thread pool in the browser).
  implicit val ec: ExecutionContextExecutor = scalajs.concurrent.JSExecutionContext.Implicits.queue
}
| Daxten/BayTemplate-ScalaJs | web/src/main/scala/services/package.scala | Scala | apache-2.0 | 178 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.tpe
import slamdata.Predef.Option
import quasar.contrib.matryoshka.PatternArbitrary
import quasar.pkg.tests._
import scala.Predef.$conforms
import matryoshka.Delay
import scalaz._
import scalaz.scalacheck.ScalazArbitrary._
/** ScalaCheck generators for the [[TypeF]] pattern functor, used to exercise
  * type-algebra laws in tests.
  */
trait TypeFArbitrary {
  import TypeF._, SimpleTypeArbitrary._

  // Delay lets matryoshka derive Arbitrary for any fixpoint of TypeF[J, ?].
  implicit def arbitraryTypeF[J: Arbitrary: Order]: Delay[Arbitrary, TypeF[J, ?]] =
    new PatternArbitrary[TypeF[J, ?]] {
      // Nullary constructors: bottom, top, simple primitives and constants.
      def leafGenerators[A] =
        uniformly(
          Gen.const( bottom[J, A]() ),
          Gen.const( top[J, A]() ),
          arbitrary[SimpleType] ^^ (simple[J, A](_)),
          arbitrary[J] ^^ ( const[J, A](_)))

      // Recursive constructors: arrays, maps (with optional unknown-key entry)
      // and coproducts, each built from smaller generated values.
      def branchGenerators[A: Arbitrary] =
        uniformly(
          arbitrary[IList[A] \/ A] ^^ ( arr[J, A](_)),
          arbitrary[(IMap[J, A], Option[(A, A)])] ^^ ( map[J, A](_)),
          arbitrary[(A, A)] ^^ (coproduct[J, A](_)))
    }
}

object TypeFArbitrary extends TypeFArbitrary
| jedesah/Quasar | frontend/src/test/scala/quasar/tpe/TypeFArbitrary.scala | Scala | apache-2.0 | 1,642 |
/** Copyright 2015 TappingStone, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prediction.e2
/** Collection of engine libraries that have no dependency on PredictionIO.
  * Intentionally empty: the package object exists to carry this documentation. */
package object engine {}

/** Collection of evaluation libraries that have no dependency on PredictionIO.
  * Intentionally empty: the package object exists to carry this documentation. */
package object evaluation {}
| ch33hau/PredictionIO | e2/src/main/scala/io/prediction/e2/package.scala | Scala | apache-2.0 | 855 |
package is.launaskil.client
import utest._
import utest.framework.TestSuite
object MainTest extends TestSuite {
  // Placeholder suite: no test cases have been defined yet.
  val tests = TestSuite{
  }
}
| olafurpg/slick-codegen-scalajs | client/src/test/scala/is/launaskil/client/MainTest.scala | Scala | mit | 147 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ml.dmlc.mxnet.init
import ml.dmlc.mxnet.init.Base._
import scala.collection.mutable.ListBuffer
/** JNI bindings to the native MXNet initialization library.
  *
  * Method names and signatures must match the generated native stubs exactly —
  * do not rename or reorder parameters. Output parameters (Ref* wrappers and
  * ListBuffers) are filled in by the native side; each call returns a native
  * status code (presumably 0 on success — confirm against the C API).
  */
class LibInfo {
  // Lists handles for every registered atomic symbol creator.
  @native def mxSymbolListAtomicSymbolCreators(symbolList: ListBuffer[SymbolHandle]): Int
  // Retrieves name, description and argument metadata for one symbol creator.
  @native def mxSymbolGetAtomicSymbolInfo(handle: SymbolHandle,
                                          name: RefString,
                                          desc: RefString,
                                          numArgs: RefInt,
                                          argNames: ListBuffer[String],
                                          argTypes: ListBuffer[String],
                                          argDescs: ListBuffer[String],
                                          keyVarNumArgs: RefString): Int
  // Lists the names of all registered operators.
  @native def mxListAllOpNames(names: ListBuffer[String]): Int
  // Resolves an operator name to its handle.
  @native def nnGetOpHandle(opName: String, opHandle: RefLong): Int
}
| likelyzhao/mxnet | scala-package/init/src/main/scala/ml/dmlc/mxnet/init/LibInfo.scala | Scala | apache-2.0 | 1,677 |
package com.fustigatedcat.heystk.ui.lib
import com.fustigatedcat.heystk.ui.dao.UserDAO
import com.fustigatedcat.heystk.ui.snippet.LoggedInUser
import net.liftweb.http.RequestVar
// Request-scoped cache of the logged-in user's privileges, computed once per
// HTTP request; an anonymous request yields an empty list.
object UserPrivileges extends RequestVar[List[String]]({
  LoggedInUser.is.map(UserDAO.getPrivilegesForUser).getOrElse(List())
})
/** Privilege checks against the request-scoped [[UserPrivileges]]. */
object Authorization {

  /** True iff the current user holds the given privilege. */
  def userHasPrivilege(priv : String) : Boolean =
    UserPrivileges.is.contains(priv)

  /** Evaluates `onTrue` when the user holds `priv`, `onFalse` otherwise.
    * Both branches are by-name, so only the chosen one is evaluated. */
  def userAuthorized[T](priv : String, onTrue : => T, onFalse : => T) : T =
    if (userHasPrivilege(priv)) onTrue else onFalse

  /** Evaluates `onTrue` when the user holds at least one of `privs`,
    * `onFalse` otherwise. Only the chosen branch is evaluated. */
  def userAuthorized[T](privs : List[String], onTrue : => T, onFalse : => T) : T =
    if (privs.exists(userHasPrivilege)) onTrue else onFalse
}
| fustigatedcat/heystk | system-ui/src/main/scala/com/fustigatedcat/heystk/ui/lib/Authorization.scala | Scala | gpl-3.0 | 770 |
Subsets and Splits
Filtered Scala Code Snippets
	This query filters the dataset and retrieves a sample of code snippets matching specific criteria, giving a basic overview of the dataset's contents without deeper analysis.