code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package nl.dekkr.feedfrenzy.backend.extractor.action
import nl.dekkr.feedfrenzy.backend.model.{Split, DateParser}
import org.scalatest.FlatSpecLike
/**
 * Tests for [[SplitAction]]: splitting the content of an input variable into
 * a list of fragments matched by a CSS selector pattern.
 */
class SplitActionTest extends FlatSpecLike {

  // Single action instance shared by all tests (no per-test state is needed).
  val SA = new SplitAction()

  // Fixture: four anchors, each matched by the "div > a" selector.
  val testHtml =
    """
<div>
<div><a href="#1">1</a></div>
<div><a href="#2">2</a></div>
<div><a href="#3">3</a></div>
<div><a href="#4">4</a></div>
</div>
""".stripMargin

  // NOTE: FlatSpec already inserts the word "should" between the subject and
  // the test name, so the names below must not start with "should" themselves
  // (the originals rendered as "SplitAction should should create ...").
  "SplitAction" should "create a list of matches" in {
    val vars = Map("input" -> List(testHtml))
    val actionSplit = new Split(inputVariable = Some("input"), outputVariable = Some("split"), selectorPattern = "div > a")
    assert(SA.execute(vars, actionSplit) == List(
      "<a href=\\"#1\\">1</a>",
      "<a href=\\"#2\\">2</a>",
      "<a href=\\"#3\\">3</a>",
      "<a href=\\"#4\\">4</a>")
    )
  }

  it should "handle empty content and pattern" in {
    val vars = Map("input" -> List(testHtml))
    // An empty selector pattern yields no matches.
    val actionSplitNoPattern = new Split(inputVariable = Some("input"), outputVariable = Some("split"), selectorPattern = "")
    assert(SA.execute(vars, actionSplitNoPattern).isEmpty)
    // A missing input variable yields no matches.
    val actionSplitNoContent = new Split(inputVariable = None, outputVariable = Some("split"), selectorPattern = "div > a")
    assert(SA.execute(vars, actionSplitNoContent).isEmpty)
  }
}
| dekkr/feedfrenzy-backend | src/test/scala/nl/dekkr/feedfrenzy/backend/extractor/action/SplitActionTest.scala | Scala | mit | 1,348 |
/* Title: Pure/General/output.scala
Author: Makarius
Isabelle output channels.
*/
package isabelle
/**
 * Isabelle output channels: plain messages, warnings ("### " prefix) and
 * error messages ("*** " prefix), printed to stdout or stderr.
 */
object Output
{
  /** Strip YXML markup (and report messages) from a protocol message,
      falling back to the raw text when parsing fails. */
  def clean_yxml(msg: String): String =
    try { XML.content(Protocol_Message.clean_reports(YXML.parse_body(msg))) }
    catch { case ERROR(_) => msg }

  def writeln_text(msg: String): String = clean_yxml(msg)

  /** Prefix every line of the cleaned message with "### ". */
  def warning_text(msg: String): String =
    cat_lines(split_lines(clean_yxml(msg)).map("### " + _))

  /** Prefix every line of the cleaned message with "*** ". */
  def error_message_text(msg: String): String =
    cat_lines(split_lines(clean_yxml(msg)).map("*** " + _))

  // Shared channel selection: stderr by default, stdout on request.
  // (Factored out of the three writers below, which previously repeated it.)
  private def output_text(text: String, stdout: Boolean): Unit =
    if (stdout) Console.print(text + "\\n") else Console.err.print(text + "\\n")

  // NOTE: explicit ": Unit =" replaces the deprecated procedure syntax
  // "def f(...) { ... }" used previously; behavior is unchanged.
  def writeln(msg: String, stdout: Boolean = false): Unit =
    if (msg != "") output_text(writeln_text(msg), stdout)

  def warning(msg: String, stdout: Boolean = false): Unit =
    if (msg != "") output_text(warning_text(msg), stdout)

  def error_message(msg: String, stdout: Boolean = false): Unit =
    if (msg != "") output_text(error_message_text(msg), stdout)
}
| larsrh/libisabelle | modules/pide/2019-RC4/src/main/scala/General/output.scala | Scala | apache-2.0 | 1,180 |
package de.htwg.zeta.common.format.project
import de.htwg.zeta.common.models.project.concept.elements.AttributeValue
import de.htwg.zeta.common.models.project.concept.elements.AttributeValue.BoolValue
import de.htwg.zeta.common.models.project.concept.elements.AttributeValue.DoubleValue
import de.htwg.zeta.common.models.project.concept.elements.AttributeValue.EnumValue
import de.htwg.zeta.common.models.project.concept.elements.AttributeValue.IntValue
import de.htwg.zeta.common.models.project.concept.elements.AttributeValue.StringValue
import play.api.libs.json.JsError
import play.api.libs.json.JsObject
import play.api.libs.json.Json
import play.api.libs.json.JsResult
import play.api.libs.json.JsValue
import play.api.libs.json.OFormat
import play.api.libs.json.Reads
import play.api.libs.json.Writes
/**
 * JSON (de)serialization for [[AttributeValue]]. Every value is encoded as an
 * object carrying a type discriminator plus its payload field(s); all field
 * and discriminator names are configurable through the constructor.
 */
@SuppressWarnings(Array("org.wartremover.warts.DefaultArguments"))
class AttributeValueFormat(
    sType: String = "type",
    sString: String = "string",
    sValue: String = "value",
    sBoolean: String = "boolean",
    sInt: String = "int",
    sDouble: String = "double",
    sEnum: String = "enum",
    sEnumName: String = "enumName",
    sValueName: String = "valueName"
) extends OFormat[AttributeValue] {

  /** Derived format for a map from names to lists of attribute values. */
  val asMapOfLists: OFormat[Map[String, List[AttributeValue]]] =
    OFormat(Reads.map(Reads.list(this)), Writes.map(Writes.list(this)))

  override def writes(attributeValue: AttributeValue): JsObject = attributeValue match {
    case StringValue(v) => Json.obj(sType -> sString, sValue -> v)
    case BoolValue(v) => Json.obj(sType -> sBoolean, sValue -> v)
    case IntValue(v) => Json.obj(sType -> sInt, sValue -> v)
    case DoubleValue(v) => Json.obj(sType -> sDouble, sValue -> v)
    case EnumValue(enumName, valueName) => Json.obj(sType -> sEnum, sEnumName -> enumName, sValueName -> valueName)
  }

  override def reads(json: JsValue): JsResult[AttributeValue] = {
    (json \\ sType).validate[String].flatMap { discriminator =>
      if (discriminator == sString) (json \\ sValue).validate[String].map(StringValue)
      else if (discriminator == sBoolean) (json \\ sValue).validate[Boolean].map(BoolValue)
      else if (discriminator == sInt) (json \\ sValue).validate[Int].map(IntValue)
      else if (discriminator == sDouble) (json \\ sValue).validate[Double].map(DoubleValue)
      else if (discriminator == sEnum) readEnum(json)
      else JsError(s"Unknown AttributeType in AttributeValue: $discriminator")
    }
  }

  // Enum values carry two payload fields (enum name + member name) instead of one.
  private def readEnum(json: JsValue): JsResult[EnumValue] =
    (json \\ sEnumName).validate[String].flatMap { enumName =>
      (json \\ sValueName).validate[String].map(valueName => EnumValue(enumName, valueName))
    }
}
| Zeta-Project/zeta | api/common/src/main/scala/de/htwg/zeta/common/format/project/AttributeValueFormat.scala | Scala | bsd-2-clause | 2,718 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import scala.collection.JavaConversions._
import scala.collection.mutable
import com.google.common.base.Objects
import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hive.common.StatsSetupConst
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.metastore.Warehouse
import org.apache.hadoop.hive.metastore.api.FieldSchema
import org.apache.hadoop.hive.ql.metadata._
import org.apache.hadoop.hive.ql.plan.TableDesc
import org.apache.spark.Logging
import org.apache.spark.sql.catalyst.analysis.{Catalog, MultiInstanceRelation, OverrideCatalog}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.logical
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.catalyst.{InternalRow, SqlParser, TableIdentifier}
import org.apache.spark.sql.execution.datasources.parquet.ParquetRelation
import org.apache.spark.sql.execution.datasources.{CreateTableUsingAsSelect, LogicalRelation, Partition => ParquetPartition, PartitionSpec, ResolvedDataSource}
import org.apache.spark.sql.execution.{FileRelation, datasources}
import org.apache.spark.sql.hive.client._
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types._
import org.apache.spark.sql.{AnalysisException, SQLContext, SaveMode}
// Storage-format descriptor for a Hive table: input/output format and SerDe
// class names. Each field is optional (None = unspecified).
private[hive] case class HiveSerDe(
inputFormat: Option[String] = None,
outputFormat: Option[String] = None,
serde: Option[String] = None)
private[hive] object HiveSerDe {

  /**
   * Look up the Hive SerDe information for a data source abbreviation or
   * class name.
   *
   * @param source format abbreviation (SequenceFile, RCFile, ORC, PARQUET,
   *               case insensitive) or a Spark SQL data source class name
   * @param hiveConf Hive configuration (supplies the default RCFile SerDe)
   * @return the matching [[HiveSerDe]], or None for unknown sources
   */
  def sourceToSerDe(source: String, hiveConf: HiveConf): Option[HiveSerDe] = {
    // Fold the built-in Spark SQL data source class names onto their short keys.
    val key = source.toLowerCase match {
      case s if s.startsWith("org.apache.spark.sql.parquet") => "parquet"
      case s if s.startsWith("org.apache.spark.sql.orc") => "orc"
      case s => s
    }
    // Dispatch directly on the normalized key instead of building a lookup map.
    key match {
      case "sequencefile" =>
        Some(HiveSerDe(
          inputFormat = Option("org.apache.hadoop.mapred.SequenceFileInputFormat"),
          outputFormat = Option("org.apache.hadoop.mapred.SequenceFileOutputFormat")))
      case "rcfile" =>
        Some(HiveSerDe(
          inputFormat = Option("org.apache.hadoop.hive.ql.io.RCFileInputFormat"),
          outputFormat = Option("org.apache.hadoop.hive.ql.io.RCFileOutputFormat"),
          serde = Option(hiveConf.getVar(HiveConf.ConfVars.HIVEDEFAULTRCFILESERDE))))
      case "orc" =>
        Some(HiveSerDe(
          inputFormat = Option("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"),
          outputFormat = Option("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"),
          serde = Option("org.apache.hadoop.hive.ql.io.orc.OrcSerde")))
      case "parquet" =>
        Some(HiveSerDe(
          inputFormat = Option("org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"),
          outputFormat = Option("org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"),
          serde = Option("org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe")))
      case _ => None
    }
  }
}
/**
 * Spark SQL catalog backed by the Hive metastore. Resolves table identifiers
 * into logical plans, caches resolved data source tables, persists data
 * source (CREATE TABLE ... USING) tables into the metastore, and hosts the
 * analysis rules ParquetConversions, CreateTables and PreInsertionCasts.
 */
private[hive] class HiveMetastoreCatalog(val client: ClientInterface, hive: HiveContext)
extends Catalog with Logging {
val conf = hive.conf
/** Usages should lock on `this`. */
protected[hive] lazy val hiveWarehouse = new Warehouse(hive.hiveconf)
// TODO: Use this everywhere instead of tuples or databaseName, tableName,.
/** A fully qualified identifier for a table (i.e., database.tableName) */
case class QualifiedTableName(database: String, name: String) {
def toLowerCase: QualifiedTableName = QualifiedTableName(database.toLowerCase, name.toLowerCase)
}
/** A cache of Spark SQL data source tables that have been accessed. */
protected[hive] val cachedDataSourceTables: LoadingCache[QualifiedTableName, LogicalPlan] = {
val cacheLoader = new CacheLoader[QualifiedTableName, LogicalPlan]() {
override def load(in: QualifiedTableName): LogicalPlan = {
logDebug(s"Creating new cached data source for $in")
val table = client.getTable(in.database, in.name)
// Reassemble a schema JSON string that was split across multiple
// "spark.sql.sources.schema.part.N" metastore properties.
def schemaStringFromParts: Option[String] = {
table.properties.get("spark.sql.sources.schema.numParts").map { numParts =>
val parts = (0 until numParts.toInt).map { index =>
val part = table.properties.get(s"spark.sql.sources.schema.part.$index").orNull
if (part == null) {
throw new AnalysisException(
"Could not read schema from the metastore because it is corrupted " +
s"(missing part $index of the schema, $numParts parts are expected).")
}
part
}
// Stick all parts back to a single schema string.
parts.mkString
}
}
// Originally, we used spark.sql.sources.schema to store the schema of a data source table.
// After SPARK-6024, we removed this flag.
// Although we are not using spark.sql.sources.schema any more, we need to still support.
val schemaString =
table.properties.get("spark.sql.sources.schema").orElse(schemaStringFromParts)
val userSpecifiedSchema =
schemaString.map(s => DataType.fromJson(s).asInstanceOf[StructType])
// We only need names at here since userSpecifiedSchema we loaded from the metastore
// contains partition columns. We can always get datatypes of partitioning columns
// from userSpecifiedSchema.
val partitionColumns = table.partitionColumns.map(_.name)
// It does not appear that the ql client for the metastore has a way to enumerate all the
// SerDe properties directly...
val options = table.serdeProperties
val resolvedRelation =
ResolvedDataSource(
hive,
userSpecifiedSchema,
partitionColumns.toArray,
table.properties("spark.sql.sources.provider"),
options)
LogicalRelation(resolvedRelation.relation)
}
}
CacheBuilder.newBuilder().maximumSize(1000).build(cacheLoader)
}
override def refreshTable(tableIdent: TableIdentifier): Unit = {
// refreshTable does not eagerly reload the cache. It just invalidate the cache.
// Next time when we use the table, it will be populated in the cache.
// Since we also cache ParquetRelations converted from Hive Parquet tables and
// adding converted ParquetRelations into the cache is not defined in the load function
// of the cache (instead, we add the cache entry in convertToParquetRelation),
// it is better at here to invalidate the cache to avoid confusing waring logs from the
// cache loader (e.g. cannot find data source provider, which is only defined for
// data source table.).
invalidateTable(tableIdent)
}
// Drops the (database, table) entry from the data source table cache.
def invalidateTable(tableIdent: TableIdentifier): Unit = {
val databaseName = tableIdent.database.getOrElse(client.currentDatabase)
val tableName = tableIdent.table
cachedDataSourceTables.invalidate(QualifiedTableName(databaseName, tableName).toLowerCase)
}
// Database/table names are normalized to lower case throughout this catalog.
val caseSensitive: Boolean = false
/**
 * Creates a data source table (a table created with USING clause) in Hive's metastore.
 * Returns true when the table has been created. Otherwise, false.
 */
// TODO: Remove this in SPARK-10104.
def createDataSourceTable(
tableName: String,
userSpecifiedSchema: Option[StructType],
partitionColumns: Array[String],
provider: String,
options: Map[String, String],
isExternal: Boolean): Unit = {
createDataSourceTable(
SqlParser.parseTableIdentifier(tableName),
userSpecifiedSchema,
partitionColumns,
provider,
options,
isExternal)
}
// Persists a data source table. Tries a Hive-compatible layout first (when a
// SerDe is known and the relation has a single, unpartitioned path); falls
// back to the Spark SQL specific layout otherwise or on failure.
def createDataSourceTable(
tableIdent: TableIdentifier,
userSpecifiedSchema: Option[StructType],
partitionColumns: Array[String],
provider: String,
options: Map[String, String],
isExternal: Boolean): Unit = {
val (dbName, tblName) = {
val database = tableIdent.database.getOrElse(client.currentDatabase)
processDatabaseAndTableName(database, tableIdent.table)
}
val tableProperties = new mutable.HashMap[String, String]
tableProperties.put("spark.sql.sources.provider", provider)
// Saves optional user specified schema. Serialized JSON schema string may be too long to be
// stored into a single metastore SerDe property. In this case, we split the JSON string and
// store each part as a separate SerDe property.
userSpecifiedSchema.foreach { schema =>
val threshold = conf.schemaStringLengthThreshold
val schemaJsonString = schema.json
// Split the JSON string.
val parts = schemaJsonString.grouped(threshold).toSeq
tableProperties.put("spark.sql.sources.schema.numParts", parts.size.toString)
parts.zipWithIndex.foreach { case (part, index) =>
tableProperties.put(s"spark.sql.sources.schema.part.$index", part)
}
}
val metastorePartitionColumns = userSpecifiedSchema.map { schema =>
val fields = partitionColumns.map(col => schema(col))
fields.map { field =>
HiveColumn(
name = field.name,
hiveType = HiveMetastoreTypes.toMetastoreType(field.dataType),
comment = "")
}.toSeq
}.getOrElse {
if (partitionColumns.length > 0) {
// The table does not have a specified schema, which means that the schema will be inferred
// when we load the table. So, we are not expecting partition columns and we will discover
// partitions when we load the table. However, if there are specified partition columns,
// we simply ignore them and provide a warning message.
logWarning(
s"The schema and partitions of table $tableIdent will be inferred when it is loaded. " +
s"Specified partition columns (${partitionColumns.mkString(",")}) will be ignored.")
}
Seq.empty[HiveColumn]
}
val tableType = if (isExternal) {
tableProperties.put("EXTERNAL", "TRUE")
ExternalTable
} else {
tableProperties.put("EXTERNAL", "FALSE")
ManagedTable
}
val maybeSerDe = HiveSerDe.sourceToSerDe(provider, hive.hiveconf)
val dataSource = ResolvedDataSource(
hive, userSpecifiedSchema, partitionColumns, provider, options)
// Fallback layout: schema/provider live only in table properties, which
// Hive itself cannot interpret.
def newSparkSQLSpecificMetastoreTable(): HiveTable = {
HiveTable(
specifiedDatabase = Option(dbName),
name = tblName,
schema = Seq.empty,
partitionColumns = metastorePartitionColumns,
tableType = tableType,
properties = tableProperties.toMap,
serdeProperties = options)
}
// Hive-readable layout: real columns plus SerDe/input/output formats.
def newHiveCompatibleMetastoreTable(relation: HadoopFsRelation, serde: HiveSerDe): HiveTable = {
def schemaToHiveColumn(schema: StructType): Seq[HiveColumn] = {
schema.map { field =>
HiveColumn(
name = field.name,
hiveType = HiveMetastoreTypes.toMetastoreType(field.dataType),
comment = "")
}
}
val partitionColumns = schemaToHiveColumn(relation.partitionColumns)
val dataColumns = schemaToHiveColumn(relation.schema).filterNot(partitionColumns.contains)
HiveTable(
specifiedDatabase = Option(dbName),
name = tblName,
schema = dataColumns,
partitionColumns = partitionColumns,
tableType = tableType,
properties = tableProperties.toMap,
serdeProperties = options,
location = Some(relation.paths.head),
viewText = None, // TODO We need to place the SQL string here.
inputFormat = serde.inputFormat,
outputFormat = serde.outputFormat,
serde = serde.serde)
}
// TODO: Support persisting partitioned data source relations in Hive compatible format
val qualifiedTableName = tableIdent.quotedString
// NOTE(review): "hiveCompitiableTable" is a pre-existing typo for
// "hiveCompatibleTable"; it is local-only, so renaming later is safe.
val (hiveCompitiableTable, logMessage) = (maybeSerDe, dataSource.relation) match {
case (Some(serde), relation: HadoopFsRelation)
if relation.paths.length == 1 && relation.partitionColumns.isEmpty =>
val hiveTable = newHiveCompatibleMetastoreTable(relation, serde)
val message =
s"Persisting data source relation $qualifiedTableName with a single input path " +
s"into Hive metastore in Hive compatible format. Input path: ${relation.paths.head}."
(Some(hiveTable), message)
case (Some(serde), relation: HadoopFsRelation) if relation.partitionColumns.nonEmpty =>
val message =
s"Persisting partitioned data source relation $qualifiedTableName into " +
"Hive metastore in Spark SQL specific format, which is NOT compatible with Hive. " +
"Input path(s): " + relation.paths.mkString("\\n", "\\n", "")
(None, message)
case (Some(serde), relation: HadoopFsRelation) =>
val message =
s"Persisting data source relation $qualifiedTableName with multiple input paths into " +
"Hive metastore in Spark SQL specific format, which is NOT compatible with Hive. " +
s"Input paths: " + relation.paths.mkString("\\n", "\\n", "")
(None, message)
case (Some(serde), _) =>
val message =
s"Data source relation $qualifiedTableName is not a " +
s"${classOf[HadoopFsRelation].getSimpleName}. Persisting it into Hive metastore " +
"in Spark SQL specific format, which is NOT compatible with Hive."
(None, message)
case _ =>
val message =
s"Couldn't find corresponding Hive SerDe for data source provider $provider. " +
s"Persisting data source relation $qualifiedTableName into Hive metastore in " +
s"Spark SQL specific format, which is NOT compatible with Hive."
(None, message)
}
(hiveCompitiableTable, logMessage) match {
case (Some(table), message) =>
// We first try to save the metadata of the table in a Hive compatiable way.
// If Hive throws an error, we fall back to save its metadata in the Spark SQL
// specific way.
try {
logInfo(message)
client.createTable(table)
} catch {
case throwable: Throwable =>
val warningMessage =
s"Could not persist $qualifiedTableName in a Hive compatible way. Persisting " +
s"it into Hive metastore in Spark SQL specific format."
logWarning(warningMessage, throwable)
val sparkSqlSpecificTable = newSparkSQLSpecificMetastoreTable()
client.createTable(sparkSqlSpecificTable)
}
case (None, message) =>
logWarning(message)
val hiveTable = newSparkSQLSpecificMetastoreTable()
client.createTable(hiveTable)
}
}
def hiveDefaultTableFilePath(tableName: String): String = {
hiveDefaultTableFilePath(SqlParser.parseTableIdentifier(tableName))
}
/** Default warehouse location for a table: <database location>/<table name>. */
def hiveDefaultTableFilePath(tableIdent: TableIdentifier): String = {
// Code based on: hiveWarehouse.getTablePath(currentDatabase, tableName)
val database = tableIdent.database.getOrElse(client.currentDatabase)
new Path(
new Path(client.getDatabase(database).location),
tableIdent.table.toLowerCase).toString
}
def tableExists(tableIdentifier: Seq[String]): Boolean = {
val tableIdent = processTableIdentifier(tableIdentifier)
// The identifier is [..., database, table]; the second-to-last element,
// when present, is the database name.
val databaseName =
tableIdent
.lift(tableIdent.size - 2)
.getOrElse(client.currentDatabase)
val tblName = tableIdent.last
client.getTableOption(databaseName, tblName).isDefined
}
// Resolves an identifier to a data source table (via cache), a Hive view
// (re-parsed from its text), or a plain MetastoreRelation.
def lookupRelation(
tableIdentifier: Seq[String],
alias: Option[String]): LogicalPlan = {
val tableIdent = processTableIdentifier(tableIdentifier)
val databaseName = tableIdent.lift(tableIdent.size - 2).getOrElse(
client.currentDatabase)
val tblName = tableIdent.last
val table = client.getTable(databaseName, tblName)
if (table.properties.get("spark.sql.sources.provider").isDefined) {
val dataSourceTable =
cachedDataSourceTables(QualifiedTableName(databaseName, tblName).toLowerCase)
// Then, if alias is specified, wrap the table with a Subquery using the alias.
// Otherwise, wrap the table with a Subquery using the table name.
val withAlias =
alias.map(a => Subquery(a, dataSourceTable)).getOrElse(
Subquery(tableIdent.last, dataSourceTable))
withAlias
} else if (table.tableType == VirtualView) {
val viewText = table.viewText.getOrElse(sys.error("Invalid view without text."))
alias match {
// because hive use things like `_c0` to build the expanded text
// currently we cannot support view from "create view v1(c1) as ..."
case None => Subquery(table.name, HiveQl.createPlan(viewText))
case Some(aliasText) => Subquery(aliasText, HiveQl.createPlan(viewText))
}
} else {
MetastoreRelation(databaseName, tblName, alias)(table)(hive)
}
}
private def convertToParquetRelation(metastoreRelation: MetastoreRelation): LogicalRelation = {
val metastoreSchema = StructType.fromAttributes(metastoreRelation.output)
val mergeSchema = hive.convertMetastoreParquetWithSchemaMerging
// NOTE: Instead of passing Metastore schema directly to `ParquetRelation`, we have to
// serialize the Metastore schema to JSON and pass it as a data source option because of the
// evil case insensitivity issue, which is reconciled within `ParquetRelation`.
val parquetOptions = Map(
ParquetRelation.METASTORE_SCHEMA -> metastoreSchema.json,
ParquetRelation.MERGE_SCHEMA -> mergeSchema.toString)
val tableIdentifier =
QualifiedTableName(metastoreRelation.databaseName, metastoreRelation.tableName)
// Returns a previously converted ParquetRelation only if its paths, schema,
// and partition spec still match the metastore's view of the table;
// otherwise the stale entry is evicted.
def getCached(
tableIdentifier: QualifiedTableName,
pathsInMetastore: Seq[String],
schemaInMetastore: StructType,
partitionSpecInMetastore: Option[PartitionSpec]): Option[LogicalRelation] = {
cachedDataSourceTables.getIfPresent(tableIdentifier) match {
case null => None // Cache miss
case logical @ LogicalRelation(parquetRelation: ParquetRelation, _) =>
// If we have the same paths, same schema, and same partition spec,
// we will use the cached Parquet Relation.
val useCached =
parquetRelation.paths.toSet == pathsInMetastore.toSet &&
logical.schema.sameType(metastoreSchema) &&
parquetRelation.partitionSpec == partitionSpecInMetastore.getOrElse {
PartitionSpec(StructType(Nil), Array.empty[datasources.Partition])
}
if (useCached) {
Some(logical)
} else {
// If the cached relation is not updated, we invalidate it right away.
cachedDataSourceTables.invalidate(tableIdentifier)
None
}
case other =>
logWarning(
s"${metastoreRelation.databaseName}.${metastoreRelation.tableName} should be stored " +
s"as Parquet. However, we are getting a $other from the metastore cache. " +
s"This cached entry will be invalidated.")
cachedDataSourceTables.invalidate(tableIdentifier)
None
}
}
val result = if (metastoreRelation.hiveQlTable.isPartitioned) {
val partitionSchema = StructType.fromAttributes(metastoreRelation.partitionKeys)
val partitionColumnDataTypes = partitionSchema.map(_.dataType)
// We're converting the entire table into ParquetRelation, so predicates to Hive metastore
// are empty.
val partitions = metastoreRelation.getHiveQlPartitions().map { p =>
val location = p.getLocation
val values = InternalRow.fromSeq(p.getValues.zip(partitionColumnDataTypes).map {
case (rawValue, dataType) => Cast(Literal(rawValue), dataType).eval(null)
})
ParquetPartition(values, location)
}
val partitionSpec = PartitionSpec(partitionSchema, partitions)
val paths = partitions.map(_.path)
val cached = getCached(tableIdentifier, paths, metastoreSchema, Some(partitionSpec))
val parquetRelation = cached.getOrElse {
val created = LogicalRelation(
new ParquetRelation(
paths.toArray, None, Some(partitionSpec), parquetOptions)(hive))
cachedDataSourceTables.put(tableIdentifier, created)
created
}
parquetRelation
} else {
val paths = Seq(metastoreRelation.hiveQlTable.getDataLocation.toString)
val cached = getCached(tableIdentifier, paths, metastoreSchema, None)
val parquetRelation = cached.getOrElse {
val created = LogicalRelation(
new ParquetRelation(paths.toArray, None, None, parquetOptions)(hive))
cachedDataSourceTables.put(tableIdentifier, created)
created
}
parquetRelation
}
result.copy(expectedOutputAttributes = Some(metastoreRelation.output))
}
override def getTables(databaseName: Option[String]): Seq[(String, Boolean)] = {
val db = databaseName.getOrElse(client.currentDatabase)
// The boolean flags whether the table is temporary; metastore tables never are.
client.listTables(db).map(tableName => (tableName, false))
}
protected def processDatabaseAndTableName(
databaseName: Option[String],
tableName: String): (Option[String], String) = {
if (!caseSensitive) {
(databaseName.map(_.toLowerCase), tableName.toLowerCase)
} else {
(databaseName, tableName)
}
}
protected def processDatabaseAndTableName(
databaseName: String,
tableName: String): (String, String) = {
if (!caseSensitive) {
(databaseName.toLowerCase, tableName.toLowerCase)
} else {
(databaseName, tableName)
}
}
/**
 * When scanning or writing to non-partitioned Metastore Parquet tables, convert them to Parquet
 * data source relations for better performance.
 */
object ParquetConversions extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = {
if (!plan.resolved || plan.analyzed) {
return plan
}
plan transformUp {
// Write path
case InsertIntoTable(r: MetastoreRelation, partition, child, overwrite, ifNotExists)
// Inserting into partitioned table is not supported in Parquet data source (yet).
if !r.hiveQlTable.isPartitioned && hive.convertMetastoreParquet &&
r.tableDesc.getSerdeClassName.toLowerCase.contains("parquet") =>
val parquetRelation = convertToParquetRelation(r)
InsertIntoTable(parquetRelation, partition, child, overwrite, ifNotExists)
// Write path
case InsertIntoHiveTable(r: MetastoreRelation, partition, child, overwrite, ifNotExists)
// Inserting into partitioned table is not supported in Parquet data source (yet).
if !r.hiveQlTable.isPartitioned && hive.convertMetastoreParquet &&
r.tableDesc.getSerdeClassName.toLowerCase.contains("parquet") =>
val parquetRelation = convertToParquetRelation(r)
InsertIntoTable(parquetRelation, partition, child, overwrite, ifNotExists)
// Read path
case relation: MetastoreRelation if hive.convertMetastoreParquet &&
relation.tableDesc.getSerdeClassName.toLowerCase.contains("parquet") =>
val parquetRelation = convertToParquetRelation(relation)
Subquery(relation.alias.getOrElse(relation.tableName), parquetRelation)
}
}
}
/**
 * Creates any tables required for query execution.
 * For example, because of a CREATE TABLE X AS statement.
 */
object CreateTables extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
// Wait until children are resolved.
case p: LogicalPlan if !p.childrenResolved => p
case p: LogicalPlan if p.resolved => p
case p @ CreateTableAsSelect(table, child, allowExisting) =>
// If no schema was given, derive it from the query's output attributes.
val schema = if (table.schema.nonEmpty) {
table.schema
} else {
child.output.map {
attr => new HiveColumn(
attr.name,
HiveMetastoreTypes.toMetastoreType(attr.dataType), null)
}
}
val desc = table.copy(schema = schema)
if (hive.convertCTAS && table.serde.isEmpty) {
// Do the conversion when spark.sql.hive.convertCTAS is true and the query
// does not specify any storage format (file format and storage handler).
if (table.specifiedDatabase.isDefined) {
throw new AnalysisException(
"Cannot specify database name in a CTAS statement " +
"when spark.sql.hive.convertCTAS is set to true.")
}
val mode = if (allowExisting) SaveMode.Ignore else SaveMode.ErrorIfExists
CreateTableUsingAsSelect(
TableIdentifier(desc.name),
hive.conf.defaultDataSourceName,
temporary = false,
Array.empty[String],
mode,
options = Map.empty[String, String],
child
)
} else {
val desc = if (table.serde.isEmpty) {
// add default serde
table.copy(
serde = Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))
} else {
table
}
val (dbName, tblName) =
processDatabaseAndTableName(
desc.specifiedDatabase.getOrElse(client.currentDatabase), desc.name)
execution.CreateTableAsSelect(
desc.copy(
specifiedDatabase = Some(dbName),
name = tblName),
child,
allowExisting)
}
}
}
/**
 * Casts input data to correct data types according to table definition before inserting into
 * that table.
 */
object PreInsertionCasts extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.transform {
// Wait until children are resolved.
case p: LogicalPlan if !p.childrenResolved => p
case p @ InsertIntoTable(table: MetastoreRelation, _, child, _, _) =>
castChildOutput(p, table, child)
}
def castChildOutput(p: InsertIntoTable, table: MetastoreRelation, child: LogicalPlan)
: LogicalPlan = {
val childOutputDataTypes = child.output.map(_.dataType)
// Dynamic partitions (partition entries with no value) add output columns.
val numDynamicPartitions = p.partition.values.count(_.isEmpty)
val tableOutputDataTypes =
(table.attributes ++ table.partitionKeys.takeRight(numDynamicPartitions))
.take(child.output.length).map(_.dataType)
if (childOutputDataTypes == tableOutputDataTypes) {
InsertIntoHiveTable(table, p.partition, p.child, p.overwrite, p.ifNotExists)
} else if (childOutputDataTypes.size == tableOutputDataTypes.size &&
childOutputDataTypes.zip(tableOutputDataTypes)
.forall { case (left, right) => left.sameType(right) }) {
// If both types ignoring nullability of ArrayType, MapType, StructType are the same,
// use InsertIntoHiveTable instead of InsertIntoTable.
InsertIntoHiveTable(table, p.partition, p.child, p.overwrite, p.ifNotExists)
} else {
// Only do the casting when child output data types differ from table output data types.
val castedChildOutput = child.output.zip(table.output).map {
case (input, output) if input.dataType != output.dataType =>
Alias(Cast(input, output.dataType), input.name)()
case (input, _) => input
}
p.copy(child = logical.Project(castedChildOutput, child))
}
}
}
/**
 * UNIMPLEMENTED: It needs to be decided how we will persist in-memory tables to the metastore.
 * For now, if this functionality is desired mix in the in-memory [[OverrideCatalog]].
 */
override def registerTable(tableIdentifier: Seq[String], plan: LogicalPlan): Unit = {
throw new UnsupportedOperationException
}
/**
 * UNIMPLEMENTED: It needs to be decided how we will persist in-memory tables to the metastore.
 * For now, if this functionality is desired mix in the in-memory [[OverrideCatalog]].
 */
override def unregisterTable(tableIdentifier: Seq[String]): Unit = {
throw new UnsupportedOperationException
}
override def unregisterAllTables(): Unit = {}
}
/**
 * A logical plan representing insertion into Hive table.
 * This plan ignores nullability of ArrayType, MapType, StructType unlike InsertIntoTable
 * because Hive table doesn't have nullability for ARRAY, MAP, STRUCT types.
 */
private[hive] case class InsertIntoHiveTable(
table: MetastoreRelation,
partition: Map[String, Option[String]],
child: LogicalPlan,
overwrite: Boolean,
ifNotExists: Boolean)
extends LogicalPlan {
override def children: Seq[LogicalPlan] = child :: Nil
override def output: Seq[Attribute] = Seq.empty
// A partition entry whose value is None is a dynamic partition column.
val numDynamicPartitions = partition.values.count(_.isEmpty)
// This is the expected schema of the table prepared to be inserted into,
// including dynamic partition columns.
val tableOutput = table.attributes ++ table.partitionKeys.takeRight(numDynamicPartitions)
// Resolved only when every child output column's type matches the
// corresponding table column's type, ignoring nullability (sameType).
override lazy val resolved: Boolean = childrenResolved && child.output.zip(tableOutput).forall {
case (childAttr, tableAttr) => childAttr.dataType.sameType(tableAttr.dataType)
}
}
private[hive] case class MetastoreRelation
    (databaseName: String, tableName: String, alias: Option[String])
    (val table: HiveTable)
    (@transient sqlContext: SQLContext)
  extends LeafNode with MultiInstanceRelation with FileRelation {

  // Equality deliberately ignores `table` and `sqlContext`: two relations are equal when
  // they name the same table/alias and expose the same output attributes.
  override def equals(other: Any): Boolean = other match {
    case relation: MetastoreRelation =>
      databaseName == relation.databaseName &&
        tableName == relation.tableName &&
        alias == relation.alias &&
        output == relation.output
    case _ => false
  }

  // Must stay consistent with equals above (same fields).
  override def hashCode(): Int = {
    Objects.hashCode(databaseName, tableName, alias, output)
  }

  @transient val hiveQlTable: Table = {
    // We start by constructing an API table as Hive performs several important transformations
    // internally when converting an API table to a QL table.
    val tTable = new org.apache.hadoop.hive.metastore.api.Table()
    tTable.setTableName(table.name)
    tTable.setDbName(table.database)

    val tableParameters = new java.util.HashMap[String, String]()
    tTable.setParameters(tableParameters)
    table.properties.foreach { case (k, v) => tableParameters.put(k, v) }

    tTable.setTableType(table.tableType.name)

    val sd = new org.apache.hadoop.hive.metastore.api.StorageDescriptor()
    tTable.setSd(sd)
    sd.setCols(table.schema.map(c => new FieldSchema(c.name, c.hiveType, c.comment)))
    tTable.setPartitionKeys(
      table.partitionColumns.map(c => new FieldSchema(c.name, c.hiveType, c.comment)))

    // Optional storage properties are only set when present on the catalog table.
    table.location.foreach(sd.setLocation)
    table.inputFormat.foreach(sd.setInputFormat)
    table.outputFormat.foreach(sd.setOutputFormat)

    val serdeInfo = new org.apache.hadoop.hive.metastore.api.SerDeInfo
    table.serde.foreach(serdeInfo.setSerializationLib)
    sd.setSerdeInfo(serdeInfo)

    val serdeParameters = new java.util.HashMap[String, String]()
    table.serdeProperties.foreach { case (k, v) => serdeParameters.put(k, v) }
    serdeInfo.setParameters(serdeParameters)

    new Table(tTable)
  }

  @transient override lazy val statistics: Statistics = Statistics(
    sizeInBytes = {
      val totalSize = hiveQlTable.getParameters.get(StatsSetupConst.TOTAL_SIZE)
      val rawDataSize = hiveQlTable.getParameters.get(StatsSetupConst.RAW_DATA_SIZE)
      // TODO: check if this estimate is valid for tables after partition pruning.
      // NOTE: getting `totalSize` directly from params is kind of hacky, but this should be
      // relatively cheap if parameters for the table are populated into the metastore. An
      // alternative would be going through Hadoop's FileSystem API, which can be expensive if a lot
      // of RPCs are involved. Besides `totalSize`, there are also `numFiles`, `numRows`,
      // `rawDataSize` keys (see StatsSetupConst in Hive) that we can look at in the future.
      BigInt(
        // When table is external,`totalSize` is always zero, which will influence join strategy
        // so when `totalSize` is zero, use `rawDataSize` instead
        // if the size is still less than zero, we use default size
        Option(totalSize).map(_.toLong).filter(_ > 0)
          .getOrElse(Option(rawDataSize).map(_.toLong).filter(_ > 0)
          .getOrElse(sqlContext.conf.defaultSizeInBytes)))
    }
  )

  // When metastore partition pruning is turned off, we cache the list of all partitions to
  // mimic the behavior of Spark < 1.5
  lazy val allPartitions = table.getAllPartitions

  // Converts catalog partitions (optionally pruned by `predicates`) into Hive QL Partition
  // objects, rebuilding a StorageDescriptor per partition. Table-level serde properties are
  // applied first so partition-level ones can override them.
  def getHiveQlPartitions(predicates: Seq[Expression] = Nil): Seq[Partition] = {
    val rawPartitions = if (sqlContext.conf.metastorePartitionPruning) {
      table.getPartitions(predicates)
    } else {
      allPartitions
    }

    rawPartitions.map { p =>
      val tPartition = new org.apache.hadoop.hive.metastore.api.Partition
      tPartition.setDbName(databaseName)
      tPartition.setTableName(tableName)
      tPartition.setValues(p.values)

      val sd = new org.apache.hadoop.hive.metastore.api.StorageDescriptor()
      tPartition.setSd(sd)
      sd.setCols(table.schema.map(c => new FieldSchema(c.name, c.hiveType, c.comment)))

      sd.setLocation(p.storage.location)
      sd.setInputFormat(p.storage.inputFormat)
      sd.setOutputFormat(p.storage.outputFormat)

      val serdeInfo = new org.apache.hadoop.hive.metastore.api.SerDeInfo
      sd.setSerdeInfo(serdeInfo)
      serdeInfo.setSerializationLib(p.storage.serde)

      val serdeParameters = new java.util.HashMap[String, String]()
      serdeInfo.setParameters(serdeParameters)
      table.serdeProperties.foreach { case (k, v) => serdeParameters.put(k, v) }
      p.storage.serdeProperties.foreach { case (k, v) => serdeParameters.put(k, v) }

      new Partition(hiveQlTable, tPartition)
    }
  }

  /** Only compare database and tablename, not alias. */
  override def sameResult(plan: LogicalPlan): Boolean = {
    plan match {
      case mr: MetastoreRelation =>
        mr.databaseName == databaseName && mr.tableName == tableName
      case _ => false
    }
  }

  val tableDesc = new TableDesc(
    hiveQlTable.getInputFormatClass,
    // The class of table should be org.apache.hadoop.hive.ql.metadata.Table because
    // getOutputFormatClass will use HiveFileFormatUtils.getOutputFormatSubstitute to
    // substitute some output formats, e.g. substituting SequenceFileOutputFormat to
    // HiveSequenceFileOutputFormat.
    hiveQlTable.getOutputFormatClass,
    hiveQlTable.getMetadata
  )

  // Implicit view turning a catalog column into a Catalyst attribute; used by the
  // `.toAttribute` calls on schema/partition columns below.
  implicit class SchemaAttribute(f: HiveColumn) {
    def toAttribute: AttributeReference = AttributeReference(
      f.name,
      HiveMetastoreTypes.toDataType(f.hiveType),
      // Since data can be dumped in randomly with no validation, everything is nullable.
      nullable = true
    )(qualifiers = Seq(alias.getOrElse(tableName)))
  }

  /** PartitionKey attributes */
  val partitionKeys = table.partitionColumns.map(_.toAttribute)

  /** Non-partitionKey attributes */
  val attributes = table.schema.map(_.toAttribute)

  val output = attributes ++ partitionKeys

  /** An attribute map that can be used to lookup original attributes based on expression id. */
  val attributeMap = AttributeMap(output.map(o => (o, o)))

  /** An attribute map for determining the ordinal for non-partition columns. */
  val columnOrdinals = AttributeMap(attributes.zipWithIndex)

  // Files backing this relation: all partition locations for a partitioned table,
  // otherwise the single table location (which must exist).
  override def inputFiles: Array[String] = {
    val partLocations = table.getPartitions(Nil).map(_.storage.location).toArray
    if (partLocations.nonEmpty) {
      partLocations
    } else {
      Array(
        table.location.getOrElse(
          sys.error(s"Could not get the location of ${table.qualifiedName}.")))
    }
  }

  // Fresh copy with new expression ids, required by MultiInstanceRelation for self-joins.
  override def newInstance(): MetastoreRelation = {
    MetastoreRelation(databaseName, tableName, alias)(table)(sqlContext)
  }
}
private[hive] object HiveMetastoreTypes {
  /** Parses a Hive metastore type string (e.g. "array<int>") into a Catalyst [[DataType]]. */
  def toDataType(metastoreType: String): DataType = DataTypeParser.parse(metastoreType)

  /**
   * Renders a Catalyst [[DecimalType]] as a Hive metastore type string.
   * Decimals without a fixed precision/scale fall back to Hive's unlimited bounds.
   */
  def decimalMetastoreString(decimalType: DecimalType): String = decimalType match {
    case DecimalType.Fixed(precision, scale) => s"decimal($precision,$scale)"
    // FIX: previously written as s"decimal($HiveShim.UNLIMITED_DECIMAL_PRECISION,...)",
    // which interpolated only the identifier `HiveShim` (rendering its toString) and left
    // ".UNLIMITED_DECIMAL_PRECISION" as literal text. Member access inside an interpolated
    // string requires the ${...} form.
    case _ =>
      s"decimal(${HiveShim.UNLIMITED_DECIMAL_PRECISION},${HiveShim.UNLIMITED_DECIMAL_SCALE})"
  }

  /**
   * Converts a Catalyst [[DataType]] to its Hive metastore type string
   * (the inverse of [[toDataType]] for supported types).
   */
  def toMetastoreType(dt: DataType): String = dt match {
    case ArrayType(elementType, _) => s"array<${toMetastoreType(elementType)}>"
    case StructType(fields) =>
      s"struct<${fields.map(f => s"${f.name}:${toMetastoreType(f.dataType)}").mkString(",")}>"
    case MapType(keyType, valueType, _) =>
      s"map<${toMetastoreType(keyType)},${toMetastoreType(valueType)}>"
    case StringType => "string"
    case FloatType => "float"
    case IntegerType => "int"
    case ByteType => "tinyint"
    case ShortType => "smallint"
    case DoubleType => "double"
    case LongType => "bigint"
    case BinaryType => "binary"
    case BooleanType => "boolean"
    case DateType => "date"
    case d: DecimalType => decimalMetastoreString(d)
    case TimestampType => "timestamp"
    case NullType => "void"
    case udt: UserDefinedType[_] => toMetastoreType(udt.sqlType)
  }
}
| practice-vishnoi/dev-spark-1 | sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala | Scala | apache-2.0 | 38,704 |
package com.karasiq.shadowcloud.streams.chunk
import java.security.SecureRandom
import akka.stream._
import akka.stream.stage.{GraphStage, GraphStageLogic, OutHandler}
import com.karasiq.shadowcloud.exceptions.CryptoException
import com.karasiq.shadowcloud.model.crypto._
import com.karasiq.shadowcloud.providers.CryptoModuleRegistry
private[shadowcloud] object ChunkKeyStream {
  /** Creates a key-stream stage for the given encryption method. */
  def apply(cryptoModules: CryptoModuleRegistry,
            method: EncryptionMethod,
            maxKeyReuse: Int = 256): ChunkKeyStream =
    new ChunkKeyStream(cryptoModules, method, maxKeyReuse)

  // True when both parameter sets carry the same key material.
  // No-op (unencrypted) methods never count as a reuse.
  private def isKeyReused(p1: EncryptionParameters, p2: EncryptionParameters): Boolean =
    if (CryptoMethod.isNoOpMethod(p1.method) && CryptoMethod.isNoOpMethod(p2.method)) false
    else (p1, p2) match {
      case (sp1: SymmetricEncryptionParameters, sp2: SymmetricEncryptionParameters) ⇒
        sp1.key == sp2.key
      case (ap1: AsymmetricEncryptionParameters, ap2: AsymmetricEncryptionParameters) ⇒
        ap1.publicKey == ap2.publicKey || ap1.privateKey == ap2.privateKey
      case _ ⇒
        false
    }

  // True when two symmetric parameter sets share the same nonce.
  // No-op methods and asymmetric parameters never count as a reuse.
  private def isNonceReused(p1: EncryptionParameters, p2: EncryptionParameters): Boolean =
    if (CryptoMethod.isNoOpMethod(p1.method) && CryptoMethod.isNoOpMethod(p2.method)) false
    else (p1, p2) match {
      case (sp1: SymmetricEncryptionParameters, sp2: SymmetricEncryptionParameters) ⇒
        sp1.nonce == sp2.nonce
      case _ ⇒
        false
    }
}
private[shadowcloud] final class ChunkKeyStream(cryptoModules: CryptoModuleRegistry, method: EncryptionMethod, maxKeyReuse: Int)
  extends GraphStage[SourceShape[EncryptionParameters]] {

  val outlet = Outlet[EncryptionParameters]("ChunkKeyStream.out")
  val shape = SourceShape(outlet)

  def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) with OutHandler {
    private[this] val encryptionModule = cryptoModules.encryptionModule(method)
    // Lazily created on first key reset to avoid SecureRandom seeding cost when unused.
    private[this] var secureRandom: SecureRandom = _
    // Current key/nonce parameters emitted on each pull.
    private[this] var keyParameters: EncryptionParameters = _
    // How many chunks have been encrypted with the current key.
    private[this] var encryptedCount: Int = _
    // Randomized threshold after which a completely new key is generated.
    private[this] var changeKeyIn: Int = _

    // Generates a brand-new key and picks a fresh randomized reuse threshold.
    private[this] def resetParametersAndCounter(): Unit = { // TODO: Log key changes
      this.keyParameters = encryptionModule.createParameters()
      this.encryptedCount = 0
      this.changeKeyIn = if (maxKeyReuse > 0) {
        if (secureRandom == null) secureRandom = new SecureRandom()
        secureRandom.nextInt(maxKeyReuse)
      } else {
        maxKeyReuse
      }
    }

    // Derives the next parameters (e.g. a fresh nonce) while keeping the current key.
    private[this] def updateParameters(): Unit = {
      this.keyParameters = encryptionModule.updateParameters(this.keyParameters)
    }

    // Update IV/key
    // Fails the stage if the freshly generated/updated parameters accidentally
    // repeat the previous key or nonce.
    private[this] def updateOrResetKey(): Unit = {
      val oldParameters = this.keyParameters
      if (encryptedCount > changeKeyIn) {
        resetParametersAndCounter()
        if (ChunkKeyStream.isKeyReused(oldParameters, keyParameters) || ChunkKeyStream.isNonceReused(oldParameters, keyParameters)) {
          failStage(CryptoException.ReuseError(new IllegalArgumentException("Key or nonce is reused")))
        }
      } else {
        updateParameters()
        if (ChunkKeyStream.isNonceReused(oldParameters, keyParameters)) {
          failStage(CryptoException.ReuseError(new IllegalArgumentException("Nonce is reused")))
        }
      }
    }

    // Emits the current parameters, then advances state for the next pull.
    def onPull(): Unit = {
      if (keyParameters == null) resetParametersAndCounter()
      push(outlet, keyParameters)
      encryptedCount += 1
      updateOrResetKey()
    }

    setHandler(outlet, this)
  }
}
| Karasiq/shadowcloud | core/src/main/scala/com/karasiq/shadowcloud/streams/chunk/ChunkKeyStream.scala | Scala | apache-2.0 | 3,560 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.optim
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import scala.reflect.ClassTag
/**
* It is a trait for all regularizers.
* Any regularizers need to inherit the result.
*
* @tparam T type parameters [[Float]] or [[Double]]
*/
trait Regularizer[T]
  extends Serializable {
  // Whether regularization is currently applied; toggled via enable()/disable().
  private var regularizationEnabled: Boolean = true

  /**
   * Enable the regularization feature
   */
  def enable(): Unit = regularizationEnabled = true

  /**
   * Disable the regularization feature
   */
  def disable(): Unit = regularizationEnabled = false

  /**
   * The method need to be override by the concrete regularizer class
   * It accumulates the gradient of the regularization of `parameter` to `gradParameter`
   *
   * @param parameter the parameter that is regularized
   * @param gradParameter the gradient of the parameter
   */
  def accRegularization(
    parameter: Tensor[T],
    gradParameter: Tensor[T]
  ): Unit

  /**
   * Check the regularization is applied or not
   *
   * @param parameter the parameter that is regularized
   * @param gradParameter the gradient of the parameter
   * @return a boolean, if true, accumulates the gradient of regularization,
   *         otherwise not.
   */
  protected def preCheck(
    parameter: Tensor[T],
    gradParameter: Tensor[T]
  ): Boolean =
    parameter != null && gradParameter != null && regularizationEnabled
}
/**
* Apply both L1 and L2 regularization
* @param l1 l1 regularization rate
* @param l2 l2 regularization rate
* @tparam T type parameters [[Float]] or [[Double]]
*/
@SerialVersionUID(- 5617491971070914067L)
class L1L2Regularizer[T: ClassTag](
  l1: Double,
  l2: Double
)(implicit ev: TensorNumeric[T])
  extends Regularizer[T] {
  override def accRegularization(
    parameter: Tensor[T],
    gradParameter: Tensor[T]
  ): Unit = {
    // Skip entirely when disabled or when either tensor is null.
    if (preCheck(parameter, gradParameter)) {
      accL1L2Regularization(l1, l2, parameter, gradParameter)
    }
  }

  /**
   * Accumulates the gradient of the l1, l2 regularization of `parameter`
   * to `gradParameter`
   *
   * @param l1Alpha l1 regularization rate
   * @param l2Alpha l2 regularization rate
   * @param parameter the parameter that is regularized
   * @param gradParameter the gradient of the parameter
   */
  private def accL1L2Regularization(
    l1Alpha: Double,
    l2Alpha: Double,
    parameter: Tensor[T],
    gradParameter: Tensor[T]
  ): Unit = {
    accL1Regularization(l1Alpha, parameter, gradParameter)
    accL2Regularization(l2Alpha, parameter, gradParameter)
  }

  /**
   * Accumulates the gradient of the l1 regularization of `parameter`
   * to `gradParameter`: gradParameter += alpha * sign(parameter)
   *
   * @param alpha l1 regularization rate
   * @param parameter the parameter that is regularized
   * @param gradParameter the gradient of the parameter
   */
  private def accL1Regularization(
    alpha: Double,
    parameter: Tensor[T],
    gradParameter: Tensor[T]
  ): Unit = {
    if (alpha != 0) gradParameter.add(ev.fromType(alpha),
      l1SignBuffer.resizeAs(parameter).copy(parameter).sign())
  }

  // Scratch tensor reused across accL1Regularization calls to avoid re-allocation.
  // FIX: declared `lazy` so it is re-created after Java deserialization; a plain
  // `@transient val` is null once the regularizer has been serialized (e.g. shipped
  // to executors), causing an NPE on first use.
  @transient private lazy val l1SignBuffer = Tensor()

  /**
   * Accumulates the gradient of the l2 regularization of `parameter`
   * to `gradParameter`: gradParameter += alpha * parameter
   *
   * @param alpha l2 regularization rate
   * @param parameter the parameter that is regularized
   * @param gradParameter the gradient of the parameter
   */
  private def accL2Regularization(
    alpha: Double,
    parameter: Tensor[T],
    gradParameter: Tensor[T]
  ): Unit = {
    if (alpha != 0) gradParameter.add(ev.fromType(alpha), parameter)
  }
}
object L1L2Regularizer {
  // Convenience factory mirroring the class constructor.
  def apply[@specialized(Float, Double) T: ClassTag](
    l1: Double,
    l2: Double
  )(implicit ev: TensorNumeric[T]): L1L2Regularizer[T] = new L1L2Regularizer(l1, l2)
}
/**
* Apply L1 regularization
* @param l1 l1 regularization rate
* @tparam T type parameters [[Float]] or [[Double]]
*/
@SerialVersionUID(1950693435414946281L)
// Pure L1 regularization: an L1L2Regularizer with the l2 rate fixed to 0.
case class L1Regularizer[T: ClassTag](
  l1: Double
) (implicit ev: TensorNumeric[T])
  extends L1L2Regularizer[T](l1, 0)
/**
* Apply L2 regularization
* @param l2 l2 regularization rate
* @tparam T type parameters [[Float]] or [[Double]]
*/
@SerialVersionUID(- 6597840589687540202L)
// Pure L2 regularization: an L1L2Regularizer with the l1 rate fixed to 0.
case class L2Regularizer[T: ClassTag](
  l2: Double
) (implicit ev: TensorNumeric[T])
  extends L1L2Regularizer[T](0, l2)
| psyyz10/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/optim/Regularizer.scala | Scala | apache-2.0 | 5,064 |
package com.fh
import org.scalatest.{BeforeAndAfter, FunSuite}
/**
* Created: 2017-07-15
*/
class AppTest extends FunSuite with BeforeAndAfter {
  // Fresh App instance created before each test case.
  var app: App = _

  before {
    app = new App
  }

  test("make sure we calculate the correct msg") {
    assert(app.getMsg == "Hello Vaadin!")
  }
}
| fh137/ScalaVaadin | src/test/scala/com/fh/AppTest.scala | Scala | mit | 308 |
package models
import helpers.{PasswordHash, TokenGenerator}
import java.security.MessageDigest
import java.sql.Connection
import javax.inject.{Inject, Singleton}
import scala.collection.immutable
case class ModifyUser(
  userId: Long, userName: String, firstName: String, middleName: Option[String], lastName: String,
  email: String, supplementalEmails: immutable.Seq[String], password: String, companyName: String,
  sendNoticeMail: Boolean,
  altFirstName: Option[String], altMiddleName: Option[String], altLastName: Option[String],
)(
  implicit storeUserRepo: StoreUserRepo,
  orderNotificationRepo: OrderNotificationRepo
) extends CreateUserBase {
  // Persists all changes for this user within the given DB connection:
  // re-hashes the password with a fresh salt, rewrites the supplemental email set,
  // recreates the order-notification flag, and upserts the kana name metadata.
  def update(implicit tokenGenerator: TokenGenerator, conn: Connection) {
    // A new salt is generated on every update, so the stored hash always changes.
    val salt = tokenGenerator.next
    val hash = PasswordHash.generate(password, salt, storeUserRepo.PasswordHashStretchCount())
    storeUserRepo.update(
      userId, userName, firstName, middleName, lastName, email, hash, salt, Some(companyName),
      altFirstName, altMiddleName, altLastName
    )
    SupplementalUserEmail.save(supplementalEmails.toSet, userId)
    // Delete-then-create so the stored flag always matches sendNoticeMail.
    orderNotificationRepo.delete(userId)
    if (sendNoticeMail)
      orderNotificationRepo.createNew(userId)
    // Upsert: create metadata if missing, otherwise update it in place.
    UserMetadata.getByStoreUserId(userId) match {
      case None =>
        UserMetadata.createNew(
          userId,
          firstNameKana = altFirstName,
          middleNameKana = altMiddleName,
          lastNameKana = altLastName
        )
      case Some(um) =>
        UserMetadata.update(
          userId,
          firstNameKana = altFirstName,
          middleNameKana = altMiddleName,
          lastNameKana = altLastName
        )
    }
  }
}
object ModifyUser {
  // Builds a ModifyUser from an existing user record and its supplemental emails,
  // loading kana metadata from the DB. The password field is intentionally blank.
  def apply(
    user: ListUserEntry, supplementalUserEmails: Seq[SupplementalUserEmail]
  )(
    implicit storeUserRepo: StoreUserRepo,
    orderNotificationRepo: OrderNotificationRepo,
    conn: Connection
  ): ModifyUser = {
    val optMd: Option[UserMetadata] = UserMetadata.getByStoreUserId(user.user.id.get)
    ModifyUser(
      user.user.id.get,
      user.user.userName,
      user.user.firstName,
      user.user.middleName,
      user.user.lastName,
      user.user.email,
      supplementalUserEmails.map(_.email).sorted.toList,
      "",
      user.user.companyName.getOrElse(""),
      user.sendNoticeMail,
      optMd.flatMap(_.firstNameKana),
      optMd.flatMap(_.middleNameKana),
      optMd.flatMap(_.lastNameKana)
    )
  }

  // Form binder: drops empty supplemental-email slots and takes the first password
  // of the (password, confirmation) pair.
  def fromForm(
    userId: Long, userName: String, firstName: String, middleName: Option[String], lastName: String,
    email: String, supplementalEmails: Seq[Option[String]], passwords: (String, String), companyName: String,
    sendNoticeMail: Boolean,
    altFirstName: Option[String], altMiddleName: Option[String], altLastName: Option[String]
  )(
    implicit storeUserRepo: StoreUserRepo,
    orderNotificationRepo: OrderNotificationRepo
  ): ModifyUser =
    ModifyUser(
      userId, userName, firstName, middleName, lastName, email,
      supplementalEmails.filter(_.isDefined).map(_.get).toList,
      passwords._1, companyName, sendNoticeMail,
      altFirstName, altMiddleName, altLastName
    )

  // Form unbinder: mirrors fromForm (password duplicated into both tuple slots).
  def toForm(m: ModifyUser) = Some(
    m.userId, m.userName, m.firstName, m.middleName, m.lastName, m.email,
    m.supplementalEmails.map {e => Some(e)},
    (m.password, m.password), m.companyName, m.sendNoticeMail,
    m.altFirstName, m.altMiddleName, m.altLastName
  )
}
| ruimo/store2 | app/models/ModifyUser.scala | Scala | apache-2.0 | 3,430 |
package com.monkeygroover.pipeline
import shapeless._
/**
* Provides a way to convert a value into an HList.
* If the value is already an HList then it is returned unchanged, otherwise it's wrapped into a single-element HList.
*/
trait HListable[T] {
  // The HList form of T: T itself when T is already an HList, otherwise T :: HNil.
  type Out <: HList
  def apply(value: T): Out
}
object HListable extends LowerPriorityHListable {
  // Higher-priority instance (companion scope): an HList passes through unchanged.
  implicit def fromHList[T <: HList] = new HListable[T] {
    type Out = T
    def apply(value: T) = value
  }
}
private[pipeline] abstract class LowerPriorityHListable {
  // Fallback instance (lower implicit priority via inheritance): wraps any
  // non-HList value into a single-element HList.
  implicit def fromAnyRef[T] = new HListable[T] {
    type Out = T :: HNil
    def apply(value: T) = value :: HNil
  }
}
} | monkeygroover/my_spray | src/main/scala/com/monkeygroover/pipeline/HListable.scala | Scala | gpl-2.0 | 648 |
package org.jetbrains.plugins.cbt.runner.action
import com.intellij.execution.ExecutionManager
import com.intellij.icons.AllIcons
import com.intellij.openapi.actionSystem.{AnAction, AnActionEvent}
import com.intellij.openapi.module.Module
import com.intellij.openapi.project.Project
import org.jetbrains.plugins.cbt.runner.{CbtProcessListener, CbtProjectTaskRunner}
/** Action that launches the given CBT task for `module` when invoked from the IDE. */
class RunTaskAction(task: String, module: Module, project: Project)
  extends AnAction(s"Run task '$task'", s"Run task '$task'", AllIcons.General.Run) {
  // FIX: the description argument was the literal string "sRun task '$task'" — the `s`
  // interpolator prefix had been typed inside the quotes, so `$task` was shown verbatim
  // in the UI instead of the task name.
  override def actionPerformed(e: AnActionEvent): Unit = {
    val environment =
      CbtProjectTaskRunner.createExecutionEnv(task, module, project, CbtProcessListener.Dummy)
    ExecutionManager.getInstance(project).restartRunProfile(environment)
  }
}
| triplequote/intellij-scala | cbt/src/org/jetbrains/plugins/cbt/runner/action/RunTaskAction.scala | Scala | apache-2.0 | 775 |
package com.github.ldaniels528.trifecta.io.kafka
/**
* Type-safe Broker representation
* @author lawrence.daniels@gmail.com
*/
/** A Kafka broker endpoint; `toString` renders the "host:port" form. */
case class Broker(host: String, port: Int, brokerId: Int = 0) {
  override def toString = s"$host:$port"
}

object Broker {

  /**
   * Parses the given broker list into a collection of broker instances
   * @param brokerList the given broker list (e.g. "localhost:9091,localhost:9092,localhost:9093")
   * @return a collection of [[Broker]] instances
   */
  def parseBrokerList(brokerList: String): Seq[Broker] = {
    val entries = brokerList.split("[,]").toList
    entries.map { entry =>
      entry.split("[:]").toList match {
        case host :: Nil =>
          // No port given: fall back to the default port 9091.
          Broker(host, 9091)
        case host :: port :: Nil =>
          Broker(host, port.toInt)
        case args =>
          throw new IllegalArgumentException(s"Illegal host definition - '${args.mkString(", ")}'")
      }
    }
  }
}
} | ldaniels528/trifecta | src/main/scala/com/github/ldaniels528/trifecta/io/kafka/Broker.scala | Scala | apache-2.0 | 952 |
/**
* Copyright (C) 2010 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.function.xxforms
import org.orbeon.oxf.xforms.analysis.SimpleElementAnalysis
import org.orbeon.oxf.xforms.function.{FunctionSupport, MatchSimpleAnalysis, XFormsFunction}
import org.orbeon.saxon.expr.PathMap.PathMapNodeSet
import org.orbeon.saxon.expr._
/**
* Return the current node of one of the enclosing xf:repeat iteration, either the closest
* iteration if no argument is passed, or the iteration for the repeat id passed.
*
* This function must be called from within an xf:repeat.
*/
class XXFormsRepeatCurrent extends XFormsFunction with MatchSimpleAnalysis with FunctionSupport {

  override def evaluateItem(xpathContext: XPathContext) = {
    implicit val ctx = xpathContext
    // Resolve the enclosing repeat iteration (optionally named by the repeat id in
    // argument 0) and return its bound item.
    bindingContext.enclosingRepeatIterationBindingContext(stringArgumentOpt(0)).getSingleItem
  }

  // Static dependency analysis: contribute this function's data dependencies to the
  // PathMap so XForms can determine what to recompute.
  override def addToPathMap(pathMap: PathMap, pathMapNodeSet: PathMapNodeSet): PathMapNodeSet = {
    // Match on context expression
    argument.headOption match {
      case Some(repeatIdExpression: StringLiteral) ⇒
        // Argument is literal and we have a context to ask
        pathMap.getPathMapContext match {
          case context: SimpleElementAnalysis#SimplePathMapContext ⇒
            // Get PathMap for context id
            matchSimpleAnalysis(pathMap, context.getInScopeContexts.get(repeatIdExpression.getStringValue))
          case _ ⇒ throw new IllegalStateException("Can't process PathMap because context is not of expected type.")
        }
      case None ⇒
        // Argument is not specified, ask PathMap for the result
        pathMap.getPathMapContext match {
          case context: SimpleElementAnalysis#SimplePathMapContext ⇒
            // Get PathMap for context id
            matchSimpleAnalysis(pathMap, context.getInScopeRepeat)
          case _ ⇒ throw new IllegalStateException("Can't process PathMap because context is not of expected type.")
        }
      case _ ⇒
        // Argument is not literal so we can't figure it out
        pathMap.setInvalidated(true)
        null
    }
  }
}
| wesley1001/orbeon-forms | src/main/scala/org/orbeon/oxf/xforms/function/xxforms/XXFormsRepeatCurrent.scala | Scala | lgpl-2.1 | 2,717 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.feats
import org.concordion.api.FullOGNL
import org.concordion.api.option.{ConcordionOptions, MarkdownExtensions}
//import org.concordion.ext.EmbedExtension
//import org.concordion.ext.collapse.CollapseOutputExtension
import org.concordion.integration.junit4.ConcordionRunner
import org.junit.runner.RunWith
//@FullOGNL
////@Extensions(Array(classOf[EmbedExtension], classOf[CollapseOutputExtension]))
//@ConcordionOptions(
// declareNamespaces = Array("ext", "urn:concordion-extensions:2010"),
// markdownExtensions = Array(
// MarkdownExtensions.WIKILINKS,
// MarkdownExtensions.AUTOLINKS,
// MarkdownExtensions.TASKLISTITEMS)
//)
//@RunWith(classOf[ConcordionRunner])
class GeneralFeatRelatedFixture extends FeatDisplayHelper {
  // Enum under test for this Concordion fixture; `E` is presumably the abstract
  // type member declared by FeatDisplayHelper — TODO confirm.
  val displayEnum: E = GeneralFeat
}
| adarro/ddo-calc | subprojects/common/ddo-core/src/specs/scala/io/truthencode/ddo/model/feats/GeneralFeatRelatedFixture.scala | Scala | apache-2.0 | 1,459 |
/*
* Copyright © 2017 University of Texas at Arlington
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.uta.diql.core
import java.io._

import scala.collection.immutable.HashMap
import scala.reflect.macros.whitebox.Context
import scala.util.control.NonFatal
object DistributedEvaluator {
  // Mutable holder for the active code generator; default instance carries a null
  // macro context. NOTE(review): QueryCodeGenerator.code_generator assigns an
  // unqualified `distributed` — confirm it targets this var and not a package member.
  var distributed: ParallelCodeGenerator = new { val c = null } with ParallelCodeGenerator
}
abstract class QueryCodeGenerator {
  val context: Context
  // Generators bound to this macro context (path-dependent type c: context.type).
  val cg = new { val c: context.type = context } with ParallelCodeGenerator
  val sg = new { val c: context.type = context } with Streaming
  val optimizer = new { val c: context.type = context } with Optimizer

  /**
   * Translate a DIQL query to Scala byte code.
   *
   * Typechecks, optimizes and code-generates the query AST `e`; on any non-fatal
   * failure the error is printed (with a stack trace when diql_explain is set)
   * and an empty `()` expression is returned so macro expansion can proceed.
   */
  def code_generator ( e: Expr, query_text: String, line: Int, debug: Boolean,
                       env: cg.Environment = Map() ): context.Expr[Any] = {
    import context.universe.{Expr=>_,_}
    import Normalizer.normalizeAll
    import Pretty.{print=>pretty_print}
    try {
      cg.line = line
      distributed = cg
      cg.typecheck(e,env)
      val oe = normalizeAll(optimizer.optimizeAll(e,env))
      if (diql_explain)
        println("Optimized term:\n"+pretty_print(oe.toString))
      cg.typecheck(oe,env)
      if (diql_streaming) {
        val se = sg.findHomomorphisms(oe,new HashMap())
        println("Streaming term:\n"+pretty_print(se.toString))
      }
      // When debugging, embed lineage tracking around the optimized term.
      val de = if (debug)
                  normalizeAll(Call("debug",
                       List(Provenance.embedLineage(oe,cg.isDistributed(_)),
                            BoolConst(cg.isDistributed(oe)),
                            Call("List",Provenance.exprs.map(StringConst(_))))))
               else oe
      if (debug && diql_explain)
        println("Debugging term:\n"+pretty_print(de.toString))
      val ec = cg.codeGen(de,env)
      if (diql_explain)
        println("Scala code:\n"+showCode(ec))
      val tp = cg.getType(ec,env)
      if (diql_explain)
        println("Scala type: "+showCode(tp))
      context.Expr[Any](ec)
    } catch {
      // FIX: previously `case ex: Any`, which also swallowed fatal throwables
      // (OutOfMemoryError, ControlThrowable, ...). Only recover from non-fatal ones.
      case NonFatal(ex)
        => println(ex)
           if (diql_explain) {
              val sw = new StringWriter
              ex.printStackTrace(new PrintWriter(sw))
              println(sw.toString)
           }
           context.Expr[Any](q"()")
    }
  }
}
| fegaras/DIQL | src/parallel/scala/edu/uta/diql/QueryCodeGenerator.scala | Scala | apache-2.0 | 2,841 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.r
import java.io.{DataOutputStream, File, FileOutputStream, IOException}
import java.net.{InetAddress, InetSocketAddress, ServerSocket}
import java.util.concurrent.TimeUnit
import io.netty.bootstrap.ServerBootstrap
import io.netty.channel.{ChannelFuture, ChannelInitializer, EventLoopGroup}
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.SocketChannel
import io.netty.channel.socket.nio.NioServerSocketChannel
import io.netty.handler.codec.LengthFieldBasedFrameDecoder
import io.netty.handler.codec.bytes.{ByteArrayDecoder, ByteArrayEncoder}
import io.netty.handler.timeout.ReadTimeoutHandler
import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
/**
* Netty-based backend server that is used to communicate between R and Java.
*/
private[spark] class RBackend {

  private[this] var channelFuture: ChannelFuture = null
  private[this] var bootstrap: ServerBootstrap = null
  private[this] var bossGroup: EventLoopGroup = null

  /** Tracks JVM objects returned to R for this RBackend instance. */
  private[r] val jvmObjectTracker = new JVMObjectTracker

  // Starts the Netty server on an ephemeral localhost port and returns the bound port.
  def init(): Int = {
    val conf = new SparkConf()
    val backendConnectionTimeout = conf.getInt(
      "spark.r.backendConnectionTimeout", SparkRDefaults.DEFAULT_CONNECTION_TIMEOUT)
    bossGroup = new NioEventLoopGroup(
      conf.getInt("spark.r.numRBackendThreads", SparkRDefaults.DEFAULT_NUM_RBACKEND_THREADS))
    // A single event-loop group serves as both boss and worker.
    val workerGroup = bossGroup
    val handler = new RBackendHandler(this)

    bootstrap = new ServerBootstrap()
      .group(bossGroup, workerGroup)
      .channel(classOf[NioServerSocketChannel])

    bootstrap.childHandler(new ChannelInitializer[SocketChannel]() {
      def initChannel(ch: SocketChannel): Unit = {
        ch.pipeline()
          .addLast("encoder", new ByteArrayEncoder())
          .addLast("frameDecoder",
            // maxFrameLength = 2G
            // lengthFieldOffset = 0
            // lengthFieldLength = 4
            // lengthAdjustment = 0
            // initialBytesToStrip = 4, i.e. strip out the length field itself
            new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4))
          .addLast("decoder", new ByteArrayDecoder())
          .addLast("readTimeoutHandler", new ReadTimeoutHandler(backendConnectionTimeout))
          .addLast("handler", handler)
      }
    })

    // Port 0 => let the OS pick a free port; report it back to the caller.
    channelFuture = bootstrap.bind(new InetSocketAddress("localhost", 0))
    channelFuture.syncUninterruptibly()
    channelFuture.channel().localAddress().asInstanceOf[InetSocketAddress].getPort()
  }

  // Blocks the calling thread until the server channel is closed.
  def run(): Unit = {
    channelFuture.channel.closeFuture().syncUninterruptibly()
  }

  // Shuts down the channel and event loops, then clears tracked JVM objects.
  def close(): Unit = {
    if (channelFuture != null) {
      // close is a local operation and should finish within milliseconds; timeout just to be safe
      channelFuture.channel().close().awaitUninterruptibly(10, TimeUnit.SECONDS)
      channelFuture = null
    }
    if (bootstrap != null && bootstrap.group() != null) {
      bootstrap.group().shutdownGracefully()
    }
    if (bootstrap != null && bootstrap.childGroup() != null) {
      bootstrap.childGroup().shutdownGracefully()
    }
    bootstrap = null

    jvmObjectTracker.clear()
  }
}
private[spark] object RBackend extends Logging {
  initializeLogIfNecessary(true)

  /**
   * Entry point used when launching the backend as a separate JVM.
   * Binds the backend and a monitor socket, writes connection details to a
   * temp file for the R process, then blocks until either side goes away.
   */
  def main(args: Array[String]): Unit = {
    if (args.length < 1) {
      // scalastyle:off println
      System.err.println("Usage: RBackend <tempFilePath>")
      // scalastyle:on println
      System.exit(-1)
    }

    val sparkRBackend = new RBackend()
    try {
      // bind to random port
      val boundPort = sparkRBackend.init()
      // Second socket: the R process connects here so we can detect its death.
      val serverSocket = new ServerSocket(0, 1, InetAddress.getByName("localhost"))
      val listenPort = serverSocket.getLocalPort()
      // Connection timeout is set by socket client. To make it configurable we will pass the
      // timeout value to client inside the temp file
      val conf = new SparkConf()
      val backendConnectionTimeout = conf.getInt(
        "spark.r.backendConnectionTimeout", SparkRDefaults.DEFAULT_CONNECTION_TIMEOUT)

      // tell the R process via temporary file; write to ".tmp" first and rename so
      // the reader never observes a partially written file
      val path = args(0)
      val f = new File(path + ".tmp")
      val dos = new DataOutputStream(new FileOutputStream(f))
      dos.writeInt(boundPort)
      dos.writeInt(listenPort)
      SerDe.writeString(dos, RUtils.rPackages.getOrElse(""))
      dos.writeInt(backendConnectionTimeout)
      dos.close()
      f.renameTo(new File(path))

      // wait for the end of stdin, then exit
      new Thread("wait for socket to close") {
        setDaemon(true)
        override def run(): Unit = {
          // any un-catched exception will also shutdown JVM
          val buf = new Array[Byte](1024)
          // shutdown JVM if R does not connect back in 10 seconds
          serverSocket.setSoTimeout(10000)
          try {
            val inSocket = serverSocket.accept()
            serverSocket.close()
            // wait for the end of socket, closed if R process die
            inSocket.getInputStream().read(buf)
          } finally {
            sparkRBackend.close()
            System.exit(0)
          }
        }
      }.start()

      sparkRBackend.run()
    } catch {
      case e: IOException =>
        logError("Server shutting down: failed with exception ", e)
        sparkRBackend.close()
        System.exit(1)
    }
    System.exit(0)
  }
}
| aokolnychyi/spark | core/src/main/scala/org/apache/spark/api/r/RBackend.scala | Scala | apache-2.0 | 6,237 |
package cz.flih.relgraph
import com.tinkerpop.blueprints._
import java.lang.{Iterable => JIterable}
// Skeleton implementation of the Blueprints Graph API backed (eventually) by SQL.
// Every operation is currently unimplemented (???) and throws NotImplementedError
// when invoked; this class only satisfies the interface so the project compiles.
class SQLRGraph extends Graph {
  def addEdge(id: Any, outVertex: Vertex, inVertex: Vertex, label: String): Edge = ???
  def addVertex(id: Any): Vertex = ???
  def getEdge(id: Any): Edge = ???
  def getEdges(key: String, value: Any): JIterable[Edge] = ???
  def getEdges(): JIterable[Edge] = ???
  def getFeatures(): Features = ???
  def getVertex(id: Any): Vertex = ???
  def getVertices(key: String, value: Any): JIterable[Vertex] = ???
  def getVertices(): JIterable[Vertex] = ???
  def query(): GraphQuery = ???
  def removeEdge(edge: Edge): Unit = ???
  def removeVertex(vertex: Vertex): Unit = ???
  def shutdown(): Unit = ???
}
| crabhi/relational-graph | src/main/scala/cz/flih/relgraph/SQLRGraph.scala | Scala | apache-2.0 | 749 |
package netcaty.http.server
import io.netty.channel.ChannelInitializer
import io.netty.channel.socket.SocketChannel
import io.netty.handler.codec.http.{HttpRequestDecoder, HttpObjectAggregator, HttpResponseEncoder}
import io.netty.handler.stream.ChunkedWriteHandler
import netcaty.{http, Ssl}
/**
 * Configures the Netty channel pipeline for each accepted HTTP(S) connection.
 * When `https` is true, a TLS handler is installed first; the rest of the
 * pipeline encodes responses, decodes/aggregates requests, and finally hands
 * full requests to the user-supplied handler.
 */
class PipelineInitializer(
  https: Boolean, server: Server, handler: http.RequestHandler, stopAfterOneResponse: Boolean
) extends ChannelInitializer[SocketChannel] {

  def initChannel(ch: SocketChannel) {
    val p = ch.pipeline

    if (https) p.addLast(Ssl.serverContext.newHandler(ch.alloc))

    // HttpObjectAggregator automatically sends "Continue" response for
    // "Expect 100 Continue" request.
    //
    // But: http://netty.io/4.0/api/io/netty/handler/codec/http/HttpObjectAggregator.html
    // "Be aware that you need to have the HttpResponseEncoder or HttpRequestEncoder
    // before the HttpObjectAggregator in the ChannelPipeline."
    p.addLast(
      // Outbound
      new HttpResponseEncoder,

      // Inbound
      new HttpRequestDecoder,
      new HttpObjectAggregator(http.MAX_CONTENT_LENGTH), // Handle chunks
      new RequestHandler(server, handler, stopAfterOneResponse)
    )
  }
}
| ngocdaothanh/netcaty | src/main/scala/netcaty/http/server/PipelineInitializer.scala | Scala | mit | 1,212 |
package com.avsystem.commons
package analyzer
import scala.tools.nsc.Global
/**
 * Compiler analyzer rule that reports any direct use (identifier, selection or
 * instantiation) of a symbol annotated with `@bincompat` — such symbols exist
 * only to preserve binary compatibility and must not be referenced in new code.
 */
class CheckBincompat(g: Global) extends AnalyzerRule(g, "bincompat") {

  import global._

  private lazy val bincompatAnnotType = classType("com.avsystem.commons.annotation.bincompat")

  def analyze(unit: CompilationUnit): Unit =
    unit.body.foreach(analyzeTree {
      // Match any reference to a symbol carrying the @bincompat annotation.
      case tree@(_: Ident | _: Select | _: New) if tree.symbol != null &&
        tree.symbol.annotations.exists(_.tree.tpe <:< bincompatAnnotType) =>
        report(tree.pos, "Symbols annotated as @bincompat exist only for binary compatibility " +
          "and should not be used directly")
    })
}
| AVSystem/scala-commons | commons-analyzer/src/main/scala/com/avsystem/commons/analyzer/CheckBincompat.scala | Scala | mit | 649 |
package xyz.hyperreal.markdown
// Ad-hoc manual test harness: parses a small markdown snippet and prints both
// the input and the rendered HTML to stdout.
object Main extends App {

  // Sample input: a bullet list item followed by a lazy continuation line.
  val input =
    """
      |* alkj fds fdsa fds fds
      |oiu dsf oiu dsaf oiu
    """.trim.stripMargin
//    """
//      |Asterisks tight:
//      |
//      |*	a 1
//      |*	a 2
//      |*	a 3
//      |
//      |
//      |Asterisks loose:
//      |
//      |*	a 1
//      |
//      |
//      |*	a 2
//      |
//      |
//      |*	a 3
//      |
//      |stuff
//    """.trim.stripMargin

  println( input )

  val doc = Markdown( input )

  // Assign ids to headings before rendering.
  Util.headingIds( doc )

  val html = Util.html( doc, 2 )

  println( html )

}
| edadma/scala-markdown | src/test/scala/Main.scala | Scala | mit | 582 |
package io.github.interestinglab.waterdrop.input.batch
import io.github.interestinglab.waterdrop.config.{Config, ConfigFactory}
import io.github.interestinglab.waterdrop.apis.BaseStaticInput
import org.apache.spark.sql.{Dataset, Row, SparkSession, TiContext}
/**
 * Batch input plugin that reads from TiDB via TiSpark.
 *
 * Required configuration:
 *   - `database`: the TiDB database to map into Spark's catalog
 *   - `pre_sql`:  the SQL query executed to produce the input Dataset
 */
class Tidb extends BaseStaticInput {

  // Plugin configuration; injected via setConfig before any other call.
  var config: Config = ConfigFactory.empty()

  override def setConfig(config: Config): Unit = {
    this.config = config
  }

  override def getConfig(): Config = {
    this.config
  }

  /**
   * Validates that both mandatory keys are present.
   *
   * Bug fix: the original error message always said "please specify [pre_sql]"
   * even when [database] was the missing key; now every missing key is reported.
   */
  override def checkConfig(): (Boolean, String) = {
    val missing = List("database", "pre_sql").filterNot(config.hasPath)
    if (missing.isEmpty) {
      (true, "")
    } else {
      (false, "please specify " + missing.map(k => s"[$k]").mkString(" and "))
    }
  }

  /**
   * Maps all tables of the configured TiDB database into Spark's catalog,
   * then runs `pre_sql` against them to produce the input Dataset.
   */
  override def getDataset(spark: SparkSession): Dataset[Row] = {
    val database = config.getString("database")
    val ti = new TiContext(spark)
    ti.tidbMapDatabase(database)
    spark.sql(config.getString("pre_sql"))
  }
}
| InterestingLab/waterdrop | waterdrop-core/src/main/scala/io/github/interestinglab/waterdrop/input/batch/Tidb.scala | Scala | apache-2.0 | 932 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views.pages.amends
import forms.AmendCurrentPensionForm
import org.jsoup.Jsoup
import testHelpers.ViewSpecHelpers.CommonViewSpecHelper
import testHelpers.ViewSpecHelpers.ip2016.CurrentPensionsViewMessages
import uk.gov.hmrc.play.views.html.helpers.{ErrorSummary, FormWithCSRF}
import views.html.pages.amends.amendCurrentPensions
// View spec for the "amend current pensions" page: renders the template with a
// valid and an invalid form binding and asserts on the produced HTML via Jsoup.
class AmendCurrentPensionsViewSpec extends CommonViewSpecHelper with CurrentPensionsViewMessages{

  implicit val errorSummary: ErrorSummary = app.injector.instanceOf[ErrorSummary]
  implicit val formWithCSRF: FormWithCSRF = app.injector.instanceOf[FormWithCSRF]

  "the AmendCurrentPensionsView" should{
    // Valid binding: renders the page without errors.
    val amendCurrentPensionsForm = AmendCurrentPensionForm.amendCurrentPensionForm.bind(Map("amendedUKPensionAmt" -> "12000",
      "protectionType" -> "ip2016",
      "status" -> "open"))
    lazy val view = application.injector.instanceOf[amendCurrentPensions]
    lazy val doc = Jsoup.parse(view.apply(amendCurrentPensionsForm).body)

    // Invalid binding ("a" is not a number): exercises error rendering.
    val errorForm =  AmendCurrentPensionForm.amendCurrentPensionForm.bind(Map("amendedUKPensionAmt" -> "a",
      "protectionType" -> "ip2016",
      "status" -> "open"))
    lazy val errorView = application.injector.instanceOf[amendCurrentPensions]
    lazy val errorDoc = Jsoup.parse(errorView.apply(errorForm).body)
    lazy val form = doc.select("form")

    "have the correct title" in{
      doc.title() shouldBe plaCurrentPensionsTitle
    }

    "have the correct and properly formatted header"in{
      doc.select("h1").text shouldBe plaCurrentPensionsTitle
    }

    "have some introductory text" in{
      doc.select("p").first().text shouldBe plaCurrentPensionsQuestion
    }

    "have a hidden menu with the correct values" in{
      doc.select("summary").text shouldBe plaCurrentPensionsHiddenLink
      doc.select("li").eq(0).text shouldBe plaHiddenMenuItemOne
      doc.select("li").eq(1).text shouldBe plaHiddenMenuItemTwo
      doc.select("li").eq(2).text shouldBe plaHiddenMenuItemThree
      doc.select("li").eq(3).text shouldBe plaHiddenMenuItemFour
    }

    "have a help link redirecting to the right location" in{
      doc.select("p").eq(3).text shouldBe plaHelpLinkCompleteMessage
      doc.select("a").text shouldBe plaHelpLink
      doc.select("a").attr("href") shouldBe plaHelpLinkExternalReference
    }

    "has a valid form" in{
      form.attr("method") shouldBe "POST"
      form.attr("action") shouldBe controllers.routes.AmendsController.submitAmendCurrentPension().url
      form.select("legend.visually-hidden").text() shouldBe plaCurrentPensionsLegendText
    }

    "have a £ symbol present" in{
      doc.select(".poundSign").text shouldBe "£"
    }

    "have a continue button" in{
      doc.select("button").text shouldBe plaBaseChange
      doc.select("button").attr("type") shouldBe "submit"
    }

    "display the correct errors appropriately" in{
      errorDoc.select("h2").text shouldBe plaBaseErrorSummaryLabel
      errorDoc.select("a#amendedUKPensionAmt-error-summary").text shouldBe errorReal
      errorDoc.select("span#amendedUKPensionAmt-error-message.error-notification").text shouldBe errorReal
    }

    "not have errors on valid pages" in{
      amendCurrentPensionsForm.hasErrors shouldBe false
      doc.select("a#amendedUKPensionAmt-error-summary").text shouldBe ""
      doc.select("span#amendedUKPensionAmt-error-message.error-notification").text shouldBe ""
    }
  }
}
| hmrc/pensions-lifetime-allowance-frontend | test/views/pages/amends/AmendCurrentPensionsViewSpec.scala | Scala | apache-2.0 | 4,345 |
package dk.itu.coqoon.opam
import org.eclipse.core.runtime.{Path, IPath}
import scala.sys.process.{Process,ProcessBuilder,ProcessLogger}
case class OPAMException(s : String) extends Exception(s)
/**
 * Wraps a single OPAM root directory and exposes repositories, packages and
 * versions as objects. All queries/commands shell out to the `opam` binary
 * with OPAMROOT pointed at `path`.
 */
class OPAMRoot private[opam](val path : IPath) {

  // Cache of known packages -> installed version (None if not installed),
  // refreshed by fillCache() after every mutating operation.
  private var cache : Map[Package,Option[Package#Version]] = Map()

  case class Repository(val name : String, val uri : String) {
    def getRoot() = OPAMRoot.this
  }

  case class Package(val name : String) {
    def getRoot() = OPAMRoot.this

    case class Version(val version : String) {
      def getPackage() = Package.this

      // Installs this exact version (or a version constraint such as ">=1.0"),
      // optionally pinning the resulting installed version.
      def install(pin : Boolean = false,
                  logger : ProcessLogger = OPAM.drop) : Boolean = {
        val order_chars = """^[><=]""".r
        val thing = version match {
          case order_chars() => name + version
          case _ => name + "=" + version }
        val ok = OPAMRoot.this(logger, "install","-y",thing)
        var pin_ok = true
        fillCache
        if (ok && pin)
          OPAMRoot.this.getPackage(name).getInstalledVersion.foreach(v =>
            pin_ok = OPAMRoot.this(logger, "pin","add",name,v.version))
        ok && pin_ok
      }

      def uninstall() : Boolean = {
        val ok = OPAMRoot.this("remove","-y",name)
        fillCache
        ok
      }
    } /* Version */

    def installAnyVersion(logger : ProcessLogger = OPAM.drop) : Boolean = {
      val ok = OPAMRoot.this(logger, "install","-y",this.name)
      fillCache
      ok
    }

    def getConfigVar(name : String) =
      read("config", "var", this.name + ":" + name).head

    def getDescription() : String =
      read("show","-f","description",name).mkString("\\n")

    // Only queries opam when the package is known to the cache; otherwise
    // returns an empty sequence.
    def getAvailableVersions() : Seq[Version] =
      if (cache.contains(this)) {
        val version = """([^, ]++)""".r
        val versions_str =
          read("show", "-f",
            "available-version,available-versions", name).head.split(':')(1)
        (version findAllIn versions_str).map(new Version(_)).toList
      } else Seq()

    def getInstalledVersion() : Option[Package#Version] =
      try cache(this)
      catch { case e : NoSuchElementException => None }
      /*{
        val v = read("config","var",name + ":version").head
        if (v == "#undefined") None else Some(new Version(v))
      }*/

    def getLatestVersion() : Option[Version] =
      getAvailableVersions().lastOption

    def getVersion(version : String) : Version = new Version(version)

    def isPinned() =
      read("config", "var", name + ":pinned").head.trim == "true"
  } /* Package */

  def upgradeAllPackages(logger : ProcessLogger) =
    this(logger,"upgrade","-y")

  def getRepositories() : Seq[Repository] = {
    val repo = """.*?(\\S++)\\s++(\\S++)$""".r
    read("repo","list").map(_ match {
      case repo(name,uri) => new Repository(name,uri)
      case s => throw new OPAMException("error parsing " + s)
    })
  }

  def addRepositories(logger : ProcessLogger, repos : Repository*) : Unit =
    repos.foreach(r => this(logger, "repo","add",r.name,r.uri))

  def addRepositories(repos : Repository*) : Unit =
    addRepositories(OPAM.drop, repos:_*)

  def updateRepositories(logger : ProcessLogger) = {
    val ok = this(logger,"update")
    fillCache
    ok
  }

  def getPackages(filter : Package => Boolean = _ => true) : Seq[Package] =
    cache.keys.toList.filter(filter).sortWith((p1, p2) => p1.name < p2.name)
    /*{
      read("list","-a","-s",filter).map(s => new Package(s))
    }*/

  def getPackage(name : String) : Package = new Package(name)

  private final val name_ver = """^(\\S++)\\s++(\\S++).*""".r

  // Rebuilds the package cache from `opam list -a`; "--" in the version
  // column means the package is not installed.
  def fillCache() : Unit =
    for (name_ver(name, version) <- read("list","-a") if name(0) != '#';
         p = new Package(name);
         v = if (version != "--") Some(new p.Version(version)) else None)
      cache += (p -> v)

  // Builds a ProcessBuilder for `opam <args>` with the environment pointed at
  // this root; COQLIB/COQBIN are cleared to avoid interference.
  private [opam] def opam(args : String*) : ProcessBuilder = {
    Process(command="opam" +: args, cwd=None,
      "OPAMROOT" -> path.toString,
      "COQLIB" -> "",
      "COQBIN" -> ""
    )
  }

  // Runs an opam command and returns its stdout lines; wraps failures in OPAMException.
  private[opam] def read(cmd : String*) : Seq[String] = {
    try opam(cmd:_*).lineStream.toList
    catch { case e : RuntimeException => throw new OPAMException(cmd.mkString(" ") + ": " + e.getMessage) }
  }

  // Runs an opam command, returning whether it exited successfully.
  private[opam] def apply(cmd : String*) : Boolean = {
    opam(cmd:_*).run.exitValue() == 0
  }

  // As above, but echoes the command and streams output through the logger.
  private[opam] def apply(logger : ProcessLogger, cmd : String*) : Boolean = {
    logger.out("opam " + cmd.mkString(" "))
    opam(cmd :+ "-v" :_*).run(logger).exitValue() == 0
  }
} /* OPAMRoot */
object OPAM {
  import scala.ref.WeakReference
  import scala.collection.mutable.{Map => MMap}

  // Weakly-referenced registry of already-opened roots, keyed by path, so the
  // same OPAMRoot (and its cache) is reused while it is still reachable.
  var roots : MMap[IPath, WeakReference[OPAMRoot]] = MMap()

  // Returns the canonical OPAMRoot for a path, creating and caching one on
  // first use; yields None if the path cannot be opened as an OPAM root.
  def canonicalise(p : IPath) =
    roots.get(p).flatMap(_.get) match {
      case Some(root) =>
        Some(root)
      case None =>
        try {
          val root = new OPAMRoot(p)
          roots.update(p, WeakReference(root))
          root.fillCache
          Some(root)
        } catch {
          case e : OPAMException =>
            None
        }
    }

  // A ProcessLogger that discards all output.
  def drop = ProcessLogger(s => ())

  // Initialises a fresh OPAM root at `path` (must be nonexistent, empty, or an
  // existing root) with the given OCaml compiler, and returns it.
  def initRoot(path : IPath,
               ocaml : String = "system",
               logger : ProcessLogger = drop) = {
    val root = new OPAMRoot(path)
    val is_root = path.addTrailingSeparator.append("config").toFile.exists()
    val is_empty_dir = path.toFile.isDirectory() && path.toFile.list().isEmpty
    if (!is_root)
      if (is_empty_dir || !path.toFile.exists()) {
        if (root(logger,"init","--comp="+ocaml,"-j","2","-n")) {
          roots.update(path, WeakReference(root))
        } else throw new OPAMException("OPAM root initialisation failed")
      } else throw new OPAMException("path " + path + " is a non empty directory")
    root.fillCache
    root
  }

  /*
  def main(args : Array[String]) = {
    val tmp = "/tmp/test"
    //assert(Process("rm",Seq("-rf",tmp)).run().exitValue() == 0)
    val r = initRoot(new Path(tmp),"system")
    //r.addRepositories(new r.Repository("coq","http://coq.inria.fr/opam/released"))
    assert(r.getRepositories().length == 2, "#repos")
    println(r.getPackages())
    println(r.getPackage("camlp5").getAvailableVersions())
    println(r.getPackage("camlp5").getInstalledVersion())
    println(r.getPackage("camlp5").getLatestVersion.foreach(_.install(pin = false)))
    println(r.getPackage("camlp5").getInstalledVersion())
    println(r.getPackage("camlp5").getInstalledVersion().foreach(_.uninstall))
  }
  */
} | coqoon/coqoon-opam | plugins/dk.itu.coqoon.opam/src/dk/itu/coqoon/opam/OPAM.scala | Scala | apache-2.0 | 6,521 |
package k8sdnssky
import java.io.{ByteArrayInputStream, InputStream, InputStreamReader, StringWriter}
import java.security.spec.PKCS8EncodedKeySpec
import java.security.{KeyFactory, PrivateKey, Security}
import java.util.Collections
import org.bouncycastle.asn1.{ASN1Integer, ASN1OctetString, ASN1Primitive, ASN1Sequence}
import org.bouncycastle.jce.provider.BouncyCastleProvider
import org.bouncycastle.util.io.pem.{PemObject, PemReader, PemWriter}
import scala.collection.JavaConverters._
object BouncyCastleUtil {

  /**
   * Registers the Bouncy Castle JCA provider if it is not registered yet.
   * Synchronized so that concurrent callers cannot both observe the provider
   * as missing and attempt to register it twice.
   */
  def ensureBouncyCastleProviderIsRegistered(): Unit = {
    this.synchronized {
      if (Security.getProvider(BouncyCastleProvider.PROVIDER_NAME) == null) {
        Security.addProvider(new BouncyCastleProvider())
      }
    }
  }

  // Register eagerly when this object is first loaded.
  ensureBouncyCastleProviderIsRegistered()

  /**
   * Converts the DER content of a PKCS#1 RSA private key PEM object into a
   * PEM string whose content is the PKCS#8 encoding of the same key.
   */
  private def pkcsOneToPkcsEight(pemObject: PemObject): String = {
    val keySpec: PKCS8EncodedKeySpec = new PKCS8EncodedKeySpec(pemObject.getContent)
    val factory: KeyFactory = KeyFactory.getInstance("RSA", "BC")
    val privateKey: PrivateKey = factory.generatePrivate(keySpec)

    val stringWriter = new StringWriter()
    val pemWriter = new PemWriter(stringWriter)
    try {
      val pemType = "RSA PRIVATE KEY"
      pemWriter.writeObject(new PemObject(pemType, privateKey.getEncoded))
      pemWriter.flush()
      stringWriter.toString
    } finally {
      // Close the wrapping PemWriter before the underlying StringWriter so any
      // buffered output is flushed into it first.
      pemWriter.close()
      stringWriter.close()
    }
  }

  /**
   * Loads the given private key as PKCS#8 for netty. The private key file must be in PEM form and
   * can be either PKCS#1 (usually the default when generated with openssl) or PKCS#8.
   *
   * @throws IllegalArgumentException if the key is neither PKCS#1 nor PKCS#8
   */
  def loadPrivateKeyAsPkcsEightString(f: () => InputStream): () => InputStream = {
    val pemReader = new PemReader(new InputStreamReader(f.apply()))
    try {
      val pemObject = pemReader.readPemObject()
      val asn1Primitive = ASN1Primitive.fromByteArray(pemObject.getContent)
      val errorMessage = "Private key is not a PKCS#1 or PKCS#8 private key in PEM format."
      // see https://tls.mbed.org/kb/cryptography/asn1-key-structures-in-der-and-pem for a nice
      // description of ASN.1 key structures for PKCS#1 and PKCS#8 in PEM form
      asn1Primitive match {
        case asn1Sequence: ASN1Sequence =>
          val asn = Collections.list(asn1Sequence.getObjects).asScala
          if (asn.length == 9 || asn.length == 10) {
            // PKCS#1: a SEQUENCE of 9 (or 10) INTEGERs; private key needs to be
            // converted to PKCS#8 for netty.
            if (asn.exists(x => !x.isInstanceOf[ASN1Integer])) {
              throw new IllegalArgumentException(errorMessage)
            } else {
              () => new ByteArrayInputStream(pkcsOneToPkcsEight(pemObject).getBytes)
            }
          } else if (asn.length == 3) {
            // PKCS#8: version INTEGER, algorithm SEQUENCE, key OCTET STRING;
            // private key can be passed to netty as-is.
            //noinspection ZeroIndexToHead
            // Fixed: use logical || rather than the non-short-circuiting bitwise |.
            if (!asn(0).isInstanceOf[ASN1Integer] || !asn(1).isInstanceOf[ASN1Sequence]
              || !asn(2).isInstanceOf[ASN1OctetString]) {
              throw new IllegalArgumentException(errorMessage)
            } else {
              f
            }
          } else {
            throw new IllegalArgumentException(errorMessage)
          }
        case _ =>
          throw new IllegalArgumentException(errorMessage)
      }
    } finally {
      pemReader.close()
    }
  }
}
| ferdinandhuebner/k8s-dns-sky | src/main/scala/k8sdnssky/BouncyCastleUtil.scala | Scala | apache-2.0 | 3,353 |
/**
* Copyright (C) 2013 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms
import java.{util ⇒ ju}
import org.orbeon.oxf.common.ValidationException
import org.orbeon.oxf.util.Logging
import org.orbeon.oxf.util.ScalaUtils._
import org.orbeon.oxf.xforms.analysis.model.Model
import org.orbeon.oxf.xforms.event.events.{XXFormsInvalidEvent, XXFormsValidEvent}
import org.orbeon.oxf.xforms.event.{Dispatch, ListenersTrait, XFormsEvent}
import org.orbeon.oxf.xforms.function.XFormsFunction
import org.orbeon.oxf.xforms.model._
import org.orbeon.oxf.xforms.xbl.XBLContainer
import org.orbeon.saxon.expr.XPathContext
import org.orbeon.saxon.om.StructuredQName
import scala.collection.JavaConverters._
import scala.collection.mutable
// Base class for XForms model runtime behavior: implements the deferred
// rebuild / recalculate-revalidate / refresh (RRR) machinery and variable
// resolution; instance/bind accessors are supplied by the concrete subclass.
abstract class XFormsModelBase(val container: XBLContainer, val effectiveId: String, val staticModel: Model)
  extends Logging
  with ListenersTrait {

  val containingDocument = container.getContainingDocument
  implicit val indentedLogger = containingDocument.getIndentedLogger(XFormsModel.LOGGING_CATEGORY)

  // Holds the pending rebuild/recalculate-revalidate flags for this model.
  val deferredActionContext = new DeferredActionContext(container)

  // TEMP: implemented in Java subclass until we move everything to Scala
  def selfModel: XFormsModel
  def resetAndEvaluateVariables(): Unit
  def getBinds: XFormsModelBinds
  def getInstances: ju.List[XFormsInstance]
  def getInstance(instanceStaticId: String): XFormsInstance
  def mustBindValidate: Boolean

  // Lazily create the schema validator and load schemas on first access.
  private lazy val _schemaValidator =
    new XFormsModelSchemaValidator(staticModel.element, indentedLogger) |!> (_.loadSchemas(containingDocument))

  def schemaValidator = _schemaValidator
  def hasSchema = _schemaValidator.hasSchema

  def getSchemaURIs: Array[String] =
    if (hasSchema)
      _schemaValidator.getSchemaURIs
    else
      null

  // Flags this model (and optionally one instance) as structurally changed so
  // the next RRR pass does a full rebuild.
  def markStructuralChange(instanceOpt: Option[XFormsInstance], defaultsStrategy: DefaultsStrategy): Unit = {
    deferredActionContext.markStructuralChange(defaultsStrategy, instanceOpt map (_.getId))

    // NOTE: PathMapXPathDependencies doesn't yet make use of the `instance` parameter.
    containingDocument.getXPathDependencies.markStructuralChange(selfModel, instanceOpt)
  }

  def doRebuild(): Unit = {
    if (deferredActionContext.rebuild) {
      try {
        resetAndEvaluateVariables()
        if (hasInstancesAndBinds) {
          // NOTE: contextStack.resetBindingContext(this) called in evaluateVariables()
          getBinds.rebuild()

          // Controls may have @bind or bind() references, so we need to mark them as dirty. Will need dependencies for controls to fix this.
          // TODO: Handle XPathDependencies
          container.requireRefresh()
        }
      } finally {
        deferredActionContext.resetRebuild()
      }
    }
    containingDocument.getXPathDependencies.rebuildDone(staticModel)
  }

  // Recalculate and revalidate are a combined operation
  // See https://github.com/orbeon/orbeon-forms/issues/1650
  def doRecalculateRevalidate(): Unit = {

    val instances = getInstances.asScala

    // Do the work if needed
    // TODO: Ensure that there are no side effects via event dispatch.
    // Returns the set of invalid instance ids, or None if revalidation was not needed.
    def recalculateRevalidate: Option[collection.Set[String]] =
      if (deferredActionContext.recalculateRevalidate) {
        try {
          doRecalculate(deferredActionContext.defaultsStrategy)
          containingDocument.getXPathDependencies.recalculateDone(staticModel)

          // Validate only if needed, including checking the flags, because if validation state is clean, validation
          // being idempotent, revalidating is not needed.
          val mustRevalidate = instances.nonEmpty && (mustBindValidate || hasSchema)

          mustRevalidate option {
            val invalidInstances = doRevalidate()
            containingDocument.getXPathDependencies.revalidateDone(staticModel)
            invalidInstances
          }
        } finally {

          // Clear "requires default value" markers on all flagged instances.
          for {
            instanceId ← deferredActionContext.flaggedInstances
            doc        ← getInstance(instanceId).underlyingDocumentOpt
          } locally {
            InstanceDataOps.clearRequireDefaultValueRecursively(doc)
          }

          deferredActionContext.resetRecalculateRevalidate()
        }
      } else
        None

    // Gather events to dispatch, at most one per instance, and only if validity has changed
    // NOTE: It is possible, with binds and the use of xxf:instance(), that some instances in
    // invalidInstances do not belong to this model. Those instances won't get events with the dispatching
    // algorithm below.
    def createAndCommitValidationEvents(invalidInstancesIds: collection.Set[String]): Seq[XFormsEvent] = {

      val changedInstances =
        for {
          instance        ← instances
          previouslyValid = instance.valid
          currentlyValid  = ! invalidInstancesIds(instance.getEffectiveId)
          if previouslyValid != currentlyValid
        } yield
          instance

      // Update instance validity
      for (instance ← changedInstances)
        instance.valid = ! instance.valid

      // Create events
      for (instance ← changedInstances)
        yield
          if (instance.valid) new XXFormsValidEvent(instance) else new XXFormsInvalidEvent(instance)
    }

    val validationEvents =
      recalculateRevalidate map createAndCommitValidationEvents getOrElse Nil

    // Dispatch all events
    for (event ← validationEvents)
      Dispatch.dispatchEvent(event)
  }

  private def doRecalculate(defaultsStrategy: DefaultsStrategy): Unit =
    withDebug("performing recalculate", List("model" → effectiveId)) {

      val hasVariables = staticModel.variablesSeq.nonEmpty

      // Re-evaluate top-level variables if needed
      if (hasInstancesAndBinds || hasVariables)
        resetAndEvaluateVariables()

      // Apply calculate binds
      if (hasInstancesAndBinds)
        getBinds.applyDefaultAndCalculateBinds(defaultsStrategy)
    }

  private def doRevalidate(): collection.Set[String] =
    withDebug("performing revalidate", List("model" → effectiveId)) {

      val instances = getInstances.asScala
      val invalidInstancesIds = mutable.LinkedHashSet[String]()

      // Clear schema validation state
      // NOTE: This could possibly be moved to rebuild(), but we must be careful about the presence of a schema
      for {
        instance ← instances
        instanceMightBeSchemaValidated = hasSchema && instance.isSchemaValidation
        if instanceMightBeSchemaValidated
      } locally {
        DataModel.visitElement(instance.rootElement, InstanceData.clearSchemaState)
      }

      // Validate using schemas if needed
      if (hasSchema)
        for {
          instance ← instances
          if instance.isSchemaValidation // we don't support validating read-only instances
          if ! _schemaValidator.validateInstance(instance) // apply schema
        } locally {
          // Remember that instance is invalid
          invalidInstancesIds += instance.getEffectiveId
        }

      // Validate using binds if needed
      if (mustBindValidate)
        getBinds.applyValidationBinds(invalidInstancesIds.asJava)

      invalidInstancesIds
    }

  private def hasInstancesAndBinds: Boolean =
    ! getInstances.isEmpty && (getBinds ne null)

  def needRebuildRecalculateRevalidate =
    deferredActionContext.rebuild || deferredActionContext.recalculateRevalidate

  // This is called in response to dispatching xforms-refresh to this model, whether using the xf:refresh
  // action or by dispatching the event by hand.
  // NOTE: If the refresh flag is not set, we do not call synchronizeAndRefresh() because that would only have the
  // side effect of performing RRR on models, but but not update the UI, which wouldn't make sense for xforms-refresh.
  // This said, is unlikely (impossible?) that the RRR flags would be set but not the refresh flag.
  // FIXME: See https://github.com/orbeon/orbeon-forms/issues/1650
  protected def doRefresh(): Unit =
    if (containingDocument.getControls.isRequireRefresh)
      container.synchronizeAndRefresh()

  def getDefaultEvaluationContext: BindingContext

  // Resolves XPath variables: first against bind names, then against
  // top-level model variables; fails with an exception if undeclared.
  val variableResolver =
    (variableQName: StructuredQName, xpathContext: XPathContext) ⇒
      staticModel.bindsByName.get(variableQName.getLocalName) match {
        case Some(targetStaticBind) ⇒
          // Variable value is a bind nodeset to resolve
          BindVariableResolver.resolveClosestBind(
            modelBinds         = getBinds,
            contextBindNodeOpt = XFormsFunction.context.data.asInstanceOf[Option[BindNode]],
            targetStaticBind   = targetStaticBind
          ) getOrElse
            (throw new IllegalStateException)
        case None ⇒
          // Try top-level model variables
          val modelVariables = getDefaultEvaluationContext.getInScopeVariables
          // NOTE: With XPath analysis on, variable scope has been checked statically
          Option(modelVariables.get(variableQName.getLocalName)) getOrElse
            (throw new ValidationException("Undeclared variable in XPath expression: $" + variableQName.getClarkName, staticModel.locationData))
      }
}
| joansmith/orbeon-forms | src/main/scala/org/orbeon/oxf/xforms/XFormsModelBase.scala | Scala | lgpl-2.1 | 9,814 |
package org.keycloak.gatling
import io.gatling.core.action.{Chainable, UserEnd}
import io.gatling.core.session.Session
import io.gatling.core.validation.Validation
/**
* @author Radim Vansa <rvansa@redhat.com>
*/
// Gatling action mixin: runs executeOrFail and, on failure, logs the problem,
// marks the session as failed and terminates the virtual user.
trait ExitOnFailure extends Chainable {

  override def execute(session: Session): Unit = {
    executeOrFail(session).onFailure { message =>
      logger.error(s"'${self.path.name}' failed to execute: $message")
      // Route the failed session to UserEnd so the scenario stops for this user.
      UserEnd.instance ! session.markAsFailed
    }
  }

  // Implementations perform the actual work and report failure via Validation.
  def executeOrFail(session: Session): Validation[_]
}
| thomasdarimont/keycloak | testsuite/performance/tests/src/main/scala/org/keycloak/gatling/ExitOnFailure.scala | Scala | apache-2.0 | 550 |
package mesosphere.marathon
package api.v2
import mesosphere.marathon.core.appinfo.AppSelector
import mesosphere.marathon.state.AppDefinition
import org.slf4j.LoggerFactory
import scala.util.control.NonFatal
import scala.util.parsing.combinator.RegexParsers
case class LabelSelector(key: String, fn: String => Boolean, value: List[String]) extends AppSelector {
  /** An app matches when it carries the label `key` and that label's value satisfies `fn`. */
  def matches(app: AppDefinition): Boolean = app.labels.get(key).exists(fn)
}
case class LabelSelectors(selectors: Seq[LabelSelector]) extends AppSelector {
  // An app matches only if every individual selector matches (logical AND).
  def matches(app: AppDefinition): Boolean = selectors.forall(_.matches(app))
}
/**
* Parse a label selector query.
* A label selector query has this format:
* query: selector {, selector}
* selector: existenceSelector | setSelector | equalsSelector
* existenceSelector: label
* equalsSelector: term ==|!= term
* setSelector: term in|notin set
* set: ( term {, term} )
* term: character sequence with character groups A-Za-z0-9 and characters .-_.
* Any other character needs to be escaped with backslash.
*
* Examples:
* test == foo
* test != foo
* environment in (production, qa)
* tier notin (frontend, backend)
* partition
* \\!\\!\\! in (\\-\\-\\-, \\&\\&\\&, \\+\\+\\+)
* a in (t1, t2, t3), b notin (t1), c, d
*
*/
class LabelSelectorParsers extends RegexParsers {
  private[this] val log = LoggerFactory.getLogger(getClass.getName)

  //Allowed characters are A-Za-z0-9._- All other characters can be used, but need to be escaped.
  // After parsing, backslash-escapes are unwrapped to the escaped character.
  def term: Parser[String] = """(\\\\.|[-A-Za-z0-9_.])+""".r ^^ { _.replaceAll("""\\\\(.)""", "$1") }

  // Bare label name: matches if the label exists, regardless of its value.
  def existenceSelector: Parser[LabelSelector] = term ^^ {
    existence: String => LabelSelector(existence, _ => true, List.empty)
  }

  def equalityOp: Parser[String] = """(==|!=)""".r
  def equalitySelector: Parser[LabelSelector] = term ~ equalityOp ~ term ^^ {
    case label ~ "==" ~ value => LabelSelector(label, value == _, List(value))
    case label ~ "!=" ~ value => LabelSelector(label, value != _, List(value))
  }

  // Parenthesized, comma-separated list of terms, e.g. (a, b, c).
  def set: Parser[List[String]] = "(" ~> repsep(term, ",") <~ ")"
  def setOp: Parser[String] = """(in|notin)""".r
  def setSelector: Parser[LabelSelector] = term ~ setOp ~ set ^^ {
    case label ~ "in" ~ set => LabelSelector(label, set.contains(_), set)
    case label ~ "notin" ~ set => LabelSelector(label, !set.contains(_), set)
  }

  // Order matters: try set and equality forms before falling back to bare existence.
  def selector: Parser[LabelSelector] = setSelector | equalitySelector | existenceSelector
  def selectors: Parser[List[LabelSelector]] = repsep(selector, ",")

  // Parses a full query, returning either an error message or the combined selectors.
  def parseSelectors(in: String): Either[String, LabelSelectors] = {
    try {
      parseAll(selectors, in) match {
        case Success(selectors, _) => Right(LabelSelectors(selectors))
        case NoSuccess(message, _) => Left(message)
      }
    } catch {
      case NonFatal(ex) =>
        log.warn(s"Could not parse $in", ex)
        Left(ex.getMessage)
    }
  }

  // Like parseSelectors, but throws IllegalArgumentException on parse failure.
  def parsed(in: String): LabelSelectors = parseSelectors(in) match {
    case Left(message) => throw new IllegalArgumentException(s"Can not parse label selector $in. Reason: $message")
    case Right(selectors) => selectors
  }
}
| guenter/marathon | src/main/scala/mesosphere/marathon/api/v2/LabelSelector.scala | Scala | apache-2.0 | 3,143 |
import sbt._
import sbt.Keys._
import com.typesafe.sbt.SbtPgp
import java.io.File
import java.io.FileInputStream
import java.util.Properties
import sbtrelease.ReleasePlugin.ReleaseKeys._
/**
 * Publishing configuration for Sonatype OSS (Maven Central).
 *
 * Credentials and signing-key settings are read from a `ci.properties` file in the
 * directory named by the GPG_KEY environment variable (falling back to the current
 * directory). When that file is absent — e.g. on local developer machines — no
 * publishing settings are applied at all.
 */
object Sonatype {
  // Directory holding CI secrets (keyring + properties file).
  val keyDir = new File(sys.env.getOrElse("GPG_KEY", "."))
  val propFile = new File(keyDir, "ci.properties")

  // Property keys inside ci.properties; named to match the Gradle CI convention.
  val username = "ORG_GRADLE_PROJECT_sonatypeUsername"
  val password = "ORG_GRADLE_PROJECT_sonatypePassword"
  val secretKeyRing = "ORG_GRADLE_PROJECT_signing.secretKeyRingFile"
  val keyPassword = "ORG_GRADLE_PROJECT_signing.password"

  /** Settings to mix into publishable projects; empty when no CI credentials exist. */
  lazy val settings: Seq[Def.Setting[_]] = {
    if (!propFile.exists) Seq.empty else {
      val props = new Properties
      val input = new FileInputStream(propFile)
      try props.load(input) finally input.close()
      // NOTE(review): getProperty returns null for a missing key, so a malformed
      // ci.properties fails here with an NPE — TODO confirm fail-fast is intended.
      mavenSettings ++ SbtPgp.settings ++ Seq(
        SbtPgp.pgpPassphrase := Some(props.getProperty(keyPassword).toArray),
        SbtPgp.pgpSecretRing := new File(keyDir, "secring.gpg"),
        credentials += Credentials(
          "Sonatype Nexus Repository Manager",
          "oss.sonatype.org",
          props.getProperty(username),
          props.getProperty(password))
      )
    }
  }

  /**
   * Maven-style publishing: snapshots go to the snapshots repo, releases to the
   * staging repo; artifacts are PGP-signed via the sbt-pgp plugin.
   */
  lazy val mavenSettings = Seq(
    publishTo := {
      val nexus = "https://oss.sonatype.org/"
      if (isSnapshot.value)
        Some("snapshots" at nexus + "content/repositories/snapshots")
      else
        Some("releases" at nexus + "service/local/staging/deploy/maven2")
    },
    publishMavenStyle := true,
    publishArtifact in Test := false,
    publishArtifactsAction := SbtPgp.PgpKeys.publishSigned.value,
    pomIncludeRepository := { _ => false },
    pomExtra := (
      <url>https://github.com/netflix/atlas/wiki</url>
      <licenses>
        <license>
          <name>The Apache Software License, Version 2.0</name>
          <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
          <distribution>repo</distribution>
        </license>
      </licenses>
      <scm>
        <url>git@github.com:netflix/atlas.git</url>
        <connection>scm:git:git@github.com:netflix/atlas.git</connection>
      </scm>
      <developers>
        <developer>
          <id>brharrington</id>
          <name>Brian Harrington</name>
          <email>brharrington@netflix.com</email>
        </developer>
      </developers>
    )
  )

  /** Settings for subprojects that must never be published. */
  lazy val noPublishing = Seq(
    publish := (),
    publishLocal := (),
    publishTo := None
  )
}
| rspieldenner/atlas | project/Sonatype.scala | Scala | apache-2.0 | 2,467 |
package io.getquill.dsl
import io.getquill.quotation.NonQuotedException
import scala.annotation.compileTimeOnly
/**
 * DSL for expressing orderings inside quoted queries.
 *
 * All methods here exist only to be recognized and rewritten by the quotation macro
 * at compile time. Calling any of them at runtime is a bug, which is enforced by
 * `@compileTimeOnly` (compile error outside a quotation) and `NonQuotedException`
 * (runtime failure if the annotation is somehow bypassed).
 */
private[dsl] trait OrdDsl {

  // Phantom type representing an ordering of values of type T; given meaning by the macro.
  trait Ord[T]

  @compileTimeOnly(NonQuotedException.message)
  implicit def implicitOrd[T]: Ord[T] = NonQuotedException()

  object Ord {

    // Basic orderings.
    @compileTimeOnly(NonQuotedException.message)
    def asc[T]: Ord[T] = NonQuotedException()
    @compileTimeOnly(NonQuotedException.message)
    def desc[T]: Ord[T] = NonQuotedException()

    // Orderings with explicit null placement.
    @compileTimeOnly(NonQuotedException.message)
    def ascNullsFirst[T]: Ord[T] = NonQuotedException()
    @compileTimeOnly(NonQuotedException.message)
    def descNullsFirst[T]: Ord[T] = NonQuotedException()
    @compileTimeOnly(NonQuotedException.message)
    def ascNullsLast[T]: Ord[T] = NonQuotedException()
    @compileTimeOnly(NonQuotedException.message)
    def descNullsLast[T]: Ord[T] = NonQuotedException()

    // Composite orderings over tuples of 2 to 10 elements: element orderings are
    // applied left-to-right (lexicographic precedence).
    @compileTimeOnly(NonQuotedException.message)
    def apply[T1, T2](o1: Ord[T1], o2: Ord[T2]): Ord[(T1, T2)] = NonQuotedException()
    @compileTimeOnly(NonQuotedException.message)
    def apply[T1, T2, T3](o1: Ord[T1], o2: Ord[T2], o3: Ord[T3]): Ord[(T1, T2, T3)] = NonQuotedException()
    @compileTimeOnly(NonQuotedException.message)
    def apply[T1, T2, T3, T4](o1: Ord[T1], o2: Ord[T2], o3: Ord[T3], o4: Ord[T4]): Ord[(T1, T2, T3, T4)] = NonQuotedException()
    @compileTimeOnly(NonQuotedException.message)
    def apply[T1, T2, T3, T4, T5](o1: Ord[T1], o2: Ord[T2], o3: Ord[T3], o4: Ord[T4], o5: Ord[T5]): Ord[(T1, T2, T3, T4, T5)] = NonQuotedException()
    @compileTimeOnly(NonQuotedException.message)
    def apply[T1, T2, T3, T4, T5, T6](o1: Ord[T1], o2: Ord[T2], o3: Ord[T3], o4: Ord[T4], o5: Ord[T5], o6: Ord[T6]): Ord[(T1, T2, T3, T4, T5, T6)] = NonQuotedException()
    @compileTimeOnly(NonQuotedException.message)
    def apply[T1, T2, T3, T4, T5, T6, T7](o1: Ord[T1], o2: Ord[T2], o3: Ord[T3], o4: Ord[T4], o5: Ord[T5], o6: Ord[T6], o7: Ord[T7]): Ord[(T1, T2, T3, T4, T5, T6, T7)] = NonQuotedException()
    @compileTimeOnly(NonQuotedException.message)
    def apply[T1, T2, T3, T4, T5, T6, T7, T8](o1: Ord[T1], o2: Ord[T2], o3: Ord[T3], o4: Ord[T4], o5: Ord[T5], o6: Ord[T6], o7: Ord[T7], o8: Ord[T8]): Ord[(T1, T2, T3, T4, T5, T6, T7, T8)] = NonQuotedException()
    @compileTimeOnly(NonQuotedException.message)
    def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9](o1: Ord[T1], o2: Ord[T2], o3: Ord[T3], o4: Ord[T4], o5: Ord[T5], o6: Ord[T6], o7: Ord[T7], o8: Ord[T8], o9: Ord[T9]): Ord[(T1, T2, T3, T4, T5, T6, T7, T8, T9)] = NonQuotedException()
    @compileTimeOnly(NonQuotedException.message)
    def apply[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10](o1: Ord[T1], o2: Ord[T2], o3: Ord[T3], o4: Ord[T4], o5: Ord[T5], o6: Ord[T6], o7: Ord[T7], o8: Ord[T8], o9: Ord[T9], o10: Ord[T10]): Ord[(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10)] = NonQuotedException()
  }
}
| jcranky/quill | quill-core/src/main/scala/io/getquill/dsl/OrdDsl.scala | Scala | apache-2.0 | 2,890 |
// Solution-4b.scala
// Solution to Exercise 4 in "Imports & Packages"
// Solution 2: import 2 classes
// A single selector clause {Science, Movies} imports both classes in one statement,
// instead of two separate import lines or a wildcard import.
import com.atomicscala.trivia.{Science, Movies}
val science = new Science
val movies = new Movies
println("Imported 2 classes")
/* OUTPUT_SHOULD_BE
Imported 2 classes
*/
| P7h/ScalaPlayground | Atomic Scala/atomic-scala-solutions/14_ImportsAndPackages-2ndEdition/Solution-4b.scala | Scala | apache-2.0 | 276 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.yarn
import java.io.{FileSystem => _, _}
import java.net.{InetAddress, UnknownHostException, URI}
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util.{Locale, Properties, UUID}
import java.util.zip.{ZipEntry, ZipOutputStream}
import scala.collection.JavaConverters._
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, ListBuffer, Map}
import scala.util.control.NonFatal
import com.google.common.base.Objects
import com.google.common.io.Files
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.apache.hadoop.fs.permission.FsPermission
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce.MRJobConfig
import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.util.StringUtils
import org.apache.hadoop.yarn.api._
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment
import org.apache.hadoop.yarn.api.protocolrecords._
import org.apache.hadoop.yarn.api.records._
import org.apache.hadoop.yarn.client.api.{YarnClient, YarnClientApplication}
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier
import org.apache.hadoop.yarn.util.Records
import org.apache.spark.{SecurityManager, SparkConf, SparkException}
import org.apache.spark.api.python.PythonUtils
import org.apache.spark.deploy.{SparkApplication, SparkHadoopUtil}
import org.apache.spark.deploy.security.HadoopDelegationTokenManager
import org.apache.spark.deploy.yarn.ResourceRequestHelper._
import org.apache.spark.deploy.yarn.config._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Python._
import org.apache.spark.launcher.{LauncherBackend, SparkAppHandle, YarnCommandBuilderUtils}
import org.apache.spark.rpc.RpcEnv
import org.apache.spark.util.{CallerContext, Utils}
private[spark] class Client(
val args: ClientArguments,
val sparkConf: SparkConf,
val rpcEnv: RpcEnv)
extends Logging {
import Client._
import YarnSparkHadoopUtil._
// Handles to YARN and Hadoop, built from the Spark configuration.
private val yarnClient = YarnClient.createYarnClient
private val hadoopConf = new YarnConfiguration(SparkHadoopUtil.newConfiguration(sparkConf))

private val isClusterMode = sparkConf.get(SUBMIT_DEPLOY_MODE) == "cluster"

// An unmanaged AM (AM run inside this client process) is only possible in client mode.
private val isClientUnmanagedAMEnabled = sparkConf.get(YARN_UNMANAGED_AM) && !isClusterMode

// Set later: appMaster only when running an unmanaged AM; stagingDirPath at submission.
private var appMaster: ApplicationMaster = _
private var stagingDirPath: Path = _

// AM related configurations
// In cluster mode the driver runs inside the AM, so driver settings size the AM container.
private val amMemory = if (isClusterMode) {
  sparkConf.get(DRIVER_MEMORY).toInt
} else {
  sparkConf.get(AM_MEMORY).toInt
}
// Explicit overhead setting wins; otherwise a factor of the AM memory, floored at a minimum.
private val amMemoryOverhead = {
  val amMemoryOverheadEntry = if (isClusterMode) DRIVER_MEMORY_OVERHEAD else AM_MEMORY_OVERHEAD
  sparkConf.get(amMemoryOverheadEntry).getOrElse(
    math.max((MEMORY_OVERHEAD_FACTOR * amMemory).toLong, MEMORY_OVERHEAD_MIN)).toInt
}
private val amCores = if (isClusterMode) {
  sparkConf.get(DRIVER_CORES)
} else {
  sparkConf.get(AM_CORES)
}

// Executor related configurations
private val executorMemory = sparkConf.get(EXECUTOR_MEMORY)
// Executor offHeap memory in MiB.
protected val executorOffHeapMemory = YarnSparkHadoopUtil.executorOffHeapMemorySizeAsMb(sparkConf)
private val executorMemoryOverhead = sparkConf.get(EXECUTOR_MEMORY_OVERHEAD).getOrElse(
  math.max((MEMORY_OVERHEAD_FACTOR * executorMemory).toLong, MEMORY_OVERHEAD_MIN)).toInt

private val isPython = sparkConf.get(IS_PYTHON_APP)
// Extra container memory reserved for PySpark worker processes; 0 for non-Python apps.
private val pysparkWorkerMemory: Int = if (isPython) {
  sparkConf.get(PYSPARK_EXECUTOR_MEMORY).map(_.toInt).getOrElse(0)
} else {
  0
}

private val distCacheMgr = new ClientDistributedCacheManager()
// Accumulates distributed-cache metadata that is later shipped to the AM.
private val cachedResourcesConf = new SparkConf(false)

private val keytab = sparkConf.get(KEYTAB).orNull
// In cluster mode, the keytab is uploaded under a randomized name so it cannot
// collide with a user-supplied file of the same name.
private val amKeytabFileName: Option[String] = if (keytab != null && isClusterMode) {
  val principal = sparkConf.get(PRINCIPAL).orNull
  require((principal == null) == (keytab == null),
    "Both principal and keytab must be defined, or neither.")
  logInfo(s"Kerberos credentials: principal = $principal, keytab = $keytab")
  // Generate a file name that can be used for the keytab file, that does not conflict
  // with any user file.
  Some(new File(keytab).getName() + "-" + UUID.randomUUID().toString)
} else {
  None
}

require(keytab == null || !Utils.isLocalUri(keytab), "Keytab should reference a local file.")

// Bridge back to the spark-launcher process (if any) that spawned this client;
// a stop request kills the YARN app in cluster mode, otherwise stops locally.
private val launcherBackend = new LauncherBackend() {
  override protected def conf: SparkConf = sparkConf

  override def onStopRequest(): Unit = {
    if (isClusterMode && appId != null) {
      yarnClient.killApplication(appId)
    } else {
      setState(SparkAppHandle.State.KILLED)
      stop()
    }
  }
}
// In cluster mode, unless configured to wait for completion, submission is fire-and-forget.
private val fireAndForget = isClusterMode && !sparkConf.get(WAIT_FOR_APP_COMPLETION)

private var appId: ApplicationId = null

// Forward the given application state to the launcher backend.
def reportLauncherState(state: SparkAppHandle.State): Unit = {
  launcherBackend.setState(state)
}
/** Shut down this client: stop any unmanaged AM, then close the launcher backend and YARN client. */
def stop(): Unit = {
  Option(appMaster).foreach(_.stopUnmanaged(stagingDirPath))
  launcherBackend.close()
  yarnClient.stop()
}
/**
 * Submit an application running our ApplicationMaster to the ResourceManager.
 *
 * The stable Yarn API provides a convenience method (YarnClient#createApplication) for
 * creating applications and setting up the application submission context. This was not
 * available in the alpha API.
 *
 * @return the application id assigned by the ResourceManager.
 */
def submitApplication(): ApplicationId = {
  ResourceRequestHelper.validateResources(sparkConf)

  var appId: ApplicationId = null
  try {
    launcherBackend.connect()
    yarnClient.init(hadoopConf)
    yarnClient.start()

    logInfo("Requesting a new application from cluster with %d NodeManagers"
      .format(yarnClient.getYarnClusterMetrics.getNumNodeManagers))

    // Get a new application from our RM
    val newApp = yarnClient.createApplication()
    val newAppResponse = newApp.getNewApplicationResponse()
    appId = newAppResponse.getApplicationId()

    // The app staging dir based on the STAGING_DIR configuration if configured
    // otherwise based on the users home directory.
    val appStagingBaseDir = sparkConf.get(STAGING_DIR)
      .map { new Path(_, UserGroupInformation.getCurrentUser.getShortUserName) }
      .getOrElse(FileSystem.get(hadoopConf).getHomeDirectory())
    stagingDirPath = new Path(appStagingBaseDir, getAppStagingDir(appId))

    new CallerContext("CLIENT", sparkConf.get(APP_CALLER_CONTEXT),
      Option(appId.toString)).setCurrentContext()

    // Verify whether the cluster has enough resources for our AM
    verifyClusterResources(newAppResponse)

    // Set up the appropriate contexts to launch our AM
    val containerContext = createContainerLaunchContext(newAppResponse)
    val appContext = createApplicationSubmissionContext(newApp, containerContext)

    // Finally, submit and monitor the application
    logInfo(s"Submitting application $appId to ResourceManager")
    yarnClient.submitApplication(appContext)
    launcherBackend.setAppId(appId.toString)
    reportLauncherState(SparkAppHandle.State.SUBMITTED)

    appId
  } catch {
    case e: Throwable =>
      // Best-effort removal of anything already uploaded to the staging dir.
      if (stagingDirPath != null) {
        cleanupStagingDir()
      }
      throw e
  }
}
/**
 * Delete the application staging directory, unless the user has asked for staged
 * files to be preserved. Failures are logged and never propagated.
 */
private def cleanupStagingDir(): Unit = {
  if (!sparkConf.get(PRESERVE_STAGING_FILES)) {
    try {
      val stagingFs = stagingDirPath.getFileSystem(hadoopConf)
      if (stagingFs.delete(stagingDirPath, true)) {
        logInfo(s"Deleted staging directory $stagingDirPath")
      }
    } catch {
      case ioe: IOException =>
        logWarning("Failed to cleanup staging dir " + stagingDirPath, ioe)
    }
  }
}
/**
 * Set up the context for submitting our ApplicationMaster.
 * This uses the YarnClientApplication not available in the Yarn alpha API.
 *
 * @param newApp           the application handle obtained from the ResourceManager.
 * @param containerContext launch context for the AM container.
 * @return the fully-populated submission context.
 */
def createApplicationSubmissionContext(
    newApp: YarnClientApplication,
    containerContext: ContainerLaunchContext): ApplicationSubmissionContext = {

  // Custom YARN resource types for the AM: driver-prefixed config in cluster mode,
  // AM-prefixed in client mode, plus resources mapped from spark.driver.resource.*.
  val componentName = if (isClusterMode) {
    config.YARN_DRIVER_RESOURCE_TYPES_PREFIX
  } else {
    config.YARN_AM_RESOURCE_TYPES_PREFIX
  }
  val yarnAMResources = getYarnResourcesAndAmounts(sparkConf, componentName)
  val amResources = yarnAMResources ++
    getYarnResourcesFromSparkResources(SPARK_DRIVER_PREFIX, sparkConf)
  logDebug(s"AM resources: $amResources")
  val appContext = newApp.getApplicationSubmissionContext
  appContext.setApplicationName(sparkConf.get("spark.app.name", "Spark"))
  appContext.setQueue(sparkConf.get(QUEUE_NAME))
  appContext.setAMContainerSpec(containerContext)
  appContext.setApplicationType("SPARK")

  sparkConf.get(APPLICATION_TAGS).foreach { tags =>
    appContext.setApplicationTags(new java.util.HashSet[String](tags.asJava))
  }
  sparkConf.get(MAX_APP_ATTEMPTS) match {
    case Some(v) => appContext.setMaxAppAttempts(v)
    case None => logDebug(s"${MAX_APP_ATTEMPTS.key} is not set. " +
      "Cluster's default value will be used.")
  }

  sparkConf.get(AM_ATTEMPT_FAILURE_VALIDITY_INTERVAL_MS).foreach { interval =>
    appContext.setAttemptFailuresValidityInterval(interval)
  }

  // Resource capability (memory + cores + custom resource types) for the AM container.
  val capability = Records.newRecord(classOf[Resource])
  capability.setMemory(amMemory + amMemoryOverhead)
  capability.setVirtualCores(amCores)
  if (amResources.nonEmpty) {
    ResourceRequestHelper.setResourceRequests(amResources, capability)
  }
  logDebug(s"Created resource capability for AM request: $capability")

  sparkConf.get(AM_NODE_LABEL_EXPRESSION) match {
    case Some(expr) =>
      // A node label expression can only be attached to a full ResourceRequest,
      // not to a bare Resource, so build one when a label is configured.
      val amRequest = Records.newRecord(classOf[ResourceRequest])
      amRequest.setResourceName(ResourceRequest.ANY)
      amRequest.setPriority(Priority.newInstance(0))
      amRequest.setCapability(capability)
      amRequest.setNumContainers(1)
      amRequest.setNodeLabelExpression(expr)
      appContext.setAMContainerResourceRequest(amRequest)
    case None =>
      appContext.setResource(capability)
  }

  sparkConf.get(ROLLED_LOG_INCLUDE_PATTERN).foreach { includePattern =>
    try {
      val logAggregationContext = Records.newRecord(classOf[LogAggregationContext])
      logAggregationContext.setRolledLogsIncludePattern(includePattern)
      sparkConf.get(ROLLED_LOG_EXCLUDE_PATTERN).foreach { excludePattern =>
        logAggregationContext.setRolledLogsExcludePattern(excludePattern)
      }
      appContext.setLogAggregationContext(logAggregationContext)
    } catch {
      // Older YARN versions do not have the rolled-log API; degrade gracefully.
      case NonFatal(e) =>
        logWarning(s"Ignoring ${ROLLED_LOG_INCLUDE_PATTERN.key} because the version of YARN " +
          "does not support it", e)
    }
  }
  appContext.setUnmanagedAM(isClientUnmanagedAMEnabled)
  appContext
}
/**
 * Set up security tokens for launching our ApplicationMaster container.
 *
 * In client mode, a set of credentials has been obtained by the scheduler, so they are copied
 * and sent to the AM. In cluster mode, new credentials are obtained and then sent to the AM,
 * along with whatever credentials the current user already has.
 *
 * @param amContainer the AM container launch context to attach the serialized tokens to.
 */
private def setupSecurityToken(amContainer: ContainerLaunchContext): Unit = {
  val currentUser = UserGroupInformation.getCurrentUser()
  val credentials = currentUser.getCredentials()

  if (isClusterMode) {
    // Obtain fresh delegation tokens and merge them into the current user's credentials.
    val credentialManager = new HadoopDelegationTokenManager(sparkConf, hadoopConf, null)
    credentialManager.obtainDelegationTokens(credentials)
  }

  val serializedCreds = SparkHadoopUtil.get.serialize(credentials)
  amContainer.setTokens(ByteBuffer.wrap(serializedCreds))
}
/** Get the application report from the ResourceManager for an application we have submitted. */
def getApplicationReport(appId: ApplicationId): ApplicationReport =
  yarnClient.getApplicationReport(appId)

/**
 * Return the security token used by this client to communicate with the ApplicationMaster.
 * If no security is enabled, the token returned by the report is null.
 * A null token is mapped to the empty string.
 */
private def getClientToken(report: ApplicationReport): String =
  Option(report.getClientToAMToken).map(_.toString).getOrElse("")
/**
 * Fail fast if we have requested more resources per container than is available in the cluster.
 *
 * Checks the total executor container size (heap + off-heap + overhead + PySpark worker
 * memory) and the AM container size against the cluster's maximum allocation per container.
 *
 * @param newAppResponse response from the ResourceManager carrying the cluster maximums.
 * @throws IllegalArgumentException if either request exceeds the cluster maximum.
 */
private def verifyClusterResources(newAppResponse: GetNewApplicationResponse): Unit = {
  val maxMem = newAppResponse.getMaximumResourceCapability().getMemory()
  logInfo("Verifying our application has not requested more than the maximum " +
    s"memory capability of the cluster ($maxMem MB per container)")
  val executorMem =
    executorMemory + executorOffHeapMemory + executorMemoryOverhead + pysparkWorkerMemory
  if (executorMem > maxMem) {
    // Fixed message formatting: "MB" belongs inside the parentheses with the
    // off-heap value, consistent with every other quantity in this message.
    throw new IllegalArgumentException(s"Required executor memory ($executorMemory MB), " +
      s"offHeap memory ($executorOffHeapMemory MB), overhead ($executorMemoryOverhead MB), " +
      s"and PySpark memory ($pysparkWorkerMemory MB) is above the max threshold ($maxMem MB) " +
      "of this cluster! Please check the values of 'yarn.scheduler.maximum-allocation-mb' " +
      "and/or 'yarn.nodemanager.resource.memory-mb'.")
  }
  val amMem = amMemory + amMemoryOverhead
  if (amMem > maxMem) {
    throw new IllegalArgumentException(s"Required AM memory ($amMemory" +
      s"+$amMemoryOverhead MB) is above the max threshold ($maxMem MB) of this cluster! " +
      "Please check the values of 'yarn.scheduler.maximum-allocation-mb' and/or " +
      "'yarn.nodemanager.resource.memory-mb'.")
  }
  logInfo("Will allocate AM container, with %d MB memory including %d MB overhead".format(
    amMem,
    amMemoryOverhead))

  // We could add checks to make sure the entire cluster has enough resources but that involves
  // getting all the node reports and computing ourselves.
}
/**
 * Copy the given file to a remote file system (e.g. HDFS) if needed.
 * The file is only copied if the source and destination file systems are different or the source
 * scheme is "file". This is used for preparing resources for launching the ApplicationMaster
 * container. Exposed for testing.
 *
 * @param destDir      destination directory on the remote file system.
 * @param srcPath      path of the file to copy.
 * @param replication  replication factor for the uploaded file.
 * @param symlinkCache cache of already-resolved destination directories.
 * @param force        copy even when source and destination file systems match.
 * @param destName     optional name for the copied file (defaults to the source name).
 * @return the fully-qualified, symlink-resolved destination path.
 */
private[yarn] def copyFileToRemote(
    destDir: Path,
    srcPath: Path,
    replication: Short,
    symlinkCache: Map[URI, Path],
    force: Boolean = false,
    destName: Option[String] = None): Path = {
  val destFs = destDir.getFileSystem(hadoopConf)
  val srcFs = srcPath.getFileSystem(hadoopConf)
  var destPath = srcPath
  if (force || !compareFs(srcFs, destFs) || "file".equals(srcFs.getScheme)) {
    destPath = new Path(destDir, destName.getOrElse(srcPath.getName()))
    logInfo(s"Uploading resource $srcPath -> $destPath")
    FileUtil.copy(srcFs, srcPath, destFs, destPath, false, hadoopConf)
    destFs.setReplication(destPath, replication)
    destFs.setPermission(destPath, new FsPermission(APP_FILE_PERMISSION))
  } else {
    logInfo(s"Source and destination file systems are the same. Not copying $srcPath")
  }
  // Resolve any symlinks in the URI path so using a "current" symlink to point to a specific
  // version shows the specific version in the distributed cache configuration
  val qualifiedDestPath = destFs.makeQualified(destPath)
  val qualifiedDestDir = qualifiedDestPath.getParent
  val resolvedDestDir = symlinkCache.getOrElseUpdate(qualifiedDestDir.toUri(), {
    val fc = FileContext.getFileContext(qualifiedDestDir.toUri(), hadoopConf)
    fc.resolvePath(qualifiedDestDir)
  })
  new Path(resolvedDestDir, qualifiedDestPath.getName())
}
/**
 * Upload any resources to the distributed cache if needed. If a resource is intended to be
 * consumed locally, set up the appropriate config for downstream code to handle it properly.
 * This is used for setting up a container launch context for our ApplicationMaster.
 * Exposed for testing.
 *
 * @param destDir         staging directory where resources are uploaded.
 * @param pySparkArchives PySpark archives to distribute with the application.
 * @return map from localized resource name to its YARN LocalResource record.
 */
def prepareLocalResources(
    destDir: Path,
    pySparkArchives: Seq[String]): HashMap[String, LocalResource] = {
  logInfo("Preparing resources for our AM container")
  // Upload Spark and the application JAR to the remote file system if necessary,
  // and add them as local resources to the application master.
  val fs = destDir.getFileSystem(hadoopConf)

  // Used to keep track of URIs added to the distributed cache. If the same URI is added
  // multiple times, YARN will fail to launch containers for the app with an internal
  // error.
  val distributedUris = new HashSet[String]
  // Used to keep track of URIs(files) added to the distribute cache have the same name. If
  // same name but different path files are added multiple time, YARN will fail to launch
  // containers for the app with an internal error.
  val distributedNames = new HashSet[String]

  val replication = sparkConf.get(STAGING_FILE_REPLICATION).map(_.toShort)
    .getOrElse(fs.getDefaultReplication(destDir))
  val localResources = HashMap[String, LocalResource]()
  FileSystem.mkdirs(fs, destDir, new FsPermission(STAGING_DIR_PERMISSION))

  val statCache: Map[URI, FileStatus] = HashMap[URI, FileStatus]()
  val symlinkCache: Map[URI, Path] = HashMap[URI, Path]()

  // Returns true iff the URI is new to the cache, checking both the full URI
  // and the bare file name (either kind of duplicate breaks container launch).
  def addDistributedUri(uri: URI): Boolean = {
    val uriStr = uri.toString()
    val fileName = new File(uri.getPath).getName
    if (distributedUris.contains(uriStr)) {
      logWarning(s"Same path resource $uri added multiple times to distributed cache.")
      false
    } else if (distributedNames.contains(fileName)) {
      logWarning(s"Same name resource $uri added multiple times to distributed cache")
      false
    } else {
      distributedUris += uriStr
      distributedNames += fileName
      true
    }
  }

  /*
   * Distribute a file to the cluster.
   *
   * If the file's path is a "local:" URI, it's actually not distributed. Other files are copied
   * to HDFS (if not already there) and added to the application's distributed cache.
   *
   * @param path URI of the file to distribute.
   * @param resType Type of resource being distributed.
   * @param destName Name of the file in the distributed cache.
   * @param targetDir Subdirectory where to place the file.
   * @param appMasterOnly Whether to distribute only to the AM.
   * @return A 2-tuple. First item is whether the file is a "local:" URI. Second item is the
   *         localized path for non-local paths, or the input `path` for local paths.
   *         The localized path will be null if the URI has already been added to the cache.
   */
  def distribute(
      path: String,
      resType: LocalResourceType = LocalResourceType.FILE,
      destName: Option[String] = None,
      targetDir: Option[String] = None,
      appMasterOnly: Boolean = false): (Boolean, String) = {
    val trimmedPath = path.trim()
    val localURI = Utils.resolveURI(trimmedPath)
    if (localURI.getScheme != Utils.LOCAL_SCHEME) {
      if (addDistributedUri(localURI)) {
        val localPath = getQualifiedLocalPath(localURI, hadoopConf)
        // Link name precedence: explicit destName, then URI fragment, then file name.
        val linkname = targetDir.map(_ + "/").getOrElse("") +
          destName.orElse(Option(localURI.getFragment())).getOrElse(localPath.getName())
        val destPath = copyFileToRemote(destDir, localPath, replication, symlinkCache)
        val destFs = FileSystem.get(destPath.toUri(), hadoopConf)
        distCacheMgr.addResource(
          destFs, hadoopConf, destPath, localResources, resType, linkname, statCache,
          appMasterOnly = appMasterOnly)
        (false, linkname)
      } else {
        (false, null)
      }
    } else {
      (true, trimmedPath)
    }
  }

  // If we passed in a keytab, make sure we copy the keytab to the staging directory on
  // HDFS, and setup the relevant environment vars, so the AM can login again.
  amKeytabFileName.foreach { kt =>
    logInfo("To enable the AM to login from keytab, credentials are being copied over to the AM" +
      " via the YARN Secure Distributed Cache.")
    val (_, localizedPath) = distribute(keytab,
      destName = Some(kt),
      appMasterOnly = true)
    require(localizedPath != null, "Keytab file already distributed.")
  }

  /**
   * Add Spark to the cache. There are two settings that control what files to add to the cache:
   * - if a Spark archive is defined, use the archive. The archive is expected to contain
   *   jar files at its root directory.
   * - if a list of jars is provided, filter the non-local ones, resolve globs, and
   *   add the found files to the cache.
   *
   * Note that the archive cannot be a "local" URI. If none of the above settings are found,
   * then upload all files found in $SPARK_HOME/jars.
   */
  val sparkArchive = sparkConf.get(SPARK_ARCHIVE)
  if (sparkArchive.isDefined) {
    val archive = sparkArchive.get
    require(!Utils.isLocalUri(archive), s"${SPARK_ARCHIVE.key} cannot be a local URI.")
    distribute(Utils.resolveURI(archive).toString,
      resType = LocalResourceType.ARCHIVE,
      destName = Some(LOCALIZED_LIB_DIR))
  } else {
    sparkConf.get(SPARK_JARS) match {
      case Some(jars) =>
        // Break the list of jars to upload, and resolve globs.
        val localJars = new ArrayBuffer[String]()
        jars.foreach { jar =>
          if (!Utils.isLocalUri(jar)) {
            val path = getQualifiedLocalPath(Utils.resolveURI(jar), hadoopConf)
            val pathFs = FileSystem.get(path.toUri(), hadoopConf)
            pathFs.globStatus(path).filter(_.isFile()).foreach { entry =>
              val uri = entry.getPath().toUri()
              statCache.update(uri, entry)
              distribute(uri.toString(), targetDir = Some(LOCALIZED_LIB_DIR))
            }
          } else {
            localJars += jar
          }
        }

        // Propagate the local URIs to the containers using the configuration.
        sparkConf.set(SPARK_JARS, localJars)

      case None =>
        // No configuration, so fall back to uploading local jar files.
        logWarning(s"Neither ${SPARK_JARS.key} nor ${SPARK_ARCHIVE.key} is set, falling back " +
          "to uploading libraries under SPARK_HOME.")
        val jarsDir = new File(YarnCommandBuilderUtils.findJarsDir(
          sparkConf.getenv("SPARK_HOME")))
        val jarsArchive = File.createTempFile(LOCALIZED_LIB_DIR, ".zip",
          new File(Utils.getLocalDir(sparkConf)))
        val jarsStream = new ZipOutputStream(new FileOutputStream(jarsArchive))

        try {
          // Level 0 = store without compression; these are already-compressed jars.
          jarsStream.setLevel(0)
          jarsDir.listFiles().foreach { f =>
            if (f.isFile && f.getName.toLowerCase(Locale.ROOT).endsWith(".jar") && f.canRead) {
              jarsStream.putNextEntry(new ZipEntry(f.getName))
              Files.copy(f, jarsStream)
              jarsStream.closeEntry()
            }
          }
        } finally {
          jarsStream.close()
        }

        distribute(jarsArchive.toURI.getPath,
          resType = LocalResourceType.ARCHIVE,
          destName = Some(LOCALIZED_LIB_DIR))
        jarsArchive.delete()
    }
  }

  /**
   * Copy user jar to the distributed cache if their scheme is not "local".
   * Otherwise, set the corresponding key in our SparkConf to handle it downstream.
   */
  Option(args.userJar).filter(_.trim.nonEmpty).foreach { jar =>
    val (isLocal, localizedPath) = distribute(jar, destName = Some(APP_JAR_NAME))
    if (isLocal) {
      require(localizedPath != null, s"Path $jar already distributed")
      // If the resource is intended for local use only, handle this downstream
      // by setting the appropriate property
      sparkConf.set(APP_JAR, localizedPath)
    }
  }

  /**
   * Do the same for any additional resources passed in through ClientArguments.
   * Each resource category is represented by a 3-tuple of:
   * (1) comma separated list of resources in this category,
   * (2) resource type, and
   * (3) whether to add these resources to the classpath
   */
  val cachedSecondaryJarLinks = ListBuffer.empty[String]
  List(
    (sparkConf.get(JARS_TO_DISTRIBUTE), LocalResourceType.FILE, true),
    (sparkConf.get(FILES_TO_DISTRIBUTE), LocalResourceType.FILE, false),
    (sparkConf.get(ARCHIVES_TO_DISTRIBUTE), LocalResourceType.ARCHIVE, false)
  ).foreach { case (flist, resType, addToClasspath) =>
    flist.foreach { file =>
      val (_, localizedPath) = distribute(file, resType = resType)
      // If addToClassPath, we ignore adding jar multiple times to distributed cache.
      if (addToClasspath) {
        if (localizedPath != null) {
          cachedSecondaryJarLinks += localizedPath
        }
      } else {
        if (localizedPath == null) {
          throw new IllegalArgumentException(s"Attempt to add ($file) multiple times" +
            " to the distributed cache.")
        }
      }
    }
  }
  if (cachedSecondaryJarLinks.nonEmpty) {
    sparkConf.set(SECONDARY_JARS, cachedSecondaryJarLinks)
  }

  if (isClusterMode && args.primaryPyFile != null) {
    distribute(args.primaryPyFile, appMasterOnly = true)
  }

  pySparkArchives.foreach { f => distribute(f) }

  // The python files list needs to be treated especially. All files that are not an
  // archive need to be placed in a subdirectory that will be added to PYTHONPATH.
  sparkConf.get(PY_FILES).foreach { f =>
    val targetDir = if (f.endsWith(".py")) Some(LOCALIZED_PYTHON_DIR) else None
    distribute(f, targetDir = targetDir)
  }

  // Update the configuration with all the distributed files, minus the conf archive. The
  // conf archive will be handled by the AM differently so that we avoid having to send
  // this configuration by other means. See SPARK-14602 for one reason of why this is needed.
  distCacheMgr.updateConfiguration(cachedResourcesConf)

  // Upload the conf archive to HDFS manually, and record its location in the configuration.
  // This will allow the AM to know where the conf archive is in HDFS, so that it can be
  // distributed to the containers.
  //
  // This code forces the archive to be copied, so that unit tests pass (since in that case both
  // file systems are the same and the archive wouldn't normally be copied). In most (all?)
  // deployments, the archive would be copied anyway, since it's a temp file in the local file
  // system.
  val remoteConfArchivePath = new Path(destDir, LOCALIZED_CONF_ARCHIVE)
  val remoteFs = FileSystem.get(remoteConfArchivePath.toUri(), hadoopConf)
  cachedResourcesConf.set(CACHED_CONF_ARCHIVE, remoteConfArchivePath.toString())

  val localConfArchive = new Path(createConfArchive().toURI())
  copyFileToRemote(destDir, localConfArchive, replication, symlinkCache, force = true,
    destName = Some(LOCALIZED_CONF_ARCHIVE))

  // Manually add the config archive to the cache manager so that the AM is launched with
  // the proper files set up.
  distCacheMgr.addResource(
    remoteFs, hadoopConf, remoteConfArchivePath, localResources, LocalResourceType.ARCHIVE,
    LOCALIZED_CONF_DIR, statCache, appMasterOnly = false)

  localResources
}
  /**
   * Create an archive with the config files for distribution.
   *
   * These will be used by AM and executors. The files are zipped and added to the job as an
   * archive, so that YARN will explode it when distributing to AM and executors. This directory
   * is then added to the classpath of AM and executor process, just to make sure that everybody
   * is using the same default config.
   *
   * This follows the order of precedence set by the startup scripts, in which HADOOP_CONF_DIR
   * shows up in the classpath before YARN_CONF_DIR.
   *
   * Currently this makes a shallow copy of the conf directory. If there are cases where a
   * Hadoop config directory contains subdirectories, this code will have to be fixed.
   *
   * The archive also contains some Spark configuration. Namely, it saves the contents of
   * SparkConf in a file to be loaded by the AM process.
   *
   * @return The local temporary zip file holding the collected config files.
   */
  private def createConfArchive(): File = {
    // File name -> local file. First writer wins: entries inserted from SPARK_CONF_DIR below
    // are never overwritten by the HADOOP_CONF_DIR/YARN_CONF_DIR scan (see contains() check).
    val hadoopConfFiles = new HashMap[String, File]()
    // SPARK_CONF_DIR shows up in the classpath before HADOOP_CONF_DIR/YARN_CONF_DIR
    sys.env.get("SPARK_CONF_DIR").foreach { localConfDir =>
      val dir = new File(localConfDir)
      if (dir.isDirectory) {
        // Only *.xml files are collected from SPARK_CONF_DIR.
        val files = dir.listFiles(new FileFilter {
          override def accept(pathname: File): Boolean = {
            pathname.isFile && pathname.getName.endsWith(".xml")
          }
        })
        files.foreach { f => hadoopConfFiles(f.getName) = f }
      }
    }
    // SPARK-23630: during testing, Spark scripts filter out hadoop conf dirs so that user's
    // environments do not interfere with tests. This allows a special env variable during
    // tests so that custom conf dirs can be used by unit tests.
    val confDirs = Seq("HADOOP_CONF_DIR", "YARN_CONF_DIR") ++
      (if (Utils.isTesting) Seq("SPARK_TEST_HADOOP_CONF_DIR") else Nil)
    confDirs.foreach { envKey =>
      sys.env.get(envKey).foreach { path =>
        val dir = new File(path)
        if (dir.isDirectory()) {
          val files = dir.listFiles()
          if (files == null) {
            logWarning("Failed to list files under directory " + dir)
          } else {
            files.foreach { file =>
              if (file.isFile && !hadoopConfFiles.contains(file.getName())) {
                hadoopConfFiles(file.getName()) = file
              }
            }
          }
        }
      }
    }
    val confArchive = File.createTempFile(LOCALIZED_CONF_DIR, ".zip",
      new File(Utils.getLocalDir(sparkConf)))
    val confStream = new ZipOutputStream(new FileOutputStream(confArchive))
    logDebug(s"Creating an archive with the config files for distribution at $confArchive.")
    try {
      // Level 0 = no compression; these files are small and read only once.
      confStream.setLevel(0)
      // Upload $SPARK_CONF_DIR/log4j.properties file to the distributed cache to make sure that
      // the executors will use the latest configurations instead of the default values. This is
      // required when user changes log4j.properties directly to set the log configurations. If
      // configuration file is provided through --files then executors will be taking configurations
      // from --files instead of $SPARK_CONF_DIR/log4j.properties.
      // Also upload metrics.properties to distributed cache if exists in classpath.
      // If user specify this file using --files then executors will use the one
      // from --files instead.
      for { prop <- Seq("log4j.properties", "metrics.properties")
        url <- Option(Utils.getContextOrSparkClassLoader.getResource(prop))
        if url.getProtocol == "file" } {
        val file = new File(url.getPath())
        confStream.putNextEntry(new ZipEntry(file.getName()))
        Files.copy(file, confStream)
        confStream.closeEntry()
      }
      // Save the Hadoop config files under a separate directory in the archive. This directory
      // is appended to the classpath so that the cluster-provided configuration takes precedence.
      confStream.putNextEntry(new ZipEntry(s"$LOCALIZED_HADOOP_CONF_DIR/"))
      confStream.closeEntry()
      hadoopConfFiles.foreach { case (name, file) =>
        if (file.canRead()) {
          confStream.putNextEntry(new ZipEntry(s"$LOCALIZED_HADOOP_CONF_DIR/$name"))
          Files.copy(file, confStream)
          confStream.closeEntry()
        }
      }
      // Save the YARN configuration into a separate file that will be overlayed on top of the
      // cluster's Hadoop conf.
      confStream.putNextEntry(new ZipEntry(SparkHadoopUtil.SPARK_HADOOP_CONF_FILE))
      hadoopConf.writeXml(confStream)
      confStream.closeEntry()
      // Save Spark configuration to a file in the archive.
      val props = confToProperties(sparkConf)
      // If propagating the keytab to the AM, override the keytab name with the name of the
      // distributed file.
      amKeytabFileName.foreach { kt => props.setProperty(KEYTAB.key, kt) }
      writePropertiesToArchive(props, SPARK_CONF_FILE, confStream)
      // Write the distributed cache config to the archive.
      writePropertiesToArchive(confToProperties(cachedResourcesConf), DIST_CACHE_CONF_FILE,
        confStream)
    } finally {
      confStream.close()
    }
    confArchive
  }
  /**
   * Set up the environment for launching our ApplicationMaster container.
   *
   * @param stagingDirPath Path of the application's staging directory (exported to the AM
   *                       through the SPARK_YARN_STAGING_DIR variable).
   * @param pySparkArchives PySpark archives to add to the container PYTHONPATH.
   * @return Map of environment variables for the AM container.
   */
  private def setupLaunchEnv(
      stagingDirPath: Path,
      pySparkArchives: Seq[String]): HashMap[String, String] = {
    logInfo("Setting up the launch environment for our AM container")
    val env = new HashMap[String, String]()
    populateClasspath(args, hadoopConf, sparkConf, env, sparkConf.get(DRIVER_CLASS_PATH))
    env("SPARK_YARN_STAGING_DIR") = stagingDirPath.toString
    env("SPARK_USER") = UserGroupInformation.getCurrentUser().getShortUserName()
    // Pick up any environment variables for the AM provided through spark.yarn.appMasterEnv.*
    val amEnvPrefix = "spark.yarn.appMasterEnv."
    sparkConf.getAll
      .filter { case (k, v) => k.startsWith(amEnvPrefix) }
      .map { case (k, v) => (k.substring(amEnvPrefix.length), v) }
      .foreach { case (k, v) => YarnSparkHadoopUtil.addPathToEnvironment(env, k, v) }
    // If pyFiles contains any .py files, we need to add LOCALIZED_PYTHON_DIR to the PYTHONPATH
    // of the container processes too. Add all non-.py files directly to PYTHONPATH.
    //
    // NOTE: the code currently does not handle .py files defined with a "local:" scheme.
    val pythonPath = new ListBuffer[String]()
    val (pyFiles, pyArchives) = sparkConf.get(PY_FILES).partition(_.endsWith(".py"))
    if (pyFiles.nonEmpty) {
      pythonPath += buildPath(Environment.PWD.$$(), LOCALIZED_PYTHON_DIR)
    }
    (pySparkArchives ++ pyArchives).foreach { path =>
      val uri = Utils.resolveURI(path)
      if (uri.getScheme != Utils.LOCAL_SCHEME) {
        // Distributed archives are referenced by their localized name in the container's PWD.
        pythonPath += buildPath(Environment.PWD.$$(), new Path(uri).getName())
      } else {
        pythonPath += uri.getPath()
      }
    }
    // Finally, update the Spark config to propagate PYTHONPATH to the AM and executors.
    // The resulting value is: existing env PYTHONPATH ++ client's PYTHONPATH ++ new entries.
    if (pythonPath.nonEmpty) {
      val pythonPathList = (sys.env.get("PYTHONPATH") ++ pythonPath)
      env("PYTHONPATH") = (env.get("PYTHONPATH") ++ pythonPathList)
        .mkString(ApplicationConstants.CLASS_PATH_SEPARATOR)
      val pythonPathExecutorEnv = (sparkConf.getExecutorEnv.toMap.get("PYTHONPATH") ++
        pythonPathList).mkString(ApplicationConstants.CLASS_PATH_SEPARATOR)
      sparkConf.setExecutorEnv("PYTHONPATH", pythonPathExecutorEnv)
    }
    if (isClusterMode) {
      // propagate PYSPARK_DRIVER_PYTHON and PYSPARK_PYTHON to driver in cluster mode
      // (only when present in the client's environment and not already set above)
      Seq("PYSPARK_DRIVER_PYTHON", "PYSPARK_PYTHON").foreach { envname =>
        if (!env.contains(envname)) {
          sys.env.get(envname).foreach(env(envname) = _)
        }
      }
      sys.env.get("PYTHONHASHSEED").foreach(env.put("PYTHONHASHSEED", _))
    }
    sys.env.get(ENV_DIST_CLASSPATH).foreach { dcp =>
      env(ENV_DIST_CLASSPATH) = dcp
    }
    env
  }
  /**
   * Set up a ContainerLaunchContext to launch our ApplicationMaster container.
   * This sets up the launch environment, java options, and the command for launching the AM.
   *
   * @param newAppResponse Response from the RM for the new application; provides the app ID
   *                       that is substituted into driver/AM java options.
   * @return The fully populated launch context for the AM container.
   */
  private def createContainerLaunchContext(newAppResponse: GetNewApplicationResponse)
    : ContainerLaunchContext = {
    logInfo("Setting up container launch context for our AM")
    val appId = newAppResponse.getApplicationId
    val pySparkArchives =
      if (sparkConf.get(IS_PYTHON_APP)) {
        findPySparkArchives()
      } else {
        Nil
      }
    val launchEnv = setupLaunchEnv(stagingDirPath, pySparkArchives)
    val localResources = prepareLocalResources(stagingDirPath, pySparkArchives)
    val amContainer = Records.newRecord(classOf[ContainerLaunchContext])
    amContainer.setLocalResources(localResources.asJava)
    amContainer.setEnvironment(launchEnv.asJava)
    val javaOpts = ListBuffer[String]()
    // Set the environment variable through a command prefix
    // to append to the existing value of the variable
    var prefixEnv: Option[String] = None
    // Add Xmx for AM memory
    javaOpts += "-Xmx" + amMemory + "m"
    val tmpDir = new Path(Environment.PWD.$$(), YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR)
    javaOpts += "-Djava.io.tmpdir=" + tmpDir
    // TODO: Remove once cpuset version is pushed out.
    // The context is, default gc for server class machines ends up using all cores to do gc -
    // hence if there are multiple containers in same node, Spark GC affects all other containers'
    // performance (which can be that of other Spark containers)
    // Instead of using this, rely on cpusets by YARN to enforce "proper" Spark behavior in
    // multi-tenant environments. Not sure how default Java GC behaves if it is limited to subset
    // of cores on a node.
    val useConcurrentAndIncrementalGC = launchEnv.get("SPARK_USE_CONC_INCR_GC").exists(_.toBoolean)
    if (useConcurrentAndIncrementalGC) {
      // In our expts, using (default) throughput collector has severe perf ramifications in
      // multi-tenant machines
      javaOpts += "-XX:+UseConcMarkSweepGC"
      javaOpts += "-XX:MaxTenuringThreshold=31"
      javaOpts += "-XX:SurvivorRatio=8"
      javaOpts += "-XX:+CMSIncrementalMode"
      javaOpts += "-XX:+CMSIncrementalPacing"
      javaOpts += "-XX:CMSIncrementalDutyCycleMin=0"
      javaOpts += "-XX:CMSIncrementalDutyCycle=10"
    }
    // Include driver-specific java options if we are launching a driver
    if (isClusterMode) {
      sparkConf.get(DRIVER_JAVA_OPTIONS).foreach { opts =>
        javaOpts ++= Utils.splitCommandString(opts)
          .map(Utils.substituteAppId(_, appId.toString))
          .map(YarnSparkHadoopUtil.escapeForShell)
      }
      val libraryPaths = Seq(sparkConf.get(DRIVER_LIBRARY_PATH),
        sys.props.get("spark.driver.libraryPath")).flatten
      if (libraryPaths.nonEmpty) {
        prefixEnv = Some(createLibraryPathPrefix(libraryPaths.mkString(File.pathSeparator),
          sparkConf))
      }
      // In cluster mode the driver options above already apply to the AM's JVM.
      if (sparkConf.get(AM_JAVA_OPTIONS).isDefined) {
        logWarning(s"${AM_JAVA_OPTIONS.key} will not take effect in cluster mode")
      }
    } else {
      // Validate and include yarn am specific java options in yarn-client mode.
      sparkConf.get(AM_JAVA_OPTIONS).foreach { opts =>
        if (opts.contains("-Dspark")) {
          val msg = s"${AM_JAVA_OPTIONS.key} is not allowed to set Spark options (was '$opts')."
          throw new SparkException(msg)
        }
        if (opts.contains("-Xmx")) {
          val msg = s"${AM_JAVA_OPTIONS.key} is not allowed to specify max heap memory settings " +
            s"(was '$opts'). Use spark.yarn.am.memory instead."
          throw new SparkException(msg)
        }
        javaOpts ++= Utils.splitCommandString(opts)
          .map(Utils.substituteAppId(_, appId.toString))
          .map(YarnSparkHadoopUtil.escapeForShell)
      }
      sparkConf.get(AM_LIBRARY_PATH).foreach { paths =>
        prefixEnv = Some(createLibraryPathPrefix(paths, sparkConf))
      }
    }
    // For log4j configuration to reference
    javaOpts += ("-Dspark.yarn.app.container.log.dir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR)
    val userClass =
      if (isClusterMode) {
        Seq("--class", YarnSparkHadoopUtil.escapeForShell(args.userClass))
      } else {
        Nil
      }
    val userJar =
      if (args.userJar != null) {
        Seq("--jar", args.userJar)
      } else {
        Nil
      }
    val primaryPyFile =
      if (isClusterMode && args.primaryPyFile != null) {
        Seq("--primary-py-file", new Path(args.primaryPyFile).getName())
      } else {
        Nil
      }
    val primaryRFile =
      if (args.primaryRFile != null) {
        Seq("--primary-r-file", args.primaryRFile)
      } else {
        Nil
      }
    // Cluster mode runs the full AM; client mode only launches the lighter ExecutorLauncher.
    val amClass =
      if (isClusterMode) {
        Utils.classForName("org.apache.spark.deploy.yarn.ApplicationMaster").getName
      } else {
        Utils.classForName("org.apache.spark.deploy.yarn.ExecutorLauncher").getName
      }
    if (args.primaryRFile != null && args.primaryRFile.endsWith(".R")) {
      args.userArgs = ArrayBuffer(args.primaryRFile) ++ args.userArgs
    }
    val userArgs = args.userArgs.flatMap { arg =>
      Seq("--arg", YarnSparkHadoopUtil.escapeForShell(arg))
    }
    val amArgs =
      Seq(amClass) ++ userClass ++ userJar ++ primaryPyFile ++ primaryRFile ++ userArgs ++
        Seq("--properties-file",
          buildPath(Environment.PWD.$$(), LOCALIZED_CONF_DIR, SPARK_CONF_FILE)) ++
        Seq("--dist-cache-conf",
          buildPath(Environment.PWD.$$(), LOCALIZED_CONF_DIR, DIST_CACHE_CONF_FILE))
    // Command for the ApplicationMaster. The "1>"/"2>" tokens are shell redirections of
    // stdout/stderr into the container's log directory.
    val commands = prefixEnv ++
      Seq(Environment.JAVA_HOME.$$() + "/bin/java", "-server") ++
      javaOpts ++ amArgs ++
      Seq(
        "1>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout",
        "2>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr")
    // TODO: it would be nicer to just make sure there are no null commands here
    val printableCommands = commands.map(s => if (s == null) "null" else s).toList
    amContainer.setCommands(printableCommands.asJava)
    logDebug("===============================================================================")
    logDebug("YARN AM launch context:")
    logDebug(s" user class: ${Option(args.userClass).getOrElse("N/A")}")
    logDebug(" env:")
    if (log.isDebugEnabled) {
      Utils.redact(sparkConf, launchEnv.toSeq).foreach { case (k, v) =>
        logDebug(s" $k -> $v")
      }
    }
    logDebug(" resources:")
    localResources.foreach { case (k, v) => logDebug(s" $k -> $v")}
    logDebug(" command:")
    logDebug(s" ${printableCommands.mkString(" ")}")
    logDebug("===============================================================================")
    // send the acl settings into YARN to control who has access via YARN interfaces
    val securityManager = new SecurityManager(sparkConf)
    amContainer.setApplicationACLs(
      YarnSparkHadoopUtil.getApplicationAclsForYarn(securityManager).asJava)
    setupSecurityToken(amContainer)
    amContainer
  }
  /**
   * Report the state of an application until it has exited, either successfully or
   * due to some failure, then return a pair of the yarn application state (FINISHED, FAILED,
   * KILLED, or RUNNING) and the final application state (UNDEFINED, SUCCEEDED, FAILED,
   * or KILLED).
   *
   * @param appId ID of the application to monitor.
   * @param returnOnRunning Whether to also return the application state when it is RUNNING.
   * @param logApplicationReport Whether to log details of the application report every iteration.
   * @param interval How often to poll the YARN RM for application status (in ms).
   * @return A pair of the yarn application state and the final application state.
   */
  def monitorApplication(
      appId: ApplicationId,
      returnOnRunning: Boolean = false,
      logApplicationReport: Boolean = true,
      interval: Long = sparkConf.get(REPORT_INTERVAL)): YarnAppReport = {
    var lastState: YarnApplicationState = null
    while (true) {
      // NOTE: the first report is only fetched after an initial `interval` delay.
      Thread.sleep(interval)
      val report: ApplicationReport =
        try {
          getApplicationReport(appId)
        } catch {
          case e: ApplicationNotFoundException =>
            logError(s"Application $appId not found.")
            cleanupStagingDir()
            return YarnAppReport(YarnApplicationState.KILLED, FinalApplicationStatus.KILLED, None)
          case NonFatal(e) =>
            val msg = s"Failed to contact YARN for application $appId."
            logError(msg, e)
            // Don't necessarily clean up staging dir because status is unknown
            return YarnAppReport(YarnApplicationState.FAILED, FinalApplicationStatus.FAILED,
              Some(msg))
        }
      val state = report.getYarnApplicationState
      if (logApplicationReport) {
        logInfo(s"Application report for $appId (state: $state)")
        // If DEBUG is enabled, log report details every iteration
        // Otherwise, log them every time the application changes state
        if (log.isDebugEnabled) {
          logDebug(formatReportDetails(report))
        } else if (lastState != state) {
          logInfo(formatReportDetails(report))
        }
      }
      // Mirror each state transition to the launcher backend exactly once.
      if (lastState != state) {
        state match {
          case YarnApplicationState.RUNNING =>
            reportLauncherState(SparkAppHandle.State.RUNNING)
          case YarnApplicationState.FINISHED =>
            report.getFinalApplicationStatus match {
              case FinalApplicationStatus.FAILED =>
                reportLauncherState(SparkAppHandle.State.FAILED)
              case FinalApplicationStatus.KILLED =>
                reportLauncherState(SparkAppHandle.State.KILLED)
              case _ =>
                reportLauncherState(SparkAppHandle.State.FINISHED)
            }
          case YarnApplicationState.FAILED =>
            reportLauncherState(SparkAppHandle.State.FAILED)
          case YarnApplicationState.KILLED =>
            reportLauncherState(SparkAppHandle.State.KILLED)
          case _ =>
        }
      }
      if (state == YarnApplicationState.FINISHED ||
        state == YarnApplicationState.FAILED ||
        state == YarnApplicationState.KILLED) {
        // Terminal state: the staging dir is no longer needed.
        cleanupStagingDir()
        return createAppReport(report)
      }
      if (returnOnRunning && state == YarnApplicationState.RUNNING) {
        return createAppReport(report)
      }
      // Unmanaged-AM mode: once the app is ACCEPTED and the RM has issued an AMRM token,
      // start the AM inside this client process (done at most once; appMaster == null guard).
      if (state == YarnApplicationState.ACCEPTED && isClientUnmanagedAMEnabled &&
        appMaster == null && report.getAMRMToken != null) {
        appMaster = startApplicationMasterService(report)
      }
      lastState = state
    }
    // Never reached, but keeps compiler happy
    throw new SparkException("While loop is depleted! This should never happen...")
  }
  /**
   * Start an unmanaged ApplicationMaster inside this client process.
   *
   * Installs the AMRM token from the application report into the current UGI so the AM can
   * communicate with the RM, then runs the AM on a daemon thread so that application
   * monitoring in [[monitorApplication]] can continue in parallel.
   *
   * @param report The application report carrying the AMRM token and current attempt ID.
   * @return The started [[ApplicationMaster]] instance.
   */
  private def startApplicationMasterService(report: ApplicationReport): ApplicationMaster = {
    // Add AMRMToken to establish connection between RM and AM
    val token = report.getAMRMToken
    val amRMToken: org.apache.hadoop.security.token.Token[AMRMTokenIdentifier] =
      new org.apache.hadoop.security.token.Token[AMRMTokenIdentifier](
        token.getIdentifier().array(), token.getPassword().array,
        new Text(token.getKind()), new Text(token.getService()))
    val currentUGI = UserGroupInformation.getCurrentUser
    currentUGI.addToken(amRMToken)
    // Start Application Service in a separate thread and continue with application monitoring
    val appMaster = new ApplicationMaster(
      new ApplicationMasterArguments(Array.empty), sparkConf, hadoopConf)
    val amService = new Thread("Unmanaged Application Master Service") {
      override def run(): Unit = {
        appMaster.runUnmanaged(rpcEnv, report.getCurrentApplicationAttemptId,
          stagingDirPath, cachedResourcesConf)
      }
    }
    // Daemon thread: must not keep the client JVM alive on its own.
    amService.setDaemon(true)
    amService.start()
    appMaster
  }
private def formatReportDetails(report: ApplicationReport): String = {
val details = Seq[(String, String)](
("client token", getClientToken(report)),
("diagnostics", report.getDiagnostics),
("ApplicationMaster host", report.getHost),
("ApplicationMaster RPC port", report.getRpcPort.toString),
("queue", report.getQueue),
("start time", report.getStartTime.toString),
("final status", report.getFinalApplicationStatus.toString),
("tracking URL", report.getTrackingUrl),
("user", report.getUser)
)
// Use more loggable format if value is null or empty
details.map { case (k, v) =>
val newValue = Option(v).filter(_.nonEmpty).getOrElse("N/A")
s"\\n\\t $k: $newValue"
}.mkString("")
}
  /**
   * Submit an application to the ResourceManager.
   * If set spark.yarn.submit.waitAppCompletion to true, it will stay alive
   * reporting the application's status until the application has exited for any reason.
   * Otherwise, the client process will exit after submission.
   * If the application finishes with a failed, killed, or undefined status,
   * throw an appropriate SparkException.
   */
  def run(): Unit = {
    this.appId = submitApplication()
    // Fire-and-forget: check the state once right after submission and return.
    if (!launcherBackend.isConnected() && fireAndForget) {
      val report = getApplicationReport(appId)
      val state = report.getYarnApplicationState
      logInfo(s"Application report for $appId (state: $state)")
      logInfo(formatReportDetails(report))
      if (state == YarnApplicationState.FAILED || state == YarnApplicationState.KILLED) {
        throw new SparkException(s"Application $appId finished with status: $state")
      }
    } else {
      // Block until the application terminates, translating terminal states into exceptions.
      val YarnAppReport(appState, finalState, diags) = monitorApplication(appId)
      if (appState == YarnApplicationState.FAILED || finalState == FinalApplicationStatus.FAILED) {
        diags.foreach { err =>
          logError(s"Application diagnostics message: $err")
        }
        throw new SparkException(s"Application $appId finished with failed status")
      }
      if (appState == YarnApplicationState.KILLED || finalState == FinalApplicationStatus.KILLED) {
        throw new SparkException(s"Application $appId is killed")
      }
      if (finalState == FinalApplicationStatus.UNDEFINED) {
        throw new SparkException(s"The final status of application $appId is undefined")
      }
    }
  }
private def findPySparkArchives(): Seq[String] = {
sys.env.get("PYSPARK_ARCHIVES_PATH")
.map(_.split(",").toSeq)
.getOrElse {
val pyLibPath = Seq(sys.env("SPARK_HOME"), "python", "lib").mkString(File.separator)
val pyArchivesFile = new File(pyLibPath, "pyspark.zip")
require(pyArchivesFile.exists(),
s"$pyArchivesFile not found; cannot run pyspark application in YARN mode.")
val py4jFile = new File(pyLibPath, PythonUtils.PY4J_ZIP_NAME)
require(py4jFile.exists(),
s"$py4jFile not found; cannot run pyspark application in YARN mode.")
Seq(pyArchivesFile.getAbsolutePath(), py4jFile.getAbsolutePath())
}
}
}
/**
 * Constants and stateless helpers shared by the YARN client, AM and executor setup code
 * (classpath population, staging paths, conf-archive naming, etc.).
 */
private object Client extends Logging {
  // Alias for the user jar
  val APP_JAR_NAME: String = "__app__.jar"
  // Staging directory for any temporary jars or files
  val SPARK_STAGING: String = ".sparkStaging"
  // Staging directory is private! -> rwx--------
  val STAGING_DIR_PERMISSION: FsPermission =
    FsPermission.createImmutable(Integer.parseInt("700", 8).toShort)
  // App files are world-wide readable and owner writable -> rw-r--r--
  val APP_FILE_PERMISSION: FsPermission =
    FsPermission.createImmutable(Integer.parseInt("644", 8).toShort)
  // Distribution-defined classpath to add to processes
  val ENV_DIST_CLASSPATH = "SPARK_DIST_CLASSPATH"
  // Subdirectory where the user's Spark and Hadoop config files will be placed.
  val LOCALIZED_CONF_DIR = "__spark_conf__"
  // Subdirectory in the conf directory containing Hadoop config files.
  val LOCALIZED_HADOOP_CONF_DIR = "__hadoop_conf__"
  // File containing the conf archive in the AM. See prepareLocalResources().
  val LOCALIZED_CONF_ARCHIVE = LOCALIZED_CONF_DIR + ".zip"
  // Name of the file in the conf archive containing Spark configuration.
  val SPARK_CONF_FILE = "__spark_conf__.properties"
  // Name of the file in the conf archive containing the distributed cache info.
  val DIST_CACHE_CONF_FILE = "__spark_dist_cache__.properties"
  // Subdirectory where the user's python files (not archives) will be placed.
  val LOCALIZED_PYTHON_DIR = "__pyfiles__"
  // Subdirectory where Spark libraries will be placed.
  val LOCALIZED_LIB_DIR = "__spark_libs__"
/**
* Return the path to the given application's staging directory.
*/
private def getAppStagingDir(appId: ApplicationId): String = {
buildPath(SPARK_STAGING, appId.toString())
}
/**
* Populate the classpath entry in the given environment map with any application
* classpath specified through the Hadoop and Yarn configurations.
*/
private[yarn] def populateHadoopClasspath(conf: Configuration, env: HashMap[String, String])
: Unit = {
val classPathElementsToAdd = getYarnAppClasspath(conf) ++ getMRAppClasspath(conf)
classPathElementsToAdd.foreach { c =>
YarnSparkHadoopUtil.addPathToEnvironment(env, Environment.CLASSPATH.name, c.trim)
}
}
private def getYarnAppClasspath(conf: Configuration): Seq[String] =
Option(conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH)) match {
case Some(s) => s.toSeq
case None => getDefaultYarnApplicationClasspath
}
private def getMRAppClasspath(conf: Configuration): Seq[String] =
Option(conf.getStrings("mapreduce.application.classpath")) match {
case Some(s) => s.toSeq
case None => getDefaultMRApplicationClasspath
}
private[yarn] def getDefaultYarnApplicationClasspath: Seq[String] =
YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH.toSeq
private[yarn] def getDefaultMRApplicationClasspath: Seq[String] =
StringUtils.getStrings(MRJobConfig.DEFAULT_MAPREDUCE_APPLICATION_CLASSPATH).toSeq
  /**
   * Populate the classpath entry in the given environment map.
   *
   * User jars are generally not added to the JVM's system classpath; those are handled by the AM
   * and executor backend. When the deprecated `spark.yarn.user.classpath.first` is used, user jars
   * are included in the system classpath, though. The extra class path and other uploaded files are
   * always made available through the system class path.
   *
   * @param args Client arguments (when starting the AM) or null (when starting executors).
   * @param conf Hadoop configuration.
   * @param sparkConf Spark configuration.
   * @param env Environment map to populate (mutated in place; entry order is significant).
   * @param extraClassPath Extra class path entries to add first, if any.
   */
  private[yarn] def populateClasspath(
      args: ClientArguments,
      conf: Configuration,
      sparkConf: SparkConf,
      env: HashMap[String, String],
      extraClassPath: Option[String] = None): Unit = {
    extraClassPath.foreach { cp =>
      addClasspathEntry(getClusterPath(sparkConf, cp), env)
    }
    // Working directory and the localized conf dir come before everything below.
    addClasspathEntry(Environment.PWD.$$(), env)
    addClasspathEntry(Environment.PWD.$$() + Path.SEPARATOR + LOCALIZED_CONF_DIR, env)
    if (sparkConf.get(USER_CLASS_PATH_FIRST)) {
      // in order to properly add the app jar when user classpath is first
      // we have to do the mainJar separate in order to send the right thing
      // into addFileToClasspath
      val mainJar =
        if (args != null) {
          getMainJarUri(Option(args.userJar))
        } else {
          getMainJarUri(sparkConf.get(APP_JAR))
        }
      mainJar.foreach(addFileToClasspath(sparkConf, conf, _, APP_JAR_NAME, env))
      val secondaryJars =
        if (args != null) {
          getSecondaryJarUris(Option(sparkConf.get(JARS_TO_DISTRIBUTE)))
        } else {
          getSecondaryJarUris(sparkConf.get(SECONDARY_JARS))
        }
      secondaryJars.foreach { x =>
        addFileToClasspath(sparkConf, conf, x, null, env)
      }
    }
    // Add the Spark jars to the classpath, depending on how they were distributed.
    addClasspathEntry(buildPath(Environment.PWD.$$(), LOCALIZED_LIB_DIR, "*"), env)
    if (sparkConf.get(SPARK_ARCHIVE).isEmpty) {
      // "local:" jars are not distributed; reference them through their cluster-visible path.
      sparkConf.get(SPARK_JARS).foreach { jars =>
        jars.filter(Utils.isLocalUri).foreach { jar =>
          val uri = new URI(jar)
          addClasspathEntry(getClusterPath(sparkConf, uri.getPath()), env)
        }
      }
    }
    populateHadoopClasspath(conf, env)
    sys.env.get(ENV_DIST_CLASSPATH).foreach { cp =>
      addClasspathEntry(getClusterPath(sparkConf, cp), env)
    }
    // Add the localized Hadoop config at the end of the classpath, in case it contains other
    // files (such as configuration files for different services) that are not part of the
    // YARN cluster's config.
    addClasspathEntry(
      buildPath(Environment.PWD.$$(), LOCALIZED_CONF_DIR, LOCALIZED_HADOOP_CONF_DIR), env)
  }
/**
* Returns a list of URIs representing the user classpath.
*
* @param conf Spark configuration.
*/
def getUserClasspath(conf: SparkConf): Array[URI] = {
val mainUri = getMainJarUri(conf.get(APP_JAR))
val secondaryUris = getSecondaryJarUris(conf.get(SECONDARY_JARS))
(mainUri ++ secondaryUris).toArray
}
private def getMainJarUri(mainJar: Option[String]): Option[URI] = {
mainJar.flatMap { path =>
val uri = Utils.resolveURI(path)
if (uri.getScheme == Utils.LOCAL_SCHEME) Some(uri) else None
}.orElse(Some(new URI(APP_JAR_NAME)))
}
private def getSecondaryJarUris(secondaryJars: Option[Seq[String]]): Seq[URI] = {
secondaryJars.getOrElse(Nil).map(new URI(_))
}
  /**
   * Adds the given path to the classpath, handling "local:" URIs correctly.
   *
   * If an alternate name for the file is given, and it's not a "local:" file, the alternate
   * name will be added to the classpath (relative to the job's work directory).
   *
   * If not a "local:" file and no alternate name, the linkName will be added to the classpath.
   *
   * @param conf Spark configuration.
   * @param hadoopConf Hadoop configuration.
   * @param uri URI to add to classpath (optional).
   * @param fileName Alternate name for the file (optional).
   * @param env Map holding the environment variables.
   */
  private def addFileToClasspath(
      conf: SparkConf,
      hadoopConf: Configuration,
      uri: URI,
      fileName: String,
      env: HashMap[String, String]): Unit = {
    if (uri != null && uri.getScheme == Utils.LOCAL_SCHEME) {
      // "local:" files are referenced in place, translated to the cluster-visible path.
      addClasspathEntry(getClusterPath(conf, uri.getPath), env)
    } else if (fileName != null) {
      // Distributed file with an alternate name: reference it inside the container's PWD.
      addClasspathEntry(buildPath(Environment.PWD.$$(), fileName), env)
    } else if (uri != null) {
      // Distributed file without an alternate name: use the URI fragment if present,
      // otherwise the file's own name, relative to the container's PWD.
      val localPath = getQualifiedLocalPath(uri, hadoopConf)
      val linkName = Option(uri.getFragment()).getOrElse(localPath.getName())
      addClasspathEntry(buildPath(Environment.PWD.$$(), linkName), env)
    }
  }
  /**
   * Add the given path to the classpath entry of the given environment map.
   * If the classpath is already set, this appends the new path to the existing classpath.
   *
   * @param path Classpath entry to append.
   * @param env Environment map to mutate.
   */
  private def addClasspathEntry(path: String, env: HashMap[String, String]): Unit =
    YarnSparkHadoopUtil.addPathToEnvironment(env, Environment.CLASSPATH.name, path)
/**
* Returns the path to be sent to the NM for a path that is valid on the gateway.
*
* This method uses two configuration values:
*
* - spark.yarn.config.gatewayPath: a string that identifies a portion of the input path that may
* only be valid in the gateway node.
* - spark.yarn.config.replacementPath: a string with which to replace the gateway path. This may
* contain, for example, env variable references, which will be expanded by the NMs when
* starting containers.
*
* If either config is not available, the input path is returned.
*/
def getClusterPath(conf: SparkConf, path: String): String = {
val localPath = conf.get(GATEWAY_ROOT_PATH)
val clusterPath = conf.get(REPLACEMENT_ROOT_PATH)
if (localPath != null && clusterPath != null) {
path.replace(localPath, clusterPath)
} else {
path
}
}
  /**
   * Return whether the file systems identified by the two URIs are the same.
   */
  private[spark] def compareUri(srcUri: URI, dstUri: URI): Boolean = {
    // A missing or differing scheme can never identify the same file system.
    if (srcUri.getScheme() == null || srcUri.getScheme() != dstUri.getScheme()) {
      return false
    }
    val srcAuthority = srcUri.getAuthority()
    val dstAuthority = dstUri.getAuthority()
    // Authorities are compared case-insensitively.
    if (srcAuthority != null && !srcAuthority.equalsIgnoreCase(dstAuthority)) {
      return false
    }
    var srcHost = srcUri.getHost()
    var dstHost = dstUri.getHost()
    // In HA or when using viewfs, the host part of the URI may not actually be a host, but the
    // name of the HDFS namespace. Those names won't resolve, so avoid even trying if they
    // match.
    if (srcHost != null && dstHost != null && srcHost != dstHost) {
      try {
        // Canonicalize both hosts so that aliases of the same machine compare equal.
        srcHost = InetAddress.getByName(srcHost).getCanonicalHostName()
        dstHost = InetAddress.getByName(dstHost).getCanonicalHostName()
      } catch {
        case e: UnknownHostException =>
          return false
      }
    }
    Objects.equal(srcHost, dstHost) && srcUri.getPort() == dstUri.getPort()
  }
/**
* Return whether the two file systems are the same.
*/
protected def compareFs(srcFs: FileSystem, destFs: FileSystem): Boolean = {
val srcUri = srcFs.getUri()
val dstUri = destFs.getUri()
compareUri(srcUri, dstUri)
}
  /**
   * Given a local URI, resolve it and return a qualified local path that corresponds to the URI.
   * This is used for preparing local resources to be included in the container launch context.
   *
   * @param localURI URI to resolve; a missing scheme is treated as the local file system.
   * @param hadoopConf Hadoop configuration used to qualify scheme-less URIs.
   * @return The qualified path for the URI.
   */
  private def getQualifiedLocalPath(localURI: URI, hadoopConf: Configuration): Path = {
    val qualifiedURI =
      if (localURI.getScheme == null) {
        // If not specified, assume this is in the local filesystem to keep the behavior
        // consistent with that of Hadoop
        new URI(FileSystem.getLocal(hadoopConf).makeQualified(new Path(localURI)).toString)
      } else {
        localURI
      }
    new Path(qualifiedURI)
  }
/**
* Whether to consider jars provided by the user to have precedence over the Spark jars when
* loading user classes.
*/
def isUserClassPathFirst(conf: SparkConf, isDriver: Boolean): Boolean = {
if (isDriver) {
conf.get(DRIVER_USER_CLASS_PATH_FIRST)
} else {
conf.get(EXECUTOR_USER_CLASS_PATH_FIRST)
}
}
/**
* Joins all the path components using Path.SEPARATOR.
*/
def buildPath(components: String*): String = {
components.mkString(Path.SEPARATOR)
}
def createAppReport(report: ApplicationReport): YarnAppReport = {
val diags = report.getDiagnostics()
val diagsOpt = if (diags != null && diags.nonEmpty) Some(diags) else None
YarnAppReport(report.getYarnApplicationState(), report.getFinalApplicationStatus(), diagsOpt)
}
  /**
   * Create a properly quoted and escaped library path string to be added as a prefix to the command
   * executed by YARN. This is different from normal quoting / escaping due to YARN executing the
   * command through "bash -c".
   *
   * @param libpath Library path to encode.
   * @param conf Spark configuration, used to translate gateway paths to cluster paths.
   */
  def createLibraryPathPrefix(libpath: String, conf: SparkConf): String = {
    val cmdPrefix = if (Utils.isWindows) {
      Utils.libraryPathEnvPrefix(Seq(libpath))
    } else {
      val envName = Utils.libraryPathEnvName
      // For quotes, escape both the quote and the escape character when encoding in the command
      // string.
      val quoted = libpath.replace("\\"", "\\\\\\\\\\\\\\"")
      envName + "=\\\\\\"" + quoted + File.pathSeparator + "$" + envName + "\\\\\\""
    }
    getClusterPath(conf, cmdPrefix)
  }
def confToProperties(conf: SparkConf): Properties = {
val props = new Properties()
conf.getAll.foreach { case (k, v) =>
props.setProperty(k, v)
}
props
}
def writePropertiesToArchive(props: Properties, name: String, out: ZipOutputStream): Unit = {
out.putNextEntry(new ZipEntry(name))
val writer = new OutputStreamWriter(out, StandardCharsets.UTF_8)
props.store(writer, "Spark configuration.")
writer.flush()
out.closeEntry()
}
}
/**
 * [[SparkApplication]] entry point that submits the application to YARN through [[Client]].
 */
private[spark] class YarnClusterApplication extends SparkApplication {
  override def start(args: Array[String], conf: SparkConf): Unit = {
    // SparkSubmit would use yarn cache to distribute files & jars in yarn mode,
    // so remove them from sparkConf here for yarn mode.
    conf.remove(JARS)
    conf.remove(FILES)
    new Client(new ClientArguments(args), conf, null).run()
  }
}
/**
 * Snapshot of a YARN application's status as returned by the monitoring helpers.
 *
 * @param appState Current YARN application state.
 * @param finalState Final application status reported by YARN.
 * @param diagnostics Diagnostics message, when YARN reported a non-empty one.
 */
private[spark] case class YarnAppReport(
    appState: YarnApplicationState,
    finalState: FinalApplicationStatus,
    diagnostics: Option[String])
| pgandhi999/spark | resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala | Scala | apache-2.0 | 65,444 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.scalar.lang
import org.apache.ignite._
import org.apache.ignite.internal.util.lang.GridClosure3X
/**
 * Peer deploy aware adapter exposing a Scala 3-argument function as Ignite's
 * `GridClosure3X`.
 */
class ScalarClosure3X[E1, E2, E3, R](private val f: (E1, E2, E3) => R) extends GridClosure3X[E1, E2, E3, R] {
    assert(f != null)

    /**
     * Forwards the three arguments to the wrapped function and returns its result.
     */
    @throws(classOf[IgniteCheckedException])
    def applyx(e1: E1, e2: E2, e3: E3): R = f(e1, e2, e3)
}
| vldpyatkov/ignite | modules/scalar/src/main/scala/org/apache/ignite/scalar/lang/ScalarClosure3X.scala | Scala | apache-2.0 | 1,304 |
package com.unrlab.dockerscala.client
import com.unrlab.dockerscala.config.{ConfigLoader, Loggable}
/**
 * Fluent, mutable builder producing [[Container]] values.
 * NOTE(review): `image` and `name` default to null; build() assumes both were
 * supplied via withImage/withName — confirm all call sites do so.
 */
class DockerContainer {

  private var image: String = _
  // Public: the Docker client reads the name when composing request URLs.
  var name: String = _
  private var ports: Seq[PortMapping] = Seq()
  private var envs: Seq[EnvVariable] = Seq()

  /** Sets the container name. */
  def withName(name: String): DockerContainer = { this.name = name; this }

  /** Sets the image to run. */
  def withImage(image: String): DockerContainer = { this.image = image; this }

  /** Declares the internal-to-external port bindings. */
  def withPortMapping(ports: Seq[PortMapping]): DockerContainer = { this.ports = ports; this }

  /** Declares the environment variables to inject. */
  def withEnv(envs: Seq[EnvVariable]): DockerContainer = { this.envs = envs; this }

  /** Materializes the accumulated builder state into an immutable [[Container]]. */
  def build(): Container = Container(None, image, ports, Some(name), envs)
}
// Maps a container-internal TCP port to the externally published host port.
case class PortMapping(internal: Int, external: Int)
// A single name/value environment variable injected into the container.
case class EnvVariable(name: String, value: String)
/**
 * Thin client for the Docker Engine HTTP API: create/start, stop, remove and
 * look up containers. The daemon host and API version come from the loaded
 * configuration; the HTTP verbs (get/post/delete) come from the OkHttp trait.
 */
object Docker extends ConfigLoader with OkHttp with Loggable {

  /**
   * Creates and starts the given container, replacing any pre-existing
   * container with the same name (stopped and removed first). Returns the
   * response of the start call on successful creation, otherwise the failed
   * create response.
   */
  def run(container: DockerContainer): HttpResponse = {
    val builtContainer: Container = container.build()
    val uri: String = s"$buildUrl/containers/create?name=${builtContainer.name.getOrElse("")}"

    // Make the operation idempotent: drop a previous container of the same name.
    if (find(container.name)) {
      stop(container.name)
      remove(container.name)
    }

    val response = post(uri, Some(buildData(builtContainer)))
    response.statusCode match {
      case 201 =>
        // Created. Start it by name; the id returned in the create body is not
        // needed, as the start endpoint accepts the container name as well.
        val startResult = post(s"$buildUrl/containers/${container.name}/start")
        startResult.statusCode match {
          case 204 =>
            startResult
          case 304 =>
            logInfo(s"[DOCKER][START][ALREADY_STARTED] Container ${container.name} is already started")
            startResult
          case 404 =>
            logError(s"[DOCKER][START][NO_SUCH_CONTAINER] Unable to start container : ${container.name}. Message : ${response.body}")
            startResult
          case _ =>
            logError(s"[DOCKER][START][SERVER_ERROR] Unable to start container : ${container.name}. Message : ${response.body}")
            startResult
        }
      case 400 =>
        logError(s"[DOCKER][CREATE][BAD_REQUEST] Unable to create container : ${container.name}. Message : ${response.body}")
        response
      case 404 =>
        logError(s"[DOCKER][CREATE][NO_SUCH_CONTAINER] Unable to create container : ${container.name}. Message : ${response.body}")
        response
      case 406 =>
        logError(s"[DOCKER][CREATE][ATTACH] Unable to create container : ${container.name}. Message : ${response.body}")
        response
      case 409 =>
        logError(s"[DOCKER][CREATE][CONFLICT] Unable to create container : ${container.name}. Message : ${response.body}")
        response
      case _ =>
        logError(s"[DOCKER][CREATE][SERVER_ERROR] Unable to create container : ${container.name}. Message : ${response.body}")
        response
    }
  }

  /** Stops the named container. Returns true when it is stopped afterwards. */
  def stop(containerName: String): Boolean = {
    val uri = s"$buildUrl/containers/$containerName/stop"
    val response = post(uri)
    response.statusCode match {
      // 204 = stopped now, 304 = was already stopped: both count as success.
      case 204 | 304 =>
        true
      case 404 =>
        logError(s"[DOCKER][STOP][NO_SUCH_CONTAINER] Unable to stop container : $containerName. Message : ${response.body}")
        false
      case _ =>
        logError(s"[DOCKER][STOP][SERVER_ERROR] Unable to stop container : $containerName. Message : ${response.body}")
        false
    }
  }

  /** Deletes the named container. Returns true on success. */
  def remove(containerName: String): Boolean = {
    val uri = s"$buildUrl/containers/$containerName"
    val response = delete(uri)
    response.statusCode match {
      case 204 =>
        true
      case 400 =>
        logError(s"[DOCKER][REMOVE][BAD_REQUEST] Unable to remove container : $containerName. Message : ${response.body}")
        false
      case 404 =>
        logError(s"[DOCKER][REMOVE][NO_SUCH_CONTAINER] Unable to remove container : $containerName. Message : ${response.body}")
        false
      case _ =>
        logError(s"[DOCKER][REMOVE][SERVER_ERROR] Unable to remove container : $containerName. Message : ${response.body}")
        false
    }
  }

  /** Returns true when a container (running or not) with the given name exists. */
  def find(containerName: String): Boolean = {
    val uri = s"""$buildUrl/containers/json?all=1&filters={%22name%22:[%22$containerName%22]}"""
    val response = get(uri)
    response.statusCode match {
      case 200 =>
        // The listing body mentions the name iff such a container exists.
        response.body.contains(containerName)
      case 400 =>
        logError(s"[DOCKER][FIND][BAD_REQUEST] Unable to find container : $containerName. Message : ${response.body}")
        false
      case _ =>
        logError(s"[DOCKER][FIND][SERVER_ERROR] Unable to find container : $containerName. Message : ${response.body}")
        false
    }
  }

  // Base endpoint of the Docker daemon, e.g. "http://host:2375/v1.24".
  private val buildUrl: String = s"${dockerConfig.host}/v${dockerConfig.version}"

  /** Renders the JSON payload for the create-container call. */
  private def buildData(container: Container): String = {
    val envs: String = container.env.map{ e =>
      s""""${e.name}=${e.value}""""
    }.mkString(",")
    val ports: String = container.port.map{ p =>
      s""" "${p.internal}/tcp":[{"HostPort":"${p.external}"}] """
    }.mkString("")
    s"""
      |{
      | "Env":[
      |   $envs
      | ],
      | "Image":"${container.image}",
      | "HostConfig":{
      |   "PortBindings":{
      |     $ports
      |   }
      | }
      |}
    """.stripMargin
  }
}
case class Container(id: Option[String] = None, image: String, port: Seq[PortMapping], name: Option[String], env: Seq[EnvVariable])
| jguido/docker-scala | src/main/scala/com/unrlab/dockerscala/client/Dockerclient.scala | Scala | gpl-3.0 | 5,594 |
/** Copyright 2014 TappingStone, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prediction.data.examples
import io.prediction.data.storage.Event
import io.prediction.data.storage.StorageClientConfig
import io.prediction.data.storage.hbase.HBLEvents
import io.prediction.data.storage.hbase.StorageClient
import scala.concurrent.ExecutionContext.Implicits.global
// Manual test driver: prints the aggregated "user" properties for the app id
// given as the first CLI argument.
object HBLEventsTest {
  def main(args: Array[String]) {
    val appId = args(0).toInt
    // Connects to the HBase-backed event store with an empty hosts/ports config.
    val eventDb = new HBLEvents(
      new StorageClient(new StorageClientConfig(Seq(), Seq(), false)).client,
      "predictionio_eventdata")
    // NOTE(review): .right.get throws if aggregation returned a Left — fail-fast
    // is presumably acceptable for this manual driver.
    eventDb.aggregateProperties(appId, "user").right.get.foreach(println(_))
  }
}
| TheDataShed/PredictionIO | data/src/main/scala/examples/HBLEventsTest.scala | Scala | apache-2.0 | 1,222 |
/*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the License);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.feature
import java.lang
import java.util.Date
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConverters._
@RunWith(classOf[JUnitRunner])
class AvroSimpleFeatureUtilsTest extends Specification {

  // Shared round-trip check: encoding then decoding a list must reproduce it.
  private def roundTripList[T](orig: List[T], binding: Class[_]) = {
    val encoded = AvroSimpleFeatureUtils.encodeList(orig.asJava, binding)
    AvroSimpleFeatureUtils.decodeList(encoded).asScala mustEqual orig
  }

  // Shared round-trip check: encoding then decoding a map must reproduce it.
  private def roundTripMap[K, V](orig: Map[K, V], keyBinding: Class[_], valueBinding: Class[_]) = {
    val encoded = AvroSimpleFeatureUtils.encodeMap(orig.asJava, keyBinding, valueBinding)
    AvroSimpleFeatureUtils.decodeMap(encoded).asScala mustEqual orig
  }

  "AvroSimpleFeatureUtils" should {

    "encode and decode lists" >> {
      "that are null" >> {
        val encoded = AvroSimpleFeatureUtils.encodeList(null, classOf[String])
        AvroSimpleFeatureUtils.decodeList(encoded) must beNull
      }
      "that are empty" >> {
        roundTripList(List.empty[Any], classOf[String])
      }
      "of booleans" >> {
        "in scala" >> {
          roundTripList(List(true, false, false, true), classOf[Boolean])
        }
        "in java" >> {
          roundTripList(
            List(lang.Boolean.TRUE, lang.Boolean.TRUE, lang.Boolean.FALSE, lang.Boolean.FALSE),
            classOf[java.lang.Boolean])
        }
      }
      "of ints" >> {
        "in scala" >> {
          roundTripList(List(1, 2, 3, 4), classOf[Int])
        }
        "in java" >> {
          roundTripList(
            List(Integer.valueOf(1), Integer.valueOf(2), Integer.valueOf(3), Integer.valueOf(4)),
            classOf[java.lang.Integer])
        }
      }
      "of longs" >> {
        "in scala" >> {
          roundTripList(List(1L, 2L, 3L, 4L), classOf[Long])
        }
        "in java" >> {
          roundTripList(
            List(lang.Long.valueOf(1), lang.Long.valueOf(2), lang.Long.valueOf(3), lang.Long.valueOf(4)),
            classOf[java.lang.Long])
        }
      }
      "of strings" >> {
        "in scala" >> {
          roundTripList(List("1", "two", "three", "four"), classOf[String])
        }
        "in java" >> {
          roundTripList(
            List(new lang.String("1"), new lang.String("two"),
              new lang.String("three"), new lang.String("four")),
            classOf[String])
        }
        "that are empty" >> {
          roundTripList(List("", "", "non-empty", ""), classOf[String])
        }
      }
      "of doubles" >> {
        "in scala" >> {
          roundTripList(List(1.0, 2.0, 3.0, 4.0), classOf[Double])
        }
        "in java" >> {
          roundTripList(
            List(lang.Double.valueOf(1.0), lang.Double.valueOf(2.0),
              lang.Double.valueOf(3.0), lang.Double.valueOf(4.0)),
            classOf[java.lang.Double])
        }
      }
      "of floats" >> {
        "in scala" >> {
          roundTripList(List(1.0f, 2.0f, 3.0f, 4.0f), classOf[Float])
        }
        "in java" >> {
          roundTripList(
            List(lang.Float.valueOf(1.0f), lang.Float.valueOf(2.0f),
              lang.Float.valueOf(3.0f), lang.Float.valueOf(4.0f)),
            classOf[java.lang.Float])
        }
      }
      "of dates" >> {
        roundTripList(List(new Date(0), new Date(), new Date(99999)), classOf[Date])
      }
    }

    "encode and decode maps" >> {
      "that are null" >> {
        val encoded = AvroSimpleFeatureUtils.encodeMap(null, classOf[String], classOf[String])
        AvroSimpleFeatureUtils.decodeMap(encoded) must beNull
      }
      "that are empty" >> {
        roundTripMap(Map.empty[Any, Any], classOf[String], classOf[String])
      }
      "of booleans" >> {
        "in scala" >> {
          // Duplicate literal keys collapse when the Map is built, before encoding.
          roundTripMap(Map(true -> true, false -> true, false -> false, true -> false),
            classOf[Boolean], classOf[Boolean])
        }
        "in java" >> {
          roundTripMap(
            Map(
              lang.Boolean.TRUE -> lang.Boolean.TRUE,
              lang.Boolean.TRUE -> lang.Boolean.FALSE,
              lang.Boolean.FALSE -> lang.Boolean.TRUE,
              lang.Boolean.FALSE -> lang.Boolean.FALSE
            ),
            classOf[java.lang.Boolean], classOf[java.lang.Boolean])
        }
      }
      "of ints" >> {
        "in scala" >> {
          roundTripMap(Map(1 -> 2, 2 -> 3, 3 -> 4, 4 -> 1), classOf[Int], classOf[Int])
        }
        "in java" >> {
          roundTripMap(
            Map(
              Integer.valueOf(1) -> Integer.valueOf(4),
              Integer.valueOf(2) -> Integer.valueOf(3),
              Integer.valueOf(3) -> Integer.valueOf(1),
              Integer.valueOf(4) -> Integer.valueOf(2)
            ),
            classOf[Integer], classOf[Integer])
        }
      }
      "of longs" >> {
        "in scala" >> {
          roundTripMap(Map(1L -> 2L, 2L -> 4L, 3L -> 3L, 4L -> 1L), classOf[Long], classOf[Long])
        }
        "in java" >> {
          roundTripMap(
            Map(
              lang.Long.valueOf(1) -> lang.Long.valueOf(1),
              lang.Long.valueOf(2) -> lang.Long.valueOf(2),
              lang.Long.valueOf(3) -> lang.Long.valueOf(3),
              lang.Long.valueOf(4) -> lang.Long.valueOf(4)
            ),
            classOf[java.lang.Long], classOf[java.lang.Long])
        }
      }
      "of strings" >> {
        "in scala" >> {
          roundTripMap(Map("1" -> "one", "two" -> "2", "three" -> "3", "four" -> "4"),
            classOf[String], classOf[String])
        }
        "in java" >> {
          roundTripMap(
            Map(
              new lang.String("1") -> new lang.String("one"),
              new lang.String("two") -> new lang.String("2"),
              new lang.String("three") -> new lang.String("3"),
              new lang.String("four") -> new lang.String("4")
            ),
            classOf[java.lang.String], classOf[java.lang.String])
        }
        "that are empty" >> {
          roundTripMap(Map("" -> "", "two" -> ""), classOf[String], classOf[String])
        }
      }
      "of doubles" >> {
        "in scala" >> {
          roundTripMap(Map(1.0 -> 2.0, 2.0 -> 3.0, 3.0 -> 4.0, 4.0 -> 1.0),
            classOf[Double], classOf[Double])
        }
        "in java" >> {
          roundTripMap(
            Map(
              lang.Double.valueOf(1.0) -> lang.Double.valueOf(1.0),
              lang.Double.valueOf(2.0) -> lang.Double.valueOf(4.0),
              lang.Double.valueOf(3.0) -> lang.Double.valueOf(3.0),
              lang.Double.valueOf(4.0) -> lang.Double.valueOf(2.0)
            ),
            classOf[java.lang.Double], classOf[java.lang.Double])
        }
      }
      "of floats" >> {
        "in scala" >> {
          roundTripMap(Map(1.0f -> 1.1f, 2.0f -> 2.1f, 3.0f -> 3.1f, 4.0f -> 4.1f),
            classOf[Float], classOf[Float])
        }
        "in java" >> {
          roundTripMap(
            Map(
              lang.Float.valueOf(1.0f) -> lang.Float.valueOf(0.9f),
              lang.Float.valueOf(2.0f) -> lang.Float.valueOf(1.9f),
              lang.Float.valueOf(3.0f) -> lang.Float.valueOf(2.9f),
              lang.Float.valueOf(4.0f) -> lang.Float.valueOf(3.9f)
            ),
            classOf[java.lang.Float], classOf[java.lang.Float])
        }
      }
      "of dates" >> {
        roundTripMap(
          Map(
            new Date(0) -> new Date(),
            new Date() -> new Date(99999),
            new Date(99999) -> new Date(0)
          ),
          classOf[Date], classOf[Date])
      }
      "of mixed keys and values" >> {
        roundTripMap(Map("key1" -> new Date(0), "key2" -> new Date(), "key3" -> new Date(99999)),
          classOf[String], classOf[Date])
      }
    }

    "fail with mixed classes" >> {
      "in lists" >> {
        val orig = List(1, "2")
        AvroSimpleFeatureUtils.encodeList(orig.asJava, classOf[String]) must throwAn[ClassCastException]
      }
      "in map keys" >> {
        val orig = Map("key1" -> 1, 1.0 -> 2)
        AvroSimpleFeatureUtils.encodeMap(orig.asJava,
          classOf[String], classOf[Int]) must throwAn[ClassCastException]
      }
      "in map values" >> {
        val orig = Map("key1" -> 1, "key2" -> "value2")
        AvroSimpleFeatureUtils.encodeMap(orig.asJava,
          classOf[String], classOf[Int]) must throwAn[ClassCastException]
      }
    }

    "fail-fast with non-primitive items" >> {
      "in lists" >> {
        val orig = List(new Object(), new Object())
        AvroSimpleFeatureUtils.encodeList(orig.asJava, classOf[Object]) must throwAn[IllegalArgumentException]
      }
      "in map keys" >> {
        val orig = Map(new Object() -> 1, new Object() -> 2)
        AvroSimpleFeatureUtils.encodeMap(orig.asJava,
          classOf[Object], classOf[Int]) must throwAn[IllegalArgumentException]
      }
      "in map values" >> {
        val orig = Map(2 -> new Object(), 1 -> new Object())
        AvroSimpleFeatureUtils.encodeMap(orig.asJava,
          classOf[Int], classOf[Object]) must throwAn[IllegalArgumentException]
      }
    }
  }
}
| mmatz-ccri/geomesa | geomesa-feature/src/test/scala/org/locationtech/geomesa/feature/AvroSimpleFeatureUtilsTest.scala | Scala | apache-2.0 | 14,425 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network
import java.io.Closeable
import java.nio.ByteBuffer
import scala.concurrent.{Future, Promise}
import scala.concurrent.duration.Duration
import scala.reflect.ClassTag
import org.apache.spark.internal.Logging
import org.apache.spark.network.buffer.{ManagedBuffer, NioManagedBuffer}
import org.apache.spark.network.shuffle.{BlockFetchingListener, ShuffleClient}
import org.apache.spark.scheduler.MapStatus
import org.apache.spark.storage.{BlockId, StorageLevel}
import org.apache.spark.util.ThreadUtils
private[spark]
abstract class BlockTransferService extends ShuffleClient with Closeable with Logging {
  /**
   * Initialize the transfer service by giving it the BlockDataManager that can be used to fetch
   * local blocks or put local blocks.
   */
  def init(blockDataManager: BlockDataManager): Unit
  /**
   * Tear down the transfer service.
   */
  def close(): Unit
  /**
   * Port number the service is listening on, available only after [[init]] is invoked.
   */
  def port: Int
  /**
   * Host name the service is listening on, available only after [[init]] is invoked.
   */
  def hostName: String
  /**
   * Fetch a sequence of blocks from a remote node asynchronously,
   * available only after [[init]] is invoked.
   *
   * Note that this API takes a sequence so the implementation can batch requests, and does not
   * return a future so the underlying implementation can invoke onBlockFetchSuccess as soon as
   * the data of a block is fetched, rather than waiting for all blocks to be fetched.
   */
  override def fetchBlocks(
      host: String,
      port: Int,
      execId: String,
      blockIds: Array[String],
      listener: BlockFetchingListener): Unit
  /**
   * Upload a single block to a remote node, available only after [[init]] is invoked.
   */
  def uploadBlock(
      hostname: String,
      port: Int,
      execId: String,
      blockId: BlockId,
      blockData: ManagedBuffer,
      level: StorageLevel,
      classTag: ClassTag[_]): Future[Unit]
  /**
   * Send the given MapStatus for (shuffleId, mapId) to the node at hostname:port.
   * NOTE(review): semantics inferred from the signature only — confirm against
   * the concrete implementations before relying on this description.
   */
  def mapOutputReady(
      hostname: String,
      port: Int,
      shuffleId: Int,
      mapId: Int,
      numReduces: Int,
      mapStatus: MapStatus): Future[Unit]
  /**
   * A special case of [[fetchBlocks]], as it fetches only one block and is blocking.
   *
   * It is also only available after [[init]] is invoked.
   */
  def fetchBlockSync(host: String, port: Int, execId: String, blockId: String): ManagedBuffer = {
    // A monitor for the thread to wait on.
    val result = Promise[ManagedBuffer]()
    fetchBlocks(host, port, execId, Array(blockId),
      new BlockFetchingListener {
        override def onBlockFetchFailure(blockId: String, exception: Throwable): Unit = {
          // Propagate the fetch failure to the waiting caller.
          result.failure(exception)
        }
        override def onBlockFetchSuccess(blockId: String, data: ManagedBuffer): Unit = {
          // Copy into a freshly allocated buffer; presumably the listener-owned
          // buffer is not guaranteed valid after the callback returns — confirm
          // against the ManagedBuffer contract.
          val ret = ByteBuffer.allocate(data.size.toInt)
          ret.put(data.nioByteBuffer())
          ret.flip()
          result.success(new NioManagedBuffer(ret))
        }
      })
    // Blocks indefinitely until the single-block fetch succeeds or fails.
    ThreadUtils.awaitResult(result.future, Duration.Inf)
  }
  /**
   * Upload a single block to a remote node, available only after [[init]] is invoked.
   *
   * This method is similar to [[uploadBlock]], except this one blocks the thread
   * until the upload finishes.
   */
  def uploadBlockSync(
      hostname: String,
      port: Int,
      execId: String,
      blockId: BlockId,
      blockData: ManagedBuffer,
      level: StorageLevel,
      classTag: ClassTag[_]): Unit = {
    val future = uploadBlock(hostname, port, execId, blockId, blockData, level, classTag)
    // Blocks indefinitely until the asynchronous upload completes.
    ThreadUtils.awaitResult(future, Duration.Inf)
  }
}
| likithkailas/StreamingSystems | core/src/main/scala/org/apache/spark/network/BlockTransferService.scala | Scala | apache-2.0 | 4,484 |
// Compiler test: exporting a class with a secondary constructor — both
// Bar() and Bar(Int) must be callable through the export alias.
object Outer0 {
  object Inner {
    class Bar(x: Int):
      def this() = this(0)
  }
  export Inner.Bar
  val _ = Bar()
  val _ = Bar(2)
}
object Outer2 {
  object Inner {
    class Bar(x: Int):
      def this() = this(0)
  }
  // Wildcard export: both constructor arities must resolve through the proxies.
  object test2:
    export Inner._
    val x = Bar()
    val y = Bar(2)
  // A locally defined term `Bar` alongside `export Inner.Bar` — verifies the
  // two can coexist and that `Bar()` below still compiles.
  object test3:
    export Inner.Bar
    def Bar: () => String = () => ""
    val x = Bar()
}
// Re-exporting from another top-level object's export: the constructor proxies
// must still resolve for both arities.
object Outer3 {
  export Outer0._
  private val x = Bar()
  private val y = Bar(2)
}
// Like Outer0, but Inner also declares a companion object for Bar: the wildcard
// export must bring in class and companion without conflict.
object Outer4 {
  object Inner {
    class Bar(x: Int):
      def this() = this(0)
    object Bar
  }
  export Inner._
  val _ = Bar()
  val _ = Bar(2)
}
| dotty-staging/dotty | tests/pos/i12299.scala | Scala | apache-2.0 | 649 |
/*
* GNU GENERAL PUBLIC LICENSE
* Version 2, June 1991
*
* Copyright (C) 1989, 1991 Free Software Foundation, Inc., <http://fsf.org/>
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
* Everyone is permitted to copy and distribute verbatim copies
* of this license document, but changing it is not allowed.
*
* Preamble
*
* The licenses for most software are designed to take away your
* freedom to share and change it. By contrast, the GNU General Public
* License is intended to guarantee your freedom to share and change free
* software--to make sure the software is free for all its users. This
* General Public License applies to most of the Free Software
* Foundation's software and to any other program whose authors commit to
* using it. (Some other Free Software Foundation software is covered by
* the GNU Lesser General Public License instead.) You can apply it to
* your programs, too.
*
* When we speak of free software, we are referring to freedom, not
* price. Our General Public Licenses are designed to make sure that you
* have the freedom to distribute copies of free software (and charge for
* this service if you wish), that you receive source code or can get it
* if you want it, that you can change the software or use pieces of it
* in new free programs; and that you know you can do these things.
*
* To protect your rights, we need to make restrictions that forbid
* anyone to deny you these rights or to ask you to surrender the rights.
* These restrictions translate to certain responsibilities for you if you
* distribute copies of the software, or if you modify it.
*
* For example, if you distribute copies of such a program, whether
* gratis or for a fee, you must give the recipients all the rights that
* you have. You must make sure that they, too, receive or can get the
* source code. And you must show them these terms so they know their
* rights.
*
* We protect your rights with two steps: (1) copyright the software, and
* (2) offer you this license which gives you legal permission to copy,
* distribute and/or modify the software.
*
* Also, for each author's protection and ours, we want to make certain
* that everyone understands that there is no warranty for this free
* software. If the software is modified by someone else and passed on, we
* want its recipients to know that what they have is not the original, so
* that any problems introduced by others will not reflect on the original
* authors' reputations.
*
* Finally, any free program is threatened constantly by software
* patents. We wish to avoid the danger that redistributors of a free
* program will individually obtain patent licenses, in effect making the
* program proprietary. To prevent this, we have made it clear that any
* patent must be licensed for everyone's free use or not licensed at all.
*
* The precise terms and conditions for copying, distribution and
* modification follow.
*
* GNU GENERAL PUBLIC LICENSE
* TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
*
* 0. This License applies to any program or other work which contains
* a notice placed by the copyright holder saying it may be distributed
* under the terms of this General Public License. The "Program", below,
* refers to any such program or work, and a "work based on the Program"
* means either the Program or any derivative work under copyright law:
* that is to say, a work containing the Program or a portion of it,
* either verbatim or with modifications and/or translated into another
* language. (Hereinafter, translation is included without limitation in
* the term "modification".) Each licensee is addressed as "you".
*
* Activities other than copying, distribution and modification are not
* covered by this License; they are outside its scope. The act of
* running the Program is not restricted, and the output from the Program
* is covered only if its contents constitute a work based on the
* Program (independent of having been made by running the Program).
* Whether that is true depends on what the Program does.
*
* 1. You may copy and distribute verbatim copies of the Program's
* source code as you receive it, in any medium, provided that you
* conspicuously and appropriately publish on each copy an appropriate
* copyright notice and disclaimer of warranty; keep intact all the
* notices that refer to this License and to the absence of any warranty;
* and give any other recipients of the Program a copy of this License
* along with the Program.
*
* You may charge a fee for the physical act of transferring a copy, and
* you may at your option offer warranty protection in exchange for a fee.
*
* 2. You may modify your copy or copies of the Program or any portion
* of it, thus forming a work based on the Program, and copy and
* distribute such modifications or work under the terms of Section 1
* above, provided that you also meet all of these conditions:
*
* a) You must cause the modified files to carry prominent notices
* stating that you changed the files and the date of any change.
*
* b) You must cause any work that you distribute or publish, that in
* whole or in part contains or is derived from the Program or any
* part thereof, to be licensed as a whole at no charge to all third
* parties under the terms of this License.
*
* c) If the modified program normally reads commands interactively
* when run, you must cause it, when started running for such
* interactive use in the most ordinary way, to print or display an
* announcement including an appropriate copyright notice and a
* notice that there is no warranty (or else, saying that you provide
* a warranty) and that users may redistribute the program under
* these conditions, and telling the user how to view a copy of this
* License. (Exception: if the Program itself is interactive but
* does not normally print such an announcement, your work based on
* the Program is not required to print an announcement.)
*
* These requirements apply to the modified work as a whole. If
* identifiable sections of that work are not derived from the Program,
* and can be reasonably considered independent and separate works in
* themselves, then this License, and its terms, do not apply to those
* sections when you distribute them as separate works. But when you
* distribute the same sections as part of a whole which is a work based
* on the Program, the distribution of the whole must be on the terms of
* this License, whose permissions for other licensees extend to the
* entire whole, and thus to each and every part regardless of who wrote it.
*
* Thus, it is not the intent of this section to claim rights or contest
* your rights to work written entirely by you; rather, the intent is to
* exercise the right to control the distribution of derivative or
* collective works based on the Program.
*
* In addition, mere aggregation of another work not based on the Program
* with the Program (or with a work based on the Program) on a volume of
* a storage or distribution medium does not bring the other work under
* the scope of this License.
*
* 3. You may copy and distribute the Program (or a work based on it,
* under Section 2) in object code or executable form under the terms of
* Sections 1 and 2 above provided that you also do one of the following:
*
* a) Accompany it with the complete corresponding machine-readable
* source code, which must be distributed under the terms of Sections
* 1 and 2 above on a medium customarily used for software interchange; or,
*
* b) Accompany it with a written offer, valid for at least three
* years, to give any third party, for a charge no more than your
* cost of physically performing source distribution, a complete
* machine-readable copy of the corresponding source code, to be
* distributed under the terms of Sections 1 and 2 above on a medium
* customarily used for software interchange; or,
*
* c) Accompany it with the information you received as to the offer
* to distribute corresponding source code. (This alternative is
* allowed only for noncommercial distribution and only if you
* received the program in object code or executable form with such
* an offer, in accord with Subsection b above.)
*
* The source code for a work means the preferred form of the work for
* making modifications to it. For an executable work, complete source
* code means all the source code for all modules it contains, plus any
* associated interface definition files, plus the scripts used to
* control compilation and installation of the executable. However, as a
* special exception, the source code distributed need not include
* anything that is normally distributed (in either source or binary
* form) with the major components (compiler, kernel, and so on) of the
* operating system on which the executable runs, unless that component
* itself accompanies the executable.
*
* If distribution of executable or object code is made by offering
* access to copy from a designated place, then offering equivalent
* access to copy the source code from the same place counts as
* distribution of the source code, even though third parties are not
* compelled to copy the source along with the object code.
*
* 4. You may not copy, modify, sublicense, or distribute the Program
* except as expressly provided under this License. Any attempt
* otherwise to copy, modify, sublicense or distribute the Program is
* void, and will automatically terminate your rights under this License.
* However, parties who have received copies, or rights, from you under
* this License will not have their licenses terminated so long as such
* parties remain in full compliance.
*
* 5. You are not required to accept this License, since you have not
* signed it. However, nothing else grants you permission to modify or
* distribute the Program or its derivative works. These actions are
* prohibited by law if you do not accept this License. Therefore, by
* modifying or distributing the Program (or any work based on the
* Program), you indicate your acceptance of this License to do so, and
* all its terms and conditions for copying, distributing or modifying
* the Program or works based on it.
*
* 6. Each time you redistribute the Program (or any work based on the
* Program), the recipient automatically receives a license from the
* original licensor to copy, distribute or modify the Program subject to
* these terms and conditions. You may not impose any further
* restrictions on the recipients' exercise of the rights granted herein.
* You are not responsible for enforcing compliance by third parties to
* this License.
*
* 7. If, as a consequence of a court judgment or allegation of patent
* infringement or for any other reason (not limited to patent issues),
* conditions are imposed on you (whether by court order, agreement or
* otherwise) that contradict the conditions of this License, they do not
* excuse you from the conditions of this License. If you cannot
* distribute so as to satisfy simultaneously your obligations under this
* License and any other pertinent obligations, then as a consequence you
* may not distribute the Program at all. For example, if a patent
* license would not permit royalty-free redistribution of the Program by
* all those who receive copies directly or indirectly through you, then
* the only way you could satisfy both it and this License would be to
* refrain entirely from distribution of the Program.
*
* If any portion of this section is held invalid or unenforceable under
* any particular circumstance, the balance of the section is intended to
* apply and the section as a whole is intended to apply in other
* circumstances.
*
* It is not the purpose of this section to induce you to infringe any
* patents or other property right claims or to contest validity of any
* such claims; this section has the sole purpose of protecting the
* integrity of the free software distribution system, which is
* implemented by public license practices. Many people have made
* generous contributions to the wide range of software distributed
* through that system in reliance on consistent application of that
* system; it is up to the author/donor to decide if he or she is willing
* to distribute software through any other system and a licensee cannot
* impose that choice.
*
* This section is intended to make thoroughly clear what is believed to
* be a consequence of the rest of this License.
*
* 8. If the distribution and/or use of the Program is restricted in
* certain countries either by patents or by copyrighted interfaces, the
* original copyright holder who places the Program under this License
* may add an explicit geographical distribution limitation excluding
* those countries, so that distribution is permitted only in or among
* countries not thus excluded. In such case, this License incorporates
* the limitation as if written in the body of this License.
*
* 9. The Free Software Foundation may publish revised and/or new versions
* of the General Public License from time to time. Such new versions will
* be similar in spirit to the present version, but may differ in detail to
* address new problems or concerns.
*
* Each version is given a distinguishing version number. If the Program
* specifies a version number of this License which applies to it and "any
* later version", you have the option of following the terms and conditions
* either of that version or of any later version published by the Free
* Software Foundation. If the Program does not specify a version number of
* this License, you may choose any version ever published by the Free Software
* Foundation.
*
* 10. If you wish to incorporate parts of the Program into other free
* programs whose distribution conditions are different, write to the author
* to ask for permission. For software which is copyrighted by the Free
* Software Foundation, write to the Free Software Foundation; we sometimes
* make exceptions for this. Our decision will be guided by the two goals
* of preserving the free status of all derivatives of our free software and
* of promoting the sharing and reuse of software generally.
*
* NO WARRANTY
*
* 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
* FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
* OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
* PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
* OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
* TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
* PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
* REPAIR OR CORRECTION.
*
* 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
* WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
* REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
* INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
* OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
* TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
* YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
* PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
* END OF TERMS AND CONDITIONS
*
* How to Apply These Terms to Your New Programs
*
* If you develop a new program, and you want it to be of the greatest
* possible use to the public, the best way to achieve this is to make it
* free software which everyone can redistribute and change under these terms.
*
* To do so, attach the following notices to the program. It is safest
* to attach them to the start of each source file to most effectively
* convey the exclusion of warranty; and each file should have at least
* the "copyright" line and a pointer to where the full notice is found.
*
* {description}
* Copyright (C) {year} {fullname}
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Also add information on how to contact you by electronic and paper mail.
*
* If the program is interactive, make it output a short notice like this
* when it starts in an interactive mode:
*
* Gnomovision version 69, Copyright (C) year name of author
* Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
* This is free software, and you are welcome to redistribute it
* under certain conditions; type `show c' for details.
*
* The hypothetical commands `show w' and `show c' should show the appropriate
* parts of the General Public License. Of course, the commands you use may
* be called something other than `show w' and `show c'; they could even be
* mouse-clicks or menu items--whatever suits your program.
*
* You should also get your employer (if you work as a programmer) or your
* school, if any, to sign a "copyright disclaimer" for the program, if
* necessary. Here is a sample; alter the names:
*
* Yoyodyne, Inc., hereby disclaims all copyright interest in the program
* `Gnomovision' (which makes passes at compilers) written by James Hacker.
*
* {signature of Ty Coon}, 1 April 1989
* Ty Coon, President of Vice
*
* This General Public License does not permit incorporating your program into
* proprietary programs. If your program is a subroutine library, you may
* consider it more useful to permit linking proprietary applications with the
* library. If this is what you want to do, use the GNU Lesser General
* Public License instead of this License.
*/
package models.fhs.pages.editcourses
import models.Transactions
import models.fhs.pages.JavaList
import models.persistence.location.{LectureRoom, RoomEntity}
import models.persistence.participants.{Course, Group, Participant, Student}
import models.persistence.subject.AbstractSubject
import org.hibernate.FetchMode
import org.hibernate.criterion.{CriteriaSpecification, Order, Projections, Restrictions}
import play.api.Logger
import play.api.Play._
import scala.collection.JavaConversions._
import scala.util.Random
import play.api.libs.json.Json
/**
* @author fabian
* on 14.03.14.
*/
/**
 * Persistence helpers backing the "edit courses" admin pages.
 *
 * All database access goes through the [[models.Transactions]] wrappers:
 * `Transactions.hibernateAction` exposes a raw Hibernate `Session`, while the
 * plain `Transactions { ... }` form supplies a JPA entity manager.
 */
object MEditCourses {
  /**
   * Partitions `xs` into `n` contiguous groups of near-equal size
   * (group sizes differ by at most one).
   *
   * NOTE(review): when xs.size < n, `quot` is 0 and `grouped(0)` throws an
   * IllegalArgumentException — callers appear to guarantee n <= xs.size;
   * confirm before reusing elsewhere.
   */
  def cut[A](xs: Seq[A], n: Int) = {
    val (quot, rem) = (xs.size / n, xs.size % n)
    // `rem` groups receive quot + 1 elements, the remaining n - rem groups receive quot.
    val (smaller, bigger) = xs.splitAt(xs.size - rem * (quot + 1))
    smaller.grouped(quot) ++ bigger.grouped(quot + 1)
  }

  // Name pools for generated demo students, read once (lazily) from the Play
  // configuration as comma-separated lists. A missing key yields List("").
  lazy val firstNames = current.configuration.getString("firstNames").getOrElse("").split(",").toList
  lazy val lastNames = current.configuration.getString("lastNames").getOrElse("").split(",").toList

  /** All courses ordered by short name; students are fetched lazily (SELECT fetch mode). */
  def findCourses() = {
    Transactions.hibernateAction {
      implicit session =>
        session.createCriteria(classOf[Course]).setFetchMode("students", FetchMode.SELECT).addOrder(Order.asc("shortName")).list().asInstanceOf[java.util.List[Course]].toList
    }
  }

  /** Loads a single course by id with its groups eagerly joined. */
  def findCourse(courseId: Long) = {
    Transactions.hibernateAction {
      implicit session =>
        session.createCriteria(classOf[Course]).add(Restrictions.idEq(courseId)).setFetchMode("groups", FetchMode.JOIN).uniqueResult().asInstanceOf[Course]
    }
  }

  /** Merges a (possibly detached) course back into the persistence context. */
  def updateCourse(course: Course) {
    Transactions {
      implicit em =>
        em.merge(course)
    }
  }

  /** Loads a single group by id. */
  def findGroup(groupId: Long) = {
    Transactions.hibernateAction {
      implicit session =>
        session.createCriteria(classOf[Group]).add(Restrictions.idEq(groupId)).uniqueResult().asInstanceOf[Group]
    }
  }

  /**
   * Deletes a group, first detaching it from both sides of the
   * course <-> group association so the course survives the delete.
   */
  def removeGroup(groupId: Long) {
    Transactions.hibernateAction {
      implicit session =>
        val group = session.createCriteria(classOf[Group]).add(Restrictions.idEq(groupId)).uniqueResult().asInstanceOf[Group]
        val course = group.getCourse
        if (course != null) {
          course.setGroups(course.getGroups - group)
        }
        group.setCourse(null)
        session.delete(group)
        if (course != null) {
          session.saveOrUpdate(course)
        }
    }
  }

  /** Number of groups of the given type that belong to `course`. */
  def getGroupCount(groupType: String, course: Course) = {
    Transactions.hibernateAction {
      implicit session =>
        session.createCriteria(classOf[Group]).
          add(Restrictions.eq("course", course)).
          add(Restrictions.eq("groupType", groupType)).
          setProjection(Projections.rowCount()).uniqueResult().asInstanceOf[Long].toInt
    }
  }

  /** All subjects that reference the given course (distinct roots, lazy students). */
  def findSubjectsWithCourse(courseId: Long): List[AbstractSubject] = {
    Transactions.hibernateAction {
      implicit s =>
        val criterion = s.createCriteria(classOf[AbstractSubject]).setResultTransformer(CriteriaSpecification.DISTINCT_ROOT_ENTITY)
        criterion.createCriteria("courses").add(Restrictions.idEq(courseId)).setFetchMode("students", FetchMode.SELECT)
        criterion.list().toList.asInstanceOf[List[AbstractSubject]]
    }
  }

  /** Deletes the course with the given id. */
  def removeCourse(courseId: Long) {
    Transactions.hibernateAction {
      implicit s =>
        s.delete(s.createCriteria(classOf[Course]).add(Restrictions.idEq(courseId)).uniqueResult())
    }
  }

  /** All rooms (distinct root entities). */
  def findRooms() = {
    Transactions.hibernateAction {
      implicit s =>
        s.createCriteria(classOf[RoomEntity]).setResultTransformer(CriteriaSpecification.DISTINCT_ROOT_ENTITY).list().toList.asInstanceOf[List[RoomEntity]]
    }
  }

  /** Loads a single room by id. */
  def findRoom(id: Long) = {
    Transactions.hibernateAction {
      implicit s =>
        s.createCriteria(classOf[RoomEntity]).add(Restrictions.idEq(id)).uniqueResult().asInstanceOf[RoomEntity]
    }
  }

  /**
   * Null-safe implicit conversion from the persistence entity to its
   * LectureRoom representation.
   *
   * NOTE(review): propagates null instead of using Option — pre-dates the
   * Option style used elsewhere; callers must be null-tolerant.
   */
  implicit def roomEntity2LectureRoom(roomEntity: RoomEntity): LectureRoom = {
    if (roomEntity == null) {
      null
    } else {
      roomEntity.roomEntity2LectureRoom()
    }
  }

  /**
   * Generates `course.getSize` randomly named students, attaches them to the
   * course, and distributes them evenly over the course's groups, per group
   * type, sorted by (lastName, firstName).
   *
   * NOTE(review): building via `(1 to n).toSet.map(...)` relies on Student not
   * defining value-based equals — otherwise identically named students could
   * collapse in the Set. Confirm Student uses identity/entity equality.
   */
  def createStudentsForCourse(course: Course) = {
    val students = (1 to course.getSize).toSet.map {
      _: Int =>
        val student = new Student
        student.setFirstName(Random.shuffle(firstNames).head)
        student.setLastName(Random.shuffle(lastNames).head)
        student
    }
    Transactions.hibernateAction {
      implicit s =>
        s.saveOrUpdate(course)
        course.setStudents(students)
        students.foreach(s.saveOrUpdate(_))
        s.saveOrUpdate(course)
        val groupTypes = course.getGroups.map(_.getGroupType).toSet
        groupTypes.foreach {
          gType =>
            // One partition of the sorted student list per group of this type.
            val groups = course.getGroups.filter(_.getGroupType.equals(gType))
            val parts = cut(students.toSeq.sortBy(s => (s.getLastName, s.getFirstName)), groups.size).toList
            (0 to groups.size - 1).foreach {
              i =>
                groups(i).setStudents(Set[Student]() ++ parts(i))
                groups(i).setSize(groups(i).getStudents.size())
                s.saveOrUpdate(groups(i))
            }
        }
    }
  }

  /**
   * Deletes a student, first removing it from every participant that contains
   * it (and fixing up each participant's size) to keep associations consistent.
   */
  def deleteStudent(studentId: Long) = {
    Transactions.hibernateAction {
      implicit s =>
        val student = s.createCriteria(classOf[Student]).add(Restrictions.idEq(studentId)).uniqueResult().asInstanceOf[Student]
        /** find all participants which contain the student */
        val criterion = s.createCriteria(classOf[Participant])
        criterion.createCriteria("students").add(Restrictions.idEq(studentId))
        val participants = criterion.list().asInstanceOf[JavaList[Participant]].toList
        participants.foreach {
          p =>
            p.setStudents(p.getStudents - student)
            p.setSize(p.getStudents.size())
            s.saveOrUpdate(p)
        }
        s.delete(student)
    }
  }

  /**
   * Removes and deletes every student of the course: groups are emptied first,
   * then each student is reattached (saveOrUpdate) and deleted.
   */
  def deleteStudentsFromCourse(course: Course): Unit = {
    Transactions.hibernateAction {
      implicit s =>
        s.saveOrUpdate(course)
        course.getGroups.foreach {
          group =>
            group.setStudents(Set[Student]())
            s.saveOrUpdate(group)
        }
        val students = course.getStudents
        course.setStudents(Set[Student]())
        students.foreach { student =>
          // saveOrUpdate reattaches a possibly detached entity so delete succeeds.
          s.saveOrUpdate(student)
          s.delete(student)
        }
        s.saveOrUpdate(course)
    }
  }

  /** Loads a single student by id. */
  def findStudentById(studentId: Long): Student = {
    Transactions.hibernateAction {
      implicit s =>
        s.createCriteria(classOf[Student]).add(Restrictions.idEq(studentId)).uniqueResult().asInstanceOf[Student]
    }
  }

  /** Persists changes to a student. */
  def updateStudent(student: Student) = {
    Transactions.hibernateAction {
      implicit s =>
        s.saveOrUpdate(student)
    }
  }

  /** All students of the course with the given id. */
  def findStudentsForCourse(courseId: Long): List[Student] = {
    Transactions.hibernateAction {
      implicit s =>
        val course = s.createCriteria(classOf[Course]).add(Restrictions.idEq(courseId)).uniqueResult().asInstanceOf[Course]
        course.getStudents.toList
    }
  }

  /**
   * Removes every group of the given type from the course.
   *
   * NOTE(review): groups are only removed from the course's collection —
   * deletion of the orphaned Group rows presumably relies on cascade/orphan
   * removal mapping; confirm.
   */
  def deleteGroupType(courseId:Long,groupType:String)={
    Transactions.hibernateAction{
      implicit session=>
        val course = session.createCriteria(classOf[Course]).add(Restrictions.idEq(courseId)).uniqueResult().asInstanceOf[Course]
        val removeGroups = course.getGroups.filter(_.getGroupType.equals(groupType))
        course.getGroups.removeAll(removeGroups)
        session.saveOrUpdate(course)
    }
  }

  /** Loads all students whose id is in `studentIds`. */
  def findMultipleStudentsById(studentIds:List[Long])= {
    Transactions.hibernateAction{
      implicit s=>
        s.createCriteria(classOf[Student]).add(Restrictions.in("id",studentIds)).list().toList.asInstanceOf[List[Student]]
    }
  }

  /** Persists each group together with the students it contains. */
  def updateGroups(groups:List[Group]): Unit ={
    Transactions.hibernateAction{
      implicit s=>
        Logger.debug("updateGroups")
        groups.foreach{
          g=>
            // Save students first so the group references persistent entities.
            g.getStudents.foreach{
              st=>
                s.saveOrUpdate(st)
            }
            s.saveOrUpdate(g)
        }
    }
  }
}
/** Form/transport value describing a course to create or edit. */
case class MCourse(longName: String, shortName: String, size: Int)
/** Transport/JSON payload for a student assignment.
  *
  * @param id         database id of the student
  * @param groupindex index of the group the student is (to be) placed in
  */
case class MStudent(id:Long, groupindex:Int)
object MStudent {
  // Macro-generated Play JSON Format for reading/writing MStudent.
  implicit val studentFormat = Json.format[MStudent]
} | P1tt187/fhs-schedule-generator | app/models/fhs/pages/editcourses/MEditCourses.scala | Scala | gpl-2.0 | 27,338 |
// Resolve-test fixture (IntelliJ Scala plugin): `A` is renamed to `D` by the
// selective import below, while `BB` must still resolve through the wildcard.
object Foo {
  class A
  class BB
}
import Foo.{A => D, _}
class R extends B<ref>B | LPTK/intellij-scala | testdata/resolve/class/wild2/A.scala | Scala | apache-2.0 | 84 |
package com.mesosphere.universe.common
import io.circe.Encoder
import io.circe.Json
import io.circe.JsonObject
import io.circe.generic.semiauto._
import io.circe.syntax._
import org.scalatest.FreeSpec
import org.scalatest.prop.TableDrivenPropertyChecks
/**
 * Specs for [[JsonUtil]]: null-dropping JSON printing and recursive
 * merging of [[JsonObject]] values.
 */
class JsonUtilSpec extends FreeSpec with TableDrivenPropertyChecks {

  case class Foo(bar: Option[Int], far: Option[Int])

  implicit val encodeFoo: Encoder[Foo] = deriveEncoder[Foo]

  // Each row: (defaults, options, object expected from merging them).
  private[this] val MergeCases = Table(
    ("defaults JSON", "options JSON", "merged JSON"),
    (JsonObject.empty, JsonObject.empty, JsonObject.empty),
    (JsonObject.empty, JsonObject.singleton("a", Json.False), JsonObject.singleton("a", Json.False)),
    (JsonObject.singleton("a", Json.False), JsonObject.empty, JsonObject.singleton("a", Json.False)),
    (JsonObject.singleton("a", Json.False), JsonObject.singleton("a", Json.True), JsonObject.singleton("a", Json.True)),
    (JsonObject.singleton("a", Json.obj("a" -> Json.False)), JsonObject.singleton("a", Json.obj()), JsonObject.singleton("a", Json.obj("a" -> Json.False))),
    (JsonObject.singleton("a", Json.obj("a" -> Json.False)), JsonObject.singleton("a", Json.obj("a" -> Json.True)), JsonObject.singleton("a", Json.obj("a" -> Json.True))),
    (JsonObject.singleton("a", Json.obj("a" -> Json.False)), JsonObject.singleton("a", Json.obj("b" -> Json.False)), JsonObject.singleton("a", Json.obj("a" -> Json.False, "b" -> Json.False))),
    (JsonObject.singleton("a", Json.obj("a" -> Json.False)), JsonObject.singleton("a", Json.True), JsonObject.singleton("a", Json.True))
  )

  "dropNullKeys" in {
    // An object whose fields are all None must render as the empty object.
    val allNone = Foo(None, None)
    val rendered = JsonUtil.dropNullKeysPrinter.pretty(allNone.asJson)
    assertResult("{}")(rendered)
  }

  "Merging JSON objects" - {
    "should pass on all examples" in {
      forAll(MergeCases) { (defaults, options, expected) =>
        assertResult(expected)(JsonUtil.merge(defaults, options))
      }
    }
  }
}
| dcos/cosmos | cosmos-test-common/src/test/scala/com/mesosphere/universe/common/JsonUtilSpec.scala | Scala | apache-2.0 | 2,144 |
package wav.devtools.sbt.karaf
import java.util.concurrent.atomic.AtomicReference
import sbt.Keys._
import sbt._
import wav.devtools.karaf.manager._
import wav.devtools.karaf.mbeans._
import KarafKeys._
import packaging.KarafPackagingKeys._
/**
 * Default implementations for the sbt-karaf keys (see [[KarafKeys]]):
 * managing a locally unpacked Karaf container and deploying the project's
 * feature/bundle to it over JMX.
 */
object KarafDefaults {

  /** Default container/JMX connection arguments. */
  lazy val karafContainerArgsSetting = Def.setting(DefaultContainerArgs)

  // JMX client derived from the configured container arguments.
  private val C = Def.setting(new ExtendedKarafJMXClient(karafContainerArgs.value))

  /** Wipes all Karaf state and reboots the container immediately ("now"). */
  lazy val karafResetServerTask = Def.task(handled(C.value.System(_.rebootCleanAll("now"))))

  // TODO: refresh by url
  /** Refreshes the project bundle, matched by symbolic name and version. */
  lazy val karafUpdateBundleTask = Def.task {
    val (n,v) = karafUpdateBundleName.value
    // Bundle id -1: lookup appears to be by name/version rather than id — TODO confirm.
    val b = Bundle(-1, n, v, BundleState.Active)
    handled(C.value.updateBundle(b))
  }

  /** Registers the generated features repository and starts this project's feature. */
  lazy val karafDeployFeatureTask = Def.task {
    val ff = featuresFile.value
    require(ff.isDefined, "`featuresFile` must produce a features file")
    val repo = ff.get.getAbsoluteFile.toURI.toString
    handled(C.value.startFeature(repo, name.value, version.value))
  }

  /** Removes the generated features repository (uninstalling its features). */
  lazy val karafUndeployFeatureTask = Def.task {
    val ff = featuresFile.value
    require(ff.isDefined, "`featuresFile` must produce a features file")
    val repo = ff.get.getAbsoluteFile.toURI.toString
    handled(C.value.Features(_.removeRepository(repo, true)))
  }

  // Holds the lifecycle of the single managed container across task invocations.
  private lazy val karafContainer = settingKey[AtomicReference[Option[KarafContainer]]]("The managed karaf container")

  /**
   * Starts a container from the unpacked Karaf distribution unless one is
   * already tracked.
   *
   * NOTE(review): the isEmpty check followed by `ref.set` is not atomic, and
   * the fixed 500 ms sleep before `isAlive` is a racy readiness probe — fine
   * for interactive sbt use, but worth confirming.
   */
  lazy val karafStartServerTask = Def.task {
    val log = streams.value.log
    log.warn("Ignoring `karafContainerArgsSetting`")
    val ref = karafContainer.value
    if (ref.get.isEmpty) {
      val karafBase = unpackKarafDistribution.value
      val config = KarafContainer.createDefaultConfig(karafBase.getAbsolutePath)
      log.debug(config.toString)
      val container = new KarafContainer(config)
      container.start()
      Thread.sleep(500)
      if (container.isAlive) ref.set(Some(container))
      else sys.error(container.log)
    }
  }

  /** Stops and forgets the managed container, if any. */
  lazy val karafStopServerTask = Def.task {
    val ref = karafContainer.value
    if (ref.get.isDefined) {
      val Some(container) = ref.get
      container.stop()
      ref.set(None)
    }
  }

  // Default bundle symbolic name: organization + name with dashes collapsed to dots.
  private lazy val defaultBundleName = Def.setting((organization.value + "." + name.value).replace("-", "."))

  /** The full set of key -> implementation wirings contributed by this plugin. */
  lazy val karafSettings: Seq[Setting[_]] =
    Seq(karafContainer := new AtomicReference(None),
      karafStartServer := karafStartServerTask.value,
      karafStopServer := karafStopServerTask.value,
      karafResetServer := karafResetServerTask.value,
      karafStatus := println(karafContainer.value.get.foreach(c => println("Alive: " + c.isAlive))),
      karafContainerArgs := karafContainerArgsSetting.value,
      karafDeployFeature := karafDeployFeatureTask.value,
      karafUndeployFeature := karafUndeployFeatureTask.value,
      karafUpdateBundleName := Tuple2(defaultBundleName.value, version.value),
      karafUpdateBundle := karafUpdateBundleTask.value,
      // `<<=` is the legacy sbt dependency operator (deprecated in newer sbt);
      // it makes karafUpdateBundle run after karafDeployFeature.
      karafUpdateBundle <<= karafUpdateBundle dependsOn (karafDeployFeature))
} | wav/osgi-tooling | sbt-karaf/src/main/scala/wav/devtools/sbt/karaf/Defaults.scala | Scala | apache-2.0 | 3,028 |
package org.zalando.grafter.macros
// Compile-time fixture for the `@dependentReader` macro annotation: the test
// is that this file compiles, given implicit readers for each dependency of D.
case class D1()
case class D2()
@dependentReader
case class D(d1: D1, d2: D2)
object DependentReaderMacroTest {
  case class Config()
  // Absence of either of these readers should raise a compilation error.
  // The values are deliberately null: only implicit *resolution* is exercised
  // here — the readers are never run.
  implicit val rc1: cats.data.Reader[Config, D1] = null
  implicit val rc2: cats.data.Reader[Config, D2] = null
  // Exercises the macro-generated `dependentReader` method on D's companion.
  val r1: cats.data.Reader[Config, D] = D.dependentReader[Config]
}
| jcranky/grafter | macros/src/test/scala/org/zalando/grafter/macros/DependentReaderMacroTest.scala | Scala | mit | 419 |
/**
* @author Francisco Miguel Arámburo Torres - atfm05@gmail.com
*/
package requests
import play.api.libs.json._
/** Verifies and encapsulates all the parameters needed to register an http hook for [[http.OutReports]] to use.
  *
  * A json example with all possible parameters:
  {{{
{
  "token": "asdw-12d24-awdqsr1-qwed2",
  "host": "census-control",
  "port": 9000
}
  }}}
  * And here are the public attributes of the request's data structure:
  {{{
class SetOutReportsRequest {
  val token: String
  val host: String
  var port: Int
}
  }}}
  *
  * @constructor creates a data structure with all the request's parameters.
  * @param json of the request.
  */
class SetOutReportsRequest (json: JsValue) extends Request {

  /** The request uniquer identifier.
    *
    * NOTE(review): never populated from the json — always null here; it is
    * presumably assigned elsewhere or dead. Confirm against callers.
    */
  val token: String = null

  /** The http hook hostname (any leading "http://" is stripped).
    * A missing field records an error on the inherited `errors` collector. */
  val host: String =
    (json \\ "host").asOpt[String] match {
      case None => errors += "'host' field missing."; ""
      case Some(data) => data replaceAll ("http://", "")
    }

  /** The http hook port.
    *
    * NOTE(review): declared `var` while the other fields are `val` — looks
    * unintentional; confirm no caller mutates it before tightening.
    */
  var port: Int =
    (json \\ "port").asOpt[Int] match {
      case None => errors += "'port' field missing."; 0
      case Some(data) => data
    }
}
| FrancoAra/census | app/requests/SetOutReportsRequest.scala | Scala | mit | 1,204 |
package skinny.controller.feature
import skinny.SkinnyEnv
import skinny.engine.SkinnyEngineBase
import skinny.engine.context.SkinnyEngineContext
import skinny.engine.response.{ SeeOther, Found, MovedPermanently, ActionResult }
/**
* Explicit redirect method support.
*/
/**
 * Explicit redirect method support.
 *
 * Each helper builds an HTTP redirect [[ActionResult]] pointing at the
 * absolute URL for `location`. Outside of the test environment the result is
 * thrown via `halt` to stop further action processing; under test
 * ([[SkinnyEnv.isTest]]) the result is returned so specs can assert on it.
 */
trait ExplicitRedirectFeature extends SkinnyEngineBase {

  /**
   * Completes a redirect: returns the result in the test environment,
   * halts with it otherwise. Shared by all redirect30x helpers.
   */
  private def completeRedirect(result: ActionResult): ActionResult = {
    if (SkinnyEnv.isTest()) result else halt(result)
  }

  /**
   * Responds as "301 Moved Permanently"
   *
   * @return ActionResult
   */
  def redirect301(location: String, headers: Map[String, String] = Map.empty, reason: String = "")(
    implicit ctx: SkinnyEngineContext = context): ActionResult = {
    completeRedirect(MovedPermanently(fullUrl(location, includeServletPath = false), headers, reason))
  }

  /**
   * Responds as "302 Found"
   *
   * @return ActionResult
   */
  def redirect302(location: String, headers: Map[String, String] = Map.empty, reason: String = "")(
    implicit ctx: SkinnyEngineContext = context): ActionResult = {
    completeRedirect(Found(fullUrl(location, includeServletPath = false), headers, reason))
  }

  /**
   * Responds as "303 See Other"
   *
   * @return ActionResult
   */
  def redirect303(location: String, headers: Map[String, String] = Map.empty, reason: String = "")(
    implicit ctx: SkinnyEngineContext = context): ActionResult = {
    completeRedirect(SeeOther(fullUrl(location, includeServletPath = false), headers, reason))
  }
}
| holycattle/skinny-framework | framework/src/main/scala/skinny/controller/feature/ExplicitRedirectFeature.scala | Scala | mit | 1,516 |
package edu.rice.habanero.benchmarks.big
import edu.rice.habanero.actors.{JumiActor, JumiActorState, JumiPool}
import edu.rice.habanero.benchmarks.big.BigConfig.{ExitMessage, Message, PingMessage, PongMessage}
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner, PseudoRandom}
/**
 * "Big" many-to-many message benchmark on Jumi actors: W workers each send
 * N pings to randomly chosen neighbors; a sink actor coordinates shutdown.
 *
 * @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
 */
object BigJumiActorBenchmark {

  def main(args: Array[String]) {
    BenchmarkRunner.runBenchmark(args, new BigJumiActorBenchmark)
  }

  private final class BigJumiActorBenchmark extends Benchmark {

    def initialize(args: Array[String]) {
      BigConfig.parseArgs(args)
    }

    def printArgInfo() {
      BigConfig.printArgs()
    }

    /**
     * Wires sink + W workers, shares the neighbor array with everyone, then
     * kicks each worker off with a dummy PongMessage(-1) and waits for all
     * actors to terminate.
     */
    def runIteration() {
      val sinkActor = new SinkActor(BigConfig.W)
      sinkActor.start()
      val bigActors = Array.tabulate[JumiActor[AnyRef]](BigConfig.W)(i => {
        val loopActor = new BigActor(i, BigConfig.N, sinkActor)
        loopActor.start()
        loopActor
      })
      val neighborMessage = new NeighborMessage(bigActors)
      sinkActor.send(neighborMessage)
      bigActors.foreach(loopActor => {
        loopActor.send(neighborMessage)
      })
      // A synthetic pong (sender -1) bootstraps each worker's ping loop.
      bigActors.foreach(loopActor => {
        loopActor.send(new PongMessage(-1))
      })
      JumiActorState.awaitTermination()
    }

    def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double): Unit = {
      if (lastIteration) {
        JumiPool.shutdown()
      }
    }
  }

  // Distributes the shared worker array to every actor.
  private case class NeighborMessage(neighbors: Array[JumiActor[AnyRef]]) extends Message

  /**
   * One benchmark worker: replies to pings with pongs, and on each pong from
   * its expected target either sends the next ping or reports completion.
   */
  private class BigActor(id: Int, numMessages: Int, sinkActor: JumiActor[AnyRef]) extends JumiActor[AnyRef] {

    private var numPings = 0
    // Index of the neighbor we last pinged; -1 until the first ping.
    private var expPinger = -1
    private val random = new PseudoRandom(id)
    private var neighbors: Array[JumiActor[AnyRef]] = null

    // Reused message instances to avoid per-send allocation.
    private val myPingMessage = new PingMessage(id)
    private val myPongMessage = new PongMessage(id)

    override def process(msg: AnyRef) {
      msg match {
        case pm: PingMessage =>
          val sender = neighbors(pm.sender)
          sender.send(myPongMessage)
        case pm: PongMessage =>
          // Protocol check only: a mismatch is logged but does not abort.
          if (pm.sender != expPinger) {
            println("ERROR: Expected: " + expPinger + ", but received ping from " + pm.sender)
          }
          if (numPings == numMessages) {
            sinkActor.send(ExitMessage.ONLY)
          } else {
            sendPing()
            numPings += 1
          }
        case em: ExitMessage =>
          exit()
        case nm: NeighborMessage =>
          neighbors = nm.neighbors
      }
    }

    // Pings a uniformly random neighbor (no self-exclusion: may pick itself).
    private def sendPing(): Unit = {
      val target = random.nextInt(neighbors.size)
      val targetActor = neighbors(target)
      expPinger = target
      targetActor.send(myPingMessage)
    }
  }

  /**
   * Termination coordinator: counts one ExitMessage per finished worker and,
   * once all have reported, broadcasts exit to every worker and stops itself.
   */
  private class SinkActor(numWorkers: Int) extends JumiActor[AnyRef] {

    private var numMessages = 0
    private var neighbors: Array[JumiActor[AnyRef]] = null

    override def process(msg: AnyRef) {
      msg match {
        case em: ExitMessage =>
          numMessages += 1
          if (numMessages == numWorkers) {
            neighbors.foreach(loopWorker => loopWorker.send(ExitMessage.ONLY))
            exit()
          }
        case nm: NeighborMessage =>
          neighbors = nm.neighbors
      }
    }
  }
}
| shamsmahmood/savina | src/main/scala/edu/rice/habanero/benchmarks/big/BigJumiActorBenchmark.scala | Scala | gpl-2.0 | 3,362 |
package scalaoauth2.provider
import scala.concurrent.{ExecutionContext, Future}
/**
 * OAuth2 token endpoint: dispatches a token request to the [[GrantHandler]]
 * registered for the request's grant type.
 */
trait TokenEndpoint {

  /** Grant-type name -> handler. Empty by default; override to enable grants. */
  val handlers = Map.empty[String, GrantHandler]

  /**
   * Handles a token request.
   *
   * Errors are reported on the Left of the returned Either. OAuthErrors
   * thrown synchronously (unsupported grant type, missing credential,
   * credential parse failures) are converted by the surrounding try/catch;
   * OAuthErrors failing the returned Futures are converted by the per-branch
   * `recover` blocks. Non-OAuthError failures propagate as failed Futures.
   */
  def handleRequest[U](
      request: AuthorizationRequest,
      handler: AuthorizationHandler[U]
  )(implicit
      ctx: ExecutionContext
  ): Future[Either[OAuthError, GrantHandlerResult[U]]] =
    try {
      val grantType = request.grantType
      // Deferred lookup: throws UnsupportedGrantType only when actually invoked.
      val grantHandler = () =>
        handlers.getOrElse(
          grantType,
          throw new UnsupportedGrantType(s"$grantType is not supported")
        )
      request.parseClientCredential
        .map { maybeCredential =>
          // A credential was supplied (possibly malformed).
          maybeCredential.fold(
            invalid => Future.successful(Left(invalid)),
            clientCredential => {
              handler
                .validateClient(Some(clientCredential), request)
                .flatMap { isValidClient =>
                  if (!isValidClient) {
                    Future.successful(
                      Left(
                        new InvalidClient(
                          "Invalid client or client is not authorized"
                        )
                      )
                    )
                  } else {
                    // grantHandler() may throw here; inside flatMap that
                    // becomes a failed Future handled by `recover` below.
                    grantHandler()
                      .handleRequest(Some(clientCredential), request, handler)
                      .map(Right(_))
                  }
                }
                .recover { case e: OAuthError =>
                  Left(e)
                }
            }
          )
        }
        .getOrElse {
          // No credential in the request: allowed only for grant types that
          // do not require one.
          val gh = grantHandler()
          if (gh.clientCredentialRequired) {
            throw new InvalidRequest("Client credential is not found")
          } else {
            gh.handleRequest(None, request, handler).map(Right(_)).recover {
              case e: OAuthError => Left(e)
            }
          }
        }
    } catch {
      case e: OAuthError => Future.successful(Left(e))
    }
}
/** Default endpoint instance with no grant handlers registered. */
object TokenEndpoint extends TokenEndpoint
| nulab/scala-oauth2-provider | src/main/scala/scalaoauth2/provider/TokenEndpoint.scala | Scala | mit | 1,996 |
package org.jetbrains.plugins.scala
package refactoring.extractMethod
import java.io.File
import _root_.com.intellij.openapi.fileEditor.{FileEditorManager, OpenFileDescriptor}
import _root_.org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import _root_.org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import _root_.org.jetbrains.plugins.scala.lang.refactoring.extractMethod.ScalaExtractMethodHandler
import com.intellij.openapi.util.io.FileUtil
import com.intellij.openapi.util.text.StringUtil
import com.intellij.openapi.vfs.{CharsetToolkit, LocalFileSystem}
import com.intellij.testFramework.UsefulTestCase
import org.jetbrains.plugins.scala.base.ScalaLightPlatformCodeInsightTestCaseAdapter
import org.junit.Assert._
/**
 * Base class for extract-method refactoring tests: each test loads a data
 * file, extracts the /*start*/../*end*/ selection via
 * [[ScalaExtractMethodHandler]], and compares the resulting file text with
 * the expected output stored in the file's trailing comment.
 *
 * User: Alexander Podkhalyuzin
 * Date: 06.04.2010
 */
abstract class ScalaExtractMethodTestBase extends ScalaLightPlatformCodeInsightTestCaseAdapter {
  // Markers delimiting the selection to extract inside the test data file.
  private val startMarker = "/*start*/"
  private val endMarker = "/*end*/"

  def folderPath: String = baseRootPath() + "extractMethod/"

  protected def doTest() {
    // Locate the data file named after the test.
    val filePath = folderPath + getTestName(false) + ".scala"
    val file = LocalFileSystem.getInstance.findFileByPath(filePath.replace(File.separatorChar, '/'))
    assert(file != null, "file " + filePath + " not found")
    // Strip the selection markers while recording their offsets.
    var fileText = StringUtil.convertLineSeparators(FileUtil.loadFile(new File(file.getCanonicalPath), CharsetToolkit.UTF8))
    val startOffset = fileText.indexOf(startMarker)
    assert(startOffset != -1, "Not specified start marker in test case. Use /*start*/ in scala file for this.")
    fileText = fileText.replace(startMarker, "")
    val endOffset = fileText.indexOf(endMarker)
    assert(endOffset != -1, "Not specified end marker in test case. Use /*end*/ in scala file for this.")
    fileText = fileText.replace(endMarker, "")
    configureFromFileTextAdapter(getTestName(false) + ".scala", fileText)
    val scalaFile = getFileAdapter.asInstanceOf[ScalaFile]
    // Open an editor and select the marked range, as the handler works on a selection.
    val fileEditorManager = FileEditorManager.getInstance(getProjectAdapter)
    val editor = fileEditorManager.openTextEditor(new OpenFileDescriptor(getProjectAdapter, file, startOffset), false)
    editor.getSelectionModel.setSelection(startOffset, endOffset)
    var res: String = null
    // The last PSI element is the trailing comment holding the expected output.
    val lastPsi = scalaFile.findElementAt(scalaFile.getText.length - 1)
    // Invoke the extract-method refactoring on the selection.
    try {
      val handler = new ScalaExtractMethodHandler
      handler.invoke(getProjectAdapter, editor, scalaFile, null)
      UsefulTestCase.doPostponedFormatting(getProjectAdapter)
      // Actual result: everything before the expected-output comment.
      res = scalaFile.getText.substring(0, lastPsi.getTextOffset).trim
    }
    catch {
      // Surface any refactoring failure (with its stack trace) as a test failure.
      case e: Exception => assert(assertion = false, message = e.getMessage + "\\n" + e.getStackTrace.map(_.toString).mkString(" \\n"))
    }
    // Expected result: the body of the trailing line/block/doc comment.
    val text = lastPsi.getText
    val output = lastPsi.getNode.getElementType match {
      case ScalaTokenTypes.tLINE_COMMENT => text.substring(2).trim
      case ScalaTokenTypes.tBLOCK_COMMENT | ScalaTokenTypes.tDOC_COMMENT =>
        text.substring(2, text.length - 2).trim
      case _ =>
        assertTrue("Test result must be in last comment statement.", false)
        ""
    }
    assertEquals(output, res)
  }
} | whorbowicz/intellij-scala | test/org/jetbrains/plugins/scala/refactoring/extractMethod/ScalaExtractMethodTestBase.scala | Scala | apache-2.0 | 3,180 |
import com.google.inject.{ TypeLiteral, Scopes, AbstractModule }
import net.codingwell.scalaguice.ScalaModule
import securesocial.core.{ BasicProfile, RuntimeEnvironment }
import service.{ MyEnvironment, DemoUser }
/**
 * Guice module for the SecureSocial demo application.
 *
 * Registers a single [[MyEnvironment]] instance as the application-wide
 * [[RuntimeEnvironment]] (bound via a `TypeLiteral` because the type is generic).
 */
class DemoModule extends AbstractModule with ScalaModule {
  override def configure() {
    bind(new TypeLiteral[RuntimeEnvironment] {}).toInstance(new MyEnvironment)
  }
}
| normenmueller/securesocial | samples/scala/demo/app/DemoModule.scala | Scala | apache-2.0 | 439 |
package com.avsystem.commons
package serialization
import com.avsystem.commons.annotation.{bincompat, positioned}
import com.avsystem.commons.meta._
/**
 * Common metadata shared by all GenCodec structure descriptions: the Scala source
 * name of the symbol plus an optional `@name` annotation override.
 */
sealed trait GenInfo[T] extends TypedMetadata[T] {
  def sourceName: String
  def annotName: Opt[name]
  // Serialized name: the `@name` annotation wins over the source-level name.
  def rawName: String = annotName.fold(sourceName)(_.name)
}
/**
 * Materialized metadata for a single case-class parameter. Each `@isAnnotated[...]`
 * flag records the presence of the corresponding serialization annotation.
 */
case class GenParamInfo[T](
  @reifyName sourceName: String,
  @optional @reifyAnnot annotName: Opt[name],
  @isAnnotated[optionalParam] optional: Boolean,
  @isAnnotated[whenAbsent[T]] hasWhenAbsent: Boolean,
  @isAnnotated[transientDefault] transientDefault: Boolean,
  @isAnnotated[outOfOrder] outOfOrder: Boolean,
  @reifyFlags flags: ParamFlags
) extends GenInfo[T] {
  // Binary-compatibility constructor for callers compiled before the
  // `optional` field existed; defaults it to false.
  @bincompat private[commons] def this(
    sourceName: String, annotName: Opt[name], hasWhenAbsent: Boolean, transientDefault: Boolean, outOfOrder: Boolean, flags: ParamFlags
  ) = this(sourceName, annotName, false, hasWhenAbsent, transientDefault, outOfOrder, flags)
}
/** Metadata common to sealed hierarchies and their cases (adds type-level flags). */
sealed trait GenCodecStructure[T] extends GenInfo[T] {
  def flags: TypeFlags
}
/** Metadata for a sealed trait/class hierarchy, including an optional `@flatten` annotation. */
@positioned(positioned.here) case class GenUnionInfo[T](
  @reifyFlags flags: TypeFlags,
  @reifyName sourceName: String,
  @optional @reifyAnnot annotName: Opt[name],
  @optional @reifyAnnot flatten: Opt[flatten]
) extends GenCodecStructure[T]
object GenUnionInfo extends AdtMetadataCompanion[GenUnionInfo]
/** Metadata for a single case of a sealed hierarchy (`@transparent`, `@defaultCase` markers). */
@positioned(positioned.here) case class GenCaseInfo[T](
  @reifyFlags flags: TypeFlags,
  @reifyName sourceName: String,
  @optional @reifyAnnot annotName: Opt[name],
  @isAnnotated[transparent] transparent: Boolean,
  @isAnnotated[defaultCase] defaultCase: Boolean
) extends GenCodecStructure[T]
object GenCaseInfo extends AdtMetadataCompanion[GenCaseInfo]
| AVSystem/scala-commons | commons-core/src/main/scala/com/avsystem/commons/serialization/GenCodecStructure.scala | Scala | mit | 1,710 |
object Solution {
import scala.collection.Searching._
/**
 * Trial-division primality test.
 *
 * Numbers below 10 are checked against the small-prime list directly; larger
 * numbers are first screened by the small primes and then trial-divided by
 * every candidate from 11 up to and including sqrt(n).
 *
 * Bug fix: the original loop condition was `s * s < n`, which never tests the
 * divisor equal to sqrt(n), so perfect squares of primes >= 11 (121, 169, 289, ...)
 * were wrongly reported as prime. The bound must be `s * s <= n`.
 *
 * @param n the number to test (values < 2 yield false)
 * @return true iff n is prime
 */
def isPrime(n: Long): Boolean = {
  val smallPrimes = List(2L, 3L, 5L, 7L)
  if (n < 10) smallPrimes.contains(n)
  else if (smallPrimes.exists(n % _ == 0)) false
  else {
    // Trial division by 11, 12, 13, ... up to and including sqrt(n).
    var s = 11L
    while (s * s <= n && n % s != 0) s += 1
    // If the loop ran past sqrt(n) without finding a divisor, n is prime.
    s * s > n
  }
}
// pandigitalPrimeTable(i) holds all i-digit pandigital primes (digits 1..i each
// used exactly once), sorted ascending. Indices 0..3 and any i whose digit sum
// is divisible by 3 remain empty.
val pandigitalPrimeTable = Array.fill(10)(Vector.empty[Long])
// Skip widths where the digit sum i*(i+1)/2 is divisible by 3: every permutation
// of those digits is divisible by 3 and therefore cannot be prime.
for (i <- 4 to 9 if (i * (i + 1) / 2) % 3 != 0) {
  // (1 to i).permutations enumerates in lexicographic order, so the resulting
  // vector is sorted ascending — getLargestPrime's binary search relies on this.
  val primeList = (1 to i).permutations.filter({ x =>
    // Cheap rejection: a number ending in an even digit or 5 cannot be prime here.
    val lastDigit = x(i - 1)
    if (lastDigit % 2 == 0 || lastDigit == 5) false
    else {
      // Fold the digit sequence into the Long it represents.
      val p = x.foldLeft(0l) { (x,y) => x * 10 + y }
      isPrime(p)
    }
  }).map({ x => x.foldLeft(0l) { (x,y) => x * 10 + y } }).toVector
  pandigitalPrimeTable(i) = primeList
}
/**
 * Returns the largest pandigital prime that is <= n, or -1 if none exists.
 *
 * Searches the precomputed table for the digit width of n first; if no
 * pandigital prime of that width is <= n, falls back to the largest prime of
 * the next smaller non-empty width.
 */
def getLargestPrime(n: Long): Long = {
  // Pandigital primes have at most 9 digits, so clamp the width.
  val numOfDigits = math.min(n.toString.size, 9)
  val primes = pandigitalPrimeTable(numOfDigits)
  var prime = -1l
  // Only search this width if it has candidates and the smallest one is <= n.
  if (primes.size > 0 && primes(0) <= n)
    prime = primes.search(n) match {
      case Found(idx) => n
      // note: There is a bug in scala 2.11.0 which hackerrank runs.
      // see https://issues.scala-lang.org/browse/SI-7372 for more details,
      // if you want to run this code on hackerrank, change idx - 1 => idx
      // if hackerrank haven't update their scala runtime.
      case InsertionPoint(idx) => primes(idx - 1)
    }
  else {
    // Fall back to smaller widths, taking the largest prime of the first
    // non-empty one (widths below 4 have no pandigital primes).
    (numOfDigits - 1 to 4 by -1) exists { x =>
      val size = pandigitalPrimeTable(x).size
      if (size > 0) { prime = pandigitalPrimeTable(x)(size - 1); true }
      else false
    }
  }
  prime
}
/** Entry point: reads the number of test cases, then answers one query per line. */
def main(args: Array[String]) {
  val testCases = readLine.toInt
  (1 to testCases) foreach { _ =>
    println(getLargestPrime(readLine.toLong))
  }
}
}
| advancedxy/hackerrank | project-euler/problem-41/PandigitalPrime.scala | Scala | mit | 1,919 |
package lila.common
import org.joda.time.DateTime
/**
 * Process uptime helpers, anchored to the wall-clock time this object was
 * first initialized.
 */
object Uptime {

  // Captured once at class-load time.
  val startedAt = DateTime.now
  val startedAtMillis = nowMillis

  /** Seconds elapsed since startup. */
  def seconds = nowSeconds - startedAt.getSeconds

  /** True if the process has been up for at least `minutes` minutes. */
  def startedSinceMinutes(minutes: Int) =
    startedSinceSeconds(minutes * 60)

  /** True if the process has been up for at least `seconds` seconds. */
  def startedSinceSeconds(seconds: Int) =
    // Fix: multiply as Long (`1000L`) — `seconds * 1000` was Int arithmetic and
    // overflowed for seconds > Int.MaxValue / 1000 (~24.8 days of uptime).
    startedAtMillis < (nowMillis - (seconds * 1000L))
}
| luanlv/lila | modules/common/src/main/Uptime.scala | Scala | mit | 370 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.batch.table.validation
import org.apache.flink.api.scala._
import org.apache.flink.table.api.ValidationException
import org.apache.flink.table.api.scala._
import org.apache.flink.table.planner.utils.TableTestBase
import org.junit.Assert._
import org.junit._
/**
 * Negative tests for Table API select/filter/alias expressions in batch mode:
 * each case references a non-existent or illegal field and must fail planning
 * with a [[ValidationException]].
 */
class CalcValidationTest extends TableTestBase {

  @Test
  def testSelectInvalidFieldFields(): Unit = {
    // Uses the ExpectedException rule to also verify the error message text.
    expectedException.expect(classOf[ValidationException])
    expectedException.expectMessage("Cannot resolve field [foo], input field list:[a, b, c].")
    val util = batchTestUtil()
    util.addTableSource[(Int, Long, String)]("Table3",'a, 'b, 'c)
      // must fail. Field 'foo does not exist
      .select('a, 'foo)
  }

  @Test(expected = classOf[ValidationException])
  def testFilterInvalidFieldName(): Unit = {
    val util = batchTestUtil()
    val t = util.addTableSource[(Int, Long, String)]("Table3",'a, 'b, 'c)
    // must fail. Field 'foo does not exist
    t.filter( 'foo === 2 )
  }

  @Test(expected = classOf[ValidationException])
  def testSelectInvalidField() {
    val util = batchTestUtil()
    val t = util.addTableSource[(Int, Long, String)]("Table3",'a, 'b, 'c)
    // Must fail. Field foo does not exist
    t.select($"a" + 1, $"foo" + 2)
  }

  @Test(expected = classOf[ValidationException])
  def testSelectAmbiguousFieldNames() {
    val util = batchTestUtil()
    val t = util.addTableSource[(Int, Long, String)]("Table3",'a, 'b, 'c)
    // Must fail. Both projections alias to the same output name "foo".
    t.select($"a" + 1 as "foo", $"b" + 2 as "foo")
  }

  @Test(expected = classOf[ValidationException])
  def testFilterInvalidField() {
    val util = batchTestUtil()
    val t = util.addTableSource[(Int, Long, String)]("Table3",'a, 'b, 'c)
    // Must fail. Field foo does not exist.
    t.filter($"foo" === 17)
  }

  @Test
  def testAliasStarException(): Unit = {
    // '*' is reserved and must be rejected wherever it is used as a field name
    // or alias; each scenario is checked independently with try/fail/catch.
    val util = batchTestUtil()
    try {
      util.addTableSource[(Int, Long, String)]("Table1", '*, 'b, 'c)
      fail("TableException expected")
    } catch {
      case _: ValidationException => //ignore
    }

    try {
      util.addTableSource[(Int, Long, String)]("Table2")
        .select('_1 as '*, '_2 as 'b, '_1 as 'c)
      fail("ValidationException expected")
    } catch {
      case _: ValidationException => //ignore
    }

    try {
      util.addTableSource[(Int, Long, String)]("Table3").as("*", "b", "c")
      fail("ValidationException expected")
    } catch {
      case _: ValidationException => //ignore
    }
    try {
      util.addTableSource[(Int, Long, String)]("Table4", 'a, 'b, 'c).select('*, 'b)
      fail("ValidationException expected")
    } catch {
      case _: ValidationException => //ignore
    }
  }

  @Test(expected = classOf[ValidationException])
  def testDuplicateFlattening(): Unit = {
    val util = batchTestUtil()
    val table = util.addTableSource[((Int, Long), (String, Boolean), String)]("MyTable", 'a, 'b, 'c)
    // Must fail: the same composite field may only be flattened once per projection.
    table.select('a.flatten(), 'a.flatten())
  }
}
| hequn8128/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/batch/table/validation/CalcValidationTest.scala | Scala | apache-2.0 | 3,815 |
package scavlink
import akka.actor.{Actor, ActorRef, ActorSystem}
import akka.pattern.ask
import akka.util.Timeout
import com.codahale.metrics.MetricRegistry
import com.typesafe.config.Config
import scavlink.connection._
import scavlink.connection.frame.DefaultMarshallerFactory
import scavlink.connection.marshal.MarshallerFactory
import scavlink.settings.ScavlinkConfig
import scala.concurrent.Future
/**
* Handle to an instance of the Scavlink library.
* Only one instance is typically needed in an application, as it will handle multiple connections
* and multiple vehicles per connection.
*
* @param name name of the instance
* @param events where library-wide events are published
* @param supervisor top-level supervisor for all actors
* @author Nick Rossi
*/
class ScavlinkInstance(val name: String, val config: ScavlinkConfig, val events: ConnectionEventBus, val supervisor: ActorRef) {
  /** Asks the supervisor to open a new communication link (fire-and-forget). */
  def startConnection(settings: ConnectionSettings)(implicit sender: ActorRef = Actor.noSender): Unit =
    supervisor ! StartConnection(settings)

  /** Asks the supervisor to close the link matching `settings` (fire-and-forget). */
  def stopConnection(settings: ConnectionSettings)(implicit sender: ActorRef = Actor.noSender): Unit =
    supervisor ! StopConnection(settings)

  /** Requests the current vehicle set; the reply is delivered to `sender`. */
  def getVehicles()(implicit sender: ActorRef = Actor.noSender): Unit =
    supervisor ! GetVehicles

  /** Ask-pattern variant of [[getVehicles]]: returns the vehicle set as a Future. */
  def askVehicles()(implicit timeout: Timeout): Future[Vehicles] =
    (supervisor ? GetVehicles).mapTo[Vehicles]

  /** Shuts down the supervisor hierarchy and all connections it owns. */
  def shutdown()(implicit sender: ActorRef = Actor.noSender) =
    supervisor ! Shutdown
}
object ScavlinkInstance {
  /**
   * Initializes an instance of the Scavlink library.
   *
   * The configuration object is taken from the actor system if not provided as an override here.
   * Scavlink configuration should be under the root level in a block called "scavlink"
   * (or the alternate instance name passed in here).
   *
   * The default connection factory provides a TCP client, UDP listener, and serial port connection.
   * An application may extend it with additional handlers.
   *
   * The default marshaller factory returns a message marshaller for a given autopilot.
   * An application may extend it with marshallers for other autopilot types.
   *
   * @param system Akka actor system
   * @param config optional config override
   * @param linkAuthorizer if specified, requires an initial valid AuthKey from remote GCS when a link is established
   * @param vehicleAuthorizer if specified, requires an initial valid AuthKey from each vehicle
   * @param initializers callbacks for application-specific initialization when the library starts
   * @param vehicleInitializers callbacks for application-specific initialization when a new vehicle appears
   * @param connectionFactory factory that knows how to create new communication links
   * @param marshallerFactory factory that knows how to obtain a marshaller for a given autopilot type
   * @param name optional name to differentiate multiple instances (defaults to "scavlink")
   * @author Nick Rossi
   */
  def apply(system: ActorSystem,
            name: String = "scavlink",
            config: Option[Config] = None,
            initializers: Seq[ScavlinkInitializer] = DefaultScavlinkInitializers,
            vehicleInitializers: Seq[VehicleInitializer] = DefaultVehicleInitializers,
            linkAuthorizer: Option[KeyAuthorizer] = None,
            vehicleAuthorizer: Option[KeyAuthorizer] = None,
            connectionFactory: ConnectionFactory = DefaultConnectionFactory,
            marshallerFactory: MarshallerFactory = DefaultMarshallerFactory.apply): ScavlinkInstance = {

    // Fresh metrics registry and event bus per instance; the bus subscriber count
    // is exposed as a gauge before any subscribers can attach.
    val metrics = new MetricRegistry
    val events = new ConnectionEventBus
    metrics.register("ConnectionEventBus subscribers", events.gauge)

    // Context bundles everything the supervisor and its children need.
    val scfg = new ScavlinkConfig(config.getOrElse(system.settings.config), name)
    val sctx = ScavlinkContext(events, scfg, metrics, vehicleInitializers, linkAuthorizer, vehicleAuthorizer, marshallerFactory)
    // The supervisor actor is named after the instance, so two instances in the
    // same actor system must use different names.
    val supervisor = system.actorOf(ScavlinkSupervisor.props(sctx, initializers, connectionFactory), name)
    new ScavlinkInstance(name, scfg, events, supervisor)
  }
}
| nickolasrossi/scavlink | src/main/scala/scavlink/ScavlinkInstance.scala | Scala | mit | 4,114 |
package org.scalaide.core.ui
import org.eclipse.jdt.ui.text.IJavaPartitions
import org.eclipse.jface.text.Document
import org.eclipse.jface.text.IDocument
import org.eclipse.jface.text.IDocumentExtension3
import org.junit.ComparisonFailure
import org.scalaide.CompilerSupportTests
import org.scalaide.core.lexical.ScalaCodePartitioner
import org.scalaide.core.testsetup.SDTTestUtils
import org.scalaide.util.eclipse.EclipseUtils
/**
* This class provides basic test behavior for all text changing operations that
* need to be tested.
*
* It provides the following DSL for a test:
* {{{
* class MyTextEditTest extends TextEditTests with EclipseDocumentSupport {
* case class MyOperation(value: String) extends Operation {
* def execute() = ???
* }
* @Test
* def test() = {
* "object A^" becomes "object AB^" after MyOperation("B")
* }
* }
* }}}
*
* The overall design of this test suite is as follows:
*
* - `TextEditTests` provides basic functionality and the DSL
* - `TextEditTests` contains the class `Operation` which needs to be implemented.
* It provides the test logic that is executed by the test suite.
* - Different tests require different test setups. By implementing the `prepare`
* method one can provide such a setup which is then invoked right before the
* `execute` method of the `Operation` class is called.
* - The `execute` method of class `Operation` needs to change `caretOffset` in
* `TextEditTests` if the position of the cursor changes.
*/
abstract class TextEditTests {

  /**
   * A single text-changing operation under test. Subclasses implement
   * [[execute]]; the harness seeds [[caretOffset]] before invoking it.
   */
  abstract class Operation {
    /** This value is initialized before the `execute` method is called. */
    var caretOffset: Int = _

    /**
     * Contains the test logic for a specific operation. This method is invoked
     * by the test suite.
     */
    def execute(): Unit

    /**
     * This function can handle Eclipse' linked mode model. To depict such a
     * model in the test simply surround the identifiers that should be
     * considered by the linked model with [[ and ]]. The cursor is always
     * represented by a ^.
     *
     * This function needs to be called by a concrete operation to add the [[
     * and ]] markers to `doc`. `cursorPos` is the position of the cursor where
     * the ^ marker should be added to `doc`. This function updates the cursor
     * position if necessary and returns its updated value. `groups` are pairs
     * of `(offset, length)` which span the area that should be surrounded
     * by the [[ and ]] markers.
     */
    def applyLinkedModel(doc: IDocument, cursorPos: Int, positionGroups: Seq[(Int, Int)]): Int = {
      // Sort descending by offset so each insertion does not shift the offsets
      // of the groups still to be processed.
      val groups = positionGroups.sortBy(-_._1)
      // Each group before the cursor inserts 4 marker characters ("[[" + "]]").
      // NOTE(review): `takeWhile` on a descending-sorted list stops at the first
      // group with offset >= cursorPos, even if later (smaller) offsets are below
      // the cursor — `count(_._1 < cursorPos)` may have been intended; confirm
      // against callers whose groups straddle the cursor.
      val cursorOffset = groups.takeWhile(_._1 < cursorPos).size*4
      groups foreach {
        case (offset, length) =>
          // Insert the closing marker first so `offset` itself stays valid.
          doc.replace(offset+length, 0, "]]")
          doc.replace(offset, 0, "[[")
      }
      cursorPos+cursorOffset
    }
  }

  /** This method allows subclasses to provide their own test setup. */
  def runTest(source: String, operation: Operation): Unit

  /** This method allows the test suite to access the sources on which a test is executed. */
  def source: String

  // DSL: "input" becomes "output" after Operation / "input".isNotModified
  final implicit class StringAsTest(input: String) {
    def becomes(expectedOutput: String) = input -> expectedOutput
    def isNotModified = input -> input
  }
  final implicit class TestExecutor(testData: (String, String)) {
    def after(operation: Operation, marker: Char = '$') = test(testData._1, testData._2, operation, marker)
  }

  /**
   * Tests if the input string is equal to the expected output after it is applied
   * to the given operation.
   *
   * For each input and output string, there must be set the cursor position
   * which is denoted by a ^ sign and must occur once.
   *
   * If the operation is `Remove` the string of this operation must be placed
   * before the caret in the input string.
   *
   * Sometimes it can happen that the input or output must contain trailing
   * white spaces. If this is the case then the sign passed to `marker` must be
   * set to the position after the expected number of white spaces.
   */
  final def test(input: String, expectedOutput: String, operation: Operation, marker: Char = '$'): Unit = {
    require(input.count(_ == '^') == 1, "the cursor in the input isn't set correctly")
    require(expectedOutput.count(_ == '^') == 1, "the cursor in the expected output isn't set correctly")

    // Strip the marker first; the caret offset is computed on the marker-free
    // text, then the caret sign itself is removed.
    val inputWithoutDollarSigns = input.filterNot(_ == marker)
    val caretOffset = inputWithoutDollarSigns.indexOf('^')
    val inputWithoutCursor = inputWithoutDollarSigns.filterNot(_ == '^')

    operation.caretOffset = caretOffset
    runTest(inputWithoutCursor, operation)

    // Re-insert the caret at the operation's (possibly updated) offset and
    // compare against the expectation.
    val expected = expectedOutput.filterNot(_ == marker)
    val actual = new StringBuilder(source).insert(operation.caretOffset, "^").toString()

    if (expected != actual) {
      throw new ComparisonFailure("", expected, actual)
    }
  }
}
/**
 * Test setup that runs operations against a plain Eclipse [[Document]]
 * partitioned with the Scala code partitioner (no workspace required).
 */
trait EclipseDocumentSupport {
  this: TextEditTests =>

  // The document the operation under test mutates; recreated per test run.
  var doc: Document = _

  override def runTest(source: String, operation: Operation): Unit = {
    doc = new Document(source)
    val partitioner = ScalaCodePartitioner.documentPartitioner()
    // Register the partitioner under both partitioning ids before connecting it,
    // so partition queries work regardless of which id clients use.
    doc.setDocumentPartitioner(IJavaPartitions.JAVA_PARTITIONING, partitioner)
    doc.setDocumentPartitioner(IDocumentExtension3.DEFAULT_PARTITIONING, partitioner)
    partitioner.connect(doc)
    operation.execute()
  }

  override def source: String =
    doc.get()
}
/**
 * Extends [[EclipseDocumentSupport]] by executing the test inside a workspace
 * runnable, which is required for operations that touch compiler/workspace state.
 */
trait CompilerSupport extends EclipseDocumentSupport with CompilerSupportTests {
  this: TextEditTests =>

  override def runTest(source: String, operation: Operation): Unit = {
    // Wrap the document-based test in a workspace operation (batches resource
    // change notifications and holds the workspace lock).
    EclipseUtils.workspaceRunnableIn(SDTTestUtils.workspace) { _ =>
      super.runTest(source, operation)
    }
  }
}
| scala-ide/scala-ide | org.scala-ide.sdt.core.tests/src/org/scalaide/core/ui/TextEditTests.scala | Scala | bsd-3-clause | 5,787 |
import reflect.{ClassTag, classTag}
// Compiler test: pattern matching against an abstract type T must consult the
// implicit ClassTag for the runtime check. The printed strings are compared
// against a check file and must not change.
trait Extractors {
  type T
  implicit val tTag: ClassTag[T]
  // Extractor whose unapply takes a T; applying it to Any forces a
  // ClassTag-based runtime type test before deconstruction.
  object ExtractT {
    def unapply(x: T) = Some(x)
  }
  def apply(a: Any) = a match {
    case ExtractT(x) => println(x +" is a "+ implicitly[ClassTag[T]])
    case _ => println(a+ " is not a "+ implicitly[ClassTag[T]] +"; it's a "+ a.getClass)
  }
}
object Test extends dotty.runtime.LegacyApp {
  // Direct type pattern: `case x: T` with a ClassTag in scope.
  def typeMatch[T: ClassTag](a: Any) = a match {
    case x : T => println(x +" is a "+ implicitly[ClassTag[T]])
    case _ => println(a+ " is not a "+ implicitly[ClassTag[T]] +"; it's a "+ a.getClass)
  }

  // the same match as typeMatch, but using an extractor
  def extractorMatch[S: ClassTag](a: Any) =
    (new Extractors { type T = S; val tTag = classTag[T] })(a)

  // Each typeMatch call must print the same result as the extractorMatch call below it.
  typeMatch[Int](1)
  typeMatch[Integer](1)
  typeMatch[String](1)
  typeMatch[Any](true)
  typeMatch[String]("woele")

  extractorMatch[Int](1)
  extractorMatch[Integer](1)
  extractorMatch[String](1)
  extractorMatch[Any](true)
  extractorMatch[String]("woele")
}
| yusuke2255/dotty | tests/pending/run/virtpatmat_typetag.scala | Scala | bsd-3-clause | 1,037 |
package com.github.chaabaj.openid
import com.github.chaabaj.openid.utils.SnakifiedSprayJsonSupport
/**
 * OAuth package object: mixes in the snake_case JSON protocol so all OAuth
 * message (de)serialization inside this package uses snake_case field names.
 */
package object oauth extends SnakifiedSprayJsonSupport {
}
| jkugiya/openid-scala | src/main/scala/com/github/chaabaj/openid/oauth/package.scala | Scala | mit | 161 |
package com.twitter.finatra.thrift.internal.routing
import com.twitter.finagle.Service
import com.twitter.finagle.thrift.ThriftService
/**
 * Pairs the two views of a routed Thrift service: the byte-level Finagle
 * service used for transport, and the typed [[ThriftService]] facade.
 */
private[thrift] case class Services(
  service: Service[Array[Byte], Array[Byte]],
  thriftService: ThriftService)
| twitter/finatra | thrift/src/main/scala/com/twitter/finatra/thrift/internal/routing/Services.scala | Scala | apache-2.0 | 252 |
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package scaps.utils
import org.scalatest.FlatSpec
import org.scalatest.Matchers
/**
 * Specs for the `sample` extension on sequences: size contract, randomness,
 * and (approximately) uniform value distribution.
 */
class SampleSeqOpsSpecs extends FlatSpec with Matchers {
  "sample" should "yield the same list when invoked with n >= list.length" in {
    val l = Seq(1, 2, 3)
    l.sample(l.length) should be(l)
  }

  it should "yield an empty seq on n == 0" in {
    Seq(1, 2, 3).sample(0) should be(Seq())
  }

  it should "yield a list with n elements" in {
    val l = Seq(1, 2, 3, 4, 5)
    for { n <- 0 to l.length } {
      l.sample(n).length should be(n)
    }
  }

  it should "take random samples" in {
    // NOTE(review): this expects sample(3) of a 3-element seq to vary across
    // calls, while the first spec expects sample(l.length) to equal l exactly.
    // Verify sample's ordering contract — both can only hold if equality here
    // is order-sensitive and the first spec's result is order-preserving.
    val l = Seq(1, 2, 3)

    var sampleWasDifferent = false
    for { _ <- 1 to 100 } {
      sampleWasDifferent |= (l.sample(3) != l.sample(3))
    }

    sampleWasDifferent should be(true)
  }

  it should "evenly distribute values" in {
    // 1000 draws of 2 out of 3 values => 2000 picks, ~666 per value; the ±60
    // tolerance makes this probabilistic test effectively deterministic.
    val l = Seq(1, 2, 3)
    val res = List.newBuilder[Int]

    for { _ <- 1 to 1000 } {
      res ++= l.sample(2)
    }

    val countPerVal = res.result().groupBy(identity).mapValues(_.length)
    countPerVal.foreach { i =>
      i._2 shouldBe (666 +- 60)
    }
  }
}
| scala-search/scaps | core/src/test/scala/scaps/utils/SampleSeqOpsSpecs.scala | Scala | mpl-2.0 | 1,311 |
/*
* Copyright (c) 2021, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
package com.krux.hyperion.activity
import com.krux.hyperion.adt.{HS3Uri, HString}
import com.krux.hyperion.common.S3Uri
import com.krux.hyperion.common.{BaseFields, PipelineObjectId}
import com.krux.hyperion.expression.RunnableObject
import com.krux.hyperion.HyperionContext
import com.krux.hyperion.resource.{Ec2Resource, Resource}
/**
 * Shell command activity that runs a given Jar via the bundled `run-jar.sh`
 * wrapper script. Builder-style `withXxx` methods return updated copies;
 * the configured options are passed to the script as CLI arguments
 * (`--cp`, `--env`, `--jar`) followed by the main-class name.
 */
case class JarActivity private (
  baseFields: BaseFields,
  activityFields: ActivityFields[Ec2Resource],
  shellCommandActivityFields: ShellCommandActivityFields,
  jarUri: HS3Uri,
  mainClass: Option[MainClass],
  options: Seq[HString],
  environmentUri: Option[HS3Uri],
  classpath: Seq[HS3Uri]
) extends BaseShellCommandActivity with WithS3Input with WithS3Output {

  type Self = JarActivity

  // The wrapper script must come from S3, never from an inline command string.
  assert(script.uri.nonEmpty)

  def updateBaseFields(fields: BaseFields) = copy(baseFields = fields)
  def updateActivityFields(fields: ActivityFields[Ec2Resource]) = copy(activityFields = fields)
  def updateShellCommandActivityFields(fields: ShellCommandActivityFields) = copy(shellCommandActivityFields = fields)

  def withMainClass(mainClass: MainClass) = copy(mainClass = Option(mainClass))
  // Appending variants: repeated calls accumulate options / classpath entries.
  def withOptions(opts: HString*) = copy(options = options ++ opts)
  def withEnvironmentUri(environmentUri: HS3Uri) = copy(environmentUri = Option(environmentUri))
  def withClasspath(jar: HS3Uri) = copy(classpath = classpath :+ jar)

  // Argument order matters to run-jar.sh: classpath entries, optional env,
  // the jar itself, user options, optional main class, then any inherited args.
  override def scriptArguments =
    classpath.flatMap(jar => Seq[HString]("--cp", jar.serialize)) ++
    environmentUri.toSeq.flatMap(uri => Seq[HString]("--env", uri.serialize)) ++
    Seq[HString]("--jar", jarUri.serialize) ++
    options ++
    mainClass.map(_.fullName: HString) ++
    shellCommandActivityFields.scriptArguments
}

object JarActivity extends RunnableObject {

  /** Creates a JarActivity with defaults from the Hyperion context (script URI, EC2 env). */
  def apply(jarUri: HS3Uri)(runsOn: Resource[Ec2Resource])(implicit hc: HyperionContext): JarActivity =
    new JarActivity(
      baseFields = BaseFields(PipelineObjectId(JarActivity.getClass)),
      activityFields = ActivityFields(runsOn),
      shellCommandActivityFields = ShellCommandActivityFields(S3Uri(s"${hc.scriptUri}activities/run-jar.sh")),
      jarUri = jarUri,
      mainClass = None,
      options = Seq.empty,
      environmentUri = hc.ec2EnvironmentUri.map(S3Uri(_)),
      classpath = Seq.empty
    )
}
| realstraw/hyperion | core/src/main/scala/com/krux/hyperion/activity/JarActivity.scala | Scala | bsd-3-clause | 2,560 |
package com.fang.data
import com.fang.ImplicitConvert._
import com.fang.UserStatus.UpdateUS
import com.fang.ajax.UserAPI
import com.fang.ajax.UserStatusAPI.{ReceiveType, TypedUserStatusSocket}
import com.fang.data.AjaxResult.{Error, Ok}
import com.fang.{ErrorMessage, UserSession, UserStatus}
import com.thoughtworks.binding.Binding.Var
import com.thoughtworks.binding.{Binding, dom}
import org.scalajs.dom.{CloseEvent, Event, window}
/**
 * Client-side (Scala.js) application state: login session, user-status
 * websocket, reactive window dimensions, and the global status event listener.
 *
 * NOTE: this object has side effects at initialization time — it starts a
 * 30-second polling timer and registers the invitation handler below.
 */
object GlobalValue {
  val userSession: Var[Option[UserSession]] = Var(None)
  // Mutable handle to the current user-status websocket (None when disconnected).
  var userStatusSession: Option[TypedUserStatusSocket] = None
  val userStatus: Var[Option[UserStatus]] = Var(None)
  val errorMessage: Var[Option[ErrorMessage]] = Var(None)
  val userEventListener: EventListener[ReceiveType] = new EventListener[ReceiveType]()
  val isAdmin: Var[Boolean] = Var(false)
  val windowHeight: Var[Int] = Var(0)
  val windowWidth: Var[Int] = Var(0)
  // Board is square: fits the window minus fixed chrome (30px width / 108px height margins).
  @dom val sizeForBoard: Binding[Int] = Math.min(windowWidth.bind - 30, windowHeight.bind - 108)
  @dom val boardStyle: Binding[String] = s"height: ${sizeForBoard.bind}px; width: ${sizeForBoard.bind}px"

  // Keep the session alive / reconnect the status socket every 30 seconds.
  window.setInterval(() => updateUserSession(), 30000)

  // Global handler for status-socket events: Left = error, Right = status update.
  userEventListener.addListener {
    case Left(a) =>
      errorMessage.value = Some(a)
      window.alert(a.message)
    case Right(b) =>
      // Transition into "invited": if we are the invitee (user2), prompt to
      // accept (-> playing) or decline (-> idle).
      if(b.isInvited && userStatus.value.isDefined && !userStatus.value.get.isInvited){
        val status = userStatus.value.get
        if(status.userId == status.inviteStatus.get.user2){
          val other = status.inviteStatus.get.user1
          if(userStatusSession.isDefined){
            if(window.confirm(s"you have received an invitation from $other, accept or not?")){
              // NOTE(review): the game id is sent as "" here — presumably the
              // server assigns it; confirm against the server-side handler.
              userStatusSession.get.sendMessage(UpdateUS(UserStatus.playing(status.userId, "")))
            }else{
              userStatusSession.get.sendMessage(UpdateUS(UserStatus.idle(status.userId)))
            }
          }
        }
      }
      userStatus.value = Some(b)
      // When a game starts, navigate to its page.
      if(b.isPlaying) window.location.hash = "game/" + b.playOn.get
  }

  /**
   * Refreshes the login session from the server and keeps the user-status
   * websocket consistent with it: (re)opened for a logged-in user, closed for
   * admins and logged-out users.
   */
  def updateUserSession(): Unit = {
    UserAPI.logStatus().foreach {
      case Ok(value) =>
        userSession.value = Some(value)
        if(value.role == UserSession.USER){
          isAdmin.value = false
          // Drop a socket that belongs to a different (previous) user.
          if(userStatusSession.isDefined){
            if(userStatusSession.get.userId != value.id){
              println(s"userStatusSession different ${userStatusSession.get.userId} ${value.id}")
              userStatusSession.get.close()
              userStatusSession = None
            }
          }
          // Open a socket if none is alive (the empty then-branch keeps an
          // existing, matching socket untouched).
          if(userStatusSession.isDefined) {
          }else{
            userStatusSession = Some(new TypedUserStatusSocket(value.id) {
              override def onErrorMessage(message: String): Unit = {
                userEventListener.broadCast(Left(ErrorMessage(message)))
              }
              override def onData(userStatus: UserStatus): Unit = {
                userEventListener.broadCast(Right(userStatus))
              }
              override def onError(message: String): Unit = {
                userEventListener.broadCast(Left(ErrorMessage(message)))
              }
              override def onClose(event: CloseEvent): Unit = {
                userStatusSession = None
                println(s"connection to user status $userId is closed")
              }
              override def onOpen(event: Event): Unit = {
                println(s"new connection to user status $userId")
              }
            })
          }
        }else if(value.role == UserSession.ADMIN){
          // Admins have no status socket.
          isAdmin.value = true
          closeConnection()
        }
      case Error(_, _) =>
        // Not logged in: tear everything down.
        closeConnection()
        userSession.value = None
    }
  }

  /** Clears status/error state and closes the status websocket if open. */
  def closeConnection(): Unit = {
    userStatus.value = None
    errorMessage.value = None
    if(userStatusSession.isDefined){
      userStatusSession.get.close()
    }
  }
}
| TianhaoFang/online-go | js/src/main/scala/com/fang/data/GlobalValue.scala | Scala | mit | 3,895 |
package org.substeps.report
import com.typesafe.config.Config
/**
* Created by ian on 18/08/16.
*/
trait IndexPageTemplate {
/**
 * Renders one Bootstrap progress-bar row for a counter group: the group name
 * with a total badge, and stacked success/failure/not-run segments sized by
 * their percentage shares.
 *
 * Fix: the success segment's label now reads "<pc>% Success" — the '%' sign was
 * missing, making it inconsistent with the failure and not-run labels.
 *
 * @param name     display name of the counter group (e.g. "Features")
 * @param counters pass/fail/skip counts and precomputed percentages
 * @return HTML fragment for the row
 */
def buildStatsBlock(name: String, counters : Counters) = {
  s"""
| <div class="row-fluid">
|
| <div class="col-md-2">${name} <span class="badge">${counters.total}</span></div>
|
| <div class="col-md-10">
|
| <div class="progress">
| <div class="progress-bar progress-bar-success" style="width: ${counters.successPC}%;">${counters.successPC}% Success (${counters.passed})</div>
| <div class="progress-bar progress-bar-danger" style="width: ${counters.failedPC}%">${counters.failedPC}% Failure (${counters.failed})</div>
| <div class="progress-bar progress-bar-warning" style="width: ${counters.skippedPC}%">${counters.skippedPC}% Not run (${counters.skipped})</div>
| </div>
|
| </div>
|
| </div>
|
""".stripMargin
}
def buildReportFrame(masterConfig : Config, rootNodeSummaries: List[RootNodeSummary], stats : ExecutionStats, dateTimeString : String) = {
val featureProgressBlock = buildStatsBlock("Features", stats.featuresCounter)
val scenarioProgressBlock = buildStatsBlock("Scenarios", stats.scenarioCounters)
val scenarioStepProgressBlock = buildStatsBlock("Scenario steps", stats.stepCounters)
val stepImplBlock = buildStatsBlock("Step Impls", stats.stepImplCounters)
val rootNodeSummary = rootNodeSummaries.head
val suiteDescription = masterConfig.getString("org.substeps.config.description")
val reportTitle = Option(suiteDescription).getOrElse("Substeps Test Report")
// TODO pull out some of the other things from the node summary - tags, nonfatal tags and environment
val env = rootNodeSummary.environment
val nonFatalTagsOption =
rootNodeSummary.nonFatalTags.map(t => s"""<p class="navbar-text navbar-left">Non Fatal Tags: ${t}</p>""" )
val nonFatalTags = nonFatalTagsOption.getOrElse("")
val tags = rootNodeSummary.tags.getOrElse("")
val envHeader =
if(rootNodeSummaries.size == 1) {
s""" <p class="navbar-text navbar-left">Environment: ${env}</p>
<p class="navbar-text navbar-left">Tags: ${tags}</p>
${nonFatalTags}"""
}
else {
""
}
s"""
|<!DOCTYPE html>
|<!-- Original Copyright Technophobia Ltd 2012, later revisions by others, see github -->
|<html lang="en">
|
|<head>
| <title>Substeps report</title>
| <meta charset="UTF-8">
| <link href="css/bootstrap.min.css" rel="stylesheet"/>
| <link href="css/bootstrap-responsive.min.css" rel="stylesheet"/>
| <link href="css/substeps.css" rel="stylesheet"/>
|
| <link rel="stylesheet" href="css/jstree/style.min.css" />
|
|</head>
|
|<body>
|
|<nav class="navbar navbar-default navbar-fixed-top">
| <div class="container-fluid">
|
| <div class="navbar-header">
| <span class="navbar-brand" href="#">${reportTitle}</span>
| <span class="navbar-brand" >${dateTimeString}</span>
| ${envHeader}
| </div>
|
| <div class="collapse navbar-collapse">
|
| <ul class="nav navbar-nav navbar-right">
| <li class="active"><a href="#summary">Summary</a></li>
| <li><a href="#feature-tag-summary" onclick="javascript:toggle('feature-tag-summary')">Features by tag</a></li>
| <li><a href="#scenario-tag-summary" onclick="javascript:toggle('scenario-tag-summary')">Scenario tag summary</a></li>
| <li><a href="#test-detail">Test detail</a></li>
| <li><a href="usage-tree.html">Usage <span class="label label-warning">Beta</span></a></li>
| <li><a href="glossary.html">Glossary</a></li>
|
| </ul>
| </div>
| </div>
|</nav>
|
|
|<div class="container-fluid" style="padding-top:10px">
| <div class="panel panel-default">
| <div class="panel-heading">
| <h3 class="panel-title">Summary</h3>
| </div>
| <div class="panel-body">
|
| ${featureProgressBlock}
|
| ${scenarioProgressBlock}
|
| ${scenarioStepProgressBlock}
|
|
| </div>
| </div>
|
| <div class="panel panel-default">
| <div class="panel-heading">
| <div class="row-fluid">
| <div class="col-md-11">
| <h3 class="panel-title">Summary table</h3>
| </div>
| <div class="col-md-1">
| <a class="btn btn-primary btn-xs pull-right" role="button" data-toggle="collapse" href="#summaryTable" aria-expanded="false" aria-controls="summaryTable">Show</a>
| </div>
| </div>
|
| </div>
| <div class="panel-body">
|
| <div id="summaryTable" class="row-fluid collapse">
|
| <table class="table table-striped table-bordered">
| <thead>
| <tr>
| <th><h4>Summary</h4></th>
| <th>Number</th>
| <th>Run</th>
| <th>Passed</th>
| <th>Failed</th>
| <th>Skipped</th>
| <th>Success %</th>
| </tr>
| </thead>
| <tbody>
| <tr>
| <td>Features</td>
| <td>${stats.featuresCounter.total}</td>
| <td>${stats.featuresCounter.run}</td>
| <td>${stats.featuresCounter.passed}</td>
| <td>${stats.featuresCounter.failed}</td>
| <td>${stats.featuresCounter.skipped}</td>
| <td>${stats.featuresCounter.successPC} %</td>
| </tr>
|
| <tr>
| <td>Scenarios</td>
| <td>${stats.scenarioCounters.total}</td>
| <td>${stats.scenarioCounters.run}</td>
| <td>${stats.scenarioCounters.passed}</td>
| <td>${stats.scenarioCounters.failed}</td>
| <td>${stats.scenarioCounters.skipped}</td>
| <td>${stats.scenarioCounters.successPC} %</td>
| </tr>
|
| <tr>
| <td>Scenario steps</td>
| <td>${stats.stepCounters.total}</td>
| <td>${stats.stepCounters.run}</td>
| <td>${stats.stepCounters.passed}</td>
| <td>${stats.stepCounters.failed}</td>
| <td>${stats.stepCounters.skipped}</td>
| <td>${stats.stepCounters.successPC} %</td>
| </tr>
| </tbody>
| </table>
| </div>
|
| <div class="container">
| <div id="feature-tag-summary" class="row" style="display:none;">
| <header>
| <h4>Feature breakdown by tag</h4>
| </header>
|
| <div id="feature-stats-div"></div>
|
| </div>
| </div>
| <div class="container">
| <div id="scenario-tag-summary" class="row" style="display:none;">
| <header>
| <h4>Scenario breakdown by tag</h4>
| </header>
|
| <div id="scenario-stats-div"></div>
|
| </div>
| </div>
|
| </div>
| </div>
|
| <div class="panel panel-default">
| <div class="panel-heading">
| <div class="row-fluid">
| <div class="col-md-6">
| <h3 class="panel-title">Test details</h3>
| </div>
|
| <div class="col-md-1">
| <label style=" margin-bottom: 0px;">Key:</label>
| </div>
| <div class="col-md-5">
|
| <label class="icon-key">
| <img class="key-img" src="img/PASSED.png" alt="Passed"> <span>Passed</span>
| </label>
|
| <label class="icon-key">
| <img class="key-img" src="img/FAILED.png" alt="Failure"> <span>Failed</span>
| </label>
|
|
| <label class="icon-key">
| <img class="key-img" src="img/CHILD_FAILED.png" alt="Child failed"> <span>Child failed</span>
| </label>
|
|
|
| <label class="icon-key">
| <img class="key-img" src="img/NOT_RUN2.png" alt="Not run"> <span>Not run</span>
| </label>
|
|
| <label class="icon-key">
| <img class="key-img" src="img/NON_CRITICAL_FAILURE.png" alt="Non critical failure"> <span>Non critical failure</span>
| </label>
|
|
| </div>
| </div>
| </div>
| <div class="panel-body">
|
| <div>
| <input id="hide-not-run-chk" type="checkbox"/>Hide not run
| </div>
|
|
| <noscript>
| <h3>Please enable Javascript to view Test details</h3>
| <p>Non java script variants of this report were not viable, sorry. We found that there was simply too much data to display and page load times were approaching unacceptable.</p>
| <p>Please enable javascript and reload this page</p>
| </noscript>
|
| <div id="test-detail" class="row-fluid">
|
| <div id="feature-tree" class="span7">
|
| </div>
|
| <div class="span5" id="detail-div-container">
| <div id="affix-marker" data-spy="affix" data-offset-top="200"></div>
| <div id="feature-detail" class="detail-div"></div>
| </div>
| </div>
| </div>
| </div>
|</div>
|
|<script type="text/javascript" src="js/jquery.min.js"></script>
|<script type="text/javascript" src="js/jquery-ui.min.js"></script>
|<script type="text/javascript" src="js/bootstrap.min.js"></script>
|<script type="text/javascript" src="js/datatables.min.js"></script>
|<script type="text/javascript" src="js/jstree.min.js"></script>
|<script type="text/javascript" src="js/substeps.js"></script>
|<script type="text/javascript" src="substeps-results-tree.js"></script>
|<script type="text/javascript" src="detail_data.js"></script>
|<script type="text/javascript" src="substeps-stats-by-tag.js"></script>
|
|
|</body>
|</html>
""".stripMargin
}
}
| Substeps/substeps-framework | core/src/main/scala/org/substeps/report/ReportFrame.scala | Scala | lgpl-3.0 | 11,724 |
package com.kjetland.jackson.jsonSchema.testDataScala
import com.fasterxml.jackson.annotation.JsonProperty
import com.kjetland.jackson.jsonSchema.annotations.JsonSchemaDefault
import com.kjetland.jackson.jsonSchema.annotations.JsonSchemaExamples;
/** Test fixture covering the two ways defaults/examples reach the generated JSON schema:
 *  the custom `@JsonSchemaDefault`/`@JsonSchemaExamples` annotations and Jackson's own
 *  `@JsonProperty(defaultValue = ...)` for string, int and boolean fields.
 */
case class DefaultAndExamples
(
  // example only, no default
  @JsonSchemaExamples(Array("user@example.com"))
  emailValue:String,
  // both a schema default and several examples
  @JsonSchemaDefault("12")
  @JsonSchemaExamples(Array("10", "14", "18"))
  fontSize:Int,
  // defaults supplied via Jackson's @JsonProperty rather than the schema annotations
  @JsonProperty( defaultValue = "ds")
  defaultStringViaJsonValue:String,
  @JsonProperty( defaultValue = "1")
  defaultIntViaJsonValue:Int,
  @JsonProperty( defaultValue = "true")
  defaultBoolViaJsonValue:Boolean
)
| mbknor/mbknor-jackson-jsonSchema | src/test/scala/com/kjetland/jackson/jsonSchema/testDataScala/DefaultAndExamples.scala | Scala | mit | 659 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.scheduler
import java.nio.ByteBuffer
import scala.collection.mutable
import scala.language.implicitConversions
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.streaming.Time
import org.apache.spark.streaming.util.{WriteAheadLog, WriteAheadLogUtils}
import org.apache.spark.util.{Clock, Utils}
import org.apache.spark.{Logging, SparkConf, SparkException}
/** Trait representing any event in the ReceivedBlockTracker that updates its state. */
private[streaming] sealed trait ReceivedBlockTrackerLogEvent

// Logged when a new received block is recorded by the tracker.
private[streaming] case class BlockAdditionEvent(receivedBlockInfo: ReceivedBlockInfo)
  extends ReceivedBlockTrackerLogEvent
// Logged when unallocated blocks are assigned to the batch at `time`.
private[streaming] case class BatchAllocationEvent(time: Time, allocatedBlocks: AllocatedBlocks)
  extends ReceivedBlockTrackerLogEvent
// Logged when the allocations for the given batch times are cleaned up.
private[streaming] case class BatchCleanupEvent(times: Seq[Time])
  extends ReceivedBlockTrackerLogEvent
/** Immutable snapshot of the blocks (per stream id) that were allocated to one batch. */
private[streaming]
case class AllocatedBlocks(streamIdToAllocatedBlocks: Map[Int, Seq[ReceivedBlockInfo]]) {
  /** Blocks allocated to the given stream; empty when the stream has no allocation. */
  def getBlocksOfStream(streamId: Int): Seq[ReceivedBlockInfo] =
    streamIdToAllocatedBlocks.get(streamId) match {
      case Some(blocks) => blocks
      case None         => Seq.empty
    }
}
/**
 * Class that keep track of all the received blocks, and allocate them to batches
 * when required. All actions taken by this class can be saved to a write ahead log
 * (if a checkpoint directory has been provided), so that the state of the tracker
 * (received blocks and block-to-batch allocations) can be recovered after driver failure.
 *
 * Note that when any instance of this class is created with a checkpoint directory,
 * it will try reading events from logs in the directory.
 *
 * @param conf Spark configuration, used when creating the write ahead log
 * @param hadoopConf Hadoop configuration for the write ahead log files
 * @param streamIds ids of all input streams whose blocks are tracked
 * @param clock clock used for cleanup sanity checks and WAL record timestamps
 * @param recoverFromWriteAheadLog whether to replay past events from the WAL on construction
 * @param checkpointDirOption checkpoint directory; the WAL is enabled only when this is defined
 */
private[streaming] class ReceivedBlockTracker(
    conf: SparkConf,
    hadoopConf: Configuration,
    streamIds: Seq[Int],
    clock: Clock,
    recoverFromWriteAheadLog: Boolean,
    checkpointDirOption: Option[String])
  extends Logging {

  private type ReceivedBlockQueue = mutable.Queue[ReceivedBlockInfo]

  // Per-stream queues of blocks that have been received but not yet allocated to a batch.
  private val streamIdToUnallocatedBlockQueues = new mutable.HashMap[Int, ReceivedBlockQueue]
  // Batch time -> blocks allocated to that batch (across all streams).
  private val timeToAllocatedBlocks = new mutable.HashMap[Time, AllocatedBlocks]
  // Defined only when checkpointDirOption is set (i.e. when the WAL is enabled).
  private val writeAheadLogOption = createWriteAheadLog()

  // Time of the most recently allocated batch; guards against out-of-order allocations.
  private var lastAllocatedBatchTime: Time = null

  // Recover block information from write ahead logs
  if (recoverFromWriteAheadLog) {
    recoverPastEvents()
  }

  /** Add received block. This event will get written to the write ahead log (if enabled).
   *  Returns true if the block was recorded; false if logging/recording threw an exception. */
  def addBlock(receivedBlockInfo: ReceivedBlockInfo): Boolean = synchronized {
    try {
      writeToLog(BlockAdditionEvent(receivedBlockInfo))
      getReceivedBlockQueue(receivedBlockInfo.streamId) += receivedBlockInfo
      logDebug(s"Stream ${receivedBlockInfo.streamId} received " +
        s"block ${receivedBlockInfo.blockStoreResult.blockId}")
      true
    } catch {
      case e: Exception =>
        logError(s"Error adding block $receivedBlockInfo", e)
        false
    }
  }

  /**
   * Allocate all unallocated blocks to the given batch.
   * This event will get written to the write ahead log (if enabled).
   */
  def allocateBlocksToBatch(batchTime: Time): Unit = synchronized {
    // Only allocate for strictly newer batch times; see the comment below for when
    // an equal or older batchTime can legitimately arrive (WAL/checkpoint recovery).
    if (lastAllocatedBatchTime == null || batchTime > lastAllocatedBatchTime) {
      // Drain every stream's unallocated queue into this batch's allocation.
      val streamIdToBlocks = streamIds.map { streamId =>
          (streamId, getReceivedBlockQueue(streamId).dequeueAll(x => true))
      }.toMap
      val allocatedBlocks = AllocatedBlocks(streamIdToBlocks)
      writeToLog(BatchAllocationEvent(batchTime, allocatedBlocks))
      timeToAllocatedBlocks(batchTime) = allocatedBlocks
      lastAllocatedBatchTime = batchTime
      allocatedBlocks
    } else {
      // This situation occurs when:
      // 1. WAL is ended with BatchAllocationEvent, but without BatchCleanupEvent,
      // possibly processed batch job or half-processed batch job need to be processed again,
      // so the batchTime will be equal to lastAllocatedBatchTime.
      // 2. Slow checkpointing makes recovered batch time older than WAL recovered
      // lastAllocatedBatchTime.
      // This situation will only occurs in recovery time.
      logInfo(s"Possibly processed batch $batchTime need to be processed again in WAL recovery")
    }
  }

  /** Get the blocks allocated to the given batch. */
  def getBlocksOfBatch(batchTime: Time): Map[Int, Seq[ReceivedBlockInfo]] = synchronized {
    timeToAllocatedBlocks.get(batchTime).map { _.streamIdToAllocatedBlocks }.getOrElse(Map.empty)
  }

  /** Get the blocks allocated to the given batch and stream. */
  def getBlocksOfBatchAndStream(batchTime: Time, streamId: Int): Seq[ReceivedBlockInfo] = {
    synchronized {
      timeToAllocatedBlocks.get(batchTime).map {
        _.getBlocksOfStream(streamId)
      }.getOrElse(Seq.empty)
    }
  }

  /** Check if any blocks are left to be allocated to batches. */
  def hasUnallocatedReceivedBlocks: Boolean = synchronized {
    !streamIdToUnallocatedBlockQueues.values.forall(_.isEmpty)
  }

  /**
   * Get blocks that have been added but not yet allocated to any batch. This method
   * is primarily used for testing.
   */
  def getUnallocatedBlocks(streamId: Int): Seq[ReceivedBlockInfo] = synchronized {
    getReceivedBlockQueue(streamId).toSeq
  }

  /**
   * Clean up block information of old batches. If waitForCompletion is true, this method
   * returns only after the files are cleaned up.
   */
  def cleanupOldBatches(cleanupThreshTime: Time, waitForCompletion: Boolean): Unit = synchronized {
    // Cleanup threshold must be in the past relative to the tracker's clock.
    require(cleanupThreshTime.milliseconds < clock.getTimeMillis())
    val timesToCleanup = timeToAllocatedBlocks.keys.filter { _ < cleanupThreshTime }.toSeq
    logInfo("Deleting batches " + timesToCleanup)
    writeToLog(BatchCleanupEvent(timesToCleanup))
    timeToAllocatedBlocks --= timesToCleanup
    writeAheadLogOption.foreach(_.clean(cleanupThreshTime.milliseconds, waitForCompletion))
  }

  /** Stop the block tracker. */
  def stop() {
    writeAheadLogOption.foreach { _.close() }
  }

  /**
   * Recover all the tracker actions from the write ahead logs to recover the state (unallocated
   * and allocated block info) prior to failure.
   */
  private def recoverPastEvents(): Unit = synchronized {

    // Insert the recovered block information
    def insertAddedBlock(receivedBlockInfo: ReceivedBlockInfo) {
      logTrace(s"Recovery: Inserting added block $receivedBlockInfo")
      receivedBlockInfo.setBlockIdInvalid()
      getReceivedBlockQueue(receivedBlockInfo.streamId) += receivedBlockInfo
    }

    // Insert the recovered block-to-batch allocations and clear the queue of received blocks
    // (when the blocks were originally allocated to the batch, the queue must have been cleared).
    def insertAllocatedBatch(batchTime: Time, allocatedBlocks: AllocatedBlocks) {
      logTrace(s"Recovery: Inserting allocated batch for time $batchTime to " +
        s"${allocatedBlocks.streamIdToAllocatedBlocks}")
      streamIdToUnallocatedBlockQueues.values.foreach { _.clear() }
      lastAllocatedBatchTime = batchTime
      timeToAllocatedBlocks.put(batchTime, allocatedBlocks)
    }

    // Cleanup the batch allocations
    def cleanupBatches(batchTimes: Seq[Time]) {
      logTrace(s"Recovery: Cleaning up batches $batchTimes")
      timeToAllocatedBlocks --= batchTimes
    }

    // Replay every WAL record in order, dispatching on the event type.
    writeAheadLogOption.foreach { writeAheadLog =>
      logInfo(s"Recovering from write ahead logs in ${checkpointDirOption.get}")
      // NOTE(review): JavaConversions is deprecated; JavaConverters' explicit .asScala
      // would be the modern equivalent. Left as-is (behaviour-identical implicit wrapping).
      import scala.collection.JavaConversions._
      writeAheadLog.readAll().foreach { byteBuffer =>
        logTrace("Recovering record " + byteBuffer)
        Utils.deserialize[ReceivedBlockTrackerLogEvent](byteBuffer.array) match {
          case BlockAdditionEvent(receivedBlockInfo) =>
            insertAddedBlock(receivedBlockInfo)
          case BatchAllocationEvent(time, allocatedBlocks) =>
            insertAllocatedBatch(time, allocatedBlocks)
          case BatchCleanupEvent(batchTimes) =>
            cleanupBatches(batchTimes)
        }
      }
    }
  }

  /** Write an update to the tracker to the write ahead log */
  private def writeToLog(record: ReceivedBlockTrackerLogEvent) {
    if (isWriteAheadLogEnabled) {
      logDebug(s"Writing to log $record")
      writeAheadLogOption.foreach { logManager =>
        logManager.write(ByteBuffer.wrap(Utils.serialize(record)), clock.getTimeMillis())
      }
    }
  }

  /** Get the queue of received blocks belonging to a particular stream */
  private def getReceivedBlockQueue(streamId: Int): ReceivedBlockQueue = {
    streamIdToUnallocatedBlockQueues.getOrElseUpdate(streamId, new ReceivedBlockQueue)
  }

  /** Optionally create the write ahead log manager only if the feature is enabled */
  private def createWriteAheadLog(): Option[WriteAheadLog] = {
    checkpointDirOption.map { checkpointDir =>
      val logDir = ReceivedBlockTracker.checkpointDirToLogDir(checkpointDirOption.get)
      WriteAheadLogUtils.createLogForDriver(conf, logDir, hadoopConf)
    }
  }

  /** Check if the write ahead log is enabled. This is only used for testing purposes. */
  private[streaming] def isWriteAheadLogEnabled: Boolean = writeAheadLogOption.nonEmpty
}
private[streaming] object ReceivedBlockTracker {
  /** Directory under the checkpoint dir where received-block metadata WAL files live. */
  def checkpointDirToLogDir(checkpointDir: String): String = {
    val metadataDir = new Path(checkpointDir, "receivedBlockMetadata")
    metadataDir.toString
  }
}
| andrewor14/iolap | streaming/src/main/scala/org/apache/spark/streaming/scheduler/ReceivedBlockTracker.scala | Scala | apache-2.0 | 10,248 |
class A {
  // NOTE(review): parameter name `x` is declared twice, which the Scala compiler rejects.
  // This file lives under tests/typing/bad/, so the clash is presumably a deliberate
  // negative test for the type checker — confirm before "fixing" it.
  def m(x: Int, y: Int, x: Boolean) { }
}
// Minimal entry point; the interesting part of this fixture is the ill-typed class above.
object Main { def main(args: Array[String]) { } }
| tobast/compil-petitscala | tests/typing/bad/testfile-multiple_parameters2-1.scala | Scala | gpl-3.0 | 104 |
/**
* Copyright (C) 2016 Nicola Justus <nicola.justus@mni.thm.de>
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package de.thm.move.util
/** A monad for calculating values which could return a value with warnings.
 * @tparam A the type of the value
 * @tparam E the type of the warning
 */
sealed trait Validation[+A, +E] {
  /** Maps the value with the given function fn, copying the warnings from this Validation to the
    * returning Validation.
    * @param fn transformation applied to the carried value
    */
  def map[B](fn: A => B): Validation[B, E]
  /** Combines this Validation with the Validation returned from calling fn.
    *
    * fn is called with the value from this Validation. If this Validation and the returning Validation
    * from fn contains warnings they will be joined.
    * @param fn continuation producing the next Validation
    */
  def flatMap[B, EE >: E](fn: A => Validation[B,EE]): Validation[B, EE]
  /** Returns the value of this Validation */
  def getValue:A
  /** Returns the warnings of this Validation (empty for a success) */
  def getWarnings:List[E]
}
/** A validation carrying a value and no warnings at all. */
case class ValidationSuccess[A](value: A) extends Validation[A, Nothing] {
  def map[B](fn: A => B): Validation[B, Nothing] = {
    val mapped = fn(value)
    ValidationSuccess(mapped)
  }
  def flatMap[B, EE >: Nothing](fn: A => Validation[B, EE]): Validation[B, EE] = fn(value)
  def getValue: A = value
  def getWarnings: List[Nothing] = List.empty
}
/** A validation carrying a value together with accumulated warnings. */
case class ValidationWarning[A, E](value: A, warnings: List[E]) extends Validation[A, E] {
  def map[B](fn: A => B): Validation[B, E] = {
    val mapped = fn(value)
    ValidationWarning(mapped, warnings)
  }
  // Equivalent to matching on the two sealed cases: a success contributes no warnings
  // (warnings ++ Nil), a warning contributes its own list.
  def flatMap[B, EE >: E](fn: A => Validation[B, EE]): Validation[B, EE] = {
    val next = fn(value)
    ValidationWarning(next.getValue, warnings ++ next.getWarnings)
  }
  def getValue: A = value
  def getWarnings: List[E] = warnings
}
object Validation {
  /** Lifts a plain value into a warning-free [[ValidationSuccess]]. */
  def apply[A, E](a: A): Validation[A, E] = ValidationSuccess[A](a)
}
object ValidationWarning {
  /** Builds a [[ValidationWarning]] carrying the given value and a single warning. */
  def apply[A, E](a: A, w: E): ValidationWarning[A, E] = ValidationWarning(a, w :: Nil)
}
| THM-MoTE/MoVE | src/main/scala/de/thm/move/util/Validation.scala | Scala | mpl-2.0 | 2,348 |
package breeze
import breeze.linalg._
package object integrate {

  /** Composite trapezoid rule approximation of the integral of `f` over `[start, end]`.
   *
   * @param f     integrand
   * @param start lower bound of integration
   * @param end   upper bound of integration
   * @param nodes number of equally spaced evaluation points (must be at least 2)
   * @throws IllegalArgumentException if fewer than two nodes are requested
   */
  def trapezoid(f: Double => Double, start: Double, end: Double, nodes: Int): Double = {
    if (nodes < 2)
      // IllegalArgumentException (a subclass of Exception, so existing catches still apply)
      // is the conventional type for invalid-argument failures.
      throw new IllegalArgumentException("When using trapezoid, you have to use at least two nodes.")

    val h = (end - start) / (nodes - 1)
    val s = sum(for (i <- 0 until nodes) yield f(start + i * h))
    // Interior nodes carry full weight; the two endpoints carry half weight each.
    h * (s - (f(start) + f(end)) / 2.0)
  }

  /** Composite Simpson's rule approximation of the integral of `f` over `[start, end]`,
   *  assembled from the trapezoid result plus midpoint evaluations of each sub-interval.
   *
   * @param nodes number of equally spaced evaluation points (must be at least 2)
   * @throws IllegalArgumentException if fewer than two nodes are requested
   */
  def simpson(f: Double => Double, start: Double, end: Double, nodes: Int): Double = {
    if (nodes < 2)
      throw new IllegalArgumentException("When using simpson, you have to use at least two nodes.")

    val h = (end - start) / (nodes - 1)
    // Midpoints of the (nodes - 1) sub-intervals.
    val s = sum(for (i <- 0 until nodes - 1) yield f(start + (i + 0.5) * h))
    trapezoid(f, start, end, nodes) / 3.0 + s * 2 / 3.0 * h
  }

  /*
   * ODE functions return a sequence of states corresponding to each value in t.
   *
   * @param f a first order differential equation of the form dy = f(y, t)
   * @param y0 the initial values of the state y at initial time t(0)
   * @param t the times at which a calculation of state y is desired
   * @param relTol relative error tolerance values, must be same length as y0
   *               (null presumably selects the integrator's defaults — TODO confirm
   *               against DormandPrince54Integrator)
   * @param absTol absolute error tolerance values, must be same length as y0 (null as above)
   */
  def ode45(
      f: (DenseVector[Double], Double) => DenseVector[Double],
      y0: DenseVector[Double],
      t: Array[Double],
      relTol: DenseVector[Double] = null,
      absTol: DenseVector[Double] = null
    ): Array[DenseVector[Double]] = {

    // (0.0, 1.0) retained from the original; presumably the integrator's step-size
    // bounds — confirm against the DormandPrince54Integrator constructor.
    val integrator = new DormandPrince54Integrator(0.0, 1.0, relTol, absTol)
    integrator.integrate(f, y0, t)
  }
}
| scalanlp/breeze | math/src/main/scala/breeze/integrate/package.scala | Scala | apache-2.0 | 1,625 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.controller
import java.util.Properties
import java.util.concurrent.{CountDownLatch, LinkedBlockingQueue}
import com.yammer.metrics.Metrics
import com.yammer.metrics.core.Timer
import kafka.api.LeaderAndIsr
import kafka.server.{KafkaConfig, KafkaServer}
import kafka.utils.TestUtils
import kafka.zk._
import org.junit.{After, Before, Test}
import org.junit.Assert.{assertEquals, assertTrue}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.{ControllerMovedException, StaleBrokerEpochException}
import org.apache.log4j.Level
import kafka.utils.LogCaptureAppender
import org.apache.kafka.common.metrics.KafkaMetric
import org.scalatest.Assertions.fail
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.Seq
import scala.util.{Failure, Success, Try}
class ControllerIntegrationTest extends ZooKeeperTestHarness {
  // Brokers started by the current test; shut down in tearDown().
  var servers = Seq.empty[KafkaServer]
  // Epoch value expected after the very first controller election.
  val firstControllerEpoch = KafkaController.InitialControllerEpoch + 1
  // Matching ZooKeeper znode version for that first epoch.
  val firstControllerEpochZkVersion = KafkaController.InitialControllerEpochZkVersion + 1
  @Before
  override def setUp(): Unit = {
    super.setUp
    // Start each test with an empty broker list; tests create brokers via makeServers.
    servers = Seq.empty[KafkaServer]
  }
  @After
  override def tearDown(): Unit = {
    // Shut down all brokers created by the test before tearing down ZooKeeper.
    TestUtils.shutdownServers(servers)
    super.tearDown
  }
  @Test
  def testEmptyCluster(): Unit = {
    // A single broker should elect itself controller and record the first controller epoch.
    servers = makeServers(1)
    TestUtils.waitUntilTrue(() => zkClient.getControllerId.isDefined, "failed to elect a controller")
    waitUntilControllerEpoch(firstControllerEpoch, "broker failed to set controller epoch")
  }
  @Test
  def testControllerEpochPersistsWhenAllBrokersDown(): Unit = {
    servers = makeServers(1)
    TestUtils.waitUntilTrue(() => zkClient.getControllerId.isDefined, "failed to elect a controller")
    waitUntilControllerEpoch(firstControllerEpoch, "broker failed to set controller epoch")
    // Kill the only broker: the controller znode should disappear...
    servers.head.shutdown()
    servers.head.awaitShutdown()
    TestUtils.waitUntilTrue(() => !zkClient.getControllerId.isDefined, "failed to kill controller")
    // ...but the epoch recorded in ZooKeeper must survive the outage.
    waitUntilControllerEpoch(firstControllerEpoch, "controller epoch was not persisted after broker failure")
  }
  @Test
  def testControllerMoveIncrementsControllerEpoch(): Unit = {
    servers = makeServers(1)
    TestUtils.waitUntilTrue(() => zkClient.getControllerId.isDefined, "failed to elect a controller")
    waitUntilControllerEpoch(firstControllerEpoch, "broker failed to set controller epoch")
    // Bounce the broker so the controller moves (re-election on restart)...
    servers.head.shutdown()
    servers.head.awaitShutdown()
    servers.head.startup()
    TestUtils.waitUntilTrue(() => zkClient.getControllerId.isDefined, "failed to elect a controller")
    // ...which must bump the controller epoch by exactly one.
    waitUntilControllerEpoch(firstControllerEpoch + 1, "controller epoch was not incremented after controller move")
  }
  @Test
  def testMetadataPropagationOnControlPlane(): Unit = {
    // One broker with a dedicated CONTROLLER listener: controller traffic must flow over
    // the control plane while the PLAINTEXT data plane stays idle.
    servers = makeServers(1,
      listeners = Some("PLAINTEXT://localhost:0,CONTROLLER://localhost:0"),
      listenerSecurityProtocolMap = Some("PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT"),
      controlPlaneListenerName = Some("CONTROLLER"))
    TestUtils.waitUntilBrokerMetadataIsPropagated(servers)
    val controlPlaneMetricMap = mutable.Map[String, KafkaMetric]()
    val dataPlaneMetricMap = mutable.Map[String, KafkaMetric]()
    // Partition the broker's metrics by listener tag (CONTROLLER vs PLAINTEXT).
    servers.head.metrics.metrics().values().asScala.foreach { kafkaMetric =>
      if (kafkaMetric.metricName().tags().values().contains("CONTROLLER")) {
        controlPlaneMetricMap.put(kafkaMetric.metricName().name(), kafkaMetric)
      }
      if (kafkaMetric.metricName().tags().values().contains("PLAINTEXT")) {
        dataPlaneMetricMap.put(kafkaMetric.metricName().name(), kafkaMetric)
      }
    }
    // Exactly one request/response pair on the control plane, none on the data plane.
    assertEquals(1e-0, controlPlaneMetricMap("response-total").metricValue().asInstanceOf[Double], 0)
    assertEquals(0e-0, dataPlaneMetricMap("response-total").metricValue().asInstanceOf[Double], 0)
    assertEquals(1e-0, controlPlaneMetricMap("request-total").metricValue().asInstanceOf[Double], 0)
    assertEquals(0e-0, dataPlaneMetricMap("request-total").metricValue().asInstanceOf[Double], 0)
    assertTrue(controlPlaneMetricMap("incoming-byte-total").metricValue().asInstanceOf[Double] > 1.0)
    assertTrue(dataPlaneMetricMap("incoming-byte-total").metricValue().asInstanceOf[Double] == 0.0)
    assertTrue(controlPlaneMetricMap("network-io-total").metricValue().asInstanceOf[Double] == 2.0)
    assertTrue(dataPlaneMetricMap("network-io-total").metricValue().asInstanceOf[Double] == 0.0)
  }
  // This test case is used to ensure that there will be no correctness issue after we avoid sending out full
  // UpdateMetadataRequest to all brokers in the cluster
  @Test
  def testMetadataPropagationOnBrokerChange(): Unit = {
    servers = makeServers(3)
    TestUtils.waitUntilBrokerMetadataIsPropagated(servers)
    val controllerId = TestUtils.waitUntilControllerElected(zkClient)
    // Need to make sure the broker we shutdown and startup are not the controller. Otherwise we will send out
    // full UpdateMetadataRequest to all brokers during controller failover.
    val testBroker = servers.filter(e => e.config.brokerId != controllerId).head
    val remainingBrokers = servers.filter(_.config.brokerId != testBroker.config.brokerId)
    val topic = "topic1"
    // Make sure shutdown the test broker will not require any leadership change to test avoid sending out full
    // UpdateMetadataRequest on broker failure
    // Partition 0: testBroker is only a follower; partition 1: testBroker is not a replica at all.
    val assignment = Map(
      0 -> Seq(remainingBrokers(0).config.brokerId, testBroker.config.brokerId),
      1 -> remainingBrokers.map(_.config.brokerId))

    // Create topic
    TestUtils.createTopic(zkClient, topic, assignment, servers)

    // Shutdown the broker
    testBroker.shutdown()
    testBroker.awaitShutdown()
    TestUtils.waitUntilBrokerMetadataIsPropagated(remainingBrokers)
    // Survivors must see the test broker as offline for partition 0 and untouched partition 1.
    remainingBrokers.foreach { server =>
      val offlineReplicaPartitionInfo = server.metadataCache.getPartitionInfo(topic, 0).get
      assertEquals(1, offlineReplicaPartitionInfo.offlineReplicas.size())
      assertEquals(testBroker.config.brokerId, offlineReplicaPartitionInfo.offlineReplicas.get(0))
      assertEquals(assignment(0).asJava, offlineReplicaPartitionInfo.replicas)
      assertEquals(Seq(remainingBrokers.head.config.brokerId).asJava, offlineReplicaPartitionInfo.isr)
      val onlinePartitionInfo = server.metadataCache.getPartitionInfo(topic, 1).get
      assertEquals(assignment(1).asJava, onlinePartitionInfo.replicas)
      assertTrue(onlinePartitionInfo.offlineReplicas.isEmpty)
    }

    // Startup the broker
    testBroker.startup()
    // After restart, every broker's cache must show no offline replicas and the original assignment.
    TestUtils.waitUntilTrue( () => {
      !servers.exists { server =>
        assignment.exists { case (partitionId, replicas) =>
          val partitionInfoOpt = server.metadataCache.getPartitionInfo(topic, partitionId)
          if (partitionInfoOpt.isDefined) {
            val partitionInfo = partitionInfoOpt.get
            !partitionInfo.offlineReplicas.isEmpty || !partitionInfo.replicas.asScala.equals(replicas)
          } else {
            true
          }
        }
      }
    }, "Inconsistent metadata after broker startup")
  }
  @Test
  def testMetadataPropagationForOfflineReplicas(): Unit = {
    servers = makeServers(3)
    TestUtils.waitUntilBrokerMetadataIsPropagated(servers)
    val controllerId = TestUtils.waitUntilControllerElected(zkClient)

    //get brokerId for topic creation with single partition and RF =1
    val replicaBroker = servers.filter(e => e.config.brokerId != controllerId).head

    val controllerBroker = servers.filter(e => e.config.brokerId == controllerId).head
    val otherBroker = servers.filter(e => e.config.brokerId != controllerId &&
      e.config.brokerId != replicaBroker.config.brokerId).head

    val topic = "topic1"
    val assignment = Map(0 -> Seq(replicaBroker.config.brokerId))

    // Create topic
    TestUtils.createTopic(zkClient, topic, assignment, servers)

    // Take the whole cluster down, the sole replica last-but-one, controller last.
    // Shutdown the other broker
    otherBroker.shutdown()
    otherBroker.awaitShutdown()

    // Shutdown the broker with replica
    replicaBroker.shutdown()
    replicaBroker.awaitShutdown()

    //Shutdown controller broker
    controllerBroker.shutdown()
    controllerBroker.awaitShutdown()

    // On restart the broker's metadata cache must mark the partition leaderless (-1)
    // with its replica reported offline, while replicas/isr stay populated.
    def verifyMetadata(broker: KafkaServer): Unit = {
      broker.startup()
      TestUtils.waitUntilTrue(() => {
        val partitionInfoOpt = broker.metadataCache.getPartitionInfo(topic, 0)
        if (partitionInfoOpt.isDefined) {
          val partitionInfo = partitionInfoOpt.get
          (!partitionInfo.offlineReplicas.isEmpty && partitionInfo.leader == -1
            && !partitionInfo.replicas.isEmpty && !partitionInfo.isr.isEmpty)
        } else {
          false
        }
      }, "Inconsistent metadata after broker startup")
    }

    //Start controller broker and check metadata
    verifyMetadata(controllerBroker)

    //Start other broker and check metadata
    verifyMetadata(otherBroker)
  }
@Test
def testTopicCreation(): Unit = {
servers = makeServers(1)
val tp = new TopicPartition("t", 0)
val assignment = Map(tp.partition -> Seq(0))
TestUtils.createTopic(zkClient, tp.topic, partitionReplicaAssignment = assignment, servers = servers)
waitForPartitionState(tp, firstControllerEpoch, 0, LeaderAndIsr.initialLeaderEpoch,
"failed to get expected partition state upon topic creation")
}
@Test
def testTopicCreationWithOfflineReplica(): Unit = {
servers = makeServers(2)
val controllerId = TestUtils.waitUntilControllerElected(zkClient)
val otherBrokerId = servers.map(_.config.brokerId).filter(_ != controllerId).head
servers(otherBrokerId).shutdown()
servers(otherBrokerId).awaitShutdown()
val tp = new TopicPartition("t", 0)
val assignment = Map(tp.partition -> Seq(otherBrokerId, controllerId))
TestUtils.createTopic(zkClient, tp.topic, partitionReplicaAssignment = assignment, servers = servers.take(1))
waitForPartitionState(tp, firstControllerEpoch, controllerId, LeaderAndIsr.initialLeaderEpoch,
"failed to get expected partition state upon topic creation")
}
@Test
def testTopicPartitionExpansion(): Unit = {
servers = makeServers(1)
val tp0 = new TopicPartition("t", 0)
val tp1 = new TopicPartition("t", 1)
val assignment = Map(tp0.partition -> Seq(0))
val expandedAssignment = Map(
tp0 -> ReplicaAssignment(Seq(0), Seq(), Seq()),
tp1 -> ReplicaAssignment(Seq(0), Seq(), Seq()))
TestUtils.createTopic(zkClient, tp0.topic, partitionReplicaAssignment = assignment, servers = servers)
zkClient.setTopicAssignment(tp0.topic, expandedAssignment, firstControllerEpochZkVersion)
waitForPartitionState(tp1, firstControllerEpoch, 0, LeaderAndIsr.initialLeaderEpoch,
"failed to get expected partition state upon topic partition expansion")
TestUtils.waitUntilMetadataIsPropagated(servers, tp1.topic, tp1.partition)
}
  @Test
  // Partition expansion must complete even when one replica is offline; the new
  // partition gets the online broker (the controller) as leader, and metadata is
  // only expected to reach the live broker.
  def testTopicPartitionExpansionWithOfflineReplica(): Unit = {
    servers = makeServers(2)
    val controllerId = TestUtils.waitUntilControllerElected(zkClient)
    val otherBrokerId = servers.map(_.config.brokerId).filter(_ != controllerId).head
    val tp0 = new TopicPartition("t", 0)
    val tp1 = new TopicPartition("t", 1)
    val assignment = Map(tp0.partition -> Seq(otherBrokerId, controllerId))
    val expandedAssignment = Map(
      tp0 -> ReplicaAssignment(Seq(otherBrokerId, controllerId), Seq(), Seq()),
      tp1 -> ReplicaAssignment(Seq(otherBrokerId, controllerId), Seq(), Seq()))
    TestUtils.createTopic(zkClient, tp0.topic, partitionReplicaAssignment = assignment, servers = servers)
    // Take the non-controller broker offline before the expansion is written.
    servers(otherBrokerId).shutdown()
    servers(otherBrokerId).awaitShutdown()
    zkClient.setTopicAssignment(tp0.topic, expandedAssignment, firstControllerEpochZkVersion)
    waitForPartitionState(tp1, firstControllerEpoch, controllerId, LeaderAndIsr.initialLeaderEpoch,
      "failed to get expected partition state upon topic partition expansion")
    // Only the controller broker is up, so only check propagation there.
    TestUtils.waitUntilMetadataIsPropagated(Seq(servers(controllerId)), tp1.topic, tp1.partition)
  }
  @Test
  // Reassigning a partition from the controller broker to the other broker should
  // complete: leadership moves, the topic znode is updated, the in-progress path is
  // removed, and the reassignment rate-and-time metric is ticked.
  def testPartitionReassignment(): Unit = {
    servers = makeServers(2)
    val controllerId = TestUtils.waitUntilControllerElected(zkClient)
    val metricName = s"kafka.controller:type=ControllerStats,name=${ControllerState.AlterPartitionReassignment.rateAndTimeMetricName.get}"
    // Snapshot the timer count so we can assert it increased after the reassignment.
    val timerCount = timer(metricName).count
    val otherBrokerId = servers.map(_.config.brokerId).filter(_ != controllerId).head
    val tp = new TopicPartition("t", 0)
    val assignment = Map(tp.partition -> Seq(controllerId))
    val reassignment = Map(tp -> ReplicaAssignment(Seq(otherBrokerId), List(), List()))
    TestUtils.createTopic(zkClient, tp.topic, partitionReplicaAssignment = assignment, servers = servers)
    zkClient.createPartitionReassignment(reassignment.mapValues(_.replicas).toMap)
    // The leader epoch is bumped multiple times while the reassignment runs,
    // hence the expected initialLeaderEpoch + 3.
    waitForPartitionState(tp, firstControllerEpoch, otherBrokerId, LeaderAndIsr.initialLeaderEpoch + 3,
      "failed to get expected partition state after partition reassignment")
    TestUtils.waitUntilTrue(() => zkClient.getFullReplicaAssignmentForTopics(Set(tp.topic)) == reassignment,
      "failed to get updated partition assignment on topic znode after partition reassignment")
    TestUtils.waitUntilTrue(() => !zkClient.reassignPartitionsInProgress(),
      "failed to remove reassign partitions path after completion")
    val updatedTimerCount = timer(metricName).count
    assertTrue(s"Timer count $updatedTimerCount should be greater than $timerCount", updatedTimerCount > timerCount)
  }
  @Test
  // A reassignment targeting an offline broker must stall: the partition keeps its
  // current leader and the reassignment path stays in ZooKeeper.
  def testPartitionReassignmentWithOfflineReplicaHaltingProgress(): Unit = {
    servers = makeServers(2)
    val controllerId = TestUtils.waitUntilControllerElected(zkClient)
    val otherBrokerId = servers.map(_.config.brokerId).filter(_ != controllerId).head
    val tp = new TopicPartition("t", 0)
    val assignment = Map(tp.partition -> Seq(controllerId))
    // Move the single replica onto the broker we are about to shut down.
    val reassignment = Map(tp -> Seq(otherBrokerId))
    TestUtils.createTopic(zkClient, tp.topic, partitionReplicaAssignment = assignment, servers = servers)
    servers(otherBrokerId).shutdown()
    servers(otherBrokerId).awaitShutdown()
    val controller = getController()
    zkClient.setOrCreatePartitionReassignment(reassignment, controller.kafkaController.controllerContext.epochZkVersion)
    // Leader stays on the controller; only the epoch is bumped when the
    // reassignment starts.
    waitForPartitionState(tp, firstControllerEpoch, controllerId, LeaderAndIsr.initialLeaderEpoch + 1,
      "failed to get expected partition state during partition reassignment with offline replica")
    TestUtils.waitUntilTrue(() => zkClient.reassignPartitionsInProgress(),
      "partition reassignment path should remain while reassignment in progress")
  }
  @Test
  // A reassignment that stalled on an offline target broker must resume and finish
  // once that broker comes back online.
  def testPartitionReassignmentResumesAfterReplicaComesOnline(): Unit = {
    servers = makeServers(2)
    val controllerId = TestUtils.waitUntilControllerElected(zkClient)
    val otherBrokerId = servers.map(_.config.brokerId).filter(_ != controllerId).head
    val tp = new TopicPartition("t", 0)
    val assignment = Map(tp.partition -> Seq(controllerId))
    val reassignment = Map(tp -> ReplicaAssignment(Seq(otherBrokerId), List(), List()))
    TestUtils.createTopic(zkClient, tp.topic, partitionReplicaAssignment = assignment, servers = servers)
    // Target broker goes down before the reassignment is requested → it stalls.
    servers(otherBrokerId).shutdown()
    servers(otherBrokerId).awaitShutdown()
    zkClient.createPartitionReassignment(reassignment.mapValues(_.replicas).toMap)
    waitForPartitionState(tp, firstControllerEpoch, controllerId, LeaderAndIsr.initialLeaderEpoch + 1,
      "failed to get expected partition state during partition reassignment with offline replica")
    // Bringing the target broker back lets the reassignment run to completion;
    // the leader epoch ends at initialLeaderEpoch + 4 after the additional bumps.
    servers(otherBrokerId).startup()
    waitForPartitionState(tp, firstControllerEpoch, otherBrokerId, LeaderAndIsr.initialLeaderEpoch + 4,
      "failed to get expected partition state after partition reassignment")
    TestUtils.waitUntilTrue(() => zkClient.getFullReplicaAssignmentForTopics(Set(tp.topic)) == reassignment,
      "failed to get updated partition assignment on topic znode after partition reassignment")
    TestUtils.waitUntilTrue(() => !zkClient.reassignPartitionsInProgress(),
      "failed to remove reassign partitions path after completion")
  }
  @Test
  // A single preferred-replica leader election round-trip: bounce the preferred
  // (non-controller) broker and verify leadership returns to it after the election.
  def testPreferredReplicaLeaderElection(): Unit = {
    servers = makeServers(2)
    val controllerId = TestUtils.waitUntilControllerElected(zkClient)
    val otherBroker = servers.find(_.config.brokerId != controllerId).get
    val tp = new TopicPartition("t", 0)
    // The non-controller broker is listed first, i.e. it is the preferred replica.
    val assignment = Map(tp.partition -> Seq(otherBroker.config.brokerId, controllerId))
    TestUtils.createTopic(zkClient, tp.topic, partitionReplicaAssignment = assignment, servers = servers)
    preferredReplicaLeaderElection(controllerId, otherBroker, tp, assignment(tp.partition).toSet, LeaderAndIsr.initialLeaderEpoch)
  }
  @Test
  // Two consecutive preferred-replica election round-trips; each round-trip bumps
  // the leader epoch twice, so the second round starts at initialLeaderEpoch + 2.
  def testBackToBackPreferredReplicaLeaderElections(): Unit = {
    servers = makeServers(2)
    val controllerId = TestUtils.waitUntilControllerElected(zkClient)
    val otherBroker = servers.find(_.config.brokerId != controllerId).get
    val tp = new TopicPartition("t", 0)
    val assignment = Map(tp.partition -> Seq(otherBroker.config.brokerId, controllerId))
    TestUtils.createTopic(zkClient, tp.topic, partitionReplicaAssignment = assignment, servers = servers)
    preferredReplicaLeaderElection(controllerId, otherBroker, tp, assignment(tp.partition).toSet, LeaderAndIsr.initialLeaderEpoch)
    preferredReplicaLeaderElection(controllerId, otherBroker, tp, assignment(tp.partition).toSet, LeaderAndIsr.initialLeaderEpoch + 2)
  }
  @Test
  // If the preferred replica is offline, the controller must give up on the
  // election (removing the election znode) and keep the current online leader.
  def testPreferredReplicaLeaderElectionWithOfflinePreferredReplica(): Unit = {
    servers = makeServers(2)
    val controllerId = TestUtils.waitUntilControllerElected(zkClient)
    val otherBrokerId = servers.map(_.config.brokerId).filter(_ != controllerId).head
    val tp = new TopicPartition("t", 0)
    // The non-controller broker is the preferred replica...
    val assignment = Map(tp.partition -> Seq(otherBrokerId, controllerId))
    TestUtils.createTopic(zkClient, tp.topic, partitionReplicaAssignment = assignment, servers = servers)
    // ...and we shut it down before requesting the election.
    servers(otherBrokerId).shutdown()
    servers(otherBrokerId).awaitShutdown()
    zkClient.createPreferredReplicaElection(Set(tp))
    TestUtils.waitUntilTrue(() => !zkClient.pathExists(PreferredReplicaElectionZNode.path),
      "failed to remove preferred replica leader election path after giving up")
    waitForPartitionState(tp, firstControllerEpoch, controllerId, LeaderAndIsr.initialLeaderEpoch + 1,
      "failed to get expected partition state upon broker shutdown")
  }
@Test
def testAutoPreferredReplicaLeaderElection(): Unit = {
servers = makeServers(2, autoLeaderRebalanceEnable = true)
val controllerId = TestUtils.waitUntilControllerElected(zkClient)
val otherBrokerId = servers.map(_.config.brokerId).filter(_ != controllerId).head
val tp = new TopicPartition("t", 0)
val assignment = Map(tp.partition -> Seq(1, 0))
TestUtils.createTopic(zkClient, tp.topic, partitionReplicaAssignment = assignment, servers = servers)
servers(otherBrokerId).shutdown()
servers(otherBrokerId).awaitShutdown()
waitForPartitionState(tp, firstControllerEpoch, controllerId, LeaderAndIsr.initialLeaderEpoch + 1,
"failed to get expected partition state upon broker shutdown")
servers(otherBrokerId).startup()
waitForPartitionState(tp, firstControllerEpoch, otherBrokerId, LeaderAndIsr.initialLeaderEpoch + 2,
"failed to get expected partition state upon broker startup")
}
  @Test
  // When the whole ISR goes offline and unclean election is disabled, the partition
  // must end up leaderless (NoLeader) while the ISR still records the last member.
  def testLeaderAndIsrWhenEntireIsrOfflineAndUncleanLeaderElectionDisabled(): Unit = {
    servers = makeServers(2)
    val controllerId = TestUtils.waitUntilControllerElected(zkClient)
    val otherBrokerId = servers.map(_.config.brokerId).filter(_ != controllerId).head
    val tp = new TopicPartition("t", 0)
    // The single replica — the entire ISR — lives on the non-controller broker.
    val assignment = Map(tp.partition -> Seq(otherBrokerId))
    TestUtils.createTopic(zkClient, tp.topic, partitionReplicaAssignment = assignment, servers = servers)
    waitForPartitionState(tp, firstControllerEpoch, otherBrokerId, LeaderAndIsr.initialLeaderEpoch,
      "failed to get expected partition state upon topic creation")
    servers(otherBrokerId).shutdown()
    servers(otherBrokerId).awaitShutdown()
    TestUtils.waitUntilTrue(() => {
      val leaderIsrAndControllerEpochMap = zkClient.getTopicPartitionStates(Seq(tp))
      leaderIsrAndControllerEpochMap.contains(tp) &&
        isExpectedPartitionState(leaderIsrAndControllerEpochMap(tp), firstControllerEpoch, LeaderAndIsr.NoLeader, LeaderAndIsr.initialLeaderEpoch + 1) &&
        leaderIsrAndControllerEpochMap(tp).leaderAndIsr.isr == List(otherBrokerId)
    }, "failed to get expected partition state after entire isr went offline")
  }
@Test
def testLeaderAndIsrWhenEntireIsrOfflineAndUncleanLeaderElectionEnabled(): Unit = {
servers = makeServers(2, uncleanLeaderElectionEnable = true)
val controllerId = TestUtils.waitUntilControllerElected(zkClient)
val otherBrokerId = servers.map(_.config.brokerId).filter(_ != controllerId).head
val tp = new TopicPartition("t", 0)
val assignment = Map(tp.partition -> Seq(otherBrokerId))
TestUtils.createTopic(zkClient, tp.topic, partitionReplicaAssignment = assignment, servers = servers)
waitForPartitionState(tp, firstControllerEpoch, otherBrokerId, LeaderAndIsr.initialLeaderEpoch,
"failed to get expected partition state upon topic creation")
servers(1).shutdown()
servers(1).awaitShutdown()
TestUtils.waitUntilTrue(() => {
val leaderIsrAndControllerEpochMap = zkClient.getTopicPartitionStates(Seq(tp))
leaderIsrAndControllerEpochMap.contains(tp) &&
isExpectedPartitionState(leaderIsrAndControllerEpochMap(tp), firstControllerEpoch, LeaderAndIsr.NoLeader, LeaderAndIsr.initialLeaderEpoch + 1) &&
leaderIsrAndControllerEpochMap(tp).leaderAndIsr.isr == List(otherBrokerId)
}, "failed to get expected partition state after entire isr went offline")
}
  @Test
  // Exercises controlled shutdown of brokers 2, then 1, then 0 for a partition
  // replicated on all three. Each shutdown should move leadership off the departing
  // broker until only replica 0 remains; shutting down the last replica leaves one
  // partition unmoved.
  def testControlledShutdown(): Unit = {
    val expectedReplicaAssignment = Map(0  -> List(0, 1, 2))
    val topic = "test"
    val partition = 0
    // create brokers
    val serverConfigs = TestUtils.createBrokerConfigs(3, zkConnect, false).map(KafkaConfig.fromProps)
    servers = serverConfigs.reverseMap(s => TestUtils.createServer(s))
    // create the topic
    TestUtils.createTopic(zkClient, topic, partitionReplicaAssignment = expectedReplicaAssignment, servers = servers)
    val controllerId = zkClient.getControllerId.get
    val controller = servers.find(p => p.config.brokerId == controllerId).get.kafkaController
    // The shutdown API is callback-based; funnel results through a queue so the
    // test can block on them synchronously.
    val resultQueue = new LinkedBlockingQueue[Try[collection.Set[TopicPartition]]]()
    val controlledShutdownCallback = (controlledShutdownResult: Try[collection.Set[TopicPartition]]) => resultQueue.put(controlledShutdownResult)
    // Shut down broker 2 first.
    controller.controlledShutdown(2, servers.find(_.config.brokerId == 2).get.kafkaController.brokerEpoch, controlledShutdownCallback)
    var partitionsRemaining = resultQueue.take().get
    var activeServers = servers.filter(s => s.config.brokerId != 2)
    // wait for the update metadata request to trickle to the brokers
    TestUtils.waitUntilTrue(() =>
      activeServers.forall(_.dataPlaneRequestProcessor.metadataCache.getPartitionInfo(topic,partition).get.isr.size != 3),
      "Topic test not created after timeout")
    assertEquals(0, partitionsRemaining.size)
    var partitionStateInfo = activeServers.head.dataPlaneRequestProcessor.metadataCache.getPartitionInfo(topic,partition).get
    var leaderAfterShutdown = partitionStateInfo.leader
    assertEquals(0, leaderAfterShutdown)
    assertEquals(2, partitionStateInfo.isr.size)
    assertEquals(List(0,1), partitionStateInfo.isr.asScala)
    // Next, shut down broker 1; leadership must remain on broker 0.
    controller.controlledShutdown(1, servers.find(_.config.brokerId == 1).get.kafkaController.brokerEpoch, controlledShutdownCallback)
    partitionsRemaining = resultQueue.take() match {
      case Success(partitions) => partitions
      case Failure(exception) => fail("Controlled shutdown failed due to error", exception)
    }
    assertEquals(0, partitionsRemaining.size)
    activeServers = servers.filter(s => s.config.brokerId == 0)
    partitionStateInfo = activeServers.head.dataPlaneRequestProcessor.metadataCache.getPartitionInfo(topic,partition).get
    leaderAfterShutdown = partitionStateInfo.leader
    assertEquals(0, leaderAfterShutdown)
    assertTrue(servers.forall(_.dataPlaneRequestProcessor.metadataCache.getPartitionInfo(topic,partition).get.leader == 0))
    // Finally, shut down broker 0 — the last replica — so one partition remains.
    controller.controlledShutdown(0, servers.find(_.config.brokerId == 0).get.kafkaController.brokerEpoch, controlledShutdownCallback)
    partitionsRemaining = resultQueue.take().get
    assertEquals(1, partitionsRemaining.size)
    // leader doesn't change since all the replicas are shut down
    assertTrue(servers.forall(_.dataPlaneRequestProcessor.metadataCache.getPartitionInfo(topic,partition).get.leader == 0))
  }
  @Test
  // A controlled-shutdown request carrying a stale broker epoch must be rejected
  // with a StaleBrokerEpochException delivered through the callback.
  def testControllerRejectControlledShutdownRequestWithStaleBrokerEpoch(): Unit = {
    // create brokers
    val serverConfigs = TestUtils.createBrokerConfigs(2, zkConnect, false).map(KafkaConfig.fromProps)
    servers = serverConfigs.reverseMap(s => TestUtils.createServer(s))
    val controller = getController().kafkaController
    val otherBroker = servers.find(e => e.config.brokerId != controller.config.brokerId).get
    // Set from the async callback thread, hence @volatile.
    @volatile var staleBrokerEpochDetected = false
    // Deliberately pass (current epoch - 1) to simulate a stale broker epoch.
    controller.controlledShutdown(otherBroker.config.brokerId, otherBroker.kafkaController.brokerEpoch - 1, {
      case scala.util.Failure(exception) if exception.isInstanceOf[StaleBrokerEpochException] => staleBrokerEpochDetected = true
      case _ =>
    })
    TestUtils.waitUntilTrue(() => staleBrokerEpochDetected, "Fail to detect stale broker epoch")
  }
  @Test
  // A controller move that races with topic creation must make the old controller
  // resign cleanly (see testControllerMove for the simulated race).
  def testControllerMoveOnTopicCreation(): Unit = {
    servers = makeServers(1)
    TestUtils.waitUntilControllerElected(zkClient)
    val tp = new TopicPartition("t", 0)
    val assignment = Map(tp.partition -> Seq(0))
    testControllerMove(() => {
      val adminZkClient = new AdminZkClient(zkClient)
      adminZkClient.createTopicWithAssignment(tp.topic, config = new Properties(), assignment)
    })
  }
  @Test
  // A controller move that races with topic deletion must make the old controller
  // resign cleanly (see testControllerMove for the simulated race).
  def testControllerMoveOnTopicDeletion(): Unit = {
    servers = makeServers(1)
    TestUtils.waitUntilControllerElected(zkClient)
    val tp = new TopicPartition("t", 0)
    val assignment = Map(tp.partition -> Seq(0))
    TestUtils.createTopic(zkClient, tp.topic(), assignment, servers)
    testControllerMove(() => {
      val adminZkClient = new AdminZkClient(zkClient)
      adminZkClient.deleteTopic(tp.topic())
    })
  }
@Test
def testControllerMoveOnPreferredReplicaElection(): Unit = {
servers = makeServers(1)
val tp = new TopicPartition("t", 0)
val assignment = Map(tp.partition -> Seq(0))
TestUtils.createTopic(zkClient, tp.topic(), assignment, servers)
testControllerMove(() => zkClient.createPreferredReplicaElection(Set(tp)))
}
  @Test
  // A controller move that races with a partition reassignment request must make
  // the old controller resign cleanly (see testControllerMove).
  def testControllerMoveOnPartitionReassignment(): Unit = {
    servers = makeServers(1)
    TestUtils.waitUntilControllerElected(zkClient)
    val tp = new TopicPartition("t", 0)
    val assignment = Map(tp.partition -> Seq(0))
    TestUtils.createTopic(zkClient, tp.topic(), assignment, servers)
    // No-op move (same replica list); the point is only to trigger the event.
    val reassignment = Map(tp -> Seq(0))
    testControllerMove(() => zkClient.createPartitionReassignment(reassignment))
  }
  @Test
  // A broker that bounces faster than the controller can process the change must
  // still be re-initialized correctly once the controller catches up.
  def testControllerDetectsBouncedBrokers(): Unit = {
    servers = makeServers(2, enableControlledShutdown = false)
    val controller = getController().kafkaController
    val otherBroker = servers.find(e => e.config.brokerId != controller.config.brokerId).get
    // Create a topic
    val tp = new TopicPartition("t", 0)
    val assignment = Map(tp.partition -> Seq(0, 1))
    TestUtils.createTopic(zkClient, tp.topic, partitionReplicaAssignment = assignment, servers = servers)
    waitForPartitionState(tp, firstControllerEpoch, 0, LeaderAndIsr.initialLeaderEpoch,
      "failed to get expected partition state upon topic creation")
    // Wait until the event thread is idle
    TestUtils.waitUntilTrue(() => {
      controller.eventManager.state == ControllerState.Idle
    }, "Controller event thread is still busy")
    val latch = new CountDownLatch(1)
    // Let the controller event thread await on a latch until broker bounce finishes.
    // This is used to simulate fast broker bounce
    controller.eventManager.put(new MockEvent(ControllerState.TopicChange) {
      override def process(): Unit = latch.await()
    })
    otherBroker.shutdown()
    otherBroker.startup()
    // The controller is still blocked, so the bounced broker has no partitions yet.
    assertEquals(0, otherBroker.replicaManager.partitionCount.value())
    // Release the latch so that controller can process broker change event
    latch.countDown()
    TestUtils.waitUntilTrue(() => {
      otherBroker.replicaManager.partitionCount.value() == 1 &&
      otherBroker.replicaManager.metadataCache.getAllTopics().size == 1 &&
      otherBroker.replicaManager.metadataCache.getAliveBrokers.size == 2
    }, "Broker fail to initialize after restart")
  }
private def testControllerMove(fun: () => Unit): Unit = {
val controller = getController().kafkaController
val appender = LogCaptureAppender.createAndRegister()
val previousLevel = LogCaptureAppender.setClassLoggerLevel(controller.getClass, Level.INFO)
try {
TestUtils.waitUntilTrue(() => {
controller.eventManager.state == ControllerState.Idle
}, "Controller event thread is still busy")
val latch = new CountDownLatch(1)
// Let the controller event thread await on a latch before the pre-defined logic is triggered.
// This is used to make sure that when the event thread resumes and starts processing events, the controller has already moved.
controller.eventManager.put(new MockEvent(ControllerState.TopicChange) {
override def process(): Unit = latch.await()
})
// Execute pre-defined logic. This can be topic creation/deletion, preferred leader election, etc.
fun()
// Delete the controller path, re-create /controller znode to emulate controller movement
zkClient.deleteController(controller.controllerContext.epochZkVersion)
zkClient.registerControllerAndIncrementControllerEpoch(servers.size)
// Resume the controller event thread. At this point, the controller should see mismatch controller epoch zkVersion and resign
latch.countDown()
TestUtils.waitUntilTrue(() => !controller.isActive, "Controller fails to resign")
// Expect to capture the ControllerMovedException in the log of ControllerEventThread
val event = appender.getMessages.find(e => e.getLevel == Level.INFO
&& e.getThrowableInformation != null
&& e.getThrowableInformation.getThrowable.getClass.getName.equals(classOf[ControllerMovedException].getName))
assertTrue(event.isDefined)
} finally {
LogCaptureAppender.unregister(appender)
LogCaptureAppender.setClassLoggerLevel(controller.eventManager.thread.getClass, previousLevel)
}
}
  /**
   * Shared round-trip for the preferred-replica election tests: bounce
   * `otherBroker` (the preferred replica), trigger an election via ZooKeeper, and
   * verify leadership falls to the controller on shutdown (leaderEpoch + 1) and
   * returns to the preferred replica after the election (leaderEpoch + 2).
   */
  private def preferredReplicaLeaderElection(controllerId: Int, otherBroker: KafkaServer, tp: TopicPartition,
                                             replicas: Set[Int], leaderEpoch: Int): Unit = {
    otherBroker.shutdown()
    otherBroker.awaitShutdown()
    waitForPartitionState(tp, firstControllerEpoch, controllerId, leaderEpoch + 1,
      "failed to get expected partition state upon broker shutdown")
    otherBroker.startup()
    // Wait for the restarted broker to re-join the ISR before requesting the election.
    TestUtils.waitUntilTrue(() => zkClient.getInSyncReplicasForPartition(new TopicPartition(tp.topic, tp.partition)).get.toSet == replicas, "restarted broker failed to join in-sync replicas")
    zkClient.createPreferredReplicaElection(Set(tp))
    TestUtils.waitUntilTrue(() => !zkClient.pathExists(PreferredReplicaElectionZNode.path),
      "failed to remove preferred replica leader election path after completion")
    waitForPartitionState(tp, firstControllerEpoch, otherBroker.config.brokerId, leaderEpoch + 2,
      "failed to get expected partition state upon broker startup")
  }
private def waitUntilControllerEpoch(epoch: Int, message: String): Unit = {
TestUtils.waitUntilTrue(() => zkClient.getControllerEpoch.map(_._1).contains(epoch) , message)
}
private def waitForPartitionState(tp: TopicPartition,
controllerEpoch: Int,
leader: Int,
leaderEpoch: Int,
message: String): Unit = {
TestUtils.waitUntilTrue(() => {
val leaderIsrAndControllerEpochMap = zkClient.getTopicPartitionStates(Seq(tp))
leaderIsrAndControllerEpochMap.contains(tp) &&
isExpectedPartitionState(leaderIsrAndControllerEpochMap(tp), controllerEpoch, leader, leaderEpoch)
}, message)
}
private def isExpectedPartitionState(leaderIsrAndControllerEpoch: LeaderIsrAndControllerEpoch,
controllerEpoch: Int,
leader: Int,
leaderEpoch: Int) =
leaderIsrAndControllerEpoch.controllerEpoch == controllerEpoch &&
leaderIsrAndControllerEpoch.leaderAndIsr.leader == leader &&
leaderIsrAndControllerEpoch.leaderAndIsr.leaderEpoch == leaderEpoch
  /**
   * Creates and starts `numConfigs` brokers against the test ZooKeeper, applying
   * the given controller-related toggles to every broker config.
   *
   * @param autoLeaderRebalanceEnable    enables automatic preferred-replica rebalancing
   * @param uncleanLeaderElectionEnable  allows out-of-ISR replicas to become leader
   * @param enableControlledShutdown     whether brokers perform controlled shutdown
   * @param listeners                    optional listeners override
   * @param listenerSecurityProtocolMap  optional listener→protocol map override
   * @param controlPlaneListenerName     optional dedicated control-plane listener
   */
  private def makeServers(numConfigs: Int,
                          autoLeaderRebalanceEnable: Boolean = false,
                          uncleanLeaderElectionEnable: Boolean = false,
                          enableControlledShutdown: Boolean = true,
                          listeners : Option[String] = None,
                          listenerSecurityProtocolMap : Option[String] = None,
                          controlPlaneListenerName : Option[String] = None) = {
    val configs = TestUtils.createBrokerConfigs(numConfigs, zkConnect, enableControlledShutdown = enableControlledShutdown)
    configs.foreach { config =>
      config.setProperty(KafkaConfig.AutoLeaderRebalanceEnableProp, autoLeaderRebalanceEnable.toString)
      config.setProperty(KafkaConfig.UncleanLeaderElectionEnableProp, uncleanLeaderElectionEnable.toString)
      // Short imbalance-check interval so rebalance tests complete quickly.
      config.setProperty(KafkaConfig.LeaderImbalanceCheckIntervalSecondsProp, "1")
      listeners.foreach(listener => config.setProperty(KafkaConfig.ListenersProp, listener))
      listenerSecurityProtocolMap.foreach(listenerMap => config.setProperty(KafkaConfig.ListenerSecurityProtocolMapProp, listenerMap))
      controlPlaneListenerName.foreach(controlPlaneListener => config.setProperty(KafkaConfig.ControlPlaneListenerNameProp, controlPlaneListener))
    }
    configs.map(config => TestUtils.createServer(KafkaConfig.fromProps(config)))
  }
private def timer(metricName: String): Timer = {
Metrics.defaultRegistry.allMetrics.asScala.filterKeys(_.getMBeanName == metricName).values.headOption
.getOrElse(fail(s"Unable to find metric $metricName")).asInstanceOf[Timer]
}
private def getController(): KafkaServer = {
val controllerId = TestUtils.waitUntilControllerElected(zkClient)
servers.filter(s => s.config.brokerId == controllerId).head
}
}
| noslowerdna/kafka | core/src/test/scala/unit/kafka/controller/ControllerIntegrationTest.scala | Scala | apache-2.0 | 36,005 |
package com.equalinformation.scala.programs.algo
/**
* Created by bpupadhyaya on 6/16/16.
*/
object Factorial {

  /** Demo entry point: prints 5! (120). */
  def main(args: Array[String]): Unit = {
    println(factorial(5))
  }

  /**
   * Computes x! using an accumulator so the recursion is tail-recursive and
   * cannot blow the stack for large inputs (the previous version recursed
   * non-tail with one frame per multiplication).
   *
   * Behavior for negative x is unchanged: it does not terminate normally, as in
   * the original definition.
   *
   * @param x value whose factorial is computed (expected non-negative)
   * @return x! as a BigInt
   */
  def factorial(x: BigInt): BigInt = {
    import scala.annotation.tailrec
    @tailrec
    def loop(n: BigInt, acc: BigInt): BigInt =
      if (n == 0) acc else loop(n - 1, n * acc)
    loop(x, 1)
  }
}
| bpupadhyaya/scala-programs-collection | scala-programs-collection/src/main/scala/com/equalinformation/scala/programs/algo/Factorial.scala | Scala | apache-2.0 | 283 |
import com.example.FactoryBoy
import org.scalatest._
class FactorySpec extends FlatSpec with Matchers {

  "FactoryBoy" should "calc squares " in {
    val square: Int => Int = n => n * n
    val squareFactory = FactoryBoy(square)
    squareFactory.calc(3) should be (9)
  }

  "FactoryBoy" should "calc cubes " in {
    val cube: Int => Int = n => n * n * n
    val cubeFactory = FactoryBoy(cube)
    cubeFactory.calc(3) should be (27)
  }
}
package counters.mem
import java.util.concurrent.Executors
import counters.adder.LongAdderCounter
import counters.striped.FixedSizeStripedLongCounterV8
import minmaxcounters.LongAdderWithPhaser
import org.jctools.counters.{Counter, FixedSizeStripedLongCounter}
/** Runnable that bumps the shared counter in a tight, never-ending loop. */
class Incrementer(counter: Counter) extends Runnable {
  override def run(): Unit = {
    while (true) counter.increment()
  }
}
/** Runnable that samples and prints the counter value 100000 times, pausing 100 ms between samples. */
class Geter(counter: Counter) extends Runnable {
  override def run(): Unit = {
    (1 to 100000).foreach { _ =>
      println(counter.get() + " " + Thread.currentThread().getName)
      Thread.sleep(100)
    }
  }
}
//object MemTest extends App {
// val pool = Executors.newFixedThreadPool(50)
//
// val counters = Seq.newBuilder[Counter]
//// for(_ <- 1 to 2) {
//// counters += FixedSizeStripedLongCounterV8()
//// counters += new LongAdderCounter()
// counters += new LongAdderWithPhaser()
//// }
//
// counters.result().foreach { counter =>
// for (_ <- 1 to 500) {
// pool.submit(new Incrementer(counter))
// pool.submit(new Geter(counter))
// }
// }
//
// Thread.sleep(90000)
// pool.shutdownNow()
//}
| dpsoft/Counters | src/main/scala/counters/mem/MemTest.scala | Scala | apache-2.0 | 1,170 |
package monocle.std
/** Single import point (`monocle.std.all._`) for every optic instance below. */
object all extends StdInstances

/**
 * Aggregates the per-type optics traits so one mixin brings all instances for
 * standard-library and scalaz types into scope. Trait order is part of the
 * linearization; do not reorder casually.
 */
trait StdInstances
  extends BigDecimalOptics
  with BigIntOptics
  with ByteOptics
  with CharOptics
  with DoubleOptics
  with EitherOptics
  with FunctionOptics
  with IntOptics
  with ListOptics
  with LongOptics
  with MapOptics
  with MaybeOptics
  with OptionOptics
  with StringOptics
  with Tuple1Optics
  // Scalaz Instances
  with CofreeOptics
  with Either3Optics
  with DisjunctionOptics
  with TheseOptics
  with IListInstances
  with IMapOptics
  with NonEmptyListOptics
  with TreeOptics
  with ValidationOptics
| rperry/Monocle | core/shared/src/main/scala/monocle/std/All.scala | Scala | mit | 651 |
package cms
import play.api.Application
import play.api.Play.current
import com.mongodb.casbah.Imports._
import com.mongodb.casbah.MongoURI
import com.novus.salat._
import com.novus.salat.global._
import cms.dto.Entry
import cache.Cache._
import cms.dto.EntryType
object ContentManager {

  /** Fallback language code for entries. */
  val defaultLanguage = "en"

  // Namespace for every cache key owned by this object, so invalidation cannot
  // collide with other cache users.
  private val cachePrefix = "entries_"

  // mongodb://user:password@host:port/database
  // NOTE: triple-quoted strings do no escape processing, so the digit class must
  // be written \d+. The previous \\d+ meant "a literal backslash then d" as a
  // regex, so this pattern could never match a numeric port and authenticated
  // URIs silently fell through to the credential-less branch below.
  private val fullParsingRegex = """mongodb://(.+)[:](.+)[@](.+)[:](\d+)[/](.+)""".r

  // mongodb://host/database (no credentials)
  private val simpleParsingRegex = """mongodb://(.+)[/](.+)""".r

  // Connects lazily using mongodb.uri from the Play configuration, and
  // authenticates when the URI carries credentials.
  private lazy val mongoDB = {
    val mongoUri = current.configuration.getString("mongodb.uri").getOrElse {
      sys.error("mongodb.uri could not be resolved")
    }
    mongoUri match {
      case fullParsingRegex(username, password, server, port, database) =>
        val db = MongoConnection(MongoURI(mongoUri))(database)
        db.authenticate(username, password)
        db
      case simpleParsingRegex(server, database) =>
        MongoConnection(MongoURI(mongoUri))(database)
      case _ =>
        sys.error("Not able to parse mongodb.uri")
    }
  }

  private lazy val entries = mongoDB("entries")

  /** Deletes the entry and drops every cache key that could still serve it. */
  def remove(entry: Entry) = {
    entries.remove(grater[Entry].asDBObject(entry))
    invalidateCache(cachePrefix + entry.key)
    invalidateCache(cachePrefix + "all")
    invalidateCache(cachePrefix + "filtered_" + entry.entryType)
  }

  /** Inserts a new entry and invalidates the now-stale list caches. */
  def create(entry: Entry) = {
    entries += grater[Entry].asDBObject(entry)
    invalidateCache(cachePrefix + "all")
    invalidateCache(cachePrefix + "filtered_" + entry.entryType)
  }

  /**
   * Replaces the stored entry with the same key and refreshes the caches.
   * Fails fast with a descriptive error when no such entry exists; the previous
   * bare `findOne(q).get` threw an uninformative NoSuchElementException.
   */
  def update(entry: Entry) = {
    val q = MongoDBObject("key" -> entry.key)
    val oldEntry = entries.findOne(q).getOrElse(
      sys.error(s"Cannot update entry with key '${entry.key}': entry not found"))
    entries.update(oldEntry, grater[Entry].asDBObject(entry))
    // update cache with newly updated entry
    val fullKey = cachePrefix + entry.key
    updateCache(fullKey, Some(entry))
    invalidateCache(cachePrefix + "all")
    invalidateCache(cachePrefix + "filtered_" + entry.entryType)
  }

  /** Finds a single entry by key, serving from the cache when possible. */
  def find(key: String) = {
    val fullKey = cachePrefix + key
    cached(fullKey) {
      val q = MongoDBObject("key" -> key)
      entries.findOne(q).map(grater[Entry].asObject(_))
    }
  }

  /** Returns all entries, cached under a single shared key. */
  def all = {
    cached(cachePrefix + "all") {
      entries.map(grater[Entry].asObject(_)).toList
    }
  }

  /** Returns the entries of one type, cached per type. */
  def filtered(entryType: EntryType.Value) = {
    cached(cachePrefix + "filtered_" + entryType) {
      val q = MongoDBObject("entryType" -> entryType.toString())
      entries.find(q).map(grater[Entry].asObject(_)).toList
    }
  }
}
| lukaszbudnik/hackaton-portal | app/cms/ContentManager.scala | Scala | apache-2.0 | 2,508 |
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.soteradefense.dga.graphx
import java.util.Date
import com.soteradefense.dga.graphx.config.Config
import com.soteradefense.dga.graphx.harness.Harness
import com.soteradefense.dga.graphx.hbse.HDFSHBSERunner
import com.soteradefense.dga.graphx.io.formats.EdgeInputFormat
import com.soteradefense.dga.graphx.kryo.DGAKryoRegistrator
import com.soteradefense.dga.graphx.lc.HDFSLCRunner
import com.soteradefense.dga.graphx.louvain.HDFSLouvainRunner
import com.soteradefense.dga.graphx.neighboringcommunity.HDFSNeighboringCommunityRunner
import com.soteradefense.dga.graphx.parser.CommandLineParser
import com.soteradefense.dga.graphx.pr.HDFSPRRunner
import com.soteradefense.dga.graphx.wcc.HDFSWCCRunner
import com.typesafe.config.ConfigFactory
import org.apache.spark.graphx.Graph
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.{SparkConf, SparkContext}
/**
* Object for kicking off analytics.
*/
object DGARunner {
  // Analytic names accepted as the first command-line argument.
  val WeaklyConnectedComponents = "wcc"
  val HighBetweennessSetExtraction = "hbse"
  val LouvainModularity = "louvain"
  val PageRank = "pr"
  val LeafCompression = "lc"
  // Variants backed by the GraphX built-in implementations.
  val PageRankGraphX = "prGraphX"
  val WeaklyConnectedComponentsGraphX = "wccGraphX"
  val NeighboringCommunities = "neighboringCommunities"
  // Names and fallback values of the optional custom arguments parsed from the
  // command line (all passed as strings and converted at the point of use).
  val MinProgressConfiguration = "minProgress"
  val MinProgressDefaultConfiguration = "2000"
  val ProgressCounterConfiguration = "progressCounter"
  val ProgressCounterDefaultConfiguration = "1"
  val DeltaConvergenceConfiguration = "delta"
  val DeltaConvergenceDefaultConfiguration = "0.001"
  val ParallelismConfiguration = "parallelism"
  // -1 means "use the input format's default partitioning".
  val ParallelismDefaultConfiguration = "-1"
  val DefaultJarSplitDelimiter = ","
  val CommunitySplitConfiguration = "community.split"
  val CommunitySplitDefaultDelimiter = ":"
/**
* Main method for parsing arguments and running analytics.
* @param args Commandline arguments.
*/
def main(args: Array[String]) {
val analytic = args(0)
println(s"Analytic: $analytic")
val newArgs = args.slice(1, args.length)
val applicationConfig = ConfigFactory.load()
val commandLineConfig: Config = CommandLineParser.parseCommandLine(newArgs)
commandLineConfig.systemProperties.foreach({
case (systemPropertyKey, systemPropertyValue) =>
System.setProperty(systemPropertyKey, systemPropertyValue)
})
val sparkConf = buildSparkConf(commandLineConfig, applicationConfig)
val sparkContext = new SparkContext(sparkConf)
val parallelism = Integer.parseInt(commandLineConfig.customArguments.getOrElse(ParallelismConfiguration, applicationConfig.getString("parallelism")))
var inputFormat: EdgeInputFormat = null
val hdfsUrl = applicationConfig.getString("hdfs.url")
val inputPath = hdfsUrl + commandLineConfig.inputPath
var outputPath = hdfsUrl + commandLineConfig.outputPath
outputPath = if (outputPath.endsWith("/")) outputPath else outputPath + "/"
outputPath = outputPath + (new Date).getTime
if (parallelism != -1)
inputFormat = new EdgeInputFormat(inputPath, commandLineConfig.edgeDelimiter, parallelism)
else
inputFormat = new EdgeInputFormat(inputPath, commandLineConfig.edgeDelimiter)
val edgeRDD = inputFormat.getEdgeRDD(sparkContext)
val initialGraph = Graph.fromEdges(edgeRDD, None)
var runner: Harness = null
analytic match {
case WeaklyConnectedComponents | WeaklyConnectedComponentsGraphX =>
runner = new HDFSWCCRunner(outputPath, commandLineConfig.edgeDelimiter)
case HighBetweennessSetExtraction =>
runner = new HDFSHBSERunner(outputPath, commandLineConfig.edgeDelimiter)
case LouvainModularity =>
val minProgress = commandLineConfig.customArguments.getOrElse(MinProgressConfiguration, MinProgressDefaultConfiguration).toInt
val progressCounter = commandLineConfig.customArguments.getOrElse(ProgressCounterConfiguration, ProgressCounterDefaultConfiguration).toInt
runner = new HDFSLouvainRunner(minProgress, progressCounter, outputPath)
case LeafCompression =>
runner = new HDFSLCRunner(outputPath, commandLineConfig.edgeDelimiter)
case PageRank | PageRankGraphX =>
val delta = commandLineConfig.customArguments.getOrElse(DeltaConvergenceConfiguration, DeltaConvergenceDefaultConfiguration).toDouble
runner = new HDFSPRRunner(outputPath, commandLineConfig.edgeDelimiter, delta)
case NeighboringCommunities =>
val minProgress = commandLineConfig.customArguments.getOrElse(MinProgressConfiguration, MinProgressDefaultConfiguration).toInt
val progressCounter = commandLineConfig.customArguments.getOrElse(ProgressCounterConfiguration, ProgressCounterDefaultConfiguration).toInt
val communitySplit = commandLineConfig.customArguments.getOrElse(CommunitySplitConfiguration, CommunitySplitDefaultDelimiter)
runner = new HDFSNeighboringCommunityRunner(minProgress, progressCounter, outputPath, commandLineConfig.edgeDelimiter, communitySplit)
case _ =>
throw new IllegalArgumentException(s"$analytic is not supported")
}
analytic match {
case WeaklyConnectedComponents |
HighBetweennessSetExtraction |
LouvainModularity |
LeafCompression |
PageRank |
NeighboringCommunities =>
runner.run(sparkContext, initialGraph)
case PageRankGraphX =>
runner.asInstanceOf[HDFSPRRunner].runGraphXImplementation(initialGraph)
case WeaklyConnectedComponentsGraphX =>
runner.asInstanceOf[HDFSWCCRunner].runGraphXImplementation(initialGraph)
}
}
private def buildSparkConf(commandLineConfig: Config, applicationConfig: com.typesafe.config.Config): SparkConf = {
val sparkConf = new SparkConf()
.setAppName(commandLineConfig.sparkAppName)
.setJars(commandLineConfig.sparkJars.split(DefaultJarSplitDelimiter))
if (applicationConfig.hasPath("spark.master.url"))
sparkConf.setMaster(applicationConfig.getString("spark.master.url"))
if (applicationConfig.hasPath("spark.home"))
sparkConf.setSparkHome(applicationConfig.getString("spark.home"))
sparkConf.setAll(commandLineConfig.customArguments)
if (commandLineConfig.useKryoSerializer) {
sparkConf.set("spark.serializer", classOf[KryoSerializer].getCanonicalName)
sparkConf.set("spark.kryo.registrator", classOf[DGAKryoRegistrator].getCanonicalName)
}
sparkConf
}
}
| atomicjets/distributed-graph-analytics | dga-graphx/src/main/scala/com/soteradefense/dga/graphx/DGARunner.scala | Scala | apache-2.0 | 7,273 |
package carldata.sf.core
import carldata.sf.Runtime
/**
* Core functions and types which can be accessed from the script
*/
object HydrologyModule {
  // Header which will be provided to the compiler: declares the external
  // functions implemented by the HydrologyModule runtime class below.
  val header: String =
    """
      |external def manning_flow(n: Number, d: Number, s: Number, u: String): Number
      |external def manning_velocity(n: Number, d: Number, s: Number, u: String): Number
    """.stripMargin

  // Convenience factory for the runtime implementation.
  def apply(): HydrologyModule = new HydrologyModule()
}
class HydrologyModule extends Runtime {

  /**
   * Manning flow: velocity multiplied by the full-pipe cross-sectional area.
   *
   * Parameters:
   *   'n': roughness coefficient,
   *   'd': hydraulic diameter (hydraulic radius is one quarter of it for a full pipe),
   *   's': hydraulic slope,
   *   'u': units: "mm" or inches (any other value).
   *
   * NOTE(review): cross_sectional_area uses `d` in its input units while the
   * velocity converts `d` to meters/feet — confirm the intended flow units.
   */
  def $manning_flow(n: Double, d: Double, s: Double, u: String): Double =
    $manning_velocity(n, d, s, u) * cross_sectional_area(d)

  /** Manning velocity; metric formula for "mm", US-customary otherwise. */
  def $manning_velocity(n: Double, d: Double, s: Double, u: String): Double = {
    val slopeFactor = Math.pow(s, 0.5)
    if (u == "mm")
      (1 / n) * Math.pow(0.25 * d / 1000, 0.6667) * slopeFactor
    else
      (1.49 / n) * Math.pow(0.25 * d / 12, 0.6667) * slopeFactor
  }

  /** Cross-sectional area of a circular pipe flowing full. */
  private def cross_sectional_area(d: Double): Double =
    (Math.PI * d * d) / 4
}
| carldata/flow-script | src/main/scala/carldata/sf/core/HydrologyModule.scala | Scala | apache-2.0 | 1,333 |
/*
* Copyright (C) 2012 The Regents of The University California.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shark.util
import org.apache.hadoop.hive.ql.parse.SemanticException
object QueryRewriteUtils {

  // Compiled once instead of on every call. `(?i)` makes the keyword
  // case-insensitive; Scala's regex extractor matches the entire command, so
  // the single-table pattern cannot accidentally match a "CACHE t IN x" form
  // (the captured group excludes spaces).
  private val CacheTableDefault = "(?i)CACHE ([^ ]+)".r
  private val CacheTableIn = "(?i)CACHE ([^ ]+) IN ([^ ]+)".r

  /**
   * Rewrites `CACHE <table> [IN <cache type>]` into the equivalent
   * `ALTER TABLE ... SET TBLPROPERTIES` statement. Without an explicit cache
   * type, 'memory' is used.
   *
   * @throws SemanticException if the command does not match either form.
   */
  def cacheToAlterTable(cmd: String): String = {
    cmd match {
      case CacheTableDefault(tableName) =>
        s"ALTER TABLE $tableName SET TBLPROPERTIES ('shark.cache' = 'memory')"
      case CacheTableIn(tableName, cacheType) =>
        s"ALTER TABLE $tableName SET TBLPROPERTIES ('shark.cache' = '$cacheType')"
      case _ =>
        throw new SemanticException(
          s"CACHE accepts a single table name: 'CACHE <table name> [IN <cache type>]'" +
          s" (received command: '$cmd')")
    }
  }

  /**
   * Rewrites `UNCACHE <table>` into the ALTER TABLE statement that disables
   * caching for the table.
   *
   * @throws SemanticException if the command is not exactly two tokens.
   */
  def uncacheToAlterTable(cmd: String): String = {
    val cmdSplit = cmd.split(' ')
    if (cmdSplit.size == 2) {
      val tableName = cmdSplit(1)
      s"ALTER TABLE $tableName SET TBLPROPERTIES ('shark.cache' = 'false')"
    } else {
      throw new SemanticException(
        s"UNCACHE accepts a single table name: 'UNCACHE <table name>' (received command: '$cmd')")
    }
  }
}
| lzshlzsh/shark | src/main/scala/shark/util/QueryRewriteUtils.scala | Scala | apache-2.0 | 1,787 |
/**
* Copyright (C) 2014 Kaj Magnus Lindberg (born 1979)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package test.e2e.specs
import com.debiki.core.Prelude._
import org.scalatest.DoNotDiscover
import org.scalatest.Suites
import test.e2e.code._
/** Runs the CreateEmbeddedCommentsSiteSpec suite, with an admin that logs in
* with Gmail.
*
* In Play: test-only test.e2e.specs.CreateEmbeddedCommentsSiteGmailLoginSpecRunner
* In test:console: (new test.e2e.specs.CreateEmbeddedCommentsSiteGmailLoginSpecRunner).execute()
*/
class CreateEmbeddedCommentsSiteGmailLoginSpecRunner
  extends org.scalatest.Suites(new CreateEmbeddedCommentsSiteGmailLoginSpec)
  with StartServerAndChromeDriverFactory

/** Runs the CreateEmbeddedCommentsSiteSpec suite, with an admin that creates
  * a new password account and logs in with it.
  *
  * In Play:
  *   test-only test.e2e.specs.CreateEmbeddedCommentsSiteNewPasswordAccountSpecRunner
  * In test:console:
  *   (new test.e2e.specs.CreateEmbeddedCommentsSiteNewPasswordAccountSpecRunner).execute()
  */
class CreateEmbeddedCommentsSiteNewPasswordAccountSpecRunner
  extends org.scalatest.Suites(new CreateEmbeddedCommentsSiteNewPasswordAccountSpec)
  with StartServerAndChromeDriverFactory

/** Runs the CreateEmbeddedCommentsSiteSpec suite, with an admin that creates
  * a new password account and logs in with it. It runs
  * [[test.e2e.specs.CreateEmbeddedCommentsSiteNewPasswordAccountSpec]] first, because
  * it creates a password account that we can reuse.
  *
  * In Play:
  *   test-only test.e2e.specs.CreateEmbeddedCommentsSiteOldPasswordAccountSpecRunner
  * In test:console:
  *   (new test.e2e.specs.CreateEmbeddedCommentsSiteOldPasswordAccountSpecRunner).execute()
  */
@DoNotDiscover
class CreateEmbeddedCommentsSiteOldPasswordAccountSpecRunner
  extends org.scalatest.Suites(
    // Order matters: the first suite creates the password account the second reuses.
    new CreateEmbeddedCommentsSiteNewPasswordAccountSpec,
    new CreateEmbeddedCommentsSiteOldPasswordAccountSpec)
  with StartServerAndChromeDriverFactory

/** Runs the ContinueEmbeddSiteTestsManually suite.
  * in SBT:
  *   test-only test.e2e.specs.ContinueEmbeddSiteTestsManuallyRunner
  */
@DoNotDiscover
class ContinueEmbeddSiteTestsManuallyRunner extends Suites(new ContinueEmbeddSiteTestsManually)
  with StartServerAndChromeDriverFactory {
  // Keep data from a previous run so the previously created site can be reused.
  override val emptyDatabaseBeforeAll = false
}
/** Tests creation of an embedded comment site, logs in with Gmail.
  */
@test.tags.EndToEndTest
@DoNotDiscover
class CreateEmbeddedCommentsSiteGmailLoginSpec
  extends CreateEmbeddedCommentsSiteSpecConstructor {

  // Both hooks use the same Gmail OpenID login flow.
  def loginToCreateSite() {
    login()
  }

  def loginToAdminPage() {
    login()
  }

  private def login() {
    info("login with Gmail OpenID")
    loginWithGmailFullscreen()
  }
}

/** Tests creation of an embedded comment site, creates and logs in with a new
  * password account.
  */
@test.tags.EndToEndTest
@DoNotDiscover
class CreateEmbeddedCommentsSiteNewPasswordAccountSpec
  extends CreateEmbeddedCommentsSiteSpecConstructor {

  val AdminsEmail = "admin@example.com"
  val AdminsPassword = "Admins_password"

  // Site creation registers a brand-new password account ...
  def loginToCreateSite() {
    createNewPasswordAccount(
      email = AdminsEmail,
      password = AdminsPassword,
      displayName = "Admins_display_name",
      country = "Admins_country",
      fullName = "Admins_full_name")
  }

  // ... which is then reused to reach the admin page.
  def loginToAdminPage() {
    loginWithPasswordFullscreen(AdminsEmail, AdminsPassword)
  }
}

/** Tests creation of an embedded comment site, creates and logs in with an old
  * password account. Assumes that
  * [[test.e2e.specs.CreateEmbeddedCommentsSiteNewPasswordAccountSpec]] has just
  * been run and created a password account.
  */
@test.tags.EndToEndTest
@DoNotDiscover
class CreateEmbeddedCommentsSiteOldPasswordAccountSpec
  extends CreateEmbeddedCommentsSiteSpecConstructor {

  // Must match the credentials created by the NewPasswordAccount spec above.
  val AdminsEmail = "admin@example.com"
  val AdminsPassword = "Admins_password"

  def loginToCreateSite() {
    login()
  }

  def loginToAdminPage() {
    login()
  }

  def login() {
    loginWithPasswordFullscreen(AdminsEmail, AdminsPassword)
  }
}

/** Assumes you've just run a create-embedded-site E2E test, and
  * logs in to the admin dashboard of that site (without emptying the
  * database first, which all other E2E tests do).
  */
@DoNotDiscover
class ContinueEmbeddSiteTestsManually extends DebikiBrowserSpec with TestLoginner {

  // NOTE: the "ebedded" typo is part of the test name string; left as-is.
  "login to the dashboard of ebedded site 11" in {
    go to ("http://site-11.localhost:19001/-/admin/")
    loginWithPasswordFullscreen("admin@example.com", "Admins_password")
  }

  // Hands control to a human tester; blocks until the browser window closes.
  "wait until browser closed" in {
    waitUntilBrowserClosed()
  }
}
/** Tests creation of embedded comment sites.
  *
  * Logs in as debiki.tester@gmail.com and creates an embedded comments site,
  * checks the admin dashboard and tests to add some comments to an embedded discussions.
  *
  * You need to add entries to your hosts file:
  *   127.0.0.1 mycomputer
  *   127.0.0.1 site-11.localhost
  *   127.0.0.1 site-12.localhost
  *   127.0.0.1 site-13.localhost
  * ...perhaps some more or less.
  *
  * The test steps below are strictly order-dependent: each step assumes the
  * browser state left behind by the previous one.
  */
abstract class CreateEmbeddedCommentsSiteSpecConstructor
  extends DebikiBrowserSpec with TestSiteCreator with TestReplyer {

  /** Subclasses override and test various login methods. */
  def loginToCreateSite()
  def loginToAdminPage()

  /** You need an entry '127.0.0.1 mycomputer' in your hosts file. */
  val EmbeddingSiteUrl = "http://mycomputer:8080"
  // NOTE: "Embdding" (missing 'e') matches the element id used by the page under test.
  val EmbddingSiteUrlInputId = "embeddingSiteUrl"
  val AdminReplyText = "Reply_by_admin"
  val GuestReplyText = "Reply_by_guest"

  // Filled in by the "find site id in browser address bar" step; later steps
  // read it via `.get`, so they must run after that step.
  var embeddedSiteId: Option[String] = None

  def embeddingPageUrl = s"$EmbeddingSiteUrl/embeds-site-${embeddedSiteId.get}-topic-id-empty.html"

  "A user with a browser can" - {

    "go to site creation page" in {
      go to createEmbeddedCommentsSiteStartPage
    }

    "login" in {
      loginToCreateSite()
      eventually {
        find(EmbddingSiteUrlInputId) must not equal(None)
      }
    }

    "specify obviously invalid addresses of embedding site, find them rejected" in {
      pending
    }

    "specify address of embedding site" in {
      click on EmbddingSiteUrlInputId
      enter(EmbeddingSiteUrl)
    }

    // Submitting without accepting terms must leave the form in place.
    "not proceed before terms accepted" in {
      click on cssSelector("input[type=submit]")
      find(EmbddingSiteUrlInputId) must not equal(None)
    }

    "accept terms and proceed" in {
      click on "accepts-terms"
      click on cssSelector("input[type=submit]")
      eventually {
        find(EmbddingSiteUrlInputId) mustEqual None
      }
    }

    "find welcome page" in {
      eventually {
        pageSource must include ("Embedded comments have been setup")
        pageSource must include (EmbeddingSiteUrl)
      }
    }

    // Extracts the new site's id from the 'site-<id>.' subdomain of the current URL.
    "find site id in browser address bar" in {
      val EmbeddedSiteUrl = "^http://site-([0-9a-z]+)\\\\..*$".r
      webDriver.getCurrentUrl() match {
        case EmbeddedSiteUrl(siteId) =>
          embeddedSiteId = Some(siteId)
        case _ =>
          fail()
      }
    }

    "goto instructions page" in {
      click on linkText ("Continue")
    }

    "find instructions page" in {
      eventually {
        pageSource must include ("Installation instructions for")
        pageSource must include (EmbeddingSiteUrl)
      }
    }

    "go to admin page and login" in {
      val adminPageUrl = s"http://site-${embeddedSiteId.get}.${debiki.Globals.baseDomain}/-/admin/"
      click on cssSelector(s"a[href='$adminPageUrl']")
      loginToAdminPage()
      eventually {
        pageSource must include ("Settings")
        pageSource must include ("Special Contents")
        pageSource must include ("Moderation")
      }
    }

    /*
    "not allow site creation if there's no `new-website-domain' config value" in {
      // Please sync this test with the same test in CreateSiteSpec.
      pending
    }
    */

    "find empty page list" in {
      // Test bug: It might be empty simply because the page list doesn't load that quickly.
      // ???
      pending
    }

    "find empty activity list" in {
      // Test bug: It might be empty simply because the activity list doesn't load that quickly.
      click on linkText("Moderation")
      find(cssSelector("table tbody tr")) mustEqual None
      pending
    }

    "view empty discussion in embedding page" in {
      go to embeddingPageUrl
      eventually {
        pageSource must include ("Text text text")
      }
    }

    "Switch to embedded comments <iframe>" in {
      rememberEmbeddedCommentsIframe()
      switchToAnyEmbeddedCommentsIframe()
    }

    "add a reply, as admin" in {
      replyToArticle(AdminReplyText)
    }

    "add a reply, as guest" in {
      logoutIfLoggedIn()
      loginAsGuestInPopup("TestGuest")
      replyToComment(postId = 2, text = GuestReplyText)
    }

    "reload page, find both replies" in {
      reloadPage()
      switchToAnyEmbeddedCommentsIframe()
      eventually {
        pageSource must include (AdminReplyText)
        pageSource must include (GuestReplyText)
      }
    }
  }
}
| debiki/debiki-server-old | test/e2e/specs/CreateEmbeddedCommentsSiteSpec.scala | Scala | agpl-3.0 | 9,673 |
package com.sksamuel.elastic4s.searches.queries.matches
import org.elasticsearch.index.query.{MatchPhrasePrefixQueryBuilder, QueryBuilders}
object MatchPhrasePrefixBuilder {
def apply(q: MatchPhrasePrefixDefinition): MatchPhrasePrefixQueryBuilder = {
val _builder = QueryBuilders.matchPhrasePrefixQuery(q.field, q.value.toString)
q.queryName.foreach(_builder.queryName)
q.boost.map(_.toFloat).foreach(_builder.boost)
q.analyzer.foreach(_builder.analyzer)
q.maxExpansions.foreach(_builder.maxExpansions)
q.queryName.foreach(_builder.queryName)
q.slop.foreach(_builder.slop)
_builder
}
}
| aroundus-inc/elastic4s | elastic4s-tcp/src/main/scala/com/sksamuel/elastic4s/searches/queries/matches/MatchPhrasePrefixBuilder.scala | Scala | apache-2.0 | 624 |
package nexus.diff.ops
import nexus.diff._
import nexus._
/**
* @author Tongfei Chen
*/
object Clamp extends ParameterizedPolyOp1 {

  // Parameterized op: given an inclusive (min, max) range, clamps a real tensor.
  implicit def clampF[T[_], R, I](implicit T: IsRealTensorK[T, R]) = (range: (R, R)) =>
    new P[T[I], T[I]] {
      val (min, max) = range
      def name = s"Clamp[$min, $max]"
      def tag = Tag.realTensor[T, R, I]
      def forward(x: T[I]) = ???                         // TODO: not implemented — throws NotImplementedError if invoked
      def backward(dy: T[I], y: T[I], x: T[I]) = ???     // TODO: not implemented
    }
}
| ctongfei/nexus | diff/src/main/scala/nexus/diff/ops/clamp.scala | Scala | mit | 466 |
package spinoco.fs2.http.internal
import cats.effect.IO
import fs2._
import spinoco.fs2.http
import spinoco.fs2.http.HttpRequest
import spinoco.protocol.http.Uri
object HttpClientApp extends App {

  import spinoco.fs2.http.Resources._

  // Builds an HTTP client, issues a GET for https://www.google.cz/, collects
  // the body as a string and prints the resulting vector. `unsafeRunSync`
  // executes the whole IO program synchronously at the end of the App.
  http.client[IO]().flatMap { httpClient =>
    httpClient.request(HttpRequest.get(Uri.https("www.google.cz", "/"))).flatMap { resp =>
      Stream.eval(resp.bodyAsString)
    }.compile.toVector.map {
      println
    }
  }.unsafeRunSync()
}
| Spinoco/fs2-http | src/test/scala/spinoco/fs2/http/internal/HttpClientApp.scala | Scala | mit | 487 |
package au.com.dius.pact.consumer
import io.netty.handler.codec.{http => netty}
import au.com.dius.pact.model.{MockProviderConfig, Request, Response}
import au.com.dius.pact.model.unfiltered.Conversions
import unfiltered.{netty => unetty}
import unfiltered.netty.{cycle => unettyc}
import unfiltered.{request => ureq}
import unfiltered.{response => uresp}
import io.netty.channel.ChannelHandler.Sharable
/** Mock provider backed by an unfiltered/netty HTTP server: every incoming
  * request is converted to a pact [[Request]], handed to `handleRequest`
  * (from [[StatefulMockProvider]]), and the resulting [[Response]] is
  * converted back into an unfiltered response.
  */
class UnfilteredMockProvider(val config: MockProviderConfig) extends StatefulMockProvider {
  type UnfilteredRequest = ureq.HttpRequest[unetty.ReceivedMessage]
  type UnfilteredResponse = uresp.ResponseFunction[netty.HttpResponse]

  // `chunked(1048576)` aggregates chunked request bodies (1048576 bytes = 1 MiB limit
  // — presumably; confirm against the unfiltered-netty docs).
  private val server = unetty.Server.http(config.port, config.hostname).chunked(1048576).handler(Routes)

  @Sharable
  object Routes extends unettyc.Plan
    with unettyc.SynchronousExecution
    with unetty.ServerErrorResponse {
    // Single catch-all route: delegate everything to the stateful handler.
    override def intent: unettyc.Plan.Intent = {
      case req => convertResponse(handleRequest(convertRequest(req)))
    }

    def convertRequest(nr: UnfilteredRequest): Request = Conversions.unfilteredRequestToPactRequest(nr)
    def convertResponse(response: Response): UnfilteredResponse = Conversions.pactToUnfilteredResponse(response)
  }

  def start(): Unit = server.start()
  def stop(): Unit = server.stop()
}
| caoquendo/pact-jvm | pact-jvm-consumer/src/main/scala/au/com/dius/pact/consumer/UnfilteredMockProvider.scala | Scala | apache-2.0 | 1,331 |
/*
* Copyright (c) 2015 e.e d3si9n
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package scalaxb.compiler
import java.io.File
import scala.collection.immutable.{ Map, Set, Seq }
/** Immutable bag of scalaxb configuration entries, keyed by the entry's class
  * name. Typed accessors below either unwrap the stored entry, fall back to a
  * default from the companion object, or (for flag-style entries) just test
  * membership.
  */
case class Config(items: Map[String, ConfigEntry]) {
  import Config._
  import ConfigEntry._

  def values: Vector[ConfigEntry] = items.values.toVector

  // Value-carrying entries: unwrap, falling back to the companion defaults.
  def packageNames: Map[Option[String], Option[String]] =
    (get[PackageNames] getOrElse defaultPackageNames).value
  def classPrefix: Option[String] =
    get[ClassPrefix] map {_.value}
  def classPostfix: Option[String] =
    get[ClassPostfix] map {_.value}
  def paramPrefix: Option[String] =
    get[ParamPrefix] map {_.value}
  def attributePrefix: Option[String] =
    get[AttributePrefix] map {_.value}
  def opOutputWrapperPostfix: String =
    (get[OpOutputWrapperPostfix] getOrElse defaultOpOutputWrapperPostfix).value
  def outdir: File =
    (get[Outdir] getOrElse defaultOutdir).value
  // Flag-style entries: presence of the case object means "enabled".
  def packageDir: Boolean = values contains GeneratePackageDir
  def wrappedComplexTypes: List[String] =
    (get[WrappedComplexTypes] getOrElse defaultWrappedComplexTypes).value
  def prependFamilyName: Boolean = values contains PrependFamilyName
  def seperateProtocol: Boolean = values contains SeperateProtocol
  def protocolFileName: String =
    (get[ProtocolFileName] getOrElse defaultProtocolFileName).value
  def protocolPackageName: Option[String] =
    (get[ProtocolPackageName] getOrElse defaultProtocolPackageName).value
  def defaultNamespace: Option[String] =
    (get[DefaultNamespace] getOrElse defaultDefaultNamespace).value
  def generateRuntime: Boolean = values contains GenerateRuntime
  def generateDispatchClient: Boolean = values contains GenerateDispatchClient
  def generateDispatchAs: Boolean = values contains GenerateDispatchAs
  def generateGigahorseClient: Boolean = values contains GenerateGigahorseClient
  def contentsSizeLimit: Int =
    (get[ContentsSizeLimit] getOrElse defaultContentsSizeLimit).value
  def sequenceChunkSize: Int =
    (get[SequenceChunkSize] getOrElse defaultSequenceChunkSize).value
  def namedAttributes: Boolean = values contains NamedAttributes
  def laxAny: Boolean = values contains LaxAny
  def async: Boolean = values contains GenerateAsync
  def dispatchVersion: String =
    (get[DispatchVersion] getOrElse defaultDispatchVersion).value
  def gigahorseVersion: String =
    (get[GigahorseVersion] getOrElse defaultGigahorseVersion).value
  def gigahorseBackend: String =
    (get[GigahorseBackend] getOrElse defaultGigahorseBackend).value
  def varArg: Boolean = values contains VarArg
  def ignoreUnknown: Boolean = values contains IgnoreUnknown
  def autoPackages: Boolean = values contains AutoPackages
  def generateMutable: Boolean = values contains GenerateMutable
  def generateVisitor: Boolean = values contains GenerateVisitor
  def generateLens: Boolean = values contains GenerateLens
  def capitalizeWords: Boolean = values contains CapitalizeWords
  def symbolEncodingStrategy = get[SymbolEncoding.Strategy] getOrElse defaultSymbolEncodingStrategy
  def enumNameMaxLength: Int = (get[EnumNameMaxLength] getOrElse defaultEnumNameMaxLength).value
  def useLists: Boolean = values contains UseLists

  // Lookup by runtime class name; the cast is safe because `update` stores
  // every entry under its own class name (ConfigEntry#name).
  private def get[A <: ConfigEntry: Manifest]: Option[A] =
    items.get(implicitly[Manifest[A]].runtimeClass.getName).asInstanceOf[Option[A]]

  /** Returns a copy with `item` added or replaced (one entry per entry class). */
  def update(item: ConfigEntry): Config =
    copy(items = items.updated(item.name, item))

  /** Returns a copy without the entry of `item`'s class. */
  def remove(item: ConfigEntry): Config =
    copy(items = items - item.name)
}

object Config {
  import ConfigEntry._

  /** Builds a Config from entries in order; later entries of the same class win. */
  def apply(xs: Vector[ConfigEntry]): Config =
    xs.foldLeft(new Config(Map())) { (acc, x) => acc.update(x) }

  // Defaults used by the accessors in the case class above.
  val defaultPackageNames = PackageNames(Map(None -> None))
  val defaultOpOutputWrapperPostfix = OpOutputWrapperPostfix(Defaults.opOutputWrapperPostfix)
  val defaultOutdir = Outdir(new File("."))
  val defaultWrappedComplexTypes = WrappedComplexTypes(Nil)
  val defaultProtocolFileName = ProtocolFileName("xmlprotocol.scala")
  val defaultProtocolPackageName = ProtocolPackageName(None)
  val defaultDefaultNamespace = DefaultNamespace(None)
  val defaultContentsSizeLimit = ContentsSizeLimit(Int.MaxValue)
  val defaultSequenceChunkSize = SequenceChunkSize(10)
  val defaultDispatchVersion = DispatchVersion(scalaxb.BuildInfo.defaultDispatchVersion)
  val defaultGigahorseVersion = GigahorseVersion(scalaxb.BuildInfo.defaultGigahorseVersion)
  val defaultGigahorseBackend = GigahorseBackend(scalaxb.BuildInfo.defaultGigahorseBackend)
  val defaultSymbolEncodingStrategy = SymbolEncoding.Legacy151
  val defaultEnumNameMaxLength = EnumNameMaxLength(50)

  /** The out-of-the-box configuration. */
  val default = Config(
    Vector(defaultPackageNames, defaultOpOutputWrapperPostfix, defaultOutdir,
      defaultWrappedComplexTypes, SeperateProtocol, defaultProtocolFileName,
      defaultProtocolPackageName, GenerateRuntime, GenerateDispatchClient,
      defaultContentsSizeLimit, defaultSequenceChunkSize,
      GenerateAsync, defaultDispatchVersion)
  )
}

/** One configuration setting; `name` (the class name) is the storage key in Config. */
sealed trait ConfigEntry {
  def name: String = getClass.getName
}

object ConfigEntry {
  // Case classes carry a value; case objects act as boolean flags.
  case class PackageNames(value: Map[Option[String], Option[String]]) extends ConfigEntry
  case class ClassPrefix(value: String) extends ConfigEntry
  case class ClassPostfix(value: String) extends ConfigEntry
  case class ParamPrefix(value: String) extends ConfigEntry
  case class AttributePrefix(value: String) extends ConfigEntry
  case class OpOutputWrapperPostfix(value: String) extends ConfigEntry
  case class Outdir(value: File) extends ConfigEntry
  case object GeneratePackageDir extends ConfigEntry
  case class WrappedComplexTypes(value: List[String]) extends ConfigEntry
  case object PrependFamilyName extends ConfigEntry
  case object SeperateProtocol extends ConfigEntry
  case class ProtocolFileName(value: String) extends ConfigEntry
  case class ProtocolPackageName(value: Option[String]) extends ConfigEntry
  case class DefaultNamespace(value: Option[String]) extends ConfigEntry
  case object GenerateRuntime extends ConfigEntry
  case object GenerateDispatchClient extends ConfigEntry
  case object GenerateDispatchAs extends ConfigEntry
  case object GenerateGigahorseClient extends ConfigEntry
  case class ContentsSizeLimit(value: Int) extends ConfigEntry
  case class SequenceChunkSize(value: Int) extends ConfigEntry
  case object NamedAttributes extends ConfigEntry
  case object LaxAny extends ConfigEntry
  case object GenerateAsync extends ConfigEntry
  case class DispatchVersion(value: String) extends ConfigEntry
  case class GigahorseVersion(value: String) extends ConfigEntry
  case class GigahorseBackend(value: String) extends ConfigEntry
  case object VarArg extends ConfigEntry
  case object IgnoreUnknown extends ConfigEntry
  case object AutoPackages extends ConfigEntry
  case object GenerateMutable extends ConfigEntry
  case object GenerateVisitor extends ConfigEntry
  case object GenerateLens extends ConfigEntry
  case object CapitalizeWords extends ConfigEntry
  case class EnumNameMaxLength(value: Int) extends ConfigEntry
  case object UseLists extends ConfigEntry

  object SymbolEncoding {
    // All strategies share one storage key so only one can be active at a time.
    sealed abstract class Strategy(val alias: String, val description: String) extends ConfigEntry with Product with Serializable {
      final override def name: String = classOf[Strategy].getName
    }
    case object Discard extends Strategy("discard", "Discards any characters that are invalid in Scala identifiers, such as dots and hyphens")
    case object SymbolName extends Strategy("symbol-name", "Replaces `.`, `-`, `:`, and trailing `_` in class names with `Dot`, `Hyphen`, `Colon`, and `Underscore`")
    case object UnicodePoint extends Strategy("unicode-point", "Replaces symbols with a 'u' followed by the 4-digit hexadecimal code of the character (e.g. `_` => `u005f`)")
    case object DecimalAscii extends Strategy("decimal-ascii", "Replaces symbols with a 'u' followed by the decimal code of the character (e.g. `_` => `u95`)")
    case object Legacy151 extends Strategy("legacy-1.5.1", "Same as decimal-ascii except that _trailing_ underscores are replaced with `u93` (as introduced in v1.5.1)")

    val values = Seq(Discard, SymbolName, UnicodePoint, DecimalAscii, Legacy151)

    /** Looks a strategy up by its CLI alias. */
    def apply(alias: String): Option[Strategy] = values.find(_.alias == alias)
    /** Like `apply` but throws with a helpful message on unknown aliases. */
    def withName(alias: String): Strategy = apply(alias).getOrElse {
      throw new IllegalArgumentException(s"""Unknown symbol encoding strategy "${alias}"; possible values are ${values.map(_.alias).mkString(", ")}.""")
    }

    // scopt integration: parse a Strategy directly from a command-line option.
    private[compiler] implicit val scoptRead: scopt.Read[Strategy] = scopt.Read.reads(withName)
  }
}
| eed3si9n/scalaxb | cli/src/main/scala/scalaxb/compiler/Config.scala | Scala | mit | 9,692 |
package com.eharmony.aloha.models.h2o.json
import java.{lang => jl}
import com.eharmony.aloha.factory.ScalaJsonFormats.listMapFormat
import com.eharmony.aloha.id.ModelId
import com.eharmony.aloha.io.sources.ModelSource
import com.eharmony.aloha.models.h2o.{DoubleFeatureFunction, DoubleSeqFeatureFunction, FeatureFunction, StringFeatureFunction}
import com.eharmony.aloha.reflect.{RefInfo, RefInfoOps}
import com.eharmony.aloha.semantics.Semantics
import com.eharmony.aloha.semantics.func.GenAggFunc
import spray.json.DefaultJsonProtocol._
import spray.json._
import scala.collection.immutable.ListMap
import scala.collection.{immutable => sci}
sealed trait H2oSpec {
type A
def name: String
def spec: String
/**
* This will be used as a default if the resulting function returns `None` for a given value.
* @return
*/
def defVal: Option[A]
implicit def refInfo: RefInfo[A]
def ffConverter[B]: GenAggFunc[B, Option[A]] => FeatureFunction[B]
def compile[B](semantics: Semantics[B]): Either[Seq[String], FeatureFunction[B]] =
semantics.createFunction[Option[A]](spec, Option(defVal))(RefInfoOps.option[A]).right.map(f =>
ffConverter(f.andThenGenAggFunc(_ orElse defVal)))
}
object H2oSpec {
// Used in CLI.
private[h2o] implicit val h2oSpecJsonFormat = lift(new RootJsonReader[H2oSpec] {
override def read(json: JsValue): H2oSpec = {
val jso = json.asJsObject
jso.fields.get("type") match {
case None => jso.convertTo(jsonFormat3(DoubleH2oSpec)) // Default is double type.
case Some(JsString("double")) if jso.fields.contains("size") =>
jso.convertTo(jsonFormat4(DoubleSeqH2oSpec))
case Some(JsString("double")) =>
jso.convertTo(jsonFormat3(DoubleH2oSpec))
case Some(JsString("string")) => jso.convertTo(jsonFormat3(StringH2oSpec))
case Some(JsString(t)) => throw new DeserializationException(s"unsupported H2oSpec type: $t. Should be 'double' or 'string'.")
case Some(t) => throw new DeserializationException(s"H2oSpec type expected string, got: $t")
}
}
})
private[h2o] implicit val h2oFeaturesJsonFormat = new RootJsonFormat[sci.ListMap[String, H2oSpec]] with DefaultJsonProtocol {
override def read(json: JsValue): sci.ListMap[String, H2oSpec] = {
val m = json.convertTo[sci.ListMap[String, JsValue]]
m.map {
case (k, JsString(s)) => (k, DoubleH2oSpec(k, s, None))
case (k, o: JsObject) =>
// Lowercase the type so that it is more forgiving.
val tpe = o.fields.get("type").map {
case JsString(str) => JsString(str.toLowerCase)
case d => d
}
tpe match {
case Some(JsString("double")) if o.fields.contains("size") =>
(k, DoubleSeqH2oSpec(k, spec(o), size(o), o.fields.get("defVal").flatMap(_.convertTo[Option[Seq[Double]]])))
case None | Some(JsString("double")) => (k, DoubleH2oSpec(k, spec(o), o.fields.get("defVal").flatMap(_.convertTo[Option[Double]])))
case Some(JsString("string")) => (k, StringH2oSpec(k, spec(o), o.fields.get("defVal").flatMap(_.convertTo[Option[String]])))
case Some(JsString(d)) => throw new DeserializationException(s"unsupported H2oSpec type: $d. Should be 'double' or 'string'.")
case Some(d) => throw new DeserializationException(s"H2oSpec type expected string, got: $d")
}
case (k, v) => throw new DeserializationException(s"key '$k' needs to be a JSON string or object. found $v.")
}
}
override def write(features: sci.ListMap[String, H2oSpec]): JsValue = {
def dd(s: DoubleH2oSpec) = s.defVal.map(d => Map("defVal" -> JsNumber(d))).getOrElse(Map.empty)
def ddn(s: DoubleSeqH2oSpec) = s.defVal.map{ d =>
val vec: Vector[JsValue] = d.map(x => JsNumber(x))(scala.collection.breakOut)
Map("defVal" -> JsArray(vec))
}.getOrElse(Map.empty)
def ds(s: StringH2oSpec) = s.defVal.map(d => Map("defVal" -> JsString(d))).getOrElse(Map.empty)
val fs = features.map {
case (k, DoubleH2oSpec(name, spec, None)) => (k, JsString(spec))
case (k, s: DoubleH2oSpec) => (k, JsObject(sci.ListMap[String, JsValue]("spec" -> JsString(s.spec)) ++ dd(s) ++ Seq("type" -> JsString("double"))))
case (k, s: DoubleSeqH2oSpec) => (k, JsObject(sci.ListMap[String, JsValue]("spec" -> JsString(s.spec)) ++ ddn(s) ++ Seq("type" -> JsString("double"), "size" -> JsNumber(s.size))))
case (k, s: StringH2oSpec) => (k, JsObject(sci.ListMap[String, JsValue]("spec" -> JsString(s.spec)) ++ ds(s) ++ Seq("type" -> JsString("string"))))
}
JsObject(fs)
}
def spec(o: JsObject) = o.fields.get("spec").map(_.convertTo[String]).getOrElse(throw new DeserializationException("no string called 'spec'."))
def size(o: JsObject) = o.fields.get("size").map(_.convertTo[Int]).getOrElse(throw new DeserializationException("no int called 'size'."))
}
}
/**
 * Feature specification producing a scalar double value.
 *
 * @param name   feature name exposed to the H2O model
 * @param spec   feature-extraction specification string
 * @param defVal optional default applied when the extracted value is missing
 */
case class DoubleH2oSpec(name: String, spec: String, defVal: Option[Double]) extends H2oSpec {
  type A = Double
  def refInfo = RefInfo[Double]
  // Box the extracted scala.Double into java.lang.Double for the H2O-facing function.
  def ffConverter[B] = fn => DoubleFeatureFunction(fn.andThenGenAggFunc(opt => opt.map(d => jl.Double.valueOf(d))))
}
/**
 * Feature specification producing a fixed-length sequence of doubles.
 *
 * @param name   feature name exposed to the H2O model
 * @param spec   feature-extraction specification string
 * @param size   required length of the produced sequence (enforced at extraction time)
 * @param defVal optional default sequence applied when the extracted value is missing
 */
case class DoubleSeqH2oSpec(name: String, spec: String, size: Int, defVal: Option[Seq[Double]]) extends H2oSpec {
  type A = Seq[Double]
  def ffConverter[B] = f => DoubleSeqFeatureFunction(f, size)
  def refInfo = RefInfo[Seq[Double]]
  // Message embedded in the generated spec's require(...) call below.
  protected def sizeErr: String = s"feature '$name' output size != $size"
  // NOTE: override here and wrap spec in Option to avoid adding implicit Option lift for Seq[Double]
  override def compile[B](semantics: Semantics[B]): Either[Seq[String], FeatureFunction[B]] = {
    // Wrap the raw spec so the compiled function both lifts to Option and asserts the
    // expected sequence length. The concatenation mixes an s-interpolated prefix with a
    // triple-quoted suffix so the error message ends up inside double quotes in the
    // generated code -- do not reformat casually.
    val wrappedSpec = s"Option($spec).map{x => require(x.size == $size, " + s""""$sizeErr"); x}"""
    semantics.createFunction[Option[Seq[Double]]](wrappedSpec, Option(defVal))(RefInfo[Option[Seq[Double]]]).right.map(f =>
      ffConverter(f.andThenGenAggFunc(_ orElse defVal)))
  }
}
/**
 * Feature specification producing a string value.
 *
 * @param name   feature name exposed to the H2O model
 * @param spec   feature-extraction specification string
 * @param defVal optional default applied when the extracted value is missing
 */
case class StringH2oSpec(name: String, spec: String, defVal: Option[String]) extends H2oSpec {
  type A = String
  def refInfo = RefInfo[String]
  def ffConverter[B] = fn => StringFeatureFunction(fn)
}
/**
 * Parsed representation of an H2O model definition.
 *
 * @param modelType           model type discriminator string from the JSON
 * @param modelId             identifier of the model
 * @param modelSource         origin of the compiled model (parsed from the same JSON object)
 * @param features            ordered map of feature name to feature spec
 * @param numMissingThreshold optional threshold related to missing feature values
 *                            (semantics defined by the model implementation -- confirm there)
 * @param notes               optional free-form notes; empty lists are normalized to None on read
 */
case class H2oAst(modelType: String,
                  modelId: ModelId,
                  modelSource: ModelSource,
                  features: sci.ListMap[String, H2oSpec],
                  numMissingThreshold: Option[Int] = None,
                  notes: Option[Seq[String]] = None)
/** spray-json format for [[H2oAst]]. */
private[h2o] object H2oAst {
  implicit val h2oAstJsonFormat = new RootJsonFormat[H2oAst] with DefaultJsonProtocol {
    override def read(json: JsValue): H2oAst = {
      val jso = json.asJsObject
      // The model source is parsed from the same top-level object, not a nested field.
      val modelSource = json.convertTo[ModelSource]
      // "modelType", "modelId" and "features" are all required.
      val (modelType, modelId, features) = jso.getFields("modelType", "modelId", "features") match {
        case Seq(JsString(mt), mid, fs) =>
          (mt, mid.convertTo[ModelId], fs.convertTo(H2oSpec.h2oFeaturesJsonFormat))
        case _ => throw new DeserializationException("bad format")
      }
      // Optional integer threshold; anything other than a single JSON number is ignored.
      val numMissingThreshold = jso.getFields("numMissingThreshold") match {
        case Seq(JsNumber(n)) => Option(n.toIntExact)
        case _ => None
      }
      // Optional notes array; an empty array is normalized to None.
      val notes = jso.getFields("notes") match {
        case Seq(a: JsArray) => Option(a.convertTo[Seq[String]]).filter(_.nonEmpty)
        case _ => None
      }
      H2oAst(modelType, modelId, modelSource, features, numMissingThreshold, notes)
    }
    override def write(h2oAst: H2oAst): JsValue = {
      // Field order is deliberate (ListMap preserves it): modelType, modelId,
      // numMissingThreshold (if any), features, the flattened model-source fields,
      // then notes (if any). The model source contributes its own top-level fields.
      val fields = Seq("modelType" -> h2oAst.modelType.toJson) ++
        Seq("modelId" -> h2oAst.modelId.toJson) ++
        h2oAst.numMissingThreshold.map(t => "numMissingThreshold" -> t.toJson).toSeq ++
        Seq("features" -> h2oAst.features.toJson(H2oSpec.h2oFeaturesJsonFormat)) ++
        h2oAst.modelSource.toJson.asJsObject.fields.toSeq ++
        h2oAst.notes.map(t => "notes" -> t.toJson).toSeq
      JsObject(ListMap(fields:_*))
    }
  }
}
| eHarmony/aloha | aloha-h2o/src/main/scala/com/eharmony/aloha/models/h2o/json/H2oModelJson.scala | Scala | mit | 8,150 |
package com.musingscafe.commons
import java.util.Optional
import com.musingscafe.commons.`object`.Obj
/**
* Created by ayadav on 9/16/17.
*/
/**
 * Specs for the null-safety helpers on [[Obj]]: `getOrElse` falls back to the supplied
 * value for null input, and `toOptional` wraps a nullable reference in a
 * [[java.util.Optional]].
 */
class ObjSpecs extends CommonsSpecs {

  describe("Object nullability") {

    it ("should toOptional or SupplierElse") {
      Given("an object")
      val nullableString: String = null

      When("asked for")
      val response = Obj.getOrElse(nullableString, "1")

      Then("1 is returned")
      response should be ("1")
    }

    it ("should return an Optional") {
      Given("an object")
      val nullableString: String = null

      When("asked for")
      val response = Obj.toOptional(nullableString)

      Then("Optional is returned")
      // Use a wildcard type argument: generics are erased at runtime, so the original
      // isInstanceOf[Optional[String]] was an unchecked test whose [String] part was
      // never actually verified.
      response.isInstanceOf[Optional[_]] should be (true)
    }
  }
}
| lilbond/java-commons | src/test/scala/com/musingscafe/commons/ObjSpecs.scala | Scala | apache-2.0 | 773 |
package org.orbroker
import org.orbroker.conv._
import java.util.concurrent.ConcurrentHashMap
import scala.collection.concurrent.{ Map => ConcurrentMap }
/**
 * Handle tying a statement id (and, optionally, inline SQL text) to a
 * [[QueryExtractor]] and a set of [[ParmConverter]]s used to adapt parameter
 * values before they are handed to JDBC.
 *
 * The converter map doubles as a cache: lookups for a concrete runtime class are
 * memoized -- including negative results -- so the superclass/interface search in
 * `fromSuper` runs at most once per class. Instances are created via the
 * companion object's `apply` overloads.
 */
final class Token[T] private (
  private val sql: Option[String],
  val id: Symbol,
  private val ext: QueryExtractor[T],
  private[orbroker] val parmConverters: ConcurrentHashMap[Class[_], Option[ParmConverter]]) {

  // Join extractors are wrapped in SafeJoinExtractor; all other extractor types
  // are used as-is.
  private[orbroker] val extractor: QueryExtractor[T] = ext match {
    case je: JoinExtractor[_] => new SafeJoinExtractor(je)
    case other: QueryExtractor[_] => other
  }

  /**
   * Converts a parameter to the type produced by its registered [[ParmConverter]],
   * or returns it unchanged when no converter applies. Null passes through as null.
   */
  private[orbroker] def convert(parm: Any): Any = {
    if (parm == null) {
      null
    } else {
      val refClass = parm.asInstanceOf[AnyRef].getClass
      getConverter(refClass) match {
        case None => parm
        case Some(pc) => pc.toJdbcType(parm.asInstanceOf[pc.T])
      }
    }
  }

  // Cache-first lookup: a missing entry (null) triggers the hierarchy search.
  private def getConverter(cls: Class[_]): Option[ParmConverter] =
    parmConverters.get(cls) match {
      case null => fromSuper(cls)
      case o => o
    }

  /**
   * Walks the class hierarchy (superclasses first, then the class's directly
   * declared interfaces) looking for a registered converter. Whatever the outcome,
   * the result is cached under `refClass` so the walk is not repeated.
   */
  private def fromSuper(refClass: Class[_]): Option[ParmConverter] = {
    var cls = refClass.getSuperclass
    // Try all super classes
    while (cls != null)
      parmConverters.get(cls) match {
        case null => cls = cls.getSuperclass
        case o => parmConverters.put(refClass, o); return o
      }
    // That didn't work. Let's try the interfaces
    refClass.getInterfaces foreach { iface =>
      parmConverters.get(iface) match {
        case pc: Some[_] =>
          parmConverters.put(refClass, pc); return pc
        case _ => // Ignore
      }
    }
    // Nope, nothing can convert. Don't bother doing this again.
    parmConverters.put(refClass, None)
    None
  }

  // With inline SQL the broker builds a statement from it; otherwise the statement
  // is looked up by id.
  private[orbroker] def getStatement(broker: Broker) = {
    if (sql.isEmpty)
      broker.getStatement(id)
    else
      broker.makeStatement(id, sql.get)
  }
}
/**
 * Factory methods for [[Token]]. Overloads without a `sql` argument delegate with
 * `null`, which `Option(...)` in the constructor call turns into `None` (statement
 * looked up by id). Overloads without an extractor install a [[DefaultExtractor]].
 */
object Token {

  // Builds the class -> converter map, keyed by each converter's declared source type.
  private def toMap(parmConverters: Seq[ParmConverter]) = {
    val map = new ConcurrentHashMap[Class[_], Option[ParmConverter]]
    parmConverters foreach { pc => map.put(pc.fromType, Some(pc)) }
    map
  }

  /** Token with inline SQL, explicit id and extractor. */
  def apply[T](sql: String, id: Symbol, extractor: QueryExtractor[T], parmConverters: ParmConverter*) = {
    new Token(Option(sql), id, extractor, toMap(parmConverters))
  }

  /** Token with explicit extractor; SQL resolved via the broker by id. */
  def apply[T](id: Symbol, extractor: QueryExtractor[T], parmConverters: ParmConverter*): Token[T] =
    apply(null, id, extractor, parmConverters: _*)

  /** Token with inline SQL and the default row extractor (unchecked cast to T). */
  def apply[T](sql: String, id: Symbol, parmConverters: ParmConverter*) = {
    val extractor = new DefaultExtractor(id).asInstanceOf[RowExtractor[T]]
    new Token(Option(sql), id, extractor, toMap(parmConverters))
  }

  /** Token with default extractor; SQL resolved via the broker by id. */
  def apply[T](id: Symbol, parmConverters: ParmConverter*): Token[T] =
    apply(null, id, parmConverters: _*)

  /** Token with inline SQL, explicit extractor, and the shared NO_ID sentinel id. */
  def apply[T](sql: String, extractor: QueryExtractor[T], parmConverters: ParmConverter*): Token[T] =
    apply(sql, org.orbroker.NO_ID, extractor, parmConverters: _*)
}
// Copyright 2011 Kiel Hodges
package sample
import replicant._
/**
 * Mock of [[GenericRepository]] built on replicant's [[MockMinder]].
 *
 * `BaseSubject` forwards each repository call through the `minder` self-alias to
 * the `store`/`findById` members defined below -- presumably so MockMinder can
 * record the invocation and supply a stubbed response (confirm against MockMinder's
 * contract). The `method(...)` handles are what tests interact with.
 */
class MockGenericRepository[Subject: ResponseFallback] extends MockMinder[GenericRepository[Subject]] { minder =>

  protected class BaseSubject {
    // Route calls through the minder rather than executing any real behavior.
    def store(subject: Subject) = minder.store(subject)
    def findById(id: Long) = minder.findById(id)
  }

  // The mock is the forwarding base mixed with the repository trait itself.
  val mock: GenericRepository[Subject] = new BaseSubject with GenericRepository[Subject]

  // Handles used by tests to stub responses and verify calls.
  val store = method("store", mock.store _)
  val findById = method("findById", mock.findById _)
}
| greenbar/replicant | scala/src/test/scala/sample/MockGenericRepository.scala | Scala | mit | 524 |
package com.github.jlprat.ninetynine.p20
import org.scalatest.WordSpec
import com.github.jlprat.ninetynine.p20.P20._
/**
* Created by @jlprat on 14/05/2016.
* Specs for P20: Remove the Kth element from a list
*/
/**
 * Specs for P20: remove the Kth (0-based) element from a list.
 *
 * `remove` is expected to return None for an empty list or an out-of-range position,
 * and Some((removedElement, remainingList)) otherwise.
 */
class P20Spec extends WordSpec {

  "remove" when {
    "called with empty lists" should {
      "return a None object" in {
        val empty = List[String]()
        assert(remove(empty, 0) === None)
        assert(remove(empty, 3) === None)
        assert(remove(empty, 20) === None)
        assert(remove(empty, 1000) === None)
      }
    }
    "called with non empty lists" when {
      val nonEmpty = List(1, 2, 3)
      val hugeList = List.tabulate(500)(x => x.toString)
      "called with a position lower than the size of the list" should {
        val posOne = 1
        val pos20 = 20
        val hugePosition = 450
        val firstResult = remove(nonEmpty, posOne)
        val secondResult = remove(hugeList, pos20)
        val thirdResult = remove(hugeList, hugePosition)
        "return a non empty Option with a tuple" which {
          assert(firstResult.isDefined)
          assert(secondResult.isDefined)
          assert(thirdResult.isDefined)
          "first position is the removed element" in {
            // Fix: dropped the leftover debug println statements that polluted the
            // test output without asserting anything.
            firstResult.foreach(x => assert(x._1 === nonEmpty(posOne)))
            secondResult.foreach(x => assert(x._1 === hugeList(pos20)))
            thirdResult.foreach(x => assert(x._1 === hugeList(hugePosition)))
          }
          "second position is a list" which {
            "is one element shorter than the original" in {
              firstResult.foreach(x => assert(x._2.size === nonEmpty.size - 1))
              secondResult.foreach(x => assert(x._2.size === hugeList.size - 1))
              thirdResult.foreach(x => assert(x._2.size === hugeList.size - 1))
            }
            "doesn't contain the element in specified position" in {
              // The element formerly at the removed index is replaced by its successor...
              firstResult.foreach(x => assert(x._2(posOne) === nonEmpty(posOne + 1)))
              secondResult.foreach(x => assert(x._2(pos20) === hugeList(pos20 + 1)))
              thirdResult.foreach(x => assert(x._2(hugePosition) === hugeList(hugePosition + 1)))
              // ...and the remainder equals the original with exactly that index excised.
              firstResult.foreach(x => assert(x._2 === nonEmpty.take(posOne) ::: nonEmpty.drop(posOne + 1)))
              secondResult.foreach(x => assert(x._2 === hugeList.take(pos20) ::: hugeList.drop(pos20 + 1)))
              thirdResult.foreach(x => assert(x._2 === hugeList.take(hugePosition) ::: hugeList.drop(hugePosition + 1)))
            }
          }
        }
      }
      "return a None object" when {
        "called with a position that is not in the list" in {
          val firstResult = remove(nonEmpty, nonEmpty.size + 2)
          assert(firstResult === None)
          val secondResult = remove(hugeList, hugeList.size + 2)
          assert(secondResult === None)
        }
      }
    }
  }
}
| jlprat/99-scala-problems | src/test/scala/com/github/jlprat/ninetynine/p20/P20Spec.scala | Scala | apache-2.0 | 2,975 |
package edu.gemini.spModel.gemini.spModel.template
import java.security.Principal
import edu.gemini.pot.sp._
import edu.gemini.pot.sp.memImpl.{MemTemplateGroup, MemProgram, MemFactory}
import edu.gemini.pot.spdb.DBLocalDatabase
import edu.gemini.spModel.core.SPProgramID
import edu.gemini.spModel.gemini.flamingos2.Flamingos2
import edu.gemini.spModel.gemini.gmos.GmosNorthType.DisperserNorth
import edu.gemini.spModel.gemini.gmos.InstGmosNorth
import edu.gemini.spModel.gemini.obscomp.SPProgram
import edu.gemini.spModel.gemini.security.UserRolePrivileges
import edu.gemini.spModel.obs.SPObservation
import edu.gemini.spModel.template.ReapplicationFunctor
import org.junit._
import org.junit.Assert._
import java.util.UUID
/**
 * Tests for [[ReapplicationFunctor]]: re-applying a template to observations that
 * were instantiated from it should reset template-controlled parameters (e.g. the
 * GMOS disperser) while preserving values the observer is allowed to override --
 * the position angle and the custom mask/MDF names (REL-814).
 *
 * Note: the original comments here claimed the tests verify that changes "are now
 * overwritten"; the assertions actually verify the opposite (preservation), so the
 * comments have been corrected.
 */
class ReapplicationFunctorTest {

  // Fixed node keys so the program graph is reproducible across runs.
  val PROG_KEY = new SPNodeKey("6d026d22-d642-4f50-8f99-ab666e286d47")
  val TEMPLATE_KEY = new SPNodeKey("6d026d22-d642-4f50-8f99-ab666e286d45")
  val TEMPLATE_FOLDER_KEY = new SPNodeKey("6d026d22-d642-4f50-8f99-ab666e286d48")
  val TEMPLATE_GROUP_KEY = new SPNodeKey("6d026d22-d642-4f50-8f99-ab666e286d49")
  val TEMPLATE_GMOS_OBS_KEY = new SPNodeKey("6d026d22-d642-4f50-8f99-ab666e286d46")
  val TEMPLATE_F2_OBS_KEY = new SPNodeKey("6d026d22-d642-4f50-8f99-ab666e286d43")
  val GMOS_OBSERVATION_KEY = new SPNodeKey("2f7a8b79-1d10-416a-baf3-9b982f77da53")
  val F2_OBSERVATION_KEY = new SPNodeKey("2f7a8b79-1d10-416a-baf3-9b982f77da51")

  val dbUuid = UUID.randomUUID
  val db = DBLocalDatabase.createTransient()
  val fact = db.getFactory.asInstanceOf[MemFactory]
  val id = "GS-2015A-Q-1"
  val templateProgram = MemProgram.create(TEMPLATE_KEY, SPProgramID.toProgramID(id), dbUuid)

  /**
   * Builds the fixture program: a template group holding one GMOS-N and one F2
   * template observation, plus two observations instantiated from those templates
   * whose position angles and custom mask names deliberately differ from the
   * template values.
   */
  @Before
  def before() {
    // Build the template
    val templateProg = new SPProgram
    templateProgram.setDataObject(templateProg)
    val templateFolder = fact.createTemplateFolder(templateProgram, TEMPLATE_FOLDER_KEY)
    templateProgram.setTemplateFolder(templateFolder)
    val templateGroup = new MemTemplateGroup(templateProgram, TEMPLATE_GROUP_KEY)

    // The template observation with GMOS-N
    val templateGmosObs = fact.createObservation(templateProgram, TEMPLATE_GMOS_OBS_KEY)
    val tGmosObscomps = new java.util.ArrayList[ISPObsComponent]
    val tGmos = new InstGmosNorth
    // The template sets the position angle to 120
    tGmos.setPosAngle(120)
    // Disperser on template is mirror
    tGmos.setDisperser(DisperserNorth.MIRROR)
    // Set a custom MDF name
    tGmos.setFPUnitCustomMask("Custom name on the template")
    val tGmosObscomp = fact.doCreateObsComponent(templateProgram, tGmos.getType, TEMPLATE_GMOS_OBS_KEY)
    tGmosObscomp.setDataObject(tGmos)
    tGmosObscomps.add(tGmosObscomp)
    templateGmosObs.setObsComponents(tGmosObscomps)
    templateGroup.addObservation(templateGmosObs)

    // The template observation with Flamingos2
    val templatef2Obs = fact.createObservation(templateProgram, TEMPLATE_F2_OBS_KEY)
    val tf2Obscomps = new java.util.ArrayList[ISPObsComponent]
    val tf2 = new Flamingos2
    // The template sets the position angle to 150
    tf2.setPosAngle(150)
    // Set a custom fpu mask name
    tf2.setFpuCustomMask("Custom mask on the template")
    val tf2Obscomp = fact.doCreateObsComponent(templateProgram, tf2.getType, TEMPLATE_F2_OBS_KEY)
    tf2Obscomp.setDataObject(tf2)
    tf2Obscomps.add(tf2Obscomp)
    templatef2Obs.setObsComponents(tf2Obscomps)
    templateGroup.addObservation(templatef2Obs)

    templateFolder.addTemplateGroup(templateGroup)

    // Build an observation with GMOS, simulated as instantiated from the GMOS template
    val gmosObs = fact.createObservation(templateProgram, GMOS_OBSERVATION_KEY)
    val gmosSpObs = new SPObservation
    gmosSpObs.setTitle("Test Observation")
    gmosSpObs.setOriginatingTemplate(TEMPLATE_GMOS_OBS_KEY)
    gmosObs.setDataObject(gmosSpObs)
    gmosObs.setSeqComponent(fact.createSeqComponent(templateProgram, SPComponentType.OBSERVER_OBSERVE, GMOS_OBSERVATION_KEY))
    val gmosObscomps = new java.util.ArrayList[ISPObsComponent]
    val gmos = new InstGmosNorth
    // The observation overrides the PA, the disperser and the custom MDF name
    gmos.setPosAngle(99)
    gmos.setDisperser(DisperserNorth.B1200_G5301)
    gmos.setFPUnitCustomMask("Custom name on the observation")
    val gmosObscomp = fact.doCreateObsComponent(templateProgram, gmos.getType, GMOS_OBSERVATION_KEY)
    gmosObscomp.setDataObject(gmos)
    gmosObscomps.add(gmosObscomp)
    gmosObs.setObsComponents(gmosObscomps)

    // Build an observation with F2, simulated as instantiated from the F2 template
    val f2Obs = fact.createObservation(templateProgram, F2_OBSERVATION_KEY)
    val f2SpObs = new SPObservation
    f2SpObs.setTitle("Test Observation")
    f2SpObs.setOriginatingTemplate(TEMPLATE_F2_OBS_KEY)
    f2Obs.setDataObject(f2SpObs)
    f2Obs.setSeqComponent(fact.createSeqComponent(templateProgram, SPComponentType.OBSERVER_OBSERVE, F2_OBSERVATION_KEY))
    val f2Obscomps = new java.util.ArrayList[ISPObsComponent]
    val f2 = new Flamingos2
    // The observation overrides the PA and the custom mask name
    f2.setPosAngle(99)
    f2.setFpuCustomMask("Custom name on the f2 observation")
    val f2Obscomp = fact.doCreateObsComponent(templateProgram, f2.getType, F2_OBSERVATION_KEY)
    f2Obscomp.setDataObject(f2)
    f2Obscomps.add(f2Obscomp)
    f2Obs.setObsComponents(f2Obscomps)

    templateProgram.addObservation(gmosObs)
    templateProgram.addObservation(f2Obs)
    // Put the program on the DB
    db.put(templateProgram)
  }

  /** Runs the re-application functor over both observations in the program. */
  private def runReapplication(): Unit = {
    val functor = new ReapplicationFunctor(UserRolePrivileges.STAFF)
    functor.add(templateProgram.getAllObservations.get(0))
    functor.add(templateProgram.getAllObservations.get(1))
    functor.execute(db, null, new java.util.HashSet[Principal])
  }

  /**
   * Verify that the observer's position angle is NOT overwritten on re-apply,
   * while the template-controlled disperser IS reset to the template value.
   */
  @Test
  def testReApplyPA() {
    runReapplication()
    val gmosAfterReapply = db.lookupObservationByID(new SPObservationID("GS-2015A-Q-1-3")).getObsComponents.get(0).getDataObject.asInstanceOf[InstGmosNorth]
    // Check that the Position Angle was preserved
    assertEquals(99, gmosAfterReapply.getPosAngle, 0)
    // But the Disperser was reset
    assertEquals(DisperserNorth.MIRROR, gmosAfterReapply.getDisperser)
  }

  /**
   * REL-814: verify that the observer's custom GMOS MDF mask name is NOT
   * overwritten on re-apply.
   */
  @Test
  def testReApplyCustomMDF() {
    runReapplication()
    val gmosAfterReapply = db.lookupObservationByID(new SPObservationID("GS-2015A-Q-1-3")).getObsComponents.get(0).getDataObject.asInstanceOf[InstGmosNorth]
    // Check that the Custom Mask name is preserved
    assertEquals("Custom name on the observation", gmosAfterReapply.getFPUnitCustomMask)
  }

  /**
   * REL-814: verify that the observer's custom F2 mask name is NOT overwritten
   * on re-apply.
   */
  @Test
  def testReApplyCustomMaskNameF2() {
    runReapplication()
    val f2AfterReapply = db.lookupObservationByID(new SPObservationID("GS-2015A-Q-1-4")).getObsComponents.get(0).getDataObject.asInstanceOf[Flamingos2]
    // Check that the Custom Mask name is preserved
    assertEquals("Custom name on the f2 observation", f2AfterReapply.getFpuCustomMask)
  }
}
/*
* Copyright 2001-2009 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.fixture
import org.scalatest._
import events.TestFailed
import org.scalatest.exceptions.DuplicateTestNameException
import org.scalatest.exceptions.NotAllowedException
import org.scalatest.exceptions.TestFailedException
import org.scalatest.exceptions.TestRegistrationClosedException
import org.scalatest.events.InfoProvided
class FeatureSpecSpec extends org.scalatest.FunSpec with SharedHelpers {
describe("A fixture.FeatureSpec") {
it("should return the test names in order of registration from testNames") {
val a = new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
scenario("should do that") { fixture =>
}
scenario("should do this") { fixture =>
}
}
expectResult(List("Scenario: should do that", "Scenario: should do this")) {
a.testNames.iterator.toList
}
val b = new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
}
expectResult(List[String]()) {
b.testNames.iterator.toList
}
val c = new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
scenario("should do this") { fixture =>
}
scenario("should do that") { fixture =>
}
}
expectResult(List("Scenario: should do this", "Scenario: should do that")) {
c.testNames.iterator.toList
}
}
it("should throw NotAllowedException if a duplicate scenario name registration is attempted") {
intercept[DuplicateTestNameException] {
new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
scenario("test this") { fixture =>
}
scenario("test this") { fixture =>
}
}
}
intercept[DuplicateTestNameException] {
new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
scenario("test this") { fixture =>
}
ignore("test this") { fixture =>
}
}
}
intercept[DuplicateTestNameException] {
new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
ignore("test this") { fixture =>
}
ignore("test this") { fixture =>
}
}
}
intercept[DuplicateTestNameException] {
new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
ignore("test this") { fixture =>
}
scenario("test this") { fixture =>
}
}
}
}
it("should pass in the fixture to every test method") {
val a = new FeatureSpec {
type FixtureParam = String
val hello = "Hello, world!"
def withFixture(test: OneArgTest) {
test(hello)
}
scenario("should do this") { fixture =>
assert(fixture === hello)
}
scenario("should do that") { fixture =>
assert(fixture === hello)
}
}
val rep = new EventRecordingReporter
a.run(None, Args(rep))
assert(!rep.eventsReceived.exists(_.isInstanceOf[TestFailed]))
}
it("should throw NullPointerException if a null test tag is provided") {
// scenario
intercept[NullPointerException] {
new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
scenario("hi", null) { fixture => }
}
}
val caught = intercept[NullPointerException] {
new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
scenario("hi", mytags.SlowAsMolasses, null) { fixture => }
}
}
assert(caught.getMessage === "a test tag was null")
intercept[NullPointerException] {
new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
scenario("hi", mytags.SlowAsMolasses, null, mytags.WeakAsAKitten) { fixture => }
}
}
// ignore
intercept[NullPointerException] {
new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
ignore("hi", null) { fixture => }
}
}
val caught2 = intercept[NullPointerException] {
new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
ignore("hi", mytags.SlowAsMolasses, null) { fixture => }
}
}
assert(caught2.getMessage === "a test tag was null")
intercept[NullPointerException] {
new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) {}
ignore("hi", mytags.SlowAsMolasses, null, mytags.WeakAsAKitten) { fixture => }
}
}
}
// Minimal fixture suite used to verify which scenarios actually run for a given
// testName argument: each scenario flips its flag when executed.
class TestWasCalledSuite extends FeatureSpec {
  type FixtureParam = String
  def withFixture(test: OneArgTest) { test("hi") }
  var theTestThisCalled = false  // set when scenario "this" runs
  var theTestThatCalled = false  // set when scenario "that" runs
  scenario("this") { fixture => theTestThisCalled = true }
  scenario("that") { fixture => theTestThatCalled = true }
}
it("should execute all tests when run is called with testName None") {
val b = new TestWasCalledSuite
b.run(None, Args(SilentReporter))
assert(b.theTestThisCalled)
assert(b.theTestThatCalled)
}
it("should execute one test when run is called with a defined testName") {
val a = new TestWasCalledSuite
a.run(Some("Scenario: this"), Args(SilentReporter))
assert(a.theTestThisCalled)
assert(!a.theTestThatCalled)
}
it("should report as ignored, and not run, tests marked ignored") {
val a = new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
scenario("test this") { fixture => theTestThisCalled = true }
scenario("test that") { fixture => theTestThatCalled = true }
}
val repA = new TestIgnoredTrackingReporter
a.run(None, Args(repA))
assert(!repA.testIgnoredReceived)
assert(a.theTestThisCalled)
assert(a.theTestThatCalled)
val b = new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
ignore("test this") { fixture => theTestThisCalled = true }
scenario("test that") { fixture => theTestThatCalled = true }
}
val repB = new TestIgnoredTrackingReporter
b.run(None, Args(repB))
assert(repB.testIgnoredReceived)
assert(repB.lastEvent.isDefined)
assert(repB.lastEvent.get.testName endsWith "test this")
assert(!b.theTestThisCalled)
assert(b.theTestThatCalled)
val c = new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
scenario("test this") { fixture => theTestThisCalled = true }
ignore("test that") { fixture => theTestThatCalled = true }
}
val repC = new TestIgnoredTrackingReporter
c.run(None, Args(repC))
assert(repC.testIgnoredReceived)
assert(repC.lastEvent.isDefined)
assert(repC.lastEvent.get.testName endsWith "test that", repC.lastEvent.get.testName)
assert(c.theTestThisCalled)
assert(!c.theTestThatCalled)
// The order I want is order of appearance in the file.
// Will try and implement that tomorrow. Subtypes will be able to change the order.
val d = new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
ignore("test this") { fixture => theTestThisCalled = true }
ignore("test that") { fixture => theTestThatCalled = true }
}
val repD = new TestIgnoredTrackingReporter
d.run(None, Args(repD))
assert(repD.testIgnoredReceived)
assert(repD.lastEvent.isDefined)
assert(repD.lastEvent.get.testName endsWith "test that") // last because should be in order of appearance
assert(!d.theTestThisCalled)
assert(!d.theTestThatCalled)
}
it("should ignore a test marked as ignored if run is invoked with that testName") {
// If I provide a specific testName to run, then it should ignore an Ignore on that test
// method and actually invoke it.
val e = new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
ignore("test this") { fixture => theTestThisCalled = true }
scenario("test that") { fixture => theTestThatCalled = true }
}
val repE = new TestIgnoredTrackingReporter
e.run(Some("Scenario: test this"), Args(repE))
assert(repE.testIgnoredReceived)
assert(!e.theTestThisCalled)
assert(!e.theTestThatCalled)
}
it("should run only those tests selected by the tags to include and exclude sets") {
// Nothing is excluded
val a = new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
scenario("test this", mytags.SlowAsMolasses) { fixture => theTestThisCalled = true }
scenario("test that") { fixture => theTestThatCalled = true }
}
val repA = new TestIgnoredTrackingReporter
a.run(None, Args(repA))
assert(!repA.testIgnoredReceived)
assert(a.theTestThisCalled)
assert(a.theTestThatCalled)
// SlowAsMolasses is included, one test should be excluded
val b = new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
scenario("test this", mytags.SlowAsMolasses) { fixture => theTestThisCalled = true }
scenario("test that") { fixture => theTestThatCalled = true }
}
val repB = new TestIgnoredTrackingReporter
b.run(None, Args(repB, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), Map(), None, new Tracker, Set.empty))
assert(!repB.testIgnoredReceived)
assert(b.theTestThisCalled)
assert(!b.theTestThatCalled)
// SlowAsMolasses is included, and both tests should be included
val c = new FeatureSpec {
  type FixtureParam = String
  def withFixture(test: OneArgTest) { test("hi") }
  var theTestThisCalled = false
  var theTestThatCalled = false
  scenario("test this", mytags.SlowAsMolasses) { fixture => theTestThisCalled = true }
  scenario("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true }
}
val repC = new TestIgnoredTrackingReporter
// Bug fix: the original passed repB here, so repC never received any events and
// the assertion below was vacuously true. Run suite c with its own reporter.
c.run(None, Args(repC, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), Map(), None, new Tracker, Set.empty))
assert(!repC.testIgnoredReceived)
assert(c.theTestThisCalled)
assert(c.theTestThatCalled)
// SlowAsMolasses is included. both tests should be included but one ignored
val d = new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
ignore("test this", mytags.SlowAsMolasses) { fixture => theTestThisCalled = true }
scenario("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true }
}
val repD = new TestIgnoredTrackingReporter
d.run(None, Args(repD, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.Ignore")), Map(), None, new Tracker, Set.empty))
assert(repD.testIgnoredReceived)
assert(!d.theTestThisCalled)
assert(d.theTestThatCalled)
// SlowAsMolasses included, FastAsLight excluded
val e = new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
scenario("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true }
scenario("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true }
scenario("test the other") { fixture => theTestTheOtherCalled = true }
}
val repE = new TestIgnoredTrackingReporter
e.run(None, Args(repE, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
Map(), None, new Tracker, Set.empty))
assert(!repE.testIgnoredReceived)
assert(!e.theTestThisCalled)
assert(e.theTestThatCalled)
assert(!e.theTestTheOtherCalled)
// An Ignored test that was both included and excluded should not generate a TestIgnored event
val f = new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
ignore("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true }
scenario("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true }
scenario("test the other") { fixture => theTestTheOtherCalled = true }
}
val repF = new TestIgnoredTrackingReporter
f.run(None, Args(repF, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
Map(), None, new Tracker, Set.empty))
assert(!repF.testIgnoredReceived)
assert(!f.theTestThisCalled)
assert(f.theTestThatCalled)
assert(!f.theTestTheOtherCalled)
// An Ignored test that was not included should not generate a TestIgnored event
val g = new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
scenario("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true }
scenario("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true }
ignore("test the other") { fixture => theTestTheOtherCalled = true }
}
val repG = new TestIgnoredTrackingReporter
g.run(None, Args(repG, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
Map(), None, new Tracker, Set.empty))
assert(!repG.testIgnoredReceived)
assert(!g.theTestThisCalled)
assert(g.theTestThatCalled)
assert(!g.theTestTheOtherCalled)
// No tagsToInclude set, FastAsLight excluded
val h = new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
scenario("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true }
scenario("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true }
scenario("test the other") { fixture => theTestTheOtherCalled = true }
}
val repH = new TestIgnoredTrackingReporter
h.run(None, Args(repH, Stopper.default, Filter(None, Set("org.scalatest.FastAsLight")), Map(), None, new Tracker, Set.empty))
assert(!repH.testIgnoredReceived)
assert(!h.theTestThisCalled)
assert(h.theTestThatCalled)
assert(h.theTestTheOtherCalled)
// No tagsToInclude set, SlowAsMolasses excluded
val i = new FeatureSpec {
type FixtureParam = String
def withFixture(test: OneArgTest) { test("hi") }
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
scenario("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true }
scenario("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true }
scenario("test the other") { fixture => theTestTheOtherCalled = true }
}
val repI = new TestIgnoredTrackingReporter
i.run(None, Args(repI, Stopper.default, Filter(None, Set("org.scalatest.SlowAsMolasses")), Map(), None, new Tracker, Set.empty))
assert(!repI.testIgnoredReceived)
assert(!i.theTestThisCalled)
assert(!i.theTestThatCalled)
assert(i.theTestTheOtherCalled)
// No tagsToInclude set, SlowAsMolasses excluded, TestIgnored should not be received on excluded ones
val j = new FeatureSpec {
  type FixtureParam = String
  def withFixture(test: OneArgTest) { test("hi") }
  var theTestThisCalled = false
  var theTestThatCalled = false
  var theTestTheOtherCalled = false
  ignore("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true }
  ignore("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true }
  scenario("test the other") { fixture => theTestTheOtherCalled = true }
}
val repJ = new TestIgnoredTrackingReporter
j.run(None, Args(repJ, Stopper.default, Filter(None, Set("org.scalatest.SlowAsMolasses")), Map(), None, new Tracker, Set.empty))
// Bug fix: the original asserted on repI (the previous section's reporter) instead
// of repJ, so this section's reporter was never actually checked.
assert(!repJ.testIgnoredReceived)
assert(!j.theTestThisCalled)
assert(!j.theTestThatCalled)
assert(j.theTestTheOtherCalled)
      // Same as previous, except Ignore specifically mentioned in excludes set
      val k = new FeatureSpec {
        type FixtureParam = String
        def withFixture(test: OneArgTest) { test("hi") }
        var theTestThisCalled = false
        var theTestThatCalled = false
        var theTestTheOtherCalled = false
        ignore("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { fixture => theTestThisCalled = true }
        ignore("test that", mytags.SlowAsMolasses) { fixture => theTestThatCalled = true }
        ignore("test the other") { fixture => theTestTheOtherCalled = true }
      }
      val repK = new TestIgnoredTrackingReporter
      k.run(None, Args(repK, Stopper.default, Filter(None, Set("org.scalatest.SlowAsMolasses", "org.scalatest.Ignore")), Map(), None, new Tracker, Set.empty))
      assert(repK.testIgnoredReceived) // a test excluded only via the Ignore tag still fires TestIgnored
      assert(!k.theTestThisCalled)
      assert(!k.theTestThatCalled)
      assert(!k.theTestTheOtherCalled)
}
    // Verifies Suite.expectedTestCount under various Filter configurations:
    // plain counting, ignored tests, tag include/exclude, and Suites aggregation.
    it("should return the correct test count from its expectedTestCount method") {
      // Two plain scenarios: both counted.
      val a = new FeatureSpec {
        type FixtureParam = String
        def withFixture(test: OneArgTest) { test("hi") }
        scenario("test this") { fixture => }
        scenario("test that") { fixture => }
      }
      assert(a.expectedTestCount(Filter()) === 2)
      // An ignored scenario is not counted.
      val b = new FeatureSpec {
        type FixtureParam = String
        def withFixture(test: OneArgTest) { test("hi") }
        ignore("test this") { fixture => }
        scenario("test that") { fixture => }
      }
      assert(b.expectedTestCount(Filter()) === 1)
      // Include-only counts just the tagged test; excluding the tag removes it.
      val c = new FeatureSpec {
        type FixtureParam = String
        def withFixture(test: OneArgTest) { test("hi") }
        scenario("test this", mytags.FastAsLight) { fixture => }
        scenario("test that") { fixture => }
      }
      assert(c.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1)
      assert(c.expectedTestCount(Filter(None, Set("org.scalatest.FastAsLight"))) === 1)
      // Combined include + exclude on multi-tagged tests.
      val d = new FeatureSpec {
        type FixtureParam = String
        def withFixture(test: OneArgTest) { test("hi") }
        scenario("test this", mytags.FastAsLight, mytags.SlowAsMolasses) { fixture => }
        scenario("test that", mytags.SlowAsMolasses) { fixture => }
        scenario("test the other thing") { fixture => }
      }
      assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1)
      assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) === 1)
      assert(d.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) === 1)
      assert(d.expectedTestCount(Filter()) === 3)
      // Like d, but the untagged test is ignored, so it never counts.
      val e = new FeatureSpec {
        type FixtureParam = String
        def withFixture(test: OneArgTest) { test("hi") }
        scenario("test this", mytags.FastAsLight, mytags.SlowAsMolasses) { fixture => }
        scenario("test that", mytags.SlowAsMolasses) { fixture => }
        ignore("test the other thing") { fixture => }
      }
      assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1)
      assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) === 1)
      assert(e.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) === 0)
      assert(e.expectedTestCount(Filter()) === 2)
      // A Suites aggregate sums its nested suites' counts: 2 + 1 + 2 + 3 + 2.
      val f = new Suites(a, b, c, d, e)
      assert(f.expectedTestCount(Filter()) === 10)
    }
    // A body of (pending), or a body that ends by evaluating `pending`,
    // should report TestPending rather than success or failure.
    it("should generate a TestPending message when the test body is (pending)") {
      val a = new FeatureSpec {
        type FixtureParam = String
        val hello = "Hello, world!"
        def withFixture(test: OneArgTest) {
          test(hello)
        }
        scenario("should do this") (pending)
        scenario("should do that") { fixture =>
          assert(fixture === hello)
        }
        scenario("should do something else") { fixture =>
          assert(fixture === hello)
          pending
        }
      }
      val rep = new EventRecordingReporter
      a.run(None, Args(rep))
      val tp = rep.testPendingEventsReceived
      assert(tp.size === 2) // "should do this" and "should do something else"
    }
    // Non-fatal throwables (including plain Error and Throwable) thrown from a
    // test body are reported as test failures, not propagated.
    it("should generate a test failure if a Throwable, or an Error other than direct Error subtypes " +
            "known in JDK 1.5, excluding AssertionError") {
      val a = new FeatureSpec {
        type FixtureParam = String
        val hello = "Hello, world!"
        def withFixture(test: OneArgTest) {
          test(hello)
        }
        scenario("throws AssertionError") { s => throw new AssertionError }
        scenario("throws plain old Error") { s => throw new Error }
        scenario("throws Throwable") { s => throw new Throwable }
      }
      val rep = new EventRecordingReporter
      a.run(None, Args(rep))
      val tf = rep.testFailedEventsReceived
      assert(tf.size === 3)
    }
    // Fatal errors (e.g. OutOfMemoryError) must escape run() and abort the suite.
    it("should propagate out Errors that are direct subtypes of Error in JDK 1.5, other than " +
            "AssertionError, causing Suites and Runs to abort.") {
      val a = new FeatureSpec {
        type FixtureParam = String
        val hello = "Hello, world!"
        def withFixture(test: OneArgTest) {
          test(hello)
        }
        scenario("throws AssertionError") { s => throw new OutOfMemoryError }
      }
      intercept[OutOfMemoryError] {
        a.run(None, Args(SilentReporter))
      }
    }
/*
it("should send InfoProvided events with aboutAPendingTest set to true for info " +
"calls made from a test that is pending") {
val a = new FeatureSpec with GivenWhenThen {
type FixtureParam = String
val hello = "Hello, world!"
def withFixture(test: OneArgTest) {
test(hello)
}
scenario("should do something else") { s =>
given("two integers")
when("one is subracted from the other")
then("the result is the difference between the two numbers")
pending
}
}
val rep = new EventRecordingReporter
a.run(None, Args(rep))
val testPending = rep.testPendingEventsReceived
assert(testPending.size === 1)
val recordedEvents = testPending(0).recordedEvents
assert(recordedEvents.size === 3)
for (event <- recordedEvents) {
val ip = event.asInstanceOf[InfoProvided]
assert(ip.aboutAPendingTest.isDefined && ip.aboutAPendingTest.get)
}
}
it("should send InfoProvided events with aboutAPendingTest set to false for info " +
"calls made from a test that is not pending") {
val a = new FeatureSpec with GivenWhenThen {
type FixtureParam = String
val hello = "Hello, world!"
def withFixture(test: OneArgTest) {
test(hello)
}
scenario("should do something else") { s =>
given("two integers")
when("one is subracted from the other")
then("the result is the difference between the two numbers")
assert(1 + 1 === 2)
}
}
val rep = new EventRecordingReporter
a.run(None, Args(rep))
val testSucceeded = rep.testSucceededEventsReceived
assert(testSucceeded.size === 1)
val recordedEvents = testSucceeded(0).recordedEvents
assert(recordedEvents.size === 3)
for (event <- recordedEvents) {
val ip = event.asInstanceOf[InfoProvided]
assert(ip.aboutAPendingTest.isDefined && !ip.aboutAPendingTest.get)
}
}
*/
    // A fixture.FeatureSpec accepts both `{ () => ... }` (no-arg) and
    // `{ fixture => ... }` (one-arg) test bodies in the same suite.
    it("should allow both tests that take fixtures and tests that don't") {
      val a = new FeatureSpec {
        type FixtureParam = String
        def withFixture(test: OneArgTest) {
          test("Hello, world!")
        }
        var takesNoArgsInvoked = false
        scenario("take no args") { () =>
          takesNoArgsInvoked = true
        }
        var takesAFixtureInvoked = false
        scenario("takes a fixture") { s => takesAFixtureInvoked = true }
      }
      a.run(None, Args(SilentReporter))
      assert(a.testNames.size === 2, a.testNames)
      assert(a.takesNoArgsInvoked)
      assert(a.takesAFixtureInvoked)
    }
    // Bodies whose last expression is not Unit (e.g. Boolean) must still register.
    it("should work with test functions whose inferred result type is not Unit") {
      val a = new FeatureSpec {
        type FixtureParam = String
        def withFixture(test: OneArgTest) {
          test("Hello, world!")
        }
        var takesNoArgsInvoked = false
        scenario("should take no args") { () =>
          takesNoArgsInvoked = true; true
        }
        var takesAFixtureInvoked = false
        scenario("should take a fixture") { s => takesAFixtureInvoked = true; true }
      }
      // Registration alone must not execute the bodies.
      assert(!a.takesNoArgsInvoked)
      assert(!a.takesAFixtureInvoked)
      a.run(None, Args(SilentReporter))
      assert(a.testNames.size === 2, a.testNames)
      assert(a.takesNoArgsInvoked)
      assert(a.takesAFixtureInvoked)
    }
    // Same for ignored tests: they register (and fire TestIgnored) but never run.
    it("should work with ignored tests whose inferred result type is not Unit") {
      val a = new FeatureSpec {
        type FixtureParam = String
        def withFixture(test: OneArgTest) { test("hi") }
        var theTestThisCalled = false
        var theTestThatCalled = false
        ignore("should test this") { () =>
          theTestThisCalled = true; "hi"
        }
        ignore("should test that") { fixture => theTestThatCalled = true; 42 }
      }
      assert(!a.theTestThisCalled)
      assert(!a.theTestThatCalled)
      val reporter = new EventRecordingReporter
      a.run(None, Args(reporter))
      assert(reporter.testIgnoredEventsReceived.size === 2)
      assert(!a.theTestThisCalled)
      assert(!a.theTestThatCalled)
    }
    // No-arg test bodies are routed through withFixture(NoArgTest), one-arg
    // bodies through withFixture(OneArgTest) — these three tests pin that routing.
    it("should pass a NoArgTest to withFixture for tests that take no fixture") {
      class MySpec extends FeatureSpec {
        type FixtureParam = String
        var aNoArgTestWasPassed = false
        var aOneArgTestWasPassed = false
        override def withFixture(test: NoArgTest) {
          aNoArgTestWasPassed = true
        }
        def withFixture(test: OneArgTest) {
          aOneArgTestWasPassed = true
        }
        scenario("something") { () =>
          assert(1 + 1 === 2)
        }
      }
      val s = new MySpec
      s.run(None, Args(SilentReporter))
      assert(s.aNoArgTestWasPassed)
      assert(!s.aOneArgTestWasPassed)
    }
    it("should not pass a NoArgTest to withFixture for tests that take a Fixture") {
      class MySpec extends FeatureSpec {
        type FixtureParam = String
        var aNoArgTestWasPassed = false
        var aOneArgTestWasPassed = false
        override def withFixture(test: NoArgTest) {
          aNoArgTestWasPassed = true
        }
        def withFixture(test: OneArgTest) {
          aOneArgTestWasPassed = true
        }
        scenario("something") { fixture =>
          assert(1 + 1 === 2)
        }
      }
      val s = new MySpec
      s.run(None, Args(SilentReporter))
      assert(!s.aNoArgTestWasPassed)
      assert(s.aOneArgTestWasPassed)
    }
    // The NoArgTest handed to withFixture must actually wrap the test body.
    it("should pass a NoArgTest that invokes the no-arg test when the " +
      "NoArgTest's no-arg apply method is invoked") {
      class MySuite extends FeatureSpec {
        type FixtureParam = String
        var theNoArgTestWasInvoked = false
        def withFixture(test: OneArgTest) {
          // Shouldn't be called, but just in case don't invoke a OneArgTest
        }
        scenario("something") { () =>
          theNoArgTestWasInvoked = true
        }
      }
      val s = new MySuite
      s.run(None, Args(SilentReporter))
      assert(s.theNoArgTestWasInvoked)
    }
    // Registration methods (feature / scenario / ignore) invoked from inside a
    // running test body must be rejected: registration is closed once tests run.
    // Each case below expects a TestFailedException reported for the offending test.
    describe("(when a nesting rule has been violated)") {
      it("should, if they call a feature from within an scenario clause, result in a TestFailedException when running the test") {
        class MySpec extends FeatureSpec {
          type FixtureParam = String
          def withFixture(test: OneArgTest) { test("hi") }
          scenario("should blow up") { fixture =>
            feature("in the wrong place, at the wrong time") {
            }
          }
        }
        val spec = new MySpec
        ensureTestFailedEventReceived(spec, "Scenario: should blow up")
      }
      it("should, if they call a feature with a nested it from within an it clause, result in a TestFailedException when running the test") {
        class MySpec extends FeatureSpec {
          type FixtureParam = String
          def withFixture(test: OneArgTest) { test("hi") }
          scenario("should blow up") { fixture =>
            feature("in the wrong place, at the wrong time") {
              scenario("should never run") { fixture =>
                assert(1 === 1)
              }
            }
          }
        }
        val spec = new MySpec
        ensureTestFailedEventReceived(spec, "Scenario: should blow up")
      }
      it("should, if they call a nested it from within an it clause, result in a TestFailedException when running the test") {
        class MySpec extends FeatureSpec {
          type FixtureParam = String
          def withFixture(test: OneArgTest) { test("hi") }
          scenario("should blow up") { fixture =>
            scenario("should never run") { fixture =>
              assert(1 === 1)
            }
          }
        }
        val spec = new MySpec
        ensureTestFailedEventReceived(spec, "Scenario: should blow up")
      }
      it("should, if they call a nested it with tags from within an it clause, result in a TestFailedException when running the test") {
        class MySpec extends FeatureSpec {
          type FixtureParam = String
          def withFixture(test: OneArgTest) { test("hi") }
          scenario("should blow up") { fixture =>
            scenario("should never run", mytags.SlowAsMolasses) { fixture =>
              assert(1 === 1)
            }
          }
        }
        val spec = new MySpec
        ensureTestFailedEventReceived(spec, "Scenario: should blow up")
      }
      it("should, if they call a feature with a nested ignore from within an it clause, result in a TestFailedException when running the test") {
        class MySpec extends FeatureSpec {
          type FixtureParam = String
          def withFixture(test: OneArgTest) { test("hi") }
          scenario("should blow up") { fixture =>
            feature("in the wrong place, at the wrong time") {
              ignore("should never run") { fixture =>
                assert(1 === 1)
              }
            }
          }
        }
        val spec = new MySpec
        ensureTestFailedEventReceived(spec, "Scenario: should blow up")
      }
      it("should, if they call a nested ignore from within an it clause, result in a TestFailedException when running the test") {
        class MySpec extends FeatureSpec {
          type FixtureParam = String
          def withFixture(test: OneArgTest) { test("hi") }
          scenario("should blow up") { fixture =>
            ignore("should never run") { fixture =>
              assert(1 === 1)
            }
          }
        }
        val spec = new MySpec
        ensureTestFailedEventReceived(spec, "Scenario: should blow up")
      }
      it("should, if they call a nested ignore with tags from within an it clause, result in a TestFailedException when running the test") {
        class MySpec extends FeatureSpec {
          type FixtureParam = String
          def withFixture(test: OneArgTest) { test("hi") }
          scenario("should blow up") { fixture =>
            ignore("should never run", mytags.SlowAsMolasses) { fixture =>
              assert(1 === 1)
            }
          }
        }
        val spec = new MySpec
        ensureTestFailedEventReceived(spec, "Scenario: should blow up")
      }
      // Unlike the runtime cases above, feature-inside-feature is detected at
      // construction time and raises NotAllowedException.
      it("should, if they call a nested feature from within a feature clause, result in a SuiteAborted event when constructing the FeatureSpec") {
        class MySpec extends FeatureSpec {
          type FixtureParam = String
          def withFixture(test: OneArgTest) { test("hi") }
          feature("should blow up") {
            feature("should never run") {
            }
          }
        }
        val caught =
          intercept[NotAllowedException] {
            new MySpec
          }
        assert(caught.getMessage === "Feature clauses cannot be nested.")
      }
    }
}
  // The OneArgTest handed to withFixture must carry the decorated test name
  // (with the "Scenario: " prefix) and the config map given to run().
  it("should pass the correct test name in the OneArgTest passed to withFixture") {
    val a = new FeatureSpec {
      type FixtureParam = String
      var correctTestNameWasPassed = false
      def withFixture(test: OneArgTest) {
        correctTestNameWasPassed = test.name == "Scenario: should do something"
        test("hi")
      }
      scenario("should do something") { fixture => }
    }
    a.run(None, Args(SilentReporter))
    assert(a.correctTestNameWasPassed)
  }
  it("should pass the correct config map in the OneArgTest passed to withFixture") {
    val a = new FeatureSpec {
      type FixtureParam = String
      var correctConfigMapWasPassed = false
      def withFixture(test: OneArgTest) {
        correctConfigMapWasPassed = (test.configMap == Map("hi" -> 7))
        test("hi")
      }
      scenario("should do something") { fixture => }
    }
    a.run(None, Args(SilentReporter, Stopper.default, Filter(), Map("hi" -> 7), None, new Tracker(), Set.empty))
    assert(a.correctConfigMapWasPassed)
  }
  // Minimal suite (one feature, one scenario) shared by the two prefix tests below.
  class ExamplePrefixSpec extends FeatureSpec {
    type FixtureParam = String
    def withFixture(test: OneArgTest) {
      test("hi")
    }
    feature("A Feature") {
      scenario("A Scenario") { fixture =>
      }
    }
  }
  // Feature text appears in scope events with a "Feature: " prefix.
  it("should prefix feature text with 'Feature: '") {
    val rep = new EventRecordingReporter
    (new ExamplePrefixSpec).run(None, Args(rep))
    val scopeOpened = rep.scopeOpenedEventsReceived
    assert(scopeOpened.size === 1)
    assert(scopeOpened(0).message === "Feature: A Feature")
    val scopeClosed = rep.scopeClosedEventsReceived
    assert(scopeClosed.size === 1)
    assert(scopeClosed(0).message === "Feature: A Feature")
  }
  // Scenario text appears in test events with a "Scenario: " prefix.
  it("should prefix scenario text with 'Scenario: '") {
    val rep = new EventRecordingReporter
    (new ExamplePrefixSpec).run(None, Args(rep))
    val testStarting = rep.testStartingEventsReceived
    assert(testStarting.size === 1)
    assert(testStarting(0).testText === "Scenario: A Scenario")
    val testSucceeded = rep.testSucceededEventsReceived
    assert(testSucceeded.size === 1)
    assert(testSucceeded(0).testText === "Scenario: A Scenario")
  }
  // NOTE: the assertions below check exact source-line offsets via
  // thisLineNumber; never insert or delete lines between a referenced
  // registration/assert line and the thisLineNumber expression that points at it.
  describe("when failure happens") {
    // Failed asserts must be reported with the file name and line of the assert.
    it("should fire TestFailed event with correct stack depth info when test failed") {
      class TestSpec extends FeatureSpec {
        type FixtureParam = String
        def withFixture(test: OneArgTest) { test("a string") }
        scenario("fail scenario") { fixture =>
          assert(1 === 2)
        }
        feature("a feature") {
          scenario("nested fail scenario") { fixture =>
            assert(1 === 2)
          }
        }
      }
      val rep = new EventRecordingReporter
      val s1 = new TestSpec
      s1.run(None, Args(rep))
      assert(rep.testFailedEventsReceived.size === 2)
      assert(rep.testFailedEventsReceived(0).throwable.get.asInstanceOf[TestFailedException].failedCodeFileName.get === "FeatureSpecSpec.scala")
      assert(rep.testFailedEventsReceived(0).throwable.get.asInstanceOf[TestFailedException].failedCodeLineNumber.get === thisLineNumber - 13) // the assert in "fail scenario"
      assert(rep.testFailedEventsReceived(1).throwable.get.asInstanceOf[TestFailedException].failedCodeFileName.get === "FeatureSpecSpec.scala")
      assert(rep.testFailedEventsReceived(1).throwable.get.asInstanceOf[TestFailedException].failedCodeLineNumber.get === thisLineNumber - 11) // the assert in "nested fail scenario"
    }
    // feature-inside-feature aborts construction; the exception must point at
    // the inner feature's registration line.
    it("should generate NotAllowedException with correct stack depth info when has a feature nested inside a feature") {
      class TestSpec extends FeatureSpec {
        type FixtureParam = String
        def withFixture(test: OneArgTest) { test("a string") }
        feature("a feature") {
          feature("inner feature") {
            ignore("nested fail scenario") { fixture =>
              assert(1 === 1)
            }
          }
        }
      }
      val rep = new EventRecordingReporter
      val caught = intercept[NotAllowedException] {
        new TestSpec
      }
      assert(caught.failedCodeFileName.get === "FeatureSpecSpec.scala")
      assert(caught.failedCodeLineNumber.get === thisLineNumber - 12) // the feature("inner feature") registration line
    }
    // scenario registered from inside a running scenario must throw
    // TestRegistrationClosedException pointing at the nested registration line.
    it("should generate TestRegistrationClosedException with correct stack depth info when has an scenario nested inside a scenario") {
      class TestSpec extends FeatureSpec {
        type FixtureParam = String
        var registrationClosedThrown = false
        feature("a feature") {
          scenario("a scenario") { fixture =>
            scenario("nested scenario") { fixture =>
              assert(1 === 2)
            }
          }
        }
        def withFixture(test: OneArgTest) {
          try {
            test("a string")
          }
          catch {
            case e: TestRegistrationClosedException =>
              registrationClosedThrown = true
              throw e
          }
        }
      }
      val rep = new EventRecordingReporter
      val s = new TestSpec
      s.run(None, Args(rep))
      assert(s.registrationClosedThrown == true)
      val testFailedEvents = rep.testFailedEventsReceived
      assert(testFailedEvents.size === 1)
      assert(testFailedEvents(0).throwable.get.getClass() === classOf[TestRegistrationClosedException])
      val trce = testFailedEvents(0).throwable.get.asInstanceOf[TestRegistrationClosedException]
      assert("FeatureSpecSpec.scala" === trce.failedCodeFileName.get)
      assert(trce.failedCodeLineNumber.get === thisLineNumber - 25) // the scenario("nested scenario") registration line
    }
  }
}
| hubertp/scalatest | src/test/scala/org/scalatest/fixture/FeatureSpecSpec.scala | Scala | apache-2.0 | 40,647 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogx.compiler.parser.syntaxtree
import scala.language.postfixOps
import cogx.cogmath.algebra.complex.Complex
/** A wrapper for Complex that allows commutative operations between fields and
  * floats.
  *
  * Wrapping a complex constant lets it appear on the left-hand side of
  * arithmetic with a `Field` (e.g. `c + field`) by delegating to the
  * equivalent, commuted `Field` operation.
  *
  * @author Greg Snider
  */
private[cogx]
class CogComplex(value: Complex) extends Complex(value.real, value.imaginary) {
  def +(that: Field) = that + value                          // value + that == that + value
  def -(that: Field) = (that * -1f) + value                  // value - that == (-that) + value
  def *(that: Field) = that * value                          // value * that == that * value
  def /(that: Field) = CogFunctions.reciprocal(that) * value // value / that == (1 / that) * value

  // Might be worth adding:
  //  def ===(that: Field) = that === value
  //  def !===(that: Field) = that !=== value
}
package org.pignat.bwatnwa
import org.pignat.bwatnwa.util.Utils
// Processing-based view that tracks sketch resizes and forwards (clamped)
// mouse positions to a PointListener via the subclass-supplied point mapping.
abstract class GraphicalViewTransform(pl:PointListener) extends GraphicalView with TransformListener {
  // Processing entry point: fixes the sketch size and selects the 3D renderer.
  override def setup(): Unit = {
    val s = getSize // NOTE(review): `s` is unused — looks like leftover code; confirm getSize has no required side effect and remove
    size(513,512,processing.core.PConstants.P3D);
    //frameRate(60)
  }
  // Hooks implemented by concrete subclasses:
  def handle_resize(x:Int, y:Int) : Unit // invoked from draw() when the sketch dimensions change
  def handle_update() : Unit             // invoked from draw() on every frame, after the resize check
  def handle_point(x:Int, y:Int) : Int   // maps a clamped mouse position to the value reported to `pl`
  // Last known sketch dimensions, used to detect resizes in draw().
  protected var m_width = width
  protected var m_height = height
  override def draw(): Unit =
  {
    // Notify the subclass of a size change before the per-frame update.
    if (width != m_width || height != m_height)
    {
      m_width = width
      m_height = height
      handle_resize(m_width, m_height)
    }
    handle_update()
  }
  override def mousePressed() = {
    // Clamp mouse coordinates to the sketch bounds before mapping to a point.
    pl.point(handle_point(Math.max(Math.min(mouseX, width), 0), Math.max(Math.min(mouseY, height), 0)))
  }
  override def mouseDragged() = {
    // Dragging reports points exactly like a press.
    mousePressed()
  }
}
/*
* ToolsImpl.scala
* (Mellite)
*
* Copyright (c) 2012-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.mellite.impl.timeline
import de.sciss.lucre.synth.Txn
import de.sciss.mellite.{FadeViewMode, RegionViewMode, TimelineTool, TimelineTools, TimelineTrackCanvas}
import de.sciss.model.Change
import de.sciss.model.impl.ModelImpl
import java.awt.event.MouseEvent
import javax.swing.event.MouseInputAdapter
// Holds the mutable tool/view settings of a timeline editor and broadcasts
// every change as a TimelineTools.Update to registered model listeners.
final class ToolsImpl[T <: Txn[T]](canvas: TimelineTrackCanvas[T])
  extends TimelineTools[T] with ModelImpl[TimelineTools.Update[T]] {
  import TimelineTools._
  // Currently active tool; starts out as the cursor tool.
  private[this] var _currentTool: TimelineTool[T, _] = TimelineTool.cursor(canvas)
  def currentTool: TimelineTool[T, _] = _currentTool
  def currentTool_=(value: TimelineTool[T, _]): Unit =
    if (_currentTool != value) {
      val oldTool = _currentTool
      _currentTool = value
      // Swap listeners on the canvas: old tool out first, then the new tool in,
      // seeded with the last known mouse position; finally notify observers.
      oldTool.uninstall(canvas.canvasComponent)
      value  .install(canvas.canvasComponent, Option(lastMouse))
      dispatch(ToolChanged(Change(oldTool, value)))
    }
  // Visual boost factor; changes are only dispatched here, consumers live elsewhere.
  private[this] var _visualBoost: Float = 1f
  def visualBoost: Float = _visualBoost
  def visualBoost_=(value: Float): Unit =
    if (_visualBoost != value) {
      val oldBoost = _visualBoost
      _visualBoost = value
      dispatch(VisualBoostChanged(Change(oldBoost, value)))
    }
  // How fades are rendered; defaults to curve display.
  private[this] var _fadeViewMode: FadeViewMode = FadeViewMode.Curve
  def fadeViewMode: FadeViewMode = _fadeViewMode
  def fadeViewMode_=(value: FadeViewMode): Unit =
    if (_fadeViewMode != value) {
      val oldMode = _fadeViewMode
      _fadeViewMode = value
      dispatch(FadeViewModeChanged(Change(oldMode, value)))
    }
  // How timeline regions are rendered; defaults to titled boxes.
  private[this] var _regionViewMode: RegionViewMode = RegionViewMode.TitledBox
  def regionViewMode: RegionViewMode = _regionViewMode
  def regionViewMode_=(value: RegionViewMode): Unit =
    if (_regionViewMode != value) {
      val oldMode = _regionViewMode
      _regionViewMode = value
      dispatch(RegionViewModeChanged(Change(oldMode, value)))
    }
  // Most recent mouse event over the canvas (null after the mouse exits);
  // handed to a newly installed tool so it can pick up the cursor position.
  private[this] var lastMouse: MouseEvent = _
  private[this] val mia: MouseInputAdapter = new MouseInputAdapter {
    override def mouseEntered (e: MouseEvent): Unit = lastMouse = e
    override def mouseMoved   (e: MouseEvent): Unit = lastMouse = e
    override def mouseDragged (e: MouseEvent): Unit = lastMouse = e
    override def mouseExited  (e: MouseEvent): Unit = lastMouse = null
    // While a button is held, register a bypass on the transport "catch"
    // (presumably the playhead-follow scrolling — confirm) so it does not
    // interfere with the ongoing tool gesture; removed again on release.
    override def mousePressed(e: MouseEvent): Unit =
      canvas.transportCatch.addCatchBypass(mia)
    override def mouseReleased(e: MouseEvent): Unit =
      canvas.transportCatch.removeCatchBypass(mia)
  }
  // constructor
  {
    // Track the mouse on the canvas and install the initial (cursor) tool.
    val cc = canvas.canvasComponent
    cc.peer.addMouseListener      (mia)
    cc.peer.addMouseMotionListener(mia)
    _currentTool.install(cc, Option(lastMouse))
  }
}
package utils
import java.time.LocalDateTime
import scala.concurrent.duration.Duration
import scala.concurrent.{ CanAwait, ExecutionContext, Future }
import scala.util.Try
/**
* Created by henrik on 2017-02-24.
*/
/** A [[scala.concurrent.Future]] tagged with the time at which it was created.
  *
  * Every overridden combinator wraps its result in a new `TimestampedFuture`
  * carrying the *original* creation time, so a value derived through
  * `map`/`filter`/`zip`/... still reports when the initial computation began.
  *
  * @param instance       the underlying future being decorated
  * @param timeOfCreation creation time recorded for this future
  */
case class TimestampedFuture[T](instance: Future[T], timeOfCreation: LocalDateTime) extends Future[T] with Timestamp {

  override def onComplete[U](func: (Try[T]) => U)(implicit executor: ExecutionContext): Unit = instance.onComplete(func)

  override def isCompleted: Boolean = instance.isCompleted

  override def value: Option[Try[T]] = instance.value

  override def ready(atMost: Duration)(implicit permit: CanAwait): TimestampedFuture.this.type = {
    instance.ready(atMost)
    this
  }

  override def result(atMost: Duration)(implicit permit: CanAwait): T = instance.result(atMost)

  override def transform[S](s: (T) => S, f: (Throwable) => Throwable)(implicit executor: ExecutionContext): TimestampedFuture[S] = TimestampedFuture(instance.transform(s, f), timeOfCreation)

  override def map[S](f: (T) => S)(implicit executor: ExecutionContext): TimestampedFuture[S] = TimestampedFuture(instance.map(f), timeOfCreation)

  // Consistency fix: declare the covariant return type like every other
  // combinator here — the implementation already returned a TimestampedFuture,
  // but the signature previously widened it to plain Future[S].
  override def flatMap[S](f: (T) => Future[S])(implicit executor: ExecutionContext): TimestampedFuture[S] = TimestampedFuture(instance.flatMap(f), timeOfCreation)

  override def filter(p: (T) => Boolean)(implicit executor: ExecutionContext): TimestampedFuture[T] = TimestampedFuture(instance.filter(p), timeOfCreation)

  override def collect[S](pf: PartialFunction[T, S])(implicit executor: ExecutionContext): TimestampedFuture[S] = TimestampedFuture(instance.collect(pf), timeOfCreation)

  override def recover[U >: T](pf: PartialFunction[Throwable, U])(implicit executor: ExecutionContext): TimestampedFuture[U] = TimestampedFuture(instance.recover(pf), timeOfCreation)

  override def recoverWith[U >: T](pf: PartialFunction[Throwable, Future[U]])(implicit executor: ExecutionContext): TimestampedFuture[U] = TimestampedFuture(instance.recoverWith(pf), timeOfCreation)

  // Timestamp of this object kept; if `that` is also timestamped, its
  // timestamp is discarded.
  override def zip[U](that: Future[U]): TimestampedFuture[(T, U)] = TimestampedFuture(instance.zip(that), timeOfCreation)

  override def fallbackTo[U >: T](that: Future[U]): TimestampedFuture[U] = TimestampedFuture(instance.fallbackTo(that), timeOfCreation)

  override def mapTo[S](implicit tag: scala.reflect.ClassTag[S]): TimestampedFuture[S] = TimestampedFuture(instance.mapTo(tag), timeOfCreation)

  override def andThen[U](pf: PartialFunction[Try[T], U])(implicit executor: ExecutionContext): TimestampedFuture[T] = TimestampedFuture(instance.andThen(pf), timeOfCreation)
}
/** Factory for [[TimestampedFuture]]: runs `factory(args)` asynchronously and
  * stamps the resulting future with its moment of creation.
  */
object TimestampedFuture {
  def apply[T, A](args: A)(factory: A => T)(implicit executionContext: ExecutionContext): TimestampedFuture[T] =
    TimestampedFuture(Future(factory(args)), LocalDateTime.now())
}
| hnrklssn/game-check-match | app/utils/TimestampedFuture.scala | Scala | apache-2.0 | 3,086 |
package org.phenoscape.kb
import org.apache.jena.query.Query
import org.apache.jena.sparql.core.Var
import org.apache.jena.sparql.expr.ExprVar
import org.apache.jena.sparql.expr.aggregate.AggCountVarDistinct
import org.phenoscape.kb.App.withOwlery
import org.phenoscape.owl.Vocab._
import org.phenoscape.owlet.OwletManchesterSyntaxDataType.SerializableClassExpression
import org.phenoscape.owlet.SPARQLComposer._
import org.phenoscape.scowl._
import org.semanticweb.owlapi.model.{IRI, OWLClassExpression}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
object DataCoverageFigureReport {
private val entities = Set(
"anocleithrum" -> "http://purl.obolibrary.org/obo/UBERON_4000160",
"basipterygium bone" -> "http://purl.obolibrary.org/obo/UBERON_2000623",
"carpal bone" -> "http://purl.obolibrary.org/obo/UBERON_0001435",
"clavicle" -> "http://purl.obolibrary.org/obo/UBERON_0001105",
"cleithrum" -> "http://purl.obolibrary.org/obo/UBERON_0004741",
"coracoid bone" -> "http://purl.obolibrary.org/obo/UBERON_0004743",
"digit" -> "http://purl.obolibrary.org/obo/UBERON_0002544",
"epicoracoid" -> "http://purl.obolibrary.org/obo/UBERON_3000762",
"epipubis" -> "http://purl.obolibrary.org/obo/UBERON_3000884",
"extrascapula" -> "http://purl.obolibrary.org/obo/UBERON_2000663",
"extracleithrum" -> "http://purl.obolibrary.org/obo/UBERON_4200022",
"femur" -> "http://purl.obolibrary.org/obo/UBERON_0000981",
"fibula" -> "http://purl.obolibrary.org/obo/UBERON_0001446",
"humerus" -> "http://purl.obolibrary.org/obo/UBERON_0000976",
"ilium" -> "http://purl.obolibrary.org/obo/UBERON_0001273",
"interclavicle" -> "http://purl.obolibrary.org/obo/UBERON_0011655",
"ischium" -> "http://purl.obolibrary.org/obo/UBERON_0001274",
"manual digit" -> "http://purl.obolibrary.org/obo/UBERON_0002389",
"metacarpal bone" -> "http://purl.obolibrary.org/obo/UBERON_0002374",
"metatarsal bone" -> "http://purl.obolibrary.org/obo/UBERON_0001448",
"paired fin radial bone" -> "http://purl.obolibrary.org/obo/UBERON_1500006",
"pectoral girdle bone" -> "http://purl.obolibrary.org/obo/UBERON_0007829",
"pectoral fin radial bone" -> "http://purl.obolibrary.org/obo/UBERON_2001586",
"pectoral fin lepidotrichium" -> "http://purl.obolibrary.org/obo/UBERON_4000175",
"pectoral girdle skeleton" -> "http://purl.obolibrary.org/obo/UBERON_0007831",
"pedal digit" -> "http://purl.obolibrary.org/obo/UBERON_0001466",
"pelvic fin radial bone" -> "http://purl.obolibrary.org/obo/UBERON_2000508",
"pelvic fin lepidotrichium" -> "http://purl.obolibrary.org/obo/UBERON_4000173",
"pelvic girdle skeleton" -> "http://purl.obolibrary.org/obo/UBERON_0007832",
"pelvic girdle bone/zone" -> "http://purl.obolibrary.org/obo/UBERON_0007830",
"phalanx of manus" -> "http://purl.obolibrary.org/obo/UBERON_0001436",
"phalanx of pes" -> "http://purl.obolibrary.org/obo/UBERON_0001449",
"postcleithrum" -> "http://purl.obolibrary.org/obo/UBERON_2000410",
"pubis" -> "http://purl.obolibrary.org/obo/UBERON_0001275",
"radius bone" -> "http://purl.obolibrary.org/obo/UBERON_0001423",
"scapula" -> "http://purl.obolibrary.org/obo/UBERON_0006849",
"sternum" -> "http://purl.obolibrary.org/obo/UBERON_0000975",
"tarsal bone" -> "http://purl.obolibrary.org/obo/UBERON_0001447",
"tibia" -> "http://purl.obolibrary.org/obo/UBERON_0000979",
"ulna" -> "http://purl.obolibrary.org/obo/UBERON_0001424",
"radial bone" -> "http://purl.obolibrary.org/obo/UBERON_2000271",
"lepidotrichium" -> "http://purl.obolibrary.org/obo/UBERON_4000172"
)
  // Taxa included in the data-coverage figure, as (display label ->
  // Vertebrate Taxonomy Ontology term IRI) pairs. Paired below with the
  // anatomical entities to produce one count per (entity, taxon) combination.
  private val taxa = Set(
    "Acanthostega gunnari" -> "http://purl.obolibrary.org/obo/VTO_9001290",
    "Aztecia" -> "http://purl.obolibrary.org/obo/VTO_9022990",
    "Balanerpeton woodi" -> "http://purl.obolibrary.org/obo/VTO_9000671",
    "Baphetes" -> "http://purl.obolibrary.org/obo/VTO_9016432",
    "Baphetes kirkbyi" -> "http://purl.obolibrary.org/obo/VTO_9000762",
    "Baphetidae" -> "http://purl.obolibrary.org/obo/VTO_9007550",
    "Barameda" -> "http://purl.obolibrary.org/obo/VTO_9022956",
    "Barameda decipiens" -> "http://purl.obolibrary.org/obo/VTO_9002397",
    "Beelarongia" -> "http://purl.obolibrary.org/obo/VTO_9026226",
    "Cabonnichthys burnsi" -> "http://purl.obolibrary.org/obo/VTO_9034475",
    "Canowindra grossi" -> "http://purl.obolibrary.org/obo/VTO_9034461",
    "Capetus palustris" -> "http://purl.obolibrary.org/obo/VTO_9000702",
    "Chirodipterus" -> "http://purl.obolibrary.org/obo/VTO_9010969",
    "Cladarosymblema narrienense" -> "http://purl.obolibrary.org/obo/VTO_9034467",
    "Crassigyrinus scoticus" -> "http://purl.obolibrary.org/obo/VTO_9000765",
    "Densignathus" -> "http://purl.obolibrary.org/obo/VTO_9022020",
    "Diabolepis" -> "http://purl.obolibrary.org/obo/VTO_9010964",
    "Diploceraspis burkei" -> "http://purl.obolibrary.org/obo/VTO_9001328",
    "Diplocercides" -> "http://purl.obolibrary.org/obo/VTO_9011134",
    "Diplocercides heiligenstockiensis" -> "http://purl.obolibrary.org/obo/VTO_9026227",
    "Diplocercides kayseri" -> "http://purl.obolibrary.org/obo/VTO_9026228",
    "Dipnoi" -> "http://purl.obolibrary.org/obo/VTO_0033592",
    "Dipnorhynchus" -> "http://purl.obolibrary.org/obo/VTO_9010965",
    "Dipterus" -> "http://purl.obolibrary.org/obo/VTO_9010803",
    "Ectosteorhachis" -> "http://purl.obolibrary.org/obo/VTO_9011140",
    "Elginerpeton pancheni" -> "http://purl.obolibrary.org/obo/VTO_9032383",
    "Elpistostege" -> "http://purl.obolibrary.org/obo/VTO_9026225",
    "Eucritta melanolimnetes" -> "http://purl.obolibrary.org/obo/VTO_9000749",
    "Eusthenodon" -> "http://purl.obolibrary.org/obo/VTO_9032360",
    "Eusthenopteron foordi" -> "http://purl.obolibrary.org/obo/VTO_9000751",
    "Glyptolepis" -> "http://purl.obolibrary.org/obo/VTO_9011119",
    "Glyptopomus" -> "http://purl.obolibrary.org/obo/VTO_9008340",
    "Gogonasus andrewsae" -> "http://purl.obolibrary.org/obo/VTO_9032353",
    "Gooloogongia" -> "http://purl.obolibrary.org/obo/VTO_9022994",
    "Greererpeton burkemorani" -> "http://purl.obolibrary.org/obo/VTO_9001326",
    "Griphognathus" -> "http://purl.obolibrary.org/obo/VTO_9010794",
    "Guiyu oneiros" -> "http://purl.obolibrary.org/obo/VTO_9030864",
    "Gyroptychius" -> "http://purl.obolibrary.org/obo/VTO_9008342",
    "Holoptychius" -> "http://purl.obolibrary.org/obo/VTO_9033736",
    "Ichthyostega stensioei" -> "http://purl.obolibrary.org/obo/VTO_9000752",
    "Kenichthys" -> "http://purl.obolibrary.org/obo/VTO_9031588",
    "Latimeria" -> "http://purl.obolibrary.org/obo/VTO_0033619",
    "Mandageria fairfaxi" -> "http://purl.obolibrary.org/obo/VTO_9032336",
    "Medoevia" -> "http://purl.obolibrary.org/obo/VTO_9022802",
    "Marsdenichthys longioccipitus" -> "http://purl.obolibrary.org/obo/VTO_9032346",
    "Megalichthys" -> "http://purl.obolibrary.org/obo/VTO_9006420",
    "Neoceratodus" -> "http://purl.obolibrary.org/obo/VTO_0033614",
    "Onychodus" -> "http://purl.obolibrary.org/obo/VTO_9008350",
    "Ossinodus pueri" -> "http://purl.obolibrary.org/obo/VTO_9032794",
    "Osteolepis" -> "http://purl.obolibrary.org/obo/VTO_9008377",
    "Panderichthys rhombolepis" -> "http://purl.obolibrary.org/obo/VTO_9000724",
    "Pederpes finneyae" -> "http://purl.obolibrary.org/obo/VTO_9031042",
    "Phaneropleuron" -> "http://purl.obolibrary.org/obo/VTO_9007375",
    "Platycephalichthys" -> "http://purl.obolibrary.org/obo/VTO_9032995",
    "Porolepis" -> "http://purl.obolibrary.org/obo/VTO_9008373",
    "Powichthys" -> "http://purl.obolibrary.org/obo/VTO_9008372",
    "Psarolepis" -> "http://purl.obolibrary.org/obo/VTO_9021132",
    "Rhizodus" -> "http://purl.obolibrary.org/obo/VTO_9008365",
    "Rhizodopsis" -> "http://purl.obolibrary.org/obo/VTO_9004049",
    "Sauripterus taylori" -> "http://purl.obolibrary.org/obo/VTO_9011673",
    "Screbinodus" -> "http://purl.obolibrary.org/obo/VTO_9022795",
    "Speonesydrion" -> "http://purl.obolibrary.org/obo/VTO_9026879",
    "Strepsodus" -> "http://purl.obolibrary.org/obo/VTO_9022804",
    "Spodichthys buetleri" -> "http://purl.obolibrary.org/obo/VTO_9033057",
    "Strunius" -> "http://purl.obolibrary.org/obo/VTO_9008395",
    "Styloichthys" -> "http://purl.obolibrary.org/obo/VTO_9033408",
    "Tiktaalik roseae" -> "http://purl.obolibrary.org/obo/VTO_9000793",
    "Tinirau clackae" -> "http://purl.obolibrary.org/obo/VTO_9034469",
    "Tristichopterus alatus" -> "http://purl.obolibrary.org/obo/VTO_9033017",
    "Uranolophus" -> "http://purl.obolibrary.org/obo/VTO_9010782",
    "Uronemus" -> "http://purl.obolibrary.org/obo/VTO_9001389",
    "Ventastega curonica" -> "http://purl.obolibrary.org/obo/VTO_9027749",
    "Westlothiana lizziae" -> "http://purl.obolibrary.org/obo/VTO_9031047",
    "Whatcheeria deltae" -> "http://purl.obolibrary.org/obo/VTO_9019883",
    "Whatcheeriidae" -> "http://purl.obolibrary.org/obo/VTO_9031049",
    "Youngolepis" -> "http://purl.obolibrary.org/obo/VTO_9008383"
  )
  /**
   * Runs one count query per (entity, taxon) combination and assembles the
   * results into a single tab-separated report: one line per pair, in the
   * form taxonLabel <TAB> entityLabel <TAB> count.
   *
   * Note: `queryEntry` is invoked eagerly while building `results`, so all
   * SPARQL queries are started before `Future.sequence` gathers them.
   * `entities` is declared earlier in this file — assumed to be
   * (label -> anatomy-term IRI) pairs analogous to `taxa`; verify there.
   */
  def query(): Future[String] = {
    val results = for {
      (entityLabel, entityIRI) <- entities
      (taxonLabel, taxonIRI) <- taxa
    } yield queryEntry(entityIRI, taxonIRI).map { count =>
      s"$taxonLabel\\t$entityLabel\\t$count"
    }
    // Join all per-pair futures and emit one line per result.
    Future.sequence(results).map { entries =>
      entries.mkString("\\n")
    }
  }
private def queryEntry(entityIRI: String, taxonIRI: String): Future[String] = {
val query = buildQuery(Class(taxonIRI), entityIRI)
for {
results <- App.executeSPARQLQuery(query)
} yield if (results.hasNext) results.next.getLiteral("count").getLexicalForm else "0"
}
  //character states annotating the term or its parts
  /**
   * Builds a SPARQL query that counts distinct character states exhibited by
   * taxa subsumed by `taxonClass`, where the described phenotype is inferred
   * (via Owlery reasoning) to involve the entity — either by implying its
   * presence or by being "towards" the entity individual.
   * The distinct-count aggregate is bound to the `count` variable, which
   * `queryEntry` reads from the result set.
   */
  private def buildQuery(taxonClass: OWLClassExpression, entityIRI: String): Query = {
    val entityClass = Class(IRI.create(entityIRI))
    val entityInd = Individual(entityIRI)
    val query = select() from "http://kb.phenoscape.org/" where (bgp(t('taxon, exhibits_state, 'state),
      t('state, describes_phenotype, 'phenotype)),
      withOwlery(t('taxon, rdfsSubClassOf, taxonClass.asOMN)),
      withOwlery(
        t('phenotype, rdfsSubClassOf, ((IMPLIES_PRESENCE_OF some entityClass) or (towards value entityInd)).asOMN)),
      App.BigdataRunPriorFirst)
    // COUNT(DISTINCT ?state) AS ?count — added via the ARQ API since the DSL
    // does not express aggregates directly.
    query.getProject.add(Var.alloc("count"), query.allocAggregate(new AggCountVarDistinct(new ExprVar("state"))))
    query
  }
}
| phenoscape/phenoscape-kb-services | src/main/scala/org/phenoscape/kb/DataCoverageFigureReport.scala | Scala | mit | 10,522 |
///*
// * Copyright (C) 2011 Romain Reuillon
// *
// * This program is free software: you can redistribute it and/or modify
// * it under the terms of the GNU Affero General Public License as published by
// * the Free Software Foundation, either version 3 of the License, or
// * (at your option) any later version.
// *
// * This program is distributed in the hope that it will be useful,
// * but WITHOUT ANY WARRANTY; without even the implied warranty of
// * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// * GNU Affero General Public License for more details.
// *
// * You should have received a copy of the GNU Affero General Public License
// * along with this program. If not, see <http://www.gnu.org/licenses/>.
// */
//
//package org.openmole.plugin.sampling.csv
//
//import java.io.{ File, FileReader }
//
//import au.com.bytecode.opencsv.CSVReader
//import org.openmole.core.context.{ Context, Val }
//import org.openmole.core.exception.UserBadDataError
//import org.openmole.core.dsl._
//import org.scalatest._
//
//import scala.util.Random
//
//class CSVSamplingSpec extends FlatSpec with Matchers {
//
// "CSVSampling" should "detect the correct mapping between csv header defined column" in {
// implicit val rng = new Random(42)
//
// val p1 = Val[String]("col1")
// val p2 = Val[Int]("col2")
//
// val tmpCsvFile = File.createTempFile("tmp", ".csv")
// getClass.getClassLoader.getResourceAsStream("csvTest.csv").copy(tmpCsvFile)
// val reader = new CSVReader(new FileReader(tmpCsvFile))
//
// val sampling =
// CSVSampling(tmpCsvFile) set (
// outputs += p1.mapped
// )
//
// sampling().from(Context.empty).toIterable.head.head.value should equal("first")
//
// val sampling1 =
// CSVSampling(tmpCsvFile) set (outputs += p2.mapped("badName"))
//
// val exception = evaluating { sampling.toSampling.build(Context.empty) } should produce[UserBadDataError]
// }
//}
| openmole/openmole | openmole/plugins/org.openmole.plugin.sampling.csv/src/test/scala/org/openmole/plugin/sampling/csv/CSVSampingSpec.scala | Scala | agpl-3.0 | 1,955 |
/* sbt -- Simple Build Tool
* Copyright 2011 Mark Harrah
*/
package sbt
import java.io.File
import java.net.{ URI, URL }
import compiler.{ Eval, EvalImports }
import classpath.ClasspathUtilities
import scala.annotation.tailrec
import collection.mutable
import Compiler.Compilers
import inc.{ FileValueCache, Locate }
import Project.{ inScope, makeSettings }
import Def.{ isDummy, ScopedKey, ScopeLocal, Setting }
import Keys.{ appConfiguration, baseDirectory, configuration, fullResolvers, fullClasspath, pluginData, streams, thisProject, thisProjectRef, update }
import Keys.{ exportedProducts, loadedBuild, onLoadMessage, resolvedScoped, sbtPlugin, scalacOptions, taskDefinitionKey }
import tools.nsc.reporters.ConsoleReporter
import Attributed.data
import Scope.{ GlobalScope, ThisScope }
import Types.const
import BuildPaths._
import BuildStreams._
import Locate.DefinesClass
object Load {
  // note that there is State passed in but not pulled out
  /**
   * Standard entry point: builds the load configuration (pre-global, then
   * global plugin/settings overlay), optionally enables the sbt-plugin
   * profile, and delegates to `apply` to load the build rooted at
   * `baseDirectory`. Returns the lazy root Eval and the loaded structure.
   */
  def defaultLoad(state: State, baseDirectory: File, log: Logger, isPlugin: Boolean = false, topLevelExtras: List[URI] = Nil): (() => Eval, sbt.BuildStructure) =
    {
      val (base, config, definesClass) = timed("Load.defaultLoad until apply", log) {
        val globalBase = getGlobalBase(state)
        val base = baseDirectory.getCanonicalFile
        val definesClass = FileValueCache(Locate.definesClass _)
        val rawConfig = defaultPreGlobal(state, base, definesClass.get, globalBase, log)
        val config0 = defaultWithGlobal(state, base, rawConfig, globalBase, log)
        // topLevelExtras only applies to the non-plugin load path.
        val config = if (isPlugin) enableSbtPlugin(config0) else config0.copy(extraBuilds = topLevelExtras)
        (base, config, definesClass)
      }
      val result = apply(base, state, config)
      // Release the cached class-location lookups once loading is done.
      definesClass.clear()
      result
    }
  /**
   * Constructs the base LoadBuildConfiguration before any global-plugin or
   * global-settings overlay is applied: staging dir, launcher classpath,
   * compilers, plugin management, and the globally injected settings.
   */
  def defaultPreGlobal(state: State, baseDirectory: File, definesClass: DefinesClass, globalBase: File, log: Logger): sbt.LoadBuildConfiguration =
    {
      val provider = state.configuration.provider
      val scalaProvider = provider.scalaProvider
      val stagingDirectory = getStagingDirectory(state, globalBase).getCanonicalFile
      val loader = getClass.getClassLoader
      // Launcher-provided sbt + Scala jars, with no extra attributes.
      val classpath = Attributed.blankSeq(provider.mainClasspath ++ scalaProvider.jars)
      val compilers = Compiler.compilers(ClasspathOptions.boot)(state.configuration, log)
      val evalPluginDef = EvaluateTask.evalPluginDef(log) _
      val delegates = defaultDelegates
      val initialID = baseDirectory.getName
      val pluginMgmt = PluginManagement(loader)
      val inject = InjectSettings(injectGlobal(state), Nil, const(Nil))
      new sbt.LoadBuildConfiguration(stagingDirectory, classpath, loader, compilers, evalPluginDef, definesClass, delegates,
        EvaluateTask.injectStreams, pluginMgmt, inject, None, Nil, log)
    }
  /**
   * Settings injected into the Global scope of every build: the app
   * configuration, the state's logger wiring, and task-evaluation settings.
   */
  def injectGlobal(state: State): Seq[Setting[_]] =
    (appConfiguration in GlobalScope :== state.configuration) +:
      LogManager.settingsLogger(state) +:
      EvaluateTask.injectSettings
  /**
   * Layers the user's global plugin directory and global *.sbt settings on
   * top of the raw configuration produced by `defaultPreGlobal`.
   */
  def defaultWithGlobal(state: State, base: File, rawConfig: sbt.LoadBuildConfiguration, globalBase: File, log: Logger): sbt.LoadBuildConfiguration =
    {
      val globalPluginsDir = getGlobalPluginsDirectory(state, globalBase)
      val withGlobal = loadGlobal(state, base, globalPluginsDir, rawConfig)
      val globalSettings = configurationSources(getGlobalSettingsDirectory(state, globalBase))
      loadGlobalSettings(base, globalBase, globalSettings, withGlobal)
    }
  /**
   * Registers a deferred compiler for the global *.sbt files as the
   * `projectLoaded` injection. No-op when there are no global settings
   * files or when the build being loaded *is* the global base itself.
   */
  def loadGlobalSettings(base: File, globalBase: File, files: Seq[File], config: sbt.LoadBuildConfiguration): sbt.LoadBuildConfiguration =
    {
      val compiled: ClassLoader => Seq[Setting[_]] =
        if (files.isEmpty || base == globalBase) const(Nil) else buildGlobalSettings(globalBase, files, config)
      config.copy(injectSettings = config.injectSettings.copy(projectLoaded = compiled))
    }
  // We are hiding a bug fix on global setting that was not importing auto imports.
  // Because fixing this via https://github.com/sbt/sbt/pull/2399
  // breaks the source compatibility: https://github.com/sbt/sbt/issues/2415
  @deprecated("Remove this when we can break source compatibility.", "0.13.10")
  private[sbt] def useAutoImportInGlobal = sys.props.get("sbt.global.autoimport") map { _.toLowerCase == "true" } getOrElse false
  /**
   * Returns a function that, given a classloader, evaluates the global *.sbt
   * files against the global plugin classpath and yields their settings.
   * Auto-plugin imports are only used when the opt-in system property above
   * is set (see the source-compatibility note).
   */
  def buildGlobalSettings(base: File, files: Seq[File], config: sbt.LoadBuildConfiguration): ClassLoader => Seq[Setting[_]] =
    {
      val eval = mkEval(data(config.globalPluginClasspath), base, defaultEvalOptions)
      val imports = BuildUtil.baseImports ++
        (// when we can beak the source compat, remove this if and use config.detectedGlobalPlugins.imports
          if (useAutoImportInGlobal) config.detectedGlobalPlugins.imports
          else BuildUtil.importAllRoot(config.globalPluginNames))
      loader => {
        val loaded = EvaluateConfigurations(eval, files, imports)(loader)
        // TODO - We have a potential leak of config-classes in the global directory right now.
        // We need to find a way to clean these safely, or at least warn users about
        // unused class files that could be cleaned when multiple sbt instances are not running.
        loaded.settings
      }
    }
def loadGlobal(state: State, base: File, global: File, config: sbt.LoadBuildConfiguration): sbt.LoadBuildConfiguration =
if (base != global && global.exists) {
val gp = GlobalPlugin.load(global, state, config)
config.copy(globalPlugin = Some(gp))
} else
config
  /**
   * Builds the standard scope-delegation function for a loaded build:
   * given a scope, produces the ordered scopes it falls back to, using
   * project delegates, configuration `extendsConfigs`, and task `extend`.
   */
  def defaultDelegates: sbt.LoadedBuild => Scope => Seq[Scope] = (lb: sbt.LoadedBuild) => {
    val rootProject = getRootProject(lb.units)
    def resolveRef(project: Reference): ResolvedReference = Scope.resolveReference(lb.root, rootProject, project)
    Scope.delegates(
      lb.allProjectRefs,
      (_: ResolvedProject).configurations.map(c => ConfigKey(c.name)),
      resolveRef,
      rootProject,
      project => projectInherit(lb, project),
      (project, config) => configInherit(lb, project, config, rootProject),
      task => task.extend,
      (project, extra) => Nil
    )
  }
  /** Configuration inheritance for a reference; a build reference delegates via its root project. */
  def configInherit(lb: sbt.LoadedBuild, ref: ResolvedReference, config: ConfigKey, rootProject: URI => String): Seq[ConfigKey] =
    ref match {
      case pr: ProjectRef => configInheritRef(lb, pr, config)
      case BuildRef(uri) => configInheritRef(lb, ProjectRef(uri, rootProject(uri)), config)
    }
  /** The configurations that `config` extends within the given project. */
  def configInheritRef(lb: sbt.LoadedBuild, ref: ProjectRef, config: ConfigKey): Seq[ConfigKey] =
    configurationOpt(lb.units, ref.build, ref.project, config).toList.flatMap(_.extendsConfigs).map(c => ConfigKey(c.name))
  /** The projects that `ref` delegates settings lookup to. */
  def projectInherit(lb: sbt.LoadedBuild, ref: ProjectRef): Seq[ProjectRef] =
    getProject(lb.units, ref.build, ref.project).delegates
  // build, load, and evaluate all units.
  // 1) Compile all plugin definitions
  // 2) Evaluate plugin definitions to obtain and compile plugins and get the resulting classpath for the build definition
  // 3) Instantiate Plugins on that classpath
  // 4) Compile all build definitions using plugin classpath
  // 5) Load build definitions.
  // 6) Load all configurations using build definitions and plugins (their classpaths and loaded instances).
  // 7) Combine settings from projects, plugins, and configurations
  // 8) Evaluate settings
  /**
   * Loads the build rooted at `rootBase` and evaluates every setting,
   * producing the fully resolved BuildStructure plus a lazily created Eval
   * for the root unit. Each phase is wrapped in `timed` for load profiling.
   */
  def apply(rootBase: File, s: State, config: sbt.LoadBuildConfiguration): (() => Eval, sbt.BuildStructure) =
    {
      val log = config.log
      // load, which includes some resolution, but can't fill in project IDs yet, so follow with full resolution
      val partBuild = timed("Load.apply: load", log) { load(rootBase, s, config) }
      val loaded = timed("Load.apply: resolveProjects", log) {
        resolveProjects(partBuild)
      }
      val projects = loaded.units
      lazy val rootEval = lazyEval(loaded.units(loaded.root).unit)
      val settings = timed("Load.apply: finalTransforms", log) {
        finalTransforms(buildConfigurations(loaded, getRootProject(projects), config.injectSettings))
      }
      val delegates = timed("Load.apply: config.delegates", log) { config.delegates(loaded) }
      val data = timed("Load.apply: Def.make(settings)...", log) {
        // When settings.size is 100000, Def.make takes around 10s.
        if (settings.size > 10000) {
          log.info(s"Resolving key references (${settings.size} settings) ...")
        }
        Def.make(settings)(delegates, config.scopeLocal, Project.showLoadingKey(loaded))
      }
      // Abort the load if any two projects share a target directory.
      Project.checkTargets(data) foreach sys.error
      val index = timed("Load.apply: structureIndex", log) {
        structureIndex(data, settings, loaded.extra(data), projects)
      }
      val streams = timed("Load.apply: mkStreams", log) { mkStreams(projects, loaded.root, data) }
      (rootEval, new sbt.BuildStructure(projects, loaded.root, settings, data, index, streams, delegates, config.scopeLocal))
    }
  // map dependencies on the special tasks:
  // 1. the scope of 'streams' is the same as the defining key and has the task axis set to the defining key
  // 2. the defining key is stored on constructed tasks: used for error reporting among other things
  // 3. resolvedScoped is replaced with the defining key as a value
  // Note: this must be idempotent.
  /**
   * Applies the three special-key rewrites above to every setting:
   * `mapConstant` substitutes `resolvedScoped`, `mapReferenced` fixes up
   * `streams` scoping, and `mapInit` records the defining key on tasks.
   */
  def finalTransforms(ss: Seq[Setting[_]]): Seq[Setting[_]] =
    {
      // Rewrite references to `streams` so they resolve in the defining
      // key's scope with the task axis filled in.
      def mapSpecial(to: ScopedKey[_]) = new (ScopedKey ~> ScopedKey) {
        def apply[T](key: ScopedKey[T]) =
          if (key.key == streams.key)
            ScopedKey(Scope.fillTaskAxis(Scope.replaceThis(to.scope)(key.scope), to.key), key.key)
          else key
      }
      // Attach the defining key to plain tasks and to the task inside an InputTask.
      def setDefining[T] = (key: ScopedKey[T], value: T) => value match {
        case tk: Task[t]      => setDefinitionKey(tk, key).asInstanceOf[T]
        case ik: InputTask[t] => ik.mapTask(tk => setDefinitionKey(tk, key)).asInstanceOf[T]
        case _                => value
      }
      // Substitute the defining key wherever `resolvedScoped` is referenced.
      def setResolved(defining: ScopedKey[_]) = new (ScopedKey ~> Option) {
        def apply[T](key: ScopedKey[T]): Option[T] =
          key.key match {
            case resolvedScoped.key => Some(defining.asInstanceOf[T])
            case _                  => None
          }
      }
      ss.map(s => s mapConstant setResolved(s.key) mapReferenced mapSpecial(s.key) mapInit setDefining)
    }
def setDefinitionKey[T](tk: Task[T], key: ScopedKey[_]): Task[T] =
if (isDummy(tk)) tk else Task(tk.info.set(Keys.taskDefinitionKey, key), tk.work)
  /**
   * Builds the lookup indices for a loaded build: string -> attribute key,
   * task -> key, trigger maps, the key index, and the aggregation index.
   */
  def structureIndex(data: Settings[Scope], settings: Seq[Setting[_]], extra: KeyIndex => BuildUtil[_], projects: Map[URI, LoadedBuildUnit]): sbt.StructureIndex =
    {
      val keys = Index.allKeys(settings)
      val attributeKeys = Index.attributeKeys(data) ++ keys.map(_.key)
      val scopedKeys = keys ++ data.allKeys((s, k) => ScopedKey(s, k)).toVector
      val projectsMap = projects.mapValues(_.defined.keySet)
      val keyIndex = KeyIndex(scopedKeys.toVector, projectsMap)
      val aggIndex = KeyIndex.aggregate(scopedKeys.toVector, extra(keyIndex), projectsMap)
      new sbt.StructureIndex(Index.stringToKeyMap(attributeKeys), Index.taskToKeyMap(data), Index.triggers(data), keyIndex, aggIndex)
    }
  // Reevaluates settings after modifying them. Does not recompile or reload any build components.
  /**
   * Cheap path for settings-only changes: reapplies the special-key
   * transforms, remakes the settings map, and rebuilds indices and streams,
   * reusing the already-loaded units and delegates.
   */
  def reapply(newSettings: Seq[Setting[_]], structure: sbt.BuildStructure)(implicit display: Show[ScopedKey[_]]): sbt.BuildStructure =
    {
      val transformed = finalTransforms(newSettings)
      val newData = Def.make(transformed)(structure.delegates, structure.scopeLocal, display)
      val newIndex = structureIndex(newData, transformed, index => BuildUtil(structure.root, structure.units, index, newData), structure.units)
      val newStreams = mkStreams(structure.units, structure.root, newData)
      new sbt.BuildStructure(units = structure.units, root = structure.root, settings = transformed, data = newData, index = newIndex, streams = newStreams, delegates = structure.delegates, scopeLocal = structure.scopeLocal)
    }
def isProjectThis(s: Setting[_]) = s.key.scope.project match { case This | Select(ThisProject) => true; case _ => false }
  /**
   * Assembles the full settings sequence for a loaded build, in order:
   * global injections and plugin/auto-plugin global settings, then per-build
   * settings (plugin build settings, non-project-scoped plugin settings,
   * baseDirectory), then per-project settings (builtin keys, project
   * settings, injected project settings), all rescoped appropriately.
   */
  def buildConfigurations(loaded: sbt.LoadedBuild, rootProject: URI => String, injectSettings: InjectSettings): Seq[Setting[_]] =
    {
      ((loadedBuild in GlobalScope :== loaded) +:
        transformProjectOnly(loaded.root, rootProject, injectSettings.global)) ++
        inScope(GlobalScope)(pluginGlobalSettings(loaded) ++ loaded.autos.globalSettings) ++
        loaded.units.toSeq.flatMap {
          case (uri, build) =>
            val plugins = build.unit.plugins.detected.plugins.values
            val pluginBuildSettings = plugins.flatMap(_.buildSettings) ++ loaded.autos.buildSettings(uri)
            // Plugin settings not scoped to This belong at the build level.
            val pluginNotThis = plugins.flatMap(_.settings) filterNot isProjectThis
            val projectSettings = build.defined flatMap {
              case (id, project) =>
                val ref = ProjectRef(uri, id)
                // Declare each configuration of the project under its ConfigKey.
                val defineConfig: Seq[Setting[_]] = for (c <- project.configurations) yield ((configuration in (ref, ConfigKey(c.name))) :== c)
                val builtin: Seq[Setting[_]] = (thisProject :== project) +: (thisProjectRef :== ref) +: defineConfig
                val settings = builtin ++ project.settings ++ injectSettings.project
                // map This to thisScope, Select(p) to mapRef(uri, rootProject, p)
                transformSettings(projectScope(ref), uri, rootProject, settings)
            }
            val buildScope = Scope(Select(BuildRef(uri)), Global, Global, Global)
            val buildBase = baseDirectory :== build.localBase
            val buildSettings = transformSettings(buildScope, uri, rootProject, pluginNotThis ++ pluginBuildSettings ++ (buildBase +: build.buildSettings))
            buildSettings ++ projectSettings
        }
    }
@deprecated("Does not account for AutoPlugins and will be made private.", "0.13.2")
def pluginGlobalSettings(loaded: sbt.LoadedBuild): Seq[Setting[_]] =
loaded.units.toSeq flatMap {
case (_, build) =>
build.unit.plugins.detected.plugins.values flatMap { _.globalSettings }
}
@deprecated("No longer used.", "0.13.0")
def extractSettings(plugins: Seq[Plugin]): (Seq[Setting[_]], Seq[Setting[_]], Seq[Setting[_]]) =
(plugins.flatMap(_.settings), plugins.flatMap(_.projectSettings), plugins.flatMap(_.buildSettings))
def transformProjectOnly(uri: URI, rootProject: URI => String, settings: Seq[Setting[_]]): Seq[Setting[_]] =
Project.transform(Scope.resolveProject(uri, rootProject), settings)
def transformSettings(thisScope: Scope, uri: URI, rootProject: URI => String, settings: Seq[Setting[_]]): Seq[Setting[_]] =
Project.transform(Scope.resolveScope(thisScope, uri, rootProject), settings)
def projectScope(project: Reference): Scope = Scope(Select(project), Global, Global, Global)
def lazyEval(unit: sbt.BuildUnit): () => Eval =
{
lazy val eval = mkEval(unit)
() => eval
}
def mkEval(unit: sbt.BuildUnit): Eval = mkEval(unit.definitions, unit.plugins, unit.plugins.pluginData.scalacOptions)
def mkEval(defs: sbt.LoadedDefinitions, plugs: sbt.LoadedPlugins, options: Seq[String]): Eval =
mkEval(defs.target ++ plugs.classpath, defs.base, options)
def mkEval(classpath: Seq[File], base: File, options: Seq[String]): Eval =
new Eval(options, classpath, s => new ConsoleReporter(s), Some(evalOutputDirectory(base)))
  /**
   * This will clean up left-over files in the config-classes directory if they are no longer used.
   *
   * @param base The base directory for the build, should match the one passed into `mkEval` method.
   * @param keep The class files generated by the current load; everything else under
   *             the eval output directory is deleted.
   */
  def cleanEvalClasses(base: File, keep: Seq[File]): Unit = {
    val baseTarget = evalOutputDirectory(base)
    // Compare canonical paths so symlinked/relative references match.
    val keepSet = keep.map(_.getCanonicalPath).toSet
    // If there are no keeper files, this may be because cache was up-to-date and
    // the files aren't properly returned, even though they should be.
    // TODO - figure out where the caching of whether or not to generate classfiles occurs, and
    // put cleanups there, perhaps.
    if (keepSet.nonEmpty) {
      def keepFile(f: File) = keepSet(f.getCanonicalPath)
      import Path._
      // All regular files under the eval output directory, recursively.
      val existing = (baseTarget.***.get).filterNot(_.isDirectory)
      val toDelete = existing.filterNot(keepFile)
      if (toDelete.nonEmpty) {
        IO.delete(toDelete)
      }
    }
  }
@deprecated("This method is no longer used", "0.13.6")
def configurations(srcs: Seq[File], eval: () => Eval, imports: Seq[String]): ClassLoader => LoadedSbtFile =
if (srcs.isEmpty) const(LoadedSbtFile.empty)
else EvaluateConfigurations(eval(), srcs, imports)
def load(file: File, s: State, config: sbt.LoadBuildConfiguration): sbt.PartBuild =
load(file, builtinLoader(s, config.copy(pluginManagement = config.pluginManagement.shift, extraBuilds = Nil)), config.extraBuilds.toList)
def builtinLoader(s: State, config: sbt.LoadBuildConfiguration): BuildLoader =
{
val fail = (uri: URI) => sys.error("Invalid build URI (no handler available): " + uri)
val resolver = (info: BuildLoader.ResolveInfo) => RetrieveUnit(info)
val build = (info: BuildLoader.BuildInfo) => Some(() =>
loadUnit(info.uri, info.base, info.state, info.config)
)
val components = BuildLoader.components(resolver, build, full = BuildLoader.componentLoader)
BuildLoader(components, fail, s, config)
}
def load(file: File, loaders: BuildLoader, extra: List[URI]): sbt.PartBuild = loadURI(IO.directoryURI(file), loaders, extra)
def loadURI(uri: URI, loaders: BuildLoader, extra: List[URI]): sbt.PartBuild =
{
IO.assertAbsolute(uri)
val (referenced, map, newLoaders) = loadAll(uri :: extra, Map.empty, loaders, Map.empty)
checkAll(referenced, map)
val build = new sbt.PartBuild(uri, map)
newLoaders transformAll build
}
def addOverrides(unit: sbt.BuildUnit, loaders: BuildLoader): BuildLoader =
loaders updatePluginManagement PluginManagement.extractOverrides(unit.plugins.fullClasspath)
def addResolvers(unit: sbt.BuildUnit, isRoot: Boolean, loaders: BuildLoader): BuildLoader =
unit.definitions.builds.flatMap(_.buildLoaders).toList match {
case Nil => loaders
case x :: xs =>
import Alternatives._
val resolver = (x /: xs) { _ | _ }
if (isRoot) loaders.setRoot(resolver) else loaders.addNonRoot(unit.uri, resolver)
}
  /**
   * Turns a raw BuildUnit into a PartBuildUnit plus the list of external
   * project references it mentions. Root projects are the explicit
   * `rootProject`s if any, otherwise the projects based at the unit's
   * directory, otherwise the first defined project.
   */
  def loaded(unit: sbt.BuildUnit): (sbt.PartBuildUnit, List[ProjectReference]) =
    {
      val defined = projects(unit)
      if (defined.isEmpty) sys.error("No projects defined in build unit " + unit)
      // since base directories are resolved at this point (after 'projects'),
      // we can compare Files instead of converting to URIs
      def isRoot(p: Project) = p.base == unit.localBase
      val externals = referenced(defined).toList
      val explicitRoots = unit.definitions.builds.flatMap(_.rootProject)
      val projectsInRoot = if (explicitRoots.isEmpty) defined.filter(isRoot) else explicitRoots
      val rootProjects = if (projectsInRoot.isEmpty) defined.head :: Nil else projectsInRoot
      (new sbt.PartBuildUnit(unit, defined.map(d => (d.id, d)).toMap, rootProjects.map(_.id), buildSettings(unit)), externals)
    }
  /** Settings declared on the unit's Build objects, rescoped to the build level. */
  def buildSettings(unit: sbt.BuildUnit): Seq[Setting[_]] =
    {
      val buildScope = GlobalScope.copy(project = Select(BuildRef(unit.uri)))
      val resolve = Scope.resolveBuildScope(buildScope, unit.uri)
      Project.transform(resolve, unit.definitions.builds.flatMap(_.settings))
    }
  /**
   * Worklist loop: loads each base URI once, accumulating the references
   * each unit makes and the loaded units, while threading loader updates
   * (resolvers/overrides) discovered along the way.
   */
  @tailrec def loadAll(bases: List[URI], references: Map[URI, List[ProjectReference]], loaders: BuildLoader, builds: Map[URI, sbt.PartBuildUnit]): (Map[URI, List[ProjectReference]], Map[URI, sbt.PartBuildUnit], BuildLoader) =
    bases match {
      case b :: bs =>
        if (builds contains b)
          loadAll(bs, references, loaders, builds)
        else {
          val (loadedBuild, refs) = loaded(loaders(b))
          checkBuildBase(loadedBuild.unit.localBase)
          val newLoader = addOverrides(loadedBuild.unit, addResolvers(loadedBuild.unit, builds.isEmpty, loaders.resetPluginDepth))
          // it is important to keep the load order stable, so we sort the remaining URIs
          val remainingBases = (refs.flatMap(Reference.uri) reverse_::: bs).sorted
          loadAll(remainingBases, references.updated(b, refs), newLoader, builds.updated(b, loadedBuild))
        }
      case Nil => (references, builds, loaders)
    }
  /** Ensures `projectBase` is a usable directory located inside `buildBase`. */
  def checkProjectBase(buildBase: File, projectBase: File): Unit = {
    checkDirectory(projectBase)
    assert(buildBase == projectBase || IO.relativize(buildBase, projectBase).isDefined, "Directory " + projectBase + " is not contained in build root " + buildBase)
  }
  def checkBuildBase(base: File) = checkDirectory(base)
  /** Requires an absolute, non-file path; creates the directory when missing. */
  def checkDirectory(base: File): Unit = {
    assert(base.isAbsolute, "Not absolute: " + base)
    if (base.isFile)
      sys.error("Not a directory: " + base)
    else if (!base.exists)
      IO createDirectory base
  }
  /** Resolves every unit's project references now that all builds are loaded. */
  def resolveAll(builds: Map[URI, sbt.PartBuildUnit]): Map[URI, sbt.LoadedBuildUnit] =
    {
      val rootProject = getRootProject(builds)
      builds map {
        case (uri, unit) =>
          (uri, unit.resolveRefs(ref => Scope.resolveProjectRef(uri, rootProject, ref)))
      }
    }
  /**
   * Verifies that every project reference recorded during loading points at
   * a project actually defined in the referenced build; fails with the list
   * of valid IDs otherwise.
   */
  def checkAll(referenced: Map[URI, List[ProjectReference]], builds: Map[URI, sbt.PartBuildUnit]): Unit = {
    val rootProject = getRootProject(builds)
    for ((uri, refs) <- referenced; ref <- refs) {
      val ProjectRef(refURI, refID) = Scope.resolveProjectRef(uri, rootProject, ref)
      val loadedUnit = builds(refURI)
      if (!(loadedUnit.defined contains refID)) {
        val projectIDs = loadedUnit.defined.keys.toSeq.sorted
        sys.error("No project '" + refID + "' in '" + refURI + "'.\nValid project IDs: " + projectIDs.mkString(", "))
      }
    }
  }
  /**
   * Returns a function that rewrites a project's base directory to be
   * resolved against `against` (and validated to lie inside it).
   */
  def resolveBase(against: File): Project => Project =
    {
      def resolve(f: File) =
        {
          val fResolved = new File(IO.directoryURI(IO.resolve(against, f)))
          checkProjectBase(against, fResolved)
          fResolved
        }
      p => p.copy(base = resolve(p.base))
    }
  /** Resolves all project references in every unit of a partially loaded build. */
  def resolveProjects(loaded: sbt.PartBuild): sbt.LoadedBuild =
    {
      val rootProject = getRootProject(loaded.units)
      val units = loaded.units map {
        case (uri, unit) =>
          IO.assertAbsolute(uri)
          (uri, resolveProjects(uri, unit, rootProject))
      }
      new sbt.LoadedBuild(loaded.root, units)
    }
  /** Resolves one unit's project references against its build URI. */
  def resolveProjects(uri: URI, unit: sbt.PartBuildUnit, rootProject: URI => String): sbt.LoadedBuildUnit =
    {
      IO.assertAbsolute(uri)
      val resolve = (_: Project).resolve(ref => Scope.resolveProjectRef(uri, rootProject, ref))
      new sbt.LoadedBuildUnit(unit.unit, unit.defined mapValues resolve, unit.rootProjects, unit.buildSettings)
    }
  /** The projects declared by a unit, with builds and base directories resolved. */
  def projects(unit: sbt.BuildUnit): Seq[Project] =
    {
      // we don't have the complete build graph loaded, so we don't have the rootProject function yet.
      // Therefore, we use resolveProjectBuild instead of resolveProjectRef. After all builds are loaded, we can fully resolve ProjectReferences.
      val resolveBuild = (_: Project).resolveBuild(ref => Scope.resolveProjectBuild(unit.uri, ref))
      // although the default loader will resolve the project base directory, other loaders may not, so run resolveBase here as well
      unit.definitions.projects.map(resolveBuild compose resolveBase(unit.localBase))
    }
  /** Maps a build URI to the ID of its first root project; errors when the build has none. */
  def getRootProject(map: Map[URI, sbt.BuildUnitBase]): URI => String =
    uri => getBuild(map, uri).rootProjects.headOption getOrElse emptyBuild(uri)
  def getConfiguration(map: Map[URI, sbt.LoadedBuildUnit], uri: URI, id: String, conf: ConfigKey): Configuration =
    configurationOpt(map, uri, id, conf) getOrElse noConfiguration(uri, id, conf.name)
  def configurationOpt(map: Map[URI, sbt.LoadedBuildUnit], uri: URI, id: String, conf: ConfigKey): Option[Configuration] =
    getProject(map, uri, id).configurations.find(_.name == conf.name)
  def getProject(map: Map[URI, sbt.LoadedBuildUnit], uri: URI, id: String): ResolvedProject =
    getBuild(map, uri).defined.getOrElse(id, noProject(uri, id))
  def getBuild[T](map: Map[URI, T], uri: URI): T =
    map.getOrElse(uri, noBuild(uri))
  // Error helpers for the lookups above; each aborts via sys.error.
  def emptyBuild(uri: URI) = sys.error(s"No root project defined for build unit '$uri'")
  def noBuild(uri: URI) = sys.error(s"Build unit '$uri' not defined.")
  def noProject(uri: URI, id: String) = sys.error(s"No project '$id' defined in '$uri'.")
  def noConfiguration(uri: URI, id: String, conf: String) = sys.error(s"No configuration '$conf' defined in project '$id' in '$uri'")
// Called from builtinLoader
/**
 * Loads a single build unit rooted at `localBase`: compiles/loads its plugins and
 * .scala build definitions, discovers all projects (synthesizing an aggregate root
 * when none is defined at the build base), cleans stale Eval class files, and
 * packages everything as a BuildUnit. Phases are wrapped in `timed` for profiling.
 */
def loadUnit(uri: URI, localBase: File, s: State, config: sbt.LoadBuildConfiguration): sbt.BuildUnit =
  timed(s"Load.loadUnit($uri, ...)", config.log) {
    val log = config.log
    val normBase = localBase.getCanonicalFile
    val defDir = projectStandard(normBase)
    // Load the plugin (meta-build) definition for this unit.
    val plugs = timed("Load.loadUnit: plugins", log) {
      plugins(defDir, s, config.copy(pluginManagement = config.pluginManagement.forPlugin))
    }
    // Build objects defined in .scala files.
    val defsScala = timed("Load.loadUnit: defsScala", log) {
      plugs.detected.builds.values
    }
    // Extra projects contributed by AutoPlugins at the build level.
    val buildLevelExtraProjects = plugs.detected.autoPlugins flatMap { d =>
      d.value.extraProjects map { _.setProjectOrigin(ProjectOrigin.ExtraProject) }
    }
    // NOTE - because we create an eval here, we need a clean-eval later for this URI.
    lazy val eval = timed("Load.loadUnit: mkEval", log) { mkEval(plugs.classpath, defDir, plugs.pluginData.scalacOptions) }
    val initialProjects = defsScala.flatMap(b => projectsFromBuild(b, normBase)) ++ buildLevelExtraProjects
    val hasRootAlreadyDefined = defsScala.exists(_.rootProject.isDefined)
    // Shared cache of parsed .sbt files, filled in by loadTransitive.
    val memoSettings = new mutable.HashMap[File, LoadedSbtFile]
    def loadProjects(ps: Seq[Project], createRoot: Boolean) = {
      val result = loadTransitive(ps, normBase, plugs, () => eval, config.injectSettings, Nil, memoSettings, config.log, createRoot, uri, config.pluginManagement.context, Nil)
      result
    }
    val loadedProjectsRaw = timed("Load.loadUnit: loadedProjectsRaw", log) { loadProjects(initialProjects, !hasRootAlreadyDefined) }
    // TODO - As of sbt 0.13.6 we should always have a default root project from
    // here on, so the autogenerated build aggregated can be removed from this code. ( I think)
    // We may actually want to move it back here and have different flags in loadTransitive...
    val hasRoot = loadedProjectsRaw.projects.exists(_.base == normBase) || defsScala.exists(_.rootProject.isDefined)
    // When no project sits at the build base, synthesize a default build that
    // aggregates all discovered projects under an auto-generated ID.
    val (loadedProjects, defaultBuildIfNone, keepClassFiles) =
      if (hasRoot)
        (loadedProjectsRaw.projects, Build.defaultEmpty, loadedProjectsRaw.generatedConfigClassFiles)
      else {
        val existingIDs = loadedProjectsRaw.projects.map(_.id)
        val refs = existingIDs.map(id => ProjectRef(uri, id))
        val defaultID = autoID(normBase, config.pluginManagement.context, existingIDs)
        val b = Build.defaultAggregated(defaultID, refs)
        val defaultProjects = timed("Load.loadUnit: defaultProjects", log) { loadProjects(projectsFromBuild(b, normBase), false) }
        (defaultProjects.projects ++ loadedProjectsRaw.projects, b, defaultProjects.generatedConfigClassFiles ++ loadedProjectsRaw.generatedConfigClassFiles)
      }
    // Now we clean stale class files.
    // TODO - this may cause issues with multiple sbt clients, but that should be deprecated pending sbt-server anyway
    timed("Load.loadUnit: cleanEvalClasses", log) {
      cleanEvalClasses(defDir, keepClassFiles)
    }
    val defs = if (defsScala.isEmpty) defaultBuildIfNone :: Nil else defsScala
    // HERE we pull out the defined vals from memoSettings and unify them all so
    // we can use them later.
    val valDefinitions = memoSettings.values.foldLeft(DefinedSbtValues.empty) { (prev, sbtFile) =>
      prev.zip(sbtFile.definitions)
    }
    val loadedDefs = new sbt.LoadedDefinitions(defDir, Nil, plugs.loader, defs, loadedProjects, plugs.detected.builds.names, valDefinitions)
    new sbt.BuildUnit(uri, normBase, loadedDefs, plugs)
  }
/**
 * Derives an automatic project ID for a synthesized root project.
 * Normal builds use the (normalized) directory name `pluginProjectDepth`
 * levels above `localBase`, suffixed with one "-build" per depth level;
 * the global-plugin project is always named "global-plugins". Falls back
 * to `Build.defaultID` on a clash with an existing ID or when the walk
 * runs off the top of the file system.
 */
private[this] def autoID(localBase: File, context: PluginManagement.Context, existingIDs: Seq[String]): String =
  {
    // Turn a directory name into a legal project ID, aborting with a helpful message on failure.
    def idFor(dir: File): String =
      Project.normalizeProjectID(dir.getName).fold(msg => sys.error(autoIDError(dir, msg)), identity)
    // Walk `i` levels up from `dir`; use the default ID when we run out of parents.
    @annotation.tailrec
    def ancestorName(dir: File, i: Int): String =
      if (dir eq null) Build.defaultID(localBase)
      else if (i <= 0) idFor(dir)
      else ancestorName(dir.getParentFile, i - 1)
    val depth = context.pluginProjectDepth
    val suffix = "-build" * depth
    val stem = if (context.globalPluginProject) "global-plugins" else ancestorName(localBase, depth)
    val candidate = stem + suffix
    if (existingIDs.contains(candidate)) Build.defaultID(localBase) else candidate
  }
/** Builds the fatal-error message shown when a directory name cannot become a project ID. */
private[this] def autoIDError(base: File, reason: String): String = {
  val header = s"Could not derive root project ID from directory ${base.getAbsolutePath}:"
  val advice = "Rename the directory or explicitly define a root project."
  header + "\n" + reason + "\n" + advice
}
// Extracts the Project definitions from a Build object, resolving each against `base`.
private[this] def projectsFromBuild(b: Build, base: File): Seq[Project] =
  b.projectDefinitions(base).map(resolveBase(base))
// Lame hackery to keep track of our state.
// Accumulator threaded through loadTransitive: the projects loaded so far plus
// any .class files generated while compiling .sbt configuration (kept so
// cleanEvalClasses does not delete them).
private[this] case class LoadedProjects(projects: Seq[Project], generatedConfigClassFiles: Seq[File])
/**
 * Loads a new set of projects, including any transitively defined projects underneath this one.
 *
 * We have two assumptions here:
 *
 * 1. The first `Project` instance we encounter defines AddSettings and gets to specify where we pull other settings.
 * 2. Any project manipulation (enable/disablePlugins) is ok to be added in the order we encounter it.
 *
 * Any further setting is ignored, as even the SettingSet API should be deprecated/removed with sbt 1.0.
 *
 * Note: Lots of internal details in here that shouldn't be otherwise exposed.
 *
 * @param newProjects A sequence of projects we have not yet loaded, but will try to. Must not be Nil
 * @param buildBase The `baseDirectory` for the entire build.
 * @param plugins A misnomer, this is actually the compiled BuildDefinition (classpath and such) for this project.
 * @param eval A mechanism of generating an "Eval" which can compile scala code for us.
 * @param injectSettings Settings we need to inject into projects.
 * @param acc An accumulated list of loaded projects. TODO - how do these differ from newProjects?
 * @param memoSettings A recording of all sbt files that have been loaded so far.
 * @param log The logger used for this project.
 * @param makeOrDiscoverRoot True if we should autogenerate a root project.
 * @param buildUri The URI of the build this is loading
 * @param context The plugin management context for autogenerated IDs.
 * @param generatedConfigClassFiles Class files generated so far while evaluating .sbt configuration.
 *
 * @return The completely resolved/updated sequence of projects defined, with all settings expanded.
 *
 * TODO - We want to attach the known (at this time) vals/lazy vals defined in each project's
 * build.sbt to that project so we can later use this for the `set` command.
 */
private[this] def loadTransitive(
  newProjects: Seq[Project],
  buildBase: File,
  plugins: sbt.LoadedPlugins,
  eval: () => Eval,
  injectSettings: InjectSettings,
  acc: Seq[Project],
  memoSettings: mutable.Map[File, LoadedSbtFile],
  log: Logger,
  makeOrDiscoverRoot: Boolean,
  buildUri: URI,
  context: PluginManagement.Context,
  generatedConfigClassFiles: Seq[File]): LoadedProjects =
  /*timed(s"Load.loadTransitive(${ newProjects.map(_.id) })", log)*/ {
    // load all relevant configuration files (.sbt, as .scala already exists at this point)
    def discover(auto: AddSettings, base: File): DiscoveredProjects =
      discoverProjects(auto, base, plugins, eval, memoSettings)
    // Step two:
    // a. Apply all the project manipulations from .sbt files in order
    // b. Deduce the auto plugins for the project
    // c. Finalize a project with all its settings/configuration.
    def finalizeProject(p: Project, files: Seq[File], expand: Boolean): (Project, Seq[Project]) = {
      val configFiles = files flatMap { f => memoSettings.get(f) }
      val p1: Project =
        timed(s"Load.loadTransitive(${p.id}): transformedProject", log) {
          configFiles.flatMap(_.manipulations).foldLeft(p) { (prev, t) =>
            t(prev)
          }
        }
      val autoPlugins: Seq[AutoPlugin] =
        timed(s"Load.loadTransitive(${p.id}): autoPlugins", log) {
          try plugins.detected.deducePluginsFromProject(p1, log)
          catch { case e: AutoPluginException => throw translateAutoPluginException(e, p) }
        }
      val p2 = this.resolveProject(p1, autoPlugins, plugins, injectSettings, memoSettings, log)
      // When `expand` is set, also collect projects derived by the deduced AutoPlugins.
      val projectLevelExtra =
        if (expand) autoPlugins flatMap { _.derivedProjects(p2) map { _.setProjectOrigin(ProjectOrigin.DerivedProject) } }
        else Nil
      (p2, projectLevelExtra)
    }
    // Discover any new project definition for the base directory of this project, and load all settings.
    // Also return any newly discovered project instances.
    def discoverAndLoad(p: Project): (Project, Seq[Project], Seq[File]) = {
      val (root, discovered, files, generated) = discover(p.auto, p.base) match {
        case DiscoveredProjects(Some(root), rest, files, generated) =>
          // TODO - We assume here the project defined in a build.sbt WINS because the original was
          // a phony. However, we may want to 'merge' the two, or only do this if the original was a default
          // generated project.
          (root, rest, files, generated)
        case DiscoveredProjects(None, rest, files, generated) => (p, rest, files, generated)
      }
      val (finalRoot, projectLevelExtra) = finalizeProject(root, files, true)
      (finalRoot, discovered ++ projectLevelExtra, generated)
    }
    // Load all config files AND finalize the project at the root directory, if it exists.
    // Continue loading if we find any more.
    newProjects match {
      // Breadth-first recursion: load `next`, then recurse on the remaining plus
      // anything it uncovered. Root auto-generation is disabled on recursive calls.
      case Seq(next, rest @ _*) =>
        log.debug(s"[Loading] Loading project ${next.id} @ ${next.base}")
        val (finished, discovered, generated) = discoverAndLoad(next)
        loadTransitive(rest ++ discovered, buildBase, plugins, eval, injectSettings, acc :+ finished, memoSettings, log, false, buildUri, context, generated ++ generatedConfigClassFiles)
      case Nil if makeOrDiscoverRoot =>
        log.debug(s"[Loading] Scanning directory ${buildBase}")
        discover(AddSettings.defaultSbtFiles, buildBase) match {
          case DiscoveredProjects(Some(root), discovered, files, generated) =>
            log.debug(s"[Loading] Found root project ${root.id} w/ remaining ${discovered.map(_.id).mkString(",")}")
            val (finalRoot, projectLevelExtra) = timed(s"Load.loadTransitive: finalizeProject($root)", log) {
              finalizeProject(root, files, true)
            }
            loadTransitive(discovered ++ projectLevelExtra, buildBase, plugins, eval, injectSettings, finalRoot +: acc, memoSettings, log, false, buildUri, context, generated ++ generatedConfigClassFiles)
          // Here we need to create a root project...
          case DiscoveredProjects(None, discovered, files, generated) =>
            log.debug(s"[Loading] Found non-root projects ${discovered.map(_.id).mkString(",")}")
            // Here we do something interesting... We need to create an aggregate root project
            val otherProjects = loadTransitive(discovered, buildBase, plugins, eval, injectSettings, acc, memoSettings, log, false, buildUri, context, Nil)
            val otherGenerated = otherProjects.generatedConfigClassFiles
            val existingIds = otherProjects.projects map (_.id)
            val refs = existingIds map (id => ProjectRef(buildUri, id))
            val defaultID = autoID(buildBase, context, existingIds)
            val root0 = if (discovered.isEmpty || java.lang.Boolean.getBoolean("sbt.root.ivyplugin")) Build.defaultAggregatedProject(defaultID, buildBase, refs)
            else Build.generatedRootWithoutIvyPlugin(defaultID, buildBase, refs)
            val (root, _) = timed(s"Load.loadTransitive: finalizeProject2($root0)", log) {
              finalizeProject(root0, files, false)
            }
            val result = root +: (acc ++ otherProjects.projects)
            log.debug(s"[Loading] Done in ${buildBase}, returning: ${result.map(_.id).mkString("(", ", ", ")")}")
            LoadedProjects(result, generated ++ otherGenerated ++ generatedConfigClassFiles)
        }
      case Nil =>
        log.debug(s"[Loading] Done in ${buildBase}, returning: ${acc.map(_.id).mkString("(", ", ", ")")}")
        LoadedProjects(acc, generatedConfigClassFiles)
    }
  }
// Re-wraps an AutoPluginException with the offending project's id and base directory.
private[this] def translateAutoPluginException(e: AutoPluginException, project: Project): AutoPluginException =
  e.withPrefix(s"Error determining plugins for project '${project.id}' in ${project.base}:\n")
/**
 * Represents the results of flushing out a directory and discovering all the projects underneath it.
 * This will return one completely loaded project, and any newly discovered (and unloaded) projects.
 *
 * @param root The project at the "root" directory we were looking at, or None if none was defined.
 * @param nonRoot Any sub-projects discovered from this directory
 * @param sbtFiles Any sbt file loaded during this discovery (used later to complete the project).
 * @param generatedFiles Any .class file that was generated when compiling/discovering these projects.
 */
private[this] case class DiscoveredProjects(
  root: Option[Project],
  nonRoot: Seq[Project],
  sbtFiles: Seq[File],
  generatedFiles: Seq[File])
/**
 * This method attempts to resolve/apply all configuration loaded for a project. It is responsible for the following:
 *
 * Ordering all Setting[_]s for the project
 *
 *
 * @param p The project with manipulations already applied.
 * @param projectPlugins The deduced list of plugins for the given project.
 * @param loadedPlugins The project definition (and classloader) of the build.
 * @param globalUserSettings All the settings contributed from the ~/.sbt/<version> directory
 * @param memoSettings A recording of all loaded files (our files should reside in there). We should need not load any
 *                     sbt file to resolve a project.
 * @param log A logger to report auto-plugin issues to.
 */
private[this] def resolveProject(
  p: Project,
  projectPlugins: Seq[AutoPlugin],
  loadedPlugins: sbt.LoadedPlugins,
  globalUserSettings: InjectSettings,
  memoSettings: mutable.Map[File, LoadedSbtFile],
  log: Logger): Project =
  timed(s"Load.resolveProject(${p.id})", log) {
    import AddSettings._
    val autoConfigs = projectPlugins.flatMap(_.projectConfigurations)
    // 3. Use AddSettings instance to order all Setting[_]s appropriately
    val allSettings = {
      // TODO - This mechanism of applying settings could be off... It's in two places now...
      lazy val defaultSbtFiles = configurationSources(p.base)
      // Grabs the plugin settings for old-style sbt plugins.
      def pluginSettings(f: Plugins) =
        timed(s"Load.resolveProject(${p.id}): expandSettings(...): pluginSettings($f)", log) {
          val included = loadedPlugins.detected.plugins.values.filter(f.include) // don't apply the filter to AutoPlugins, only Plugins
          included.flatMap(p => p.settings.filter(isProjectThis) ++ p.projectSettings)
        }
      // Filter the AutoPlugin settings we included based on which ones are
      // intended in the AddSettings.AutoPlugins filter.
      def autoPluginSettings(f: AutoPlugins) =
        timed(s"Load.resolveProject(${p.id}): expandSettings(...): autoPluginSettings($f)", log) {
          projectPlugins.filter(f.include).flatMap(_.projectSettings)
        }
      // Grab all the settings we already loaded from sbt files
      def settings(files: Seq[File]): Seq[Setting[_]] =
        timed(s"Load.resolveProject(${p.id}): expandSettings(...): settings($files)", log) {
          for {
            file <- files
            config <- (memoSettings get file).toSeq
            setting <- config.settings
          } yield setting
        }
      // Expand the AddSettings instance into a real Seq[Setting[_]] we'll use on the project
      def expandSettings(auto: AddSettings): Seq[Setting[_]] = auto match {
        case BuildScalaFiles     => p.settings
        case User                => globalUserSettings.cachedProjectLoaded(loadedPlugins.loader)
        case sf: SbtFiles        => settings(sf.files.map(f => IO.resolve(p.base, f)))
        case sf: DefaultSbtFiles => settings(defaultSbtFiles.filter(sf.include))
        case p: Plugins          => pluginSettings(p)
        case p: AutoPlugins      => autoPluginSettings(p)
        case q: Sequence         => (Seq.empty[Setting[_]] /: q.sequence) { (b, add) => b ++ expandSettings(add) }
      }
      timed(s"Load.resolveProject(${p.id}): expandSettings(...)", log) {
        expandSettings(p.auto)
      }
    }
    // Finally, a project we can use in buildStructure.
    p.copy(settings = allSettings).setAutoPlugins(projectPlugins).prefixConfigs(autoConfigs: _*)
  }
/**
 * This method attempts to discover all Project/settings it can using the configured AddSettings and project base.
 *
 * @param auto The AddSettings of the defining project (or default) we use to determine which build.sbt files to read.
 * @param projectBase The directory we're currently loading projects/definitions from.
 * @param loadedPlugins The build definition (classloader and detected imports) used to evaluate .sbt files.
 * @param eval A mechanism of executing/running scala code.
 * @param memoSettings A recording of all files we've parsed.
 */
private[this] def discoverProjects(
  auto: AddSettings,
  projectBase: File,
  loadedPlugins: sbt.LoadedPlugins,
  eval: () => Eval,
  memoSettings: mutable.Map[File, LoadedSbtFile]): DiscoveredProjects = {
  // Default sbt files to read, if needed
  lazy val defaultSbtFiles = configurationSources(projectBase)
  // Classloader of the build
  val loader = loadedPlugins.loader
  // How to load an individual file for use later.
  // TODO - We should import vals defined in other sbt files here, if we wish to
  // share. For now, build.sbt files have their own unique namespace.
  def loadSettingsFile(src: File): LoadedSbtFile =
    EvaluateConfigurations.evaluateSbtFile(eval(), src, IO.readLines(src), loadedPlugins.detected.imports, 0)(loader)
  // How to merge SbtFiles we read into one thing
  def merge(ls: Seq[LoadedSbtFile]): LoadedSbtFile = (LoadedSbtFile.empty /: ls) { _ merge _ }
  // Loads a given file, or pulls from the cache.
  def memoLoadSettingsFile(src: File): LoadedSbtFile = memoSettings.getOrElse(src, {
    val lf = loadSettingsFile(src)
    memoSettings.put(src, lf.clearProjects) // don't load projects twice
    lf
  })
  // Loads a set of sbt files, sorted by their lexical name (current behavior of sbt).
  def loadFiles(fs: Seq[File]): LoadedSbtFile =
    merge(fs.sortBy(_.getName).map(memoLoadSettingsFile))
  // Finds all the build files associated with this project
  import AddSettings.{ User, SbtFiles, DefaultSbtFiles, Plugins, AutoPlugins, Sequence, BuildScalaFiles }
  def associatedFiles(auto: AddSettings): Seq[File] = auto match {
    case sf: SbtFiles        => sf.files.map(f => IO.resolve(projectBase, f)).filterNot(_.isHidden)
    case sf: DefaultSbtFiles => defaultSbtFiles.filter(sf.include).filterNot(_.isHidden)
    case q: Sequence         => (Seq.empty[File] /: q.sequence) { (b, add) => b ++ associatedFiles(add) }
    case _                   => Seq.empty
  }
  val rawFiles = associatedFiles(auto)
  val loadedFiles = loadFiles(rawFiles)
  val rawProjects = loadedFiles.projects
  // A project whose base equals this directory is treated as the root here.
  val (root, nonRoot) = rawProjects.partition(_.base == projectBase)
  // TODO - good error message if more than one root project
  DiscoveredProjects(root.headOption, nonRoot, rawFiles, loadedFiles.generatedFiles)
}
/** Full classpath contributed by the global plugin, or empty when no global plugin is configured. */
def globalPluginClasspath(globalPlugin: Option[GlobalPlugin]): Seq[Attributed[File]] =
  globalPlugin.map(_.data.fullClasspath).getOrElse(Nil)
/** These are the settings defined when loading a project "meta" build. */
val autoPluginSettings: Seq[Setting[_]] = inScope(GlobalScope in LocalRootProject)(Seq(
  // Every meta-build is implicitly an sbt plugin build.
  sbtPlugin :== true,
  pluginData := {
    val prod = (exportedProducts in Configurations.Runtime).value
    val cp = (fullClasspath in Configurations.Runtime).value
    val opts = (scalacOptions in Configurations.Compile).value
    // Dependency classpath excludes the build's own products (they go in separately).
    PluginData(removeEntries(cp, prod), prod, Some(fullResolvers.value), Some(update.value), opts)
  },
  onLoadMessage := ("Loading project definition from " + baseDirectory.value)
))
/** Returns `cp` without any entry whose underlying file also appears in `remove`. */
private[this] def removeEntries(cp: Seq[Attributed[File]], remove: Seq[Attributed[File]]): Seq[Attributed[File]] = {
  val excluded = data(remove).toSet
  cp.filterNot(entry => excluded.contains(entry.data))
}
// Augments a load configuration for loading a meta-build: prepends the
// plugin-build settings and any settings injected by plugin management.
def enableSbtPlugin(config: sbt.LoadBuildConfiguration): sbt.LoadBuildConfiguration =
  config.copy(injectSettings = config.injectSettings.copy(
    global = autoPluginSettings ++ config.injectSettings.global,
    project = config.pluginManagement.inject ++ config.injectSettings.project
  ))
/**
 * If a global plugin is configured, replaces the per-project injected settings
 * with the global plugin's injections; otherwise returns the config untouched.
 */
def activateGlobalPlugin(config: sbt.LoadBuildConfiguration): sbt.LoadBuildConfiguration =
  config.globalPlugin.fold(config) { gp =>
    config.copy(injectSettings = config.injectSettings.copy(project = gp.inject))
  }
// Loads the plugin (meta-build) definition for `dir`: compiles it when the
// directory contains build sources, otherwise falls back to a no-plugins setup.
def plugins(dir: File, s: State, config: sbt.LoadBuildConfiguration): sbt.LoadedPlugins =
  if (hasDefinition(dir))
    buildPlugins(dir, s, enableSbtPlugin(activateGlobalPlugin(config)))
  else
    noPlugins(dir, config)
// True when `dir` contains anything besides the default target directory,
// i.e. there is a build definition to compile.
def hasDefinition(dir: File) =
  {
    import Path._
    (dir * -GlobFilter(DefaultTargetName)).get.nonEmpty
  }
// Plugin setup for a directory with no build definition: only the global plugin classpath.
def noPlugins(dir: File, config: sbt.LoadBuildConfiguration): sbt.LoadedPlugins =
  loadPluginDefinition(dir, config, PluginData(config.globalPluginClasspath, Nil, None, None, Nil))
// Compiles the build definition in `dir` and loads the resulting plugins.
def buildPlugins(dir: File, s: State, config: sbt.LoadBuildConfiguration): sbt.LoadedPlugins =
  loadPluginDefinition(dir, config, buildPluginDefinition(dir, s, config))
// Builds the classloader for the plugin data and discovers all plugins on it.
def loadPluginDefinition(dir: File, config: sbt.LoadBuildConfiguration, pluginData: PluginData): sbt.LoadedPlugins =
  {
    val (definitionClasspath, pluginLoader) = pluginDefinitionLoader(config, pluginData)
    loadPlugins(dir, pluginData.copy(dependencyClasspath = definitionClasspath), pluginLoader)
  }
// Convenience overload: no separate definition classpath.
def pluginDefinitionLoader(config: sbt.LoadBuildConfiguration, dependencyClasspath: Seq[Attributed[File]]): (Seq[Attributed[File]], ClassLoader) =
  pluginDefinitionLoader(config, dependencyClasspath, Nil)
// Convenience overload: unpack the two classpaths from PluginData.
def pluginDefinitionLoader(config: sbt.LoadBuildConfiguration, pluginData: PluginData): (Seq[Attributed[File]], ClassLoader) =
  pluginDefinitionLoader(config, pluginData.dependencyClasspath, pluginData.definitionClasspath)
/**
 * Computes the effective definition classpath and the classloader for a plugin build.
 * NOTE: when `depcp` is non-empty this MUTATES the shared plugin-management loader
 * (`pm.loader add ...`) so dependency classes are shared across builds; only the
 * build-definition classes (`defcp`) get an isolated child loader (see #511).
 */
def pluginDefinitionLoader(config: sbt.LoadBuildConfiguration, depcp: Seq[Attributed[File]], defcp: Seq[Attributed[File]]): (Seq[Attributed[File]], ClassLoader) =
  {
    val definitionClasspath =
      if (depcp.isEmpty)
        config.classpath
      else
        (depcp ++ config.classpath).distinct
    val pm = config.pluginManagement
    // only the dependencyClasspath goes in the common plugin class loader ...
    def addToLoader() = pm.loader add Path.toURLs(data(depcp))
    val parentLoader = if (depcp.isEmpty) pm.initialLoader else { addToLoader(); pm.loader }
    val pluginLoader =
      if (defcp.isEmpty)
        parentLoader
      else {
        // ... the build definition classes get their own loader so that they don't conflict with other build definitions (#511)
        ClasspathUtilities.toLoader(data(defcp), parentLoader)
      }
    (definitionClasspath, pluginLoader)
  }
// Recursively loads and evaluates the meta-build in `dir` (via `apply`) to
// produce the PluginData (classpaths, resolvers, update report) for this build.
def buildPluginDefinition(dir: File, s: State, config: sbt.LoadBuildConfiguration): PluginData =
  {
    val (eval, pluginDef) = apply(dir, s, config)
    val pluginState = Project.setProject(Load.initialSession(pluginDef, eval), pluginDef, s)
    config.evalPluginDef(Project.structure(pluginState), pluginState)
  }
// --- Deprecated plugin/build discovery API, kept only for binary/source
// compatibility; all functionality moved to ModuleUtilities / PluginDiscovery. ---
@deprecated("Use ModuleUtilities.getCheckedObjects[Build].", "0.13.2")
def loadDefinitions(loader: ClassLoader, defs: Seq[String]): Seq[Build] =
  defs map { definition => loadDefinition(loader, definition) }
@deprecated("Use ModuleUtilities.getCheckedObject[Build].", "0.13.2")
def loadDefinition(loader: ClassLoader, definition: String): Build =
  ModuleUtilities.getObject(definition, loader).asInstanceOf[Build]
// Still current: wraps discovered plugin data into a LoadedPlugins instance.
def loadPlugins(dir: File, data: PluginData, loader: ClassLoader): sbt.LoadedPlugins =
  new sbt.LoadedPlugins(dir, data, loader, PluginDiscovery.discoverAll(data, loader))
@deprecated("Replaced by the more general PluginDiscovery.binarySourceModuleNames and will be made private.", "0.13.2")
def getPluginNames(classpath: Seq[Attributed[File]], loader: ClassLoader): Seq[String] =
  PluginDiscovery.binarySourceModuleNames(classpath, loader, PluginDiscovery.Paths.Plugins, classOf[Plugin].getName)
@deprecated("Use PluginDiscovery.binaryModuleNames.", "0.13.2")
def binaryPlugins(classpath: Seq[File], loader: ClassLoader): Seq[String] =
  PluginDiscovery.binaryModuleNames(classpath, loader, PluginDiscovery.Paths.Plugins)
@deprecated("Use PluginDiscovery.onClasspath", "0.13.2")
def onClasspath(classpath: Seq[File])(url: URL): Boolean =
  PluginDiscovery.onClasspath(classpath)(url)
@deprecated("Use ModuleUtilities.getCheckedObjects[Plugin].", "0.13.2")
def loadPlugins(loader: ClassLoader, pluginNames: Seq[String]): Seq[Plugin] =
  ModuleUtilities.getCheckedObjects[Plugin](pluginNames, loader).map(_._2)
@deprecated("Use ModuleUtilities.getCheckedObject[Plugin].", "0.13.2")
def loadPlugin(pluginName: String, loader: ClassLoader): Plugin =
  ModuleUtilities.getCheckedObject[Plugin](pluginName, loader)
@deprecated("No longer used.", "0.13.2")
def findPlugins(analysis: inc.Analysis): Seq[String] = discover(analysis, "sbt.Plugin")
@deprecated("No longer used.", "0.13.2")
def findDefinitions(analysis: inc.Analysis): Seq[String] = discover(analysis, "sbt.Build")
@deprecated("Use PluginDiscovery.sourceModuleNames", "0.13.2")
def discover(analysis: inc.Analysis, subclasses: String*): Seq[String] =
  PluginDiscovery.sourceModuleNames(analysis, subclasses: _*)
// Builds the initial session settings, carrying over the previous session's
// current build/project selections from State when they are still valid.
def initialSession(structure: sbt.BuildStructure, rootEval: () => Eval, s: State): SessionSettings = {
  val session = s get Keys.sessionSettings
  val currentProject = session map (_.currentProject) getOrElse Map.empty
  // Keep the previously selected build only if it still exists in this structure.
  val currentBuild = session map (_.currentBuild) filter (uri => structure.units.keys exists (uri ==)) getOrElse structure.root
  new SessionSettings(currentBuild, projectMap(structure, currentProject), structure.settings, Map.empty, Nil, rootEval)
}
// Fresh session rooted at the structure's root build with no prior selections.
def initialSession(structure: sbt.BuildStructure, rootEval: () => Eval): SessionSettings =
  new SessionSettings(structure.root, projectMap(structure, Map.empty), structure.settings, Map.empty, Nil, rootEval)
/**
 * Chooses the "current" project for every build in the structure: keeps the
 * previously-selected project id from `current` when it still exists in that
 * build, otherwise falls back to the build's root project.
 */
def projectMap(structure: sbt.BuildStructure, current: Map[URI, String]): Map[URI, String] =
  {
    val units = structure.units
    val rootOf = getRootProject(units)
    def chosen(uri: URI): String = {
      val stillValid = current.get(uri).filter { id =>
        structure.allProjects(uri).map(_.id).contains(id)
      }
      stillValid.getOrElse(rootOf(uri))
    }
    units.keys.map(uri => uri -> chosen(uri)).toMap
  }
// No extra scalac options are passed to the settings evaluator by default.
def defaultEvalOptions: Seq[String] = Nil
// --- Deprecated forwarders to BuildUtil, kept for compatibility. ---
@deprecated("Use BuildUtil.baseImports", "0.13.0")
def baseImports = BuildUtil.baseImports
@deprecated("Use BuildUtil.checkCycles", "0.13.0")
def checkCycles(units: Map[URI, sbt.LoadedBuildUnit]): Unit = BuildUtil.checkCycles(units)
@deprecated("Use BuildUtil.importAll", "0.13.0")
def importAll(values: Seq[String]): Seq[String] = BuildUtil.importAll(values)
@deprecated("Use BuildUtil.importAllRoot", "0.13.0")
def importAllRoot(values: Seq[String]): Seq[String] = BuildUtil.importAllRoot(values)
@deprecated("Use BuildUtil.rootedNames", "0.13.0")
def rootedName(s: String): String = BuildUtil.rootedName(s)
@deprecated("Use BuildUtil.getImports", "0.13.0")
def getImports(unit: sbt.BuildUnit): Seq[String] = BuildUtil.getImports(unit)
// All project references mentioned by the given project definitions.
def referenced[PR <: ProjectReference](definitions: Seq[ProjectDefinition[PR]]): Seq[PR] = definitions flatMap { _.referenced }
// --- Deprecated type aliases for types promoted to top-level. ---
@deprecated("LoadedBuildUnit is now top-level", "0.13.0")
type LoadedBuildUnit = sbt.LoadedBuildUnit
@deprecated("BuildStructure is now top-level", "0.13.0")
type BuildStructure = sbt.BuildStructure
@deprecated("StructureIndex is now top-level", "0.13.0")
type StructureIndex = sbt.StructureIndex
@deprecated("LoadBuildConfiguration is now top-level", "0.13.0")
type LoadBuildConfiguration = sbt.LoadBuildConfiguration
@deprecated("LoadBuildConfiguration is now top-level", "0.13.0")
val LoadBuildConfiguration = sbt.LoadBuildConfiguration
// Pairs the Eval used to compile .sbt configuration with the settings it produced.
final class EvaluatedConfigurations(val eval: Eval, val settings: Seq[Setting[_]])
/**
 * Settings injected into every load: `global` settings, per-`project` settings,
 * and `projectLoaded`, a classloader-dependent settings producer whose results
 * are cached per classloader (keyed by the loader's URL chain) to avoid
 * re-evaluating user-level settings for every project.
 */
final case class InjectSettings(global: Seq[Setting[_]], project: Seq[Setting[_]], projectLoaded: ClassLoader => Seq[Setting[_]]) {
  import java.net.URLClassLoader
  private val cache: mutable.Map[String, Seq[Setting[_]]] = mutable.Map.empty
  // Cache based on the underlying URL values of the classloader
  def cachedProjectLoaded(cl: ClassLoader): Seq[Setting[_]] =
    cl match {
      case cl: URLClassLoader => cache.getOrElseUpdate(classLoaderToHash(Some(cl)), projectLoaded(cl))
      case _                  => projectLoaded(cl) // non-URL loaders are not cacheable by URL chain
    }
  // Builds a stable string key from a loader's URLs plus its parents', recursively.
  private def classLoaderToHash(o: Option[ClassLoader]): String =
    o match {
      case Some(cl: URLClassLoader) =>
        cl.getURLs.toList.toString + classLoaderToHash(Option(cl.getParent))
      case Some(cl: ClassLoader) =>
        cl.toString + classLoaderToHash(Option(cl.getParent))
      case _ => "null"
    }
}
// --- More deprecated aliases for types promoted to top-level. ---
@deprecated("LoadedDefinitions is now top-level", "0.13.0")
type LoadedDefinitions = sbt.LoadedDefinitions
@deprecated("LoadedPlugins is now top-level", "0.13.0")
type LoadedPlugins = sbt.LoadedPlugins
@deprecated("BuildUnit is now top-level", "0.13.0")
type BuildUnit = sbt.BuildUnit
@deprecated("LoadedBuild is now top-level", "0.13.0")
type LoadedBuild = sbt.LoadedBuild
@deprecated("PartBuild is now top-level", "0.13.0")
type PartBuild = sbt.PartBuild
@deprecated("BuildUnitBase is now top-level", "0.13.0")
type BuildUnitBase = sbt.BuildUnitBase
@deprecated("PartBuildUnit is now top-level", "0.13.0")
type PartBuildUnit = sbt.PartBuildUnit
@deprecated("Use BuildUtil.apply", "0.13.0")
def buildUtil(root: URI, units: Map[URI, sbt.LoadedBuildUnit], keyIndex: KeyIndex, data: Settings[Scope]): BuildUtil[ResolvedProject] = BuildUtil(root, units, keyIndex, data)
/**
 * Debugging helper: evaluates `t`, logging how long it took at debug level.
 * The duration is logged only when `t` completes normally, matching the
 * original behavior (no logging on exception).
 */
private[sbt] def timed[T](label: String, log: Logger)(t: => T): T = {
  val begin = System.nanoTime
  val value = t
  val elapsed = System.nanoTime - begin
  log.debug(s"$label took ${elapsed / 1e6} ms")
  value
}
}
/**
 * Everything needed to load a build: staging/classpath/compiler configuration,
 * hooks for evaluating plugin definitions and resolving scopes, injected
 * settings, and the optional global plugin. The lazy vals derive the global
 * plugin's classpath, loader, plugin names and detected definitions on demand.
 */
final case class LoadBuildConfiguration(
    stagingDirectory: File,
    classpath: Seq[Attributed[File]],
    loader: ClassLoader,
    compilers: Compilers,
    evalPluginDef: (sbt.BuildStructure, State) => PluginData,
    definesClass: DefinesClass,
    delegates: sbt.LoadedBuild => Scope => Seq[Scope],
    scopeLocal: ScopeLocal,
    pluginManagement: PluginManagement,
    injectSettings: Load.InjectSettings,
    globalPlugin: Option[GlobalPlugin],
    extraBuilds: Seq[URI],
    log: Logger) {
  // Classpath and loader for the global plugin (may mutate the shared plugin loader; see pluginDefinitionLoader).
  lazy val (globalPluginClasspath, globalPluginLoader) = Load.pluginDefinitionLoader(this, Load.globalPluginClasspath(globalPlugin))
  // Old-style plugin module names on the global plugin classpath, if any.
  lazy val globalPluginNames = if (globalPluginClasspath.isEmpty) Nil else Load.getPluginNames(globalPluginClasspath, globalPluginLoader)
  private[sbt] lazy val globalPluginDefs = {
    val pluginData = globalPlugin match {
      case Some(x) => PluginData(x.data.fullClasspath, x.data.internalClasspath, Some(x.data.resolvers), Some(x.data.updateReport), Nil)
      case None    => PluginData(globalPluginClasspath, Nil, None, None, Nil)
    }
    val baseDir = globalPlugin match {
      case Some(x) => x.base
      case _       => stagingDirectory
    }
    Load.loadPluginDefinition(baseDir, this, pluginData)
  }
  lazy val detectedGlobalPlugins = globalPluginDefs.detected
}
final class IncompatiblePluginsException(msg: String, cause: Throwable) extends Exception(msg, cause)
| som-snytt/xsbt | main/src/main/scala/sbt/Load.scala | Scala | bsd-3-clause | 57,737 |
package app
import java.io.{File, FileNotFoundException}
import java.nio.file.{Files, Paths}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{LocalFileSystem, Path}
import org.apache.hadoop.hdfs.DistributedFileSystem
import org.apache.spark.graphx._
import org.apache.spark.ml.feature.Word2VecModel
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SparkSession}
import run.local.WikiDumpImport
import webapp.config.AppParams
import wikipedia.{GraphOperator, WordEmbedding}
import scala.collection.mutable
// Coarse lifecycle state of the mining backend: AVAILABLE when idle,
// RUNNING while a background import/embedding job is in flight.
object Status extends Enumeration {
  val AVAILABLE, RUNNING = Value
}
object MiningApp {
  // Local persistence locations for the parsed pages and the link graph.
  private final val LOCAL_PAGES_PATH = AppParams.getInstance().getLocalSaveDir + "local_pages_path.parquet"
  private final val LOCAL_GRAPH_VERTICES_PATH = AppParams.getInstance().getLocalSaveDir + "local_graph_vertices_path.save"
  private final val LOCAL_GRAPH_EDGES_PATH = AppParams.getInstance().getLocalSaveDir + "local_graph_edges_path.save"
  // Mutable singleton state, populated by init() and the import jobs.
  // NOTE(review): accessed from background threads without synchronization — confirm
  // the web layer serializes access via the `status` flag.
  private var started: Boolean = false          // set once init() has run
  private var pages: DataFrame = _              // parsed wiki pages (title/text/edges)
  private var graph: Graph[String, Long] = _    // page link graph
  private var embedded_space: Word2VecModel = _ // result of startWordEmbedding
  private var ss: SparkSession = _              // session supplied by init()
  private var status: Status.Value = Status.AVAILABLE
  private var loadedFile: String = "NA"         // name of the last loaded dump, for display
  import org.apache.hadoop.fs.FileSystem
  // Hadoop file system handle used for deleting previously exported data.
  private val hdfs: FileSystem = FileSystem.get(new Configuration())
  // Stores the Spark session and registers explicit FileSystem implementations
  // (avoids "No FileSystem for scheme" issues with shaded/merged jars).
  def init(session: SparkSession): Unit = {
    ss = session
    ss.sparkContext.hadoopConfiguration.set("fs.hdfs.impl", classOf[DistributedFileSystem].getName)
    ss.sparkContext.hadoopConfiguration.set("fs.file.impl", classOf[LocalFileSystem].getName)
    started = true
  }
def clearLocal(): Unit = {
if (Files.exists(Paths.get(LOCAL_GRAPH_VERTICES_PATH))) {
hdfs.delete(new Path(LOCAL_GRAPH_VERTICES_PATH), true)
}
if (Files.exists(Paths.get(LOCAL_GRAPH_EDGES_PATH))) {
hdfs.delete(new Path(LOCAL_GRAPH_EDGES_PATH), true)
}
if (Files.exists(Paths.get(LOCAL_PAGES_PATH))) {
hdfs.delete(new Path(LOCAL_PAGES_PATH), true)
}
}
  // Reloads previously exported graph and pages from local storage on a
  // background thread; flips `status` to RUNNING for the duration.
  // NOTE(review): the @throws is nominal — exceptions raised inside the thread
  // will not propagate to the caller; confirm error reporting is intended.
  @throws(classOf[FileNotFoundException])
  def importLocal(): Unit = {
    status = Status.RUNNING
    new Thread(new Runnable {
      override def run(): Unit = {
        importGraph()
        importPages()
        status = Status.AVAILABLE
        loadedFile = "localStoredDF"
      }
    }).start()
  }
  // Parses a wiki XML dump on a background thread, keeps the resulting pages
  // DataFrame and link graph in memory, and persists both locally for later
  // importLocal() calls. `status` guards against concurrent jobs.
  def importWikiDumpInBackground(filePath: String): Unit = {
    status = Status.RUNNING
    new Thread(new Runnable {
      override def run(): Unit = {
        val result = WikiDumpImport.importDumpAndGetDF(filePath, ss)
        pages = result._1
        graph = result._2
        exportPages()
        exportGraph()
        status = Status.AVAILABLE
        loadedFile = new File(filePath).getName
      }
    }).start()
  }
def createTempPagesExport(): Unit = {
pages
.coalesce(1)
.write.mode(SaveMode.Overwrite)
.format("org.apache.spark.sql.json")
.save(AppParams.getInstance().getLocalSaveDir + "tmpExportPages.json")
}
def createTempVSMExport(): Unit = {
embedded_space.getVectors
.coalesce(1)
.write.mode(SaveMode.Overwrite)
.format("org.apache.spark.sql.json")
.save(AppParams.getInstance().getLocalSaveDir + "tmpExportVSM.json")
}
  // Trains a Word2Vec model over the loaded pages on a background thread,
  // storing the model in `embedded_space` when done.
  def startWordEmbedding(dimension: Int, window: Int, iteration: Int): Unit = {
    status = Status.RUNNING
    new Thread(new Runnable {
      override def run(): Unit = {
        embedded_space = WordEmbedding.runWord2Vec(ss, pages, dimension, window, iteration)
        status = Status.AVAILABLE
      }
    }).start()
  }
def getPage(pageName: String): Page = {
val rows = pages.select("title", "text", "edges")
.filter("title = \\"" + pageName + "\\"").take(1)
if (rows.length != 1) return null
val row = rows(0)
val edges = row.get(2)
.asInstanceOf[mutable.WrappedArray[Row]]
.toArray[Row]
.map(r => (r.get(0).asInstanceOf[String], r.get(1).asInstanceOf[String]))
Page(row.get(0).asInstanceOf[String], row.get(1).asInstanceOf[String], edges)
}
  /**
   * Runs the page ranker over the full graph and returns the subgraph
   * restricted to the returned vertex ids, collected into lightweight DTOs.
   *
   * NOTE(review): collects all surviving edges and vertices to the driver —
   * assumes the pruned subgraph is small enough to fit in driver memory.
   */
  def getBestPageRankGraph: EdgesAndVertices = {
    val ind = GraphOperator.pageRanker(graph, ss.sparkContext)
    // Keep every edge (predicate always true); keep only vertices whose id is in `ind`.
    val bestGraph = graph.subgraph(_ => true, (a, _) => ind contains a)
    EdgesAndVertices(
      bestGraph.edges.collect().map(e => EdgeLight(e.srcId, e.dstId)),
      bestGraph.vertices.collect().map(v => VertexLight(v._1.toLong, v._2))
    )
  }
  /** Delegates to [[WordEmbedding]] to find the `num_result` words most similar to `query`. */
  def findSynonymsForQuery(query: String, num_result: Int): Array[WordEmbedding.WordAndSimilarity] = {
    WordEmbedding.queryToSynonyms(embedded_space, ss, query, num_result)
  }

  /** Total number of loaded pages. Triggers a Spark count job on each call. */
  def pageCount: Long = {
    pages.count()
  }
  /* ############## Accessors ############## */

  /** True once [[init]] has been called with a SparkSession. */
  def isStarted: Boolean = started

  /** Current lifecycle status (RUNNING while a background job is active). */
  def getStatus: Status.Value = status

  /** Name of the last imported dump, "localStoredDF" after a local reload, or "NA". */
  def getLoadedFile: String = loadedFile

  /** True when the pages DataFrame has been populated. */
  def pagesLoaded: Boolean = {
    pages != null
  }
  /* ########## Private functions ########## */

  /** Reloads the pages DataFrame from its local Parquet export. */
  @throws(classOf[FileNotFoundException])
  private def importPages(): Unit = {
    pages = ss.read.parquet(LOCAL_PAGES_PATH)
  }

  /** Rebuilds the graph from the vertex/edge object files written by exportGraph. */
  @throws(classOf[FileNotFoundException])
  private def importGraph(): Unit = {
    val vertices = ss.sparkContext.objectFile[(VertexId, String)](LOCAL_GRAPH_VERTICES_PATH)
    val edges = ss.sparkContext.objectFile[Edge[Long]](LOCAL_GRAPH_EDGES_PATH)
    graph = Graph[String, Long](vertices, edges)
  }
  /** Persists the pages DataFrame as Parquet, overwriting any previous export. */
  private def exportPages(): Unit = {
    pages.write.mode(SaveMode.Overwrite).parquet(LOCAL_PAGES_PATH)
  }
private def exportGraph(): Unit = {
if (Files.exists(Paths.get(LOCAL_GRAPH_VERTICES_PATH))) {
hdfs.delete(new Path(LOCAL_GRAPH_VERTICES_PATH), true)
}
if (Files.exists(Paths.get(LOCAL_GRAPH_EDGES_PATH))) {
hdfs.delete(new Path(LOCAL_GRAPH_EDGES_PATH), true)
}
graph.vertices.saveAsObjectFile(LOCAL_GRAPH_VERTICES_PATH)
graph.edges.saveAsObjectFile(LOCAL_GRAPH_EDGES_PATH)
}
  /* ############# Case classes ############# */

  // A single page: title, raw text, and outgoing edges as (String, String)
  // pairs — presumably (target, label); confirm against the dump importer.
  case class Page(title: String, text: String, edges: Array[(String, String)])
  // Serializable snapshot of a pruned graph.
  case class EdgesAndVertices(edges: Array[EdgeLight], vertices: Array[VertexLight])
  // Directed edge between two vertex ids.
  case class EdgeLight(from: Long, to: Long)
  // Vertex id plus its string label.
  case class VertexLight(id: Long, label: String)
}
| Erwangf/wikipedia-mining | src/main/scala/app/MiningApp.scala | Scala | mit | 6,242 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import org.scalatest.mock.MockitoSugar
import org.scalatest.{Matchers, WordSpec}
import play.api.libs.json.Json
import uk.gov.hmrc.ct.box.CtValidation
import uk.gov.hmrc.ct.computations.formats._
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
/**
 * Specs for the CP8 computation box: JSON round-tripping (inside a
 * [[CP8Holder]] wrapper) and range/presence validation.
 */
class CP8Spec extends WordSpec with Matchers with MockitoSugar {
  // Play-JSON Format for the wrapper, derived by macro; picked up implicitly
  // by Json.toJson / Json.fromJson below.
  implicit val format = Json.format[CP8Holder]
  "CP8 to json" should {
    "create valid json for int value" in {
      val json = Json.toJson(CP8Holder(CP8(Some(1234))))
      json.toString shouldBe """{"cp8":1234}"""
    }
    "create valid json for -ve int" in {
      val json = Json.toJson(CP8Holder(CP8(Some(-1234))))
      json.toString shouldBe """{"cp8":-1234}"""
    }
    // A missing value serialises as an explicit JSON null, not an absent key.
    "create valid json for None" in {
      val json = Json.toJson(CP8Holder(CP8(None)))
      json.toString shouldBe """{"cp8":null}"""
    }
  }
  "CP8 from json" should {
    "create +ve int from valid json" in {
      val json = Json.parse("""{"cp8":1234}""")
      Json.fromJson[CP8Holder](json).get shouldBe CP8Holder(cp8 = new CP8(Some(1234)))
    }
    "create -ve int from valid json" in {
      val json = Json.parse("""{"cp8":-1234}""")
      Json.fromJson[CP8Holder](json).get shouldBe CP8Holder(cp8 = new CP8(Some(-1234)))
    }
    "create None from valid json" in {
      val json = Json.parse("""{"cp8":null}""")
      Json.fromJson[CP8Holder](json).get shouldBe CP8Holder(cp8 = new CP8(None))
    }
  }
  // Validation does not consult the retriever's contents here, so a bare mock suffices.
  val boxRetriever = mock[ComputationsBoxRetriever]
  "CP8 validation" should {
    "pass when zero" in {
      CP8(Some(0)).validate(boxRetriever) shouldBe empty
    }
    // Boundary values: the inclusive range is [-99999999, 99999999].
    "pass when at max" in {
      CP8(Some(99999999)).validate(boxRetriever) shouldBe empty
    }
    "pass when at min" in {
      CP8(Some(-99999999)).validate(boxRetriever) shouldBe empty
    }
    "fail when below min" in {
      CP8(Some(-100000000)).validate(boxRetriever) shouldBe Set(CtValidation(Some("CP8"), "error.CP8.below.min", Some(Seq("-99999999", "99999999"))))
    }
    "fail when above max" in {
      CP8(Some(100000000)).validate(boxRetriever) shouldBe Set(CtValidation(Some("CP8"), "error.CP8.above.max", Some(Seq("-99999999", "99999999"))))
    }
    "fail when empty" in {
      CP8(None).validate(boxRetriever) shouldBe Set(CtValidation(Some("CP8"), "error.CP8.required"))
    }
  }
}
// Wrapper giving CP8 a named field so it (de)serialises as {"cp8": ...} in the specs above.
case class CP8Holder(cp8: CP8)
| pncampbell/ct-calculations | src/test/scala/uk/gov/hmrc/ct/computations/CP8Spec.scala | Scala | apache-2.0 | 2,993 |
package powercards
/**
 * One stage of game processing: playing it against a [[Game]] yields the
 * next [[Stage]] to run.
 */
trait Stage {
  def play(game: Game): Stage
}
| whence/powerlife | scala/powercards_oo/src/main/scala/powercards/Stage.scala | Scala | mit | 66 |
package sclack.tech
/**
* Just a class to help with coordinates and reduce code clutter.
*
* @author Simon Symeonidis
*/
object CoordinateHelper {

  /**
   * The four grid coordinates directly adjacent to `coord`.
   *
   * @param coord the current coordinate as an (x, y) tuple
   * @return an array of (x, y) tuples in the order left, right, up, down
   */
  def adjacentCoordinates(coord: (Int, Int)): Array[(Int, Int)] = {
    val (x, y) = coord
    Array(
      (x - 1, y), // left
      (x + 1, y), // right
      (x, y - 1), // up
      (x, y + 1)  // down
    )
  }

  /** The coordinate one step north (up) of `curr`. */
  def moveNorth(curr: (Int, Int)): (Int, Int) = (curr._1, curr._2 - 1)

  /** The coordinate one step south (down) of `curr`. */
  def moveSouth(curr: (Int, Int)): (Int, Int) = (curr._1, curr._2 + 1)

  /** The coordinate one step east (right) of `curr`. */
  def moveEast(curr: (Int, Int)): (Int, Int) = (curr._1 + 1, curr._2)

  /** The coordinate one step west (left) of `curr`. */
  def moveWest(curr: (Int, Int)): (Int, Int) = (curr._1 - 1, curr._2)
}
| psyomn/sclack | src/main/scala/tech/CoordinateHelper.scala | Scala | gpl-3.0 | 1,068 |
package com.socrata.datacoordinator.secondary.sql
import java.sql.Connection
import com.rojoma.simplearm.v2.using
import com.socrata.datacoordinator.id.DatasetId
import com.socrata.datacoordinator.id.sql._
import com.socrata.datacoordinator.secondary.{SecondaryMetric, SecondaryMetrics}
/**
 * JDBC-backed implementation of [[SecondaryMetrics]], reading and mutating
 * the secondary_metrics / secondary_metrics_history tables over `conn`.
 */
abstract class SqlSecondaryMetrics(conn: Connection) extends SecondaryMetrics {
  /**
   * Total size in bytes of everything held in the given store.
   *
   * SUM over zero rows yields SQL NULL, which JDBC's getLong maps to 0L, so
   * an unknown store reports a zero-sized metric rather than failing.
   */
  override def storeTotal(storeId: String): SecondaryMetric = {
    using(conn.prepareStatement("SELECT sum(total_size) AS total_size FROM secondary_metrics WHERE store_id = ?")) { stmt =>
      stmt.setString(1, storeId)
      using(stmt.executeQuery()) { rs =>
        rs.next()
        // rs.getLong returns a primitive and can never be null, so the previous
        // Option(rs.getLong(...)).getOrElse(0L) was dead code (always Some).
        // SQL NULL already comes back as 0L; read the column directly.
        SecondaryMetric(totalSizeBytes = rs.getLong("total_size"))
      }
    }
  }

  /**
   * Looks up the stored metric for one dataset in one store.
   *
   * @return Some(metric) when a row exists, None otherwise
   */
  override def dataset(storeId: String, datasetId: DatasetId): Option[SecondaryMetric] = {
    using(conn.prepareStatement(
      """SELECT total_size
        | FROM secondary_metrics
        | WHERE store_id = ?
        | AND dataset_system_id = ?""".stripMargin)) { stmt =>
      stmt.setString(1, storeId)
      stmt.setDatasetId(2, datasetId)
      using(stmt.executeQuery()) { rs =>
        if (rs.next()) {
          Some(SecondaryMetric(
            totalSizeBytes = rs.getLong("total_size")
          ))
        } else {
          None
        }
      }
    }
  }

  /**
   * Deletes all metric data for a dataset in a store: history rows first,
   * then the current metric row.
   */
  override def dropDataset(storeId: String, datasetId: DatasetId): Unit = {
    using(conn.prepareStatement("DELETE FROM secondary_metrics_history WHERE dataset_system_id = ? AND store_id = ?")) { stmt =>
      stmt.setDatasetId(1, datasetId)
      stmt.setString(2, storeId)
      stmt.execute()
    }
    using(conn.prepareStatement("DELETE FROM secondary_metrics WHERE store_id = ? AND dataset_system_id = ?")) { stmt =>
      stmt.setString(1, storeId)
      stmt.setDatasetId(2, datasetId)
      stmt.execute()
    }
  }
}
| socrata-platform/data-coordinator | coordinatorlib/src/main/scala/com/socrata/datacoordinator/secondary/sql/SqlSecondaryMetrics.scala | Scala | apache-2.0 | 1,884 |
import java.util.{Locale, UUID}
import javax.servlet.ServletContext
import akka.actor.ActorSystem
import hclu.hreg.Beans
import hclu.hreg.api._
import hclu.hreg.api.swagger.SwaggerServlet
import hclu.hreg.common.Utils
import hclu.hreg.common.logging.AsyncErrorReportingLogAppender
import hclu.hreg.dao.sql.SqlDatabase
import hclu.hreg.domain.User
import hclu.hreg.version.BuildInfo
import org.scalatra.{LifeCycle, ScalatraServlet}
import scala.concurrent.Future
/**
* This is the ScalatraBootstrap bootstrap file. You can use it to mount servlets or
* filters. It's also a good place to put initialization code which needs to
* run at application start (e.g. database configurations), and init params.
*/
/**
 * Scalatra lifecycle: on start-up configures locale and error reporting,
 * migrates the database schema, mounts all servlets and seeds a default
 * admin account; on shutdown closes the database and the actor system.
 */
class ScalatraBootstrap extends LifeCycle with Beans {

  override def init(context: ServletContext) {
    Locale.setDefault(Locale.US) // set default locale to prevent Scalatra from sending cookie expiration date in polish format :)

    // Initialize error reporting client.
    AsyncErrorReportingLogAppender(config, errorReporter).init()

    SqlDatabase.updateSchema(sqlDatabase.connectionString)

    // Swagger-aware servlets are mounted with their name as well, so Swagger can list them.
    def mountServlet(servlet: ScalatraServlet with Mappable) {
      servlet match {
        case s: SwaggerMappable => context.mount(s, s.fullMappingPath, s.name)
        case _ => context.mount(servlet, servlet.fullMappingPath)
      }
    }

    mountServlet(new UsersServlet(userService))
    mountServlet(new PasswordRecoveryServlet(passwordRecoveryService, userService))
    mountServlet(new DocsServlet(docService, userService))
    mountServlet(new VersionServlet)
    mountServlet(new SwaggerServlet)
    mountServlet(new UploadServlet)
    mountServlet(new ContactsServlet(contactService, userService))

    context.setAttribute("appObject", this)

    logger.info("\nStarted HREG [{}]\nwith DB: {}", BuildInfo, sqlDatabase)

    // Seed a default admin account. NOTE(review): the password is the literal
    // "xxx" — presumably a dev placeholder; confirm it is changed for production.
    val salt = Utils.randomString(128)
    val token = UUID.randomUUID().toString
    val now = clock.nowUtc
    val adminCreation: Future[Unit] = userDao.add(User.withRandomUUID("admin", "admin@drain.io", "xxx", salt, token, now, "admin", "admin"))
    // The previous onSuccess callback (deprecated) silently swallowed failures;
    // log both outcomes so a failed seed is visible.
    adminCreation.onComplete {
      case scala.util.Success(_) => logger.info("Admin added")
      case scala.util.Failure(e) => logger.error("Failed to add admin user", e)
    }
  }

  override def destroy(context: ServletContext) {
    sqlDatabase.close()
    system.shutdown()
  }
}
| tsechov/hclu-registry | backend/src/main/scala/ScalatraBootstrap.scala | Scala | apache-2.0 | 2,291 |
Subsets and Splits
Filtered Scala Code Snippets
The query filters the dataset and retrieves a sample of Scala code snippets that meet specific criteria, providing a basic overview of the dataset's contents without revealing deeper insights.