| code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5-1M) |
|---|---|---|---|---|---|
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.adapter
import com.twitter.zipkin.gen
import com.twitter.zipkin.query._
object ThriftQueryAdapter extends QueryAdapter {
type timelineAnnotationType = gen.TimelineAnnotation
type traceTimelineType = gen.TraceTimeline
type traceComboType = gen.TraceCombo
type traceSummaryType = gen.TraceSummary
type traceType = gen.Trace
type queryRequestType = gen.QueryRequest
type queryResponseType = gen.QueryResponse
type orderType = gen.Order
/* TimelineAnnotation from Thrift */
def apply(t: timelineAnnotationType): TimelineAnnotation = {
TimelineAnnotation(
t.`timestamp`,
t.`value`,
ThriftAdapter(t.`host`),
t.`spanId`,
t.`parentId`,
t.`serviceName`,
t.`spanName`)
}
/* TimelineAnnotation to Thrift */
def apply(t: TimelineAnnotation): timelineAnnotationType = {
gen.TimelineAnnotation(
t.timestamp,
t.value,
ThriftAdapter(t.host),
t.spanId,
t.parentId,
t.serviceName,
t.spanName)
}
/* TraceTimeline from Thrift */
def apply(t: traceTimelineType): TraceTimeline = {
TraceTimeline(
t.`traceId`,
t.`rootMostSpanId`,
t.`annotations`.map { ThriftQueryAdapter(_) },
t.`binaryAnnotations`.map { ThriftAdapter(_) })
}
/* TraceTimeline to Thrift */
def apply(t: TraceTimeline): traceTimelineType = {
gen.TraceTimeline(
t.traceId,
t.rootSpanId,
t.annotations.map { ThriftQueryAdapter(_) },
t.binaryAnnotations.map { ThriftAdapter(_) })
}
/* TraceCombo from Thrift */
def apply(t: traceComboType): TraceCombo = {
TraceCombo(
ThriftQueryAdapter(t.`trace`),
t.`summary`.map(ThriftQueryAdapter(_)),
t.`timeline`.map(ThriftQueryAdapter(_)),
t.`spanDepths`.map(_.toMap))
}
/* TraceCombo to Thrift */
def apply(t: TraceCombo): traceComboType = {
gen.TraceCombo(
ThriftQueryAdapter(t.trace),
t.traceSummary.map(ThriftQueryAdapter(_)),
t.traceTimeline.map(ThriftQueryAdapter(_)),
t.spanDepths)
}
/* TraceSummary from Thrift */
def apply(t: traceSummaryType): TraceSummary = {
new TraceSummary(t.traceId, t.startTimestamp, t.endTimestamp,
t.durationMicro, t.serviceCounts,
t.endpoints.map(ThriftAdapter(_)).toList)
}
/* TraceSummary to Thrift */
def apply(t: TraceSummary): traceSummaryType = {
gen.TraceSummary(t.traceId, t.startTimestamp, t.endTimestamp,
t.durationMicro, t.serviceCounts, t.endpoints.map(ThriftAdapter(_)))
}
/* Trace from Thrift */
def apply(t: traceType): Trace = {
Trace(t.`spans`.map(ThriftAdapter(_)))
}
/* Trace to Thrift */
def apply(t: Trace): traceType = {
gen.Trace(t.spans.map(ThriftAdapter(_)))
}
/* QueryRequest */
def apply(q: queryRequestType): QueryRequest = {
QueryRequest(
q.`serviceName`,
q.`spanName`,
q.`annotations`,
q.`binaryAnnotations`.map {
_.map { ThriftAdapter(_) }
},
q.`endTs`,
q.`limit`,
ThriftQueryAdapter(q.`order`))
}
def apply(q: QueryRequest): queryRequestType = {
gen.QueryRequest(
q.serviceName,
q.spanName,
q.annotations,
q.binaryAnnotations.map {
_.map { ThriftAdapter(_) }
},
q.endTs,
q.limit,
ThriftQueryAdapter(q.order))
}
/* QueryResponse */
def apply(q: queryResponseType): QueryResponse =
QueryResponse(q.`traceIds`, q.`startTs`, q.`endTs`)
def apply(q: QueryResponse): queryResponseType =
gen.QueryResponse(q.traceIds, q.startTs, q.endTs)
/* Order */
def apply(o: orderType): Order = {
o match {
case gen.Order.DurationDesc => Order.DurationDesc
case gen.Order.DurationAsc => Order.DurationAsc
case gen.Order.TimestampDesc => Order.TimestampDesc
case gen.Order.TimestampAsc => Order.TimestampAsc
case gen.Order.None => Order.None
}
}
def apply(o: Order): orderType = {
o match {
case Order.DurationDesc => gen.Order.DurationDesc
case Order.DurationAsc => gen.Order.DurationAsc
case Order.TimestampDesc => gen.Order.TimestampDesc
case Order.TimestampAsc => gen.Order.TimestampAsc
case Order.None => gen.Order.None
}
}
}
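// Illustrative only, not part of the original file: a minimal sketch of round-tripping values
// through the adapter defined above (the object and method names here are hypothetical).
object ThriftQueryAdapterUsageExample {
  // Order -> gen.Order -> Order, using the two apply overloads defined above
  def roundTripOrder(order: Order): Order =
    ThriftQueryAdapter(ThriftQueryAdapter(order))
  // a query-side request converted to its Thrift form before being sent over the wire
  def toThrift(request: QueryRequest): gen.QueryRequest =
    ThriftQueryAdapter(request)
}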
| netconstructor/zipkin | zipkin-scrooge/src/main/scala/com/twitter/zipkin/adapter/ThriftQueryAdapter.scala | Scala | apache-2.0 | 4,871 |
package org.bizzle.astar
import
org.bizzle.pathfinding.{ coordinate, pathingmap, PathingStatus },
coordinate.{ BadCoordinate2D, Coordinate2D },
pathingmap.PathingMapString
import
base.{ AStarBase, HeuristicLib }
/**
* Created by IntelliJ IDEA.
* User: Jason
* Date: 12/4/11
* Time: 10:40 PM
*/
// Basically, runs two AStar processes asynchronously, and they pass each other their updated beenThere arrays and current locations.
// If one reaches a location that the other has reached, or if the current locations are next to each other, it returns.
object BiDirAStar extends AStarBase[BiDirStepData](0.8, HeuristicLib.manhattanDistance) {
override def apply(mapString: PathingMapString) : PathingStatus[BiDirStepData] = {
val stepData = BiDirStepData(mapString)
execute(primeStepData(stepData), calculateMaxIters(stepData.pathingMap.colCount, stepData.pathingMap.rowCount))
}
override protected def execute(stepData: BiDirStepData, maxIters: Int) : PathingStatus[BiDirStepData] = {
val (stgStepData, stgCrumbs) = step(stepData.clone())
val (gtsStepData, gtsCrumbs) = step(stepData.cloneForBiBackwards())
stgStepData.assimilateBreadcrumbs(gtsCrumbs)
gtsStepData.assimilateBreadcrumbs(stgCrumbs)
val director = new BiDirDirector(decide(_: BiDirStepData, maxIters), step) // decide() gets partially applied
director.direct(stgStepData, gtsStepData)
}
override protected def goalIsFound(stepData: BiDirStepData, freshLoc: Coordinate2D) =
(freshLoc overlaps stepData.goal) || (stepData hasInOthersBreadcrumbs freshLoc)
override protected def makeNewStepData(stepData: BiDirStepData, freshLoc: Coordinate2D = BadCoordinate2D, isIncingIters: Boolean = false) =
BiDirStepData(freshLoc, stepData, isIncingIters)
}
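// Illustrative only, not part of the original file: a sketch of invoking the bidirectional
// solver; the PathingMapString is assumed to be produced elsewhere in the pathingmap package.
object BiDirAStarUsageExample {
  def solve(mapString: PathingMapString): PathingStatus[BiDirStepData] =
    BiDirAStar(mapString)
}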
| TheBizzle/PathFinding | AStar/src/main/org/bizzle/astar/BiDirAStar.scala | Scala | bsd-3-clause | 1,789 |
package codeboy.mapreduce.simple
abstract class Mapper[inputType, outPutKeyType, outPutValueType] {
  // ignore the input key
  def map(input: inputType): List[(outPutKeyType, outPutValueType)]
}
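// Illustrative only, not part of the original file: a minimal word-count style mapper showing
// one way the abstract map method might be implemented.
class WordCountMapper extends Mapper[String, String, Int] {
  // emit a (word, 1) pair for every whitespace-separated token in the input line
  def map(input: String): List[(String, Int)] =
    input.split("\\s+").toList.filter(_.nonEmpty).map(word => (word, 1))
}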
| codeboyyong/scala-sample | my_sample/src/main/scala/codeboy/mapreduce/simple/Mapper.scala | Scala | apache-2.0 | 196 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.allqueries
import org.apache.spark.sql.Row
import org.apache.spark.sql.common.util.Spark2QueryTest
import org.scalatest.BeforeAndAfterAll
class InsertIntoCarbonTableSpark2TestCase extends Spark2QueryTest with BeforeAndAfterAll {
override def beforeAll: Unit = {
sql("drop table if exists OneRowTable")
}
test("insert select one row") {
sql("create table OneRowTable(col1 string, col2 string, col3 int, col4 double) stored by 'carbondata'")
sql("insert into OneRowTable select '0.1', 'a.b', 1, 1.2")
checkAnswer(sql("select * from OneRowTable"), Seq(Row("0.1", "a.b", 1, 1.2)))
}
override def afterAll {
sql("drop table if exists OneRowTable")
}
}
| ravipesala/incubator-carbondata | integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/allqueries/InsertIntoCarbonTableSpark2TestCase.scala | Scala | apache-2.0 | 1,530 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.strategy
import java.util
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import org.apache.hadoop.fs.{FileStatus, FileSystem, Path, PathFilter}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{SparkSession, SparkSqlAdapter}
import org.apache.spark.sql.carbondata.execution.datasources.SparkCarbonFileFormat
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference, AttributeSet, Expression, ExpressionSet, NamedExpression}
import org.apache.spark.sql.execution.{DataSourceScanExec, FilterExec, ProjectExec, SparkPlan}
import org.apache.spark.sql.execution.datasources.{FileFormat, HadoopFsRelation, InMemoryFileIndex, LogicalRelation}
import org.apache.spark.sql.execution.datasources.csv.CSVFileFormat
import org.apache.spark.sql.execution.datasources.json.JsonFileFormat
import org.apache.spark.sql.execution.datasources.orc.OrcFileFormat
import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat
import org.apache.spark.sql.execution.datasources.text.TextFileFormat
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.SparkSQLUtil
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, SegmentFileStore}
import org.apache.carbondata.core.readcommitter.ReadCommittedScope
import org.apache.carbondata.core.statusmanager.{FileFormat => FileFormatName, LoadMetadataDetails, SegmentStatus, SegmentStatusManager}
import org.apache.carbondata.core.util.{CarbonProperties, CarbonSessionInfo, SessionParams, ThreadLocalSessionInfo}
import org.apache.carbondata.core.util.path.CarbonTablePath
object MixedFormatHandler {
val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
val supportedFormats: Seq[String] =
Seq("carbon", "carbondata", "parquet", "orc", "json", "csv", "text")
def validateFormat(format: String): Boolean = {
supportedFormats.exists(_.equalsIgnoreCase(format))
}
/**
* collect the schema, the last-level directories and all data files under the given path
*
* @param sparkSession spark session
* @param options options for ADD SEGMENT
* @param inputPath path under which to collect
* @return schema of the data files, and a map from each last-level directory (partition folder)
* to its list of child data files
*/
def collectInfo(
sparkSession: SparkSession,
options: Map[String, String],
inputPath: String): (StructType, mutable.Map[String, Seq[FileStatus]]) = {
val path = new Path(inputPath)
val fs = path.getFileSystem(SparkSQLUtil.sessionState(sparkSession).newHadoopConf())
val rootPath = fs.getFileStatus(path)
val leafDirFileMap = collectAllLeafFileStatus(sparkSession, rootPath, fs)
val format = options.getOrElse("format", "carbondata").toLowerCase
val fileFormat = if (format.equalsIgnoreCase("carbondata") ||
format.equalsIgnoreCase("carbon")) {
new SparkCarbonFileFormat()
} else {
getFileFormat(new FileFormatName(format))
}
if (leafDirFileMap.isEmpty) {
throw new RuntimeException("no partition data is found")
}
val schema = fileFormat.inferSchema(sparkSession, options, leafDirFileMap.head._2).get
(schema, leafDirFileMap)
}
/**
* collect leaf directories and leaf files recursively under the given path
*
* @param sparkSession spark session
* @param path path to collect from
* @param fs hadoop file system
* @return mapping of each leaf directory to its child files
*/
private def collectAllLeafFileStatus(
sparkSession: SparkSession,
path: FileStatus,
fs: FileSystem): mutable.Map[String, Seq[FileStatus]] = {
val directories: ArrayBuffer[FileStatus] = ArrayBuffer()
val leafFiles: ArrayBuffer[FileStatus] = ArrayBuffer()
val lastLevelFileMap = mutable.Map[String, Seq[FileStatus]]()
// get all files under input path
val fileStatus = fs.listStatus(path.getPath, new PathFilter {
override def accept(path: Path): Boolean = {
!path.getName.equals("_SUCCESS") && !path.getName.endsWith(".crc")
}
})
// collect directories and files
fileStatus.foreach { file =>
if (file.isDirectory) directories.append(file)
else leafFiles.append(file)
}
if (leafFiles.nonEmpty) {
// leaf file is found, so parent folder (input parameter) is the last level dir
val updatedPath = FileFactory.getUpdatedFilePath(path.getPath.toString)
lastLevelFileMap.put(updatedPath, leafFiles)
lastLevelFileMap
} else {
// no leaf file is found, for each directory, collect recursively
directories.foreach { dir =>
val map = collectAllLeafFileStatus(sparkSession, dir, fs)
lastLevelFileMap ++= map
}
lastLevelFileMap
}
}
def extraSegments(identifier: AbsoluteTableIdentifier,
readCommittedScope: ReadCommittedScope): Array[LoadMetadataDetails] = {
val loadMetadataDetails = readCommittedScope.getSegmentList
val segsToAccess = getSegmentsToAccess(identifier)
loadMetadataDetails.filter { metaDetail =>
metaDetail.getSegmentStatus.equals(SegmentStatus.SUCCESS) ||
metaDetail.getSegmentStatus.equals(SegmentStatus.LOAD_PARTIAL_SUCCESS)
}.filterNot { currLoad =>
currLoad.getFileFormat.equals(FileFormatName.COLUMNAR_V3) ||
currLoad.getFileFormat.equals(FileFormatName.ROW_V1)
}.filter {
l => segsToAccess.isEmpty || segsToAccess.contains(l.getLoadName)
}
}
/**
* Generates the RDD for non-carbon segments. It uses Spark's underlying file formats and
* generates the RDD in their native flow, so the original performance and features are kept.
*
* If the segments have different formats (e.g. parquet, orc), it creates an RDD per format and
* unions them.
*/
def extraRDD(
l: LogicalRelation,
projects: Seq[NamedExpression],
filters: Seq[Expression],
readCommittedScope: ReadCommittedScope,
identifier: AbsoluteTableIdentifier,
extraSegments: Array[LoadMetadataDetails],
supportBatch: Boolean = true): Option[(RDD[InternalRow], Boolean)] = {
val rdds = extraSegments
.groupBy(_.getFileFormat)
.map { case (format, details) =>
// collect paths as input to scan RDD
val paths = details.flatMap { d =>
val segmentFile = SegmentFileStore.readSegmentFile(
CarbonTablePath.getSegmentFilePath(readCommittedScope.getFilePath, d.getSegmentFile))
// If it is a partition table, the path to create RDD should be the root path of the
// partition folder (excluding the partition subfolder).
// If it is not a partition folder, collect all data file paths
if (segmentFile.getOptions.containsKey("partition")) {
val segmentPath = segmentFile.getOptions.get("path")
if (segmentPath == null) {
throw new RuntimeException("invalid segment file, 'path' option not found")
}
Seq(new Path(segmentPath))
} else {
// If it is not a partition folder, collect all data file paths to create RDD
segmentFile.getLocationMap.asScala.flatMap { case (p, f) =>
f.getFiles.asScala.map { ef =>
new Path(p + CarbonCommonConstants.FILE_SEPARATOR + ef)
}.toSeq
}.toSeq
}
}
val fileFormat = getFileFormat(format, supportBatch)
getRDDForExternalSegments(l, projects, filters, fileFormat, paths)
}
if (rdds.nonEmpty) {
if (rdds.size == 1) {
Some(rdds.head)
} else {
if (supportBatch && rdds.exists(!_._2)) {
extraRDD(l, projects, filters, readCommittedScope, identifier, extraSegments, false)
} else {
var rdd: RDD[InternalRow] = null
rdds.foreach { r =>
if (rdd == null) {
rdd = r._1
} else {
rdd = rdd.union(r._1)
}
}
Some(rdd, rdds.forall(_._2))
}
}
} else {
None
}
}
def getFileFormat(fileFormat: FileFormatName, supportBatch: Boolean = true): FileFormat = {
if (fileFormat.equals(new FileFormatName("parquet"))) {
new ExtendedParquetFileFormat(supportBatch)
} else if (fileFormat.equals(new FileFormatName("orc"))) {
new ExtendedOrcFileFormat(supportBatch)
} else if (fileFormat.equals(new FileFormatName("json"))) {
new JsonFileFormat
} else if (fileFormat.equals(new FileFormatName("csv"))) {
new CSVFileFormat
} else if (fileFormat.equals(new FileFormatName("text"))) {
new TextFileFormat
} else {
throw new UnsupportedOperationException("Format not supported " + fileFormat)
}
}
private class ExtendedParquetFileFormat(supportBatch: Boolean) extends ParquetFileFormat {
override def supportBatch(sparkSession: SparkSession, schema: StructType): Boolean = {
super.supportBatch(sparkSession, schema) && supportBatch
}
}
private class ExtendedOrcFileFormat(supportBatch: Boolean) extends OrcFileFormat {
override def supportBatch(sparkSession: SparkSession, schema: StructType): Boolean = {
super.supportBatch(sparkSession, schema) && supportBatch
}
}
/**
* Generates the RDD using the spark file format.
*/
private def getRDDForExternalSegments(l: LogicalRelation,
projects: Seq[NamedExpression],
filters: Seq[Expression],
fileFormat: FileFormat,
paths: Seq[Path]): (RDD[InternalRow], Boolean) = {
val sparkSession = l.relation.sqlContext.sparkSession
val fsRelation = l.catalogTable match {
case Some(catalogTable) =>
val fileIndex =
new InMemoryFileIndex(sparkSession, paths, catalogTable.storage.properties, None)
// exclude the partition in data schema
val dataSchema = catalogTable.schema.filterNot { column =>
catalogTable.partitionColumnNames.contains(column.name)}
HadoopFsRelation(
fileIndex,
catalogTable.partitionSchema,
new StructType(dataSchema.toArray),
catalogTable.bucketSpec,
fileFormat,
catalogTable.storage.properties)(sparkSession)
case _ =>
HadoopFsRelation(
new InMemoryFileIndex(sparkSession, Seq.empty, Map.empty, None),
new StructType(),
l.relation.schema,
None,
fileFormat,
null)(sparkSession)
}
// Filters on this relation fall into four categories based on where we can use them to avoid
// reading unneeded data:
// - partition keys only - used to prune directories to read
// - bucket keys only - optionally used to prune files to read
// - keys stored in the data only - optionally used to skip groups of data in files
// - filters that need to be evaluated again after the scan
val filterSet = ExpressionSet(filters)
// The attribute names in predicates may differ in case from those in the schema, so normalize
// them to the schema's names; after that we no longer need to worry about case sensitivity.
val normalizedFilters = filters.map { e =>
e transform {
case a: AttributeReference =>
a.withName(l.output.find(_.semanticEquals(a)).get.name)
}
}
val partitionColumns =
l.resolve(
fsRelation.partitionSchema, fsRelation.sparkSession.sessionState.analyzer.resolver)
val partitionSet = AttributeSet(partitionColumns)
val partitionKeyFilters =
ExpressionSet(normalizedFilters
.filter(_.references.subsetOf(partitionSet)))
LOGGER.info(s"Pruning directories with: ${ partitionKeyFilters.mkString(",") }")
val dataColumns =
l.resolve(fsRelation.dataSchema, fsRelation.sparkSession.sessionState.analyzer.resolver)
// Partition keys are not available in the statistics of the files.
val dataFilters = normalizedFilters.filter(_.references.intersect(partitionSet).isEmpty)
// Predicates with both partition keys and attributes need to be evaluated after the scan.
val afterScanFilters = filterSet -- partitionKeyFilters.filter(_.references.nonEmpty)
LOGGER.info(s"Post-Scan Filters: ${ afterScanFilters.mkString(",") }")
val filterAttributes = AttributeSet(afterScanFilters)
val requiredExpressions = new util.LinkedHashSet[NamedExpression](
(projects.flatMap(p => findAttribute(dataColumns, p)) ++
filterAttributes.map(p => dataColumns.find(_.exprId.equals(p.exprId)).get)).asJava
).asScala.toSeq
val readDataColumns =
requiredExpressions.filterNot(partitionColumns.contains).asInstanceOf[Seq[Attribute]]
val outputSchema = readDataColumns.toStructType
LOGGER.info(s"Output Data Schema: ${ outputSchema.simpleString(5) }")
val outputAttributes = readDataColumns ++ partitionColumns
val scan =
SparkSqlAdapter.getScanForSegments(
fsRelation,
outputAttributes,
outputSchema,
partitionKeyFilters.toSeq,
dataFilters,
l.catalogTable.map(_.identifier))
val afterScanFilter = afterScanFilters.toSeq.reduceOption(expressions.And)
val withFilter = afterScanFilter.map(FilterExec(_, scan)).getOrElse(scan)
val withProjections: SparkPlan = if (projects == withFilter.output) {
withFilter
} else {
ProjectExec(projects, withFilter)
}
(withProjections.find(_.isInstanceOf[DataSourceScanExec]).get.asInstanceOf[DataSourceScanExec]
.inputRDDs().head, fileFormat.supportBatch(sparkSession, outputSchema))
}
// Returns the unique columns, matched by expression id, from the filters and the projection list
def findAttribute(dataColumns: Seq[Attribute], p: Expression): Seq[Attribute] = {
dataColumns.find {
x =>
val attr = findAttributeReference(p)
attr.isDefined && x.exprId.equals(attr.get.exprId)
} match {
case Some(c) => Seq(c)
case None => Seq()
}
}
private def findAttributeReference(p: Expression): Option[NamedExpression] = {
  p match {
    case a: AttributeReference =>
      Some(a)
    case al if al.children.nonEmpty =>
      // descend into the first child only, as in the original logic
      al.children.map(findAttributeReference).head
    case _ => None
  }
}
def getSegmentsToAccess(identifier: AbsoluteTableIdentifier): Seq[String] = {
val carbonSessionInfo: CarbonSessionInfo = {
var info = ThreadLocalSessionInfo.getCarbonSessionInfo
if (info == null || info.getSessionParams == null) {
info = new CarbonSessionInfo
info.setSessionParams(new SessionParams())
}
info.getSessionParams.addProps(CarbonProperties.getInstance().getAddedProperty)
info
}
val tableUniqueKey = identifier.getDatabaseName + "." + identifier.getTableName
val inputSegmentsKey = CarbonCommonConstants.CARBON_INPUT_SEGMENTS + tableUniqueKey
val segmentsStr = carbonSessionInfo.getThreadParams
.getProperty(inputSegmentsKey, carbonSessionInfo.getSessionParams
.getProperty(inputSegmentsKey,
CarbonProperties.getInstance().getProperty(inputSegmentsKey, "*")))
if (!segmentsStr.equals("*")) {
segmentsStr.split(",")
} else {
Seq.empty
}
}
/**
* Returns true if any other non-carbon format segment exists
*/
def otherFormatSegmentsExist(metadataPath: String): Boolean = {
val allSegments = SegmentStatusManager.readLoadMetadata(metadataPath)
allSegments.exists(a => a.getFileFormat != null && !a.isCarbonFormat)
}
}
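// Illustrative only, not part of the original file: a sketch of resolving the Spark FileFormat
// used to read a segment, mirroring the carbon/non-carbon branching in collectInfo above.
object MixedFormatHandlerUsageExample {
  def resolveFormat(name: String): FileFormat = {
    val format = name.toLowerCase
    if (format.equals("carbondata") || format.equals("carbon")) {
      new SparkCarbonFileFormat()
    } else {
      // delegates to getFileFormat, which throws for unsupported formats
      MixedFormatHandler.getFileFormat(new FileFormatName(format))
    }
  }
}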
| zzcclp/carbondata | integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/MixedFormatHandler.scala | Scala | apache-2.0 | 16,907 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.data.storage.elasticsearch
import java.io.IOException
import scala.collection.JavaConverters._
import org.apache.http.Header
import org.apache.http.entity.ContentType
import org.apache.http.nio.entity.NStringEntity
import org.apache.http.util.EntityUtils
import org.apache.predictionio.data.storage.StorageClientConfig
import org.apache.predictionio.data.storage.StorageClientException
import org.elasticsearch.client.RestClient
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.native.JsonMethods._
import org.json4s.native.Serialization.write
import grizzled.slf4j.Logging
class ESSequences(client: ESClient, config: StorageClientConfig, index: String) extends Logging {
implicit val formats = DefaultFormats
private val estype = "sequences"
val restClient = client.open()
try {
ESUtils.createIndex(restClient, index,
ESUtils.getNumberOfShards(config, index.toUpperCase),
ESUtils.getNumberOfReplicas(config, index.toUpperCase))
val mappingJson =
(estype ->
("_all" -> ("enabled" -> 0)) ~
("properties" ->
("n" -> ("enabled" -> 0))))
ESUtils.createMapping(restClient, index, estype, compact(render(mappingJson)))
} finally {
restClient.close()
}
def genNext(name: String): Int = {
val restClient = client.open()
try {
val entity = new NStringEntity(write("n" -> name), ContentType.APPLICATION_JSON)
val response = restClient.performRequest(
"POST",
s"/$index/$estype/$name",
Map("refresh" -> "false").asJava,
entity)
val jsonResponse = parse(EntityUtils.toString(response.getEntity))
val result = (jsonResponse \ "result").extract[String]
result match {
case "created" =>
(jsonResponse \ "_version").extract[Int]
case "updated" =>
(jsonResponse \ "_version").extract[Int]
case _ =>
throw new IllegalStateException(s"[$result] Failed to update $index/$estype/$name")
}
} catch {
case e: IOException =>
throw new StorageClientException(s"Failed to update $index/$estype/$name", e)
} finally {
restClient.close()
}
}
}
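// Illustrative only, not part of the original file: a sketch of drawing the next value of a
// named sequence; client, config and index are assumed to come from the storage layer setup.
object ESSequencesUsageExample {
  def nextId(client: ESClient, config: StorageClientConfig, index: String, name: String): Int = {
    // constructing ESSequences creates the index and mapping as a side effect (see above)
    new ESSequences(client, config, index).genNext(name)
  }
}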
| himanshudhami/PredictionIO | storage/elasticsearch/src/main/scala/org/apache/predictionio/data/storage/elasticsearch/ESSequences.scala | Scala | apache-2.0 | 3,012 |
package chapter.fifteen
import ExerciseFour._
import org.scalatest._
import org.scalatest.junit.JUnitRunner
import org.junit.runner.RunWith
@RunWith(classOf[JUnitRunner])
class ExerciseFourSpec extends FlatSpec with Matchers {
"function" should "" in {
}
}
| deekim/impatient-scala | src/test/scala/chapter/fifteen/ExerciseFourSpec.scala | Scala | apache-2.0 | 265 |
package net.warpgame.engine.core.context.loader.service
import java.lang.invoke.{MethodHandle, MethodHandles}
import java.lang.reflect.{AnnotatedElement, Constructor, Parameter}
import net.warpgame.engine.core.context.service.{Qualified, ServiceBuilder}
import net.warpgame.engine.core.context.loader.service.ServiceResolver._
/**
* @author Jaca777
* Created 2017-08-27 at 22
*/
private[loader] class ServiceResolver(classResolver: ClassResolver) {
def resolveServiceInfo(): Set[ServiceInfo] = {
val classes = classResolver.resolveServiceClasses().par
val declaredServices = classes.map(toDeclaredServiceInfo).seq
val arrayCollectiveServices = getCollectiveServices(declaredServices)
declaredServices ++ arrayCollectiveServices
}
def toDeclaredServiceInfo(serviceClass: Class[_]): ServiceInfo = {
val builderConstructor = findBuilderConstructor(serviceClass)
val builderHandle = toMethodHandle(builderConstructor)
val dependencies = getDependencies(builderConstructor)
val qualifier = getQualifier(serviceClass)
ServiceInfo(serviceClass, qualifier, builderHandle, dependencies.toList)
}
private def findBuilderConstructor(serviceClass: Class[_]): Constructor[_] =
  serviceClass.getConstructors match {
    // the empty case must come first: the general array pattern below would otherwise swallow it
    case Array() =>
      throw NoServiceConstructorFoundException(serviceClass.getName)
    case Array(constr) => constr
    case constrs =>
      getExplicitBuilderConstructor(constrs, serviceClass.getName)
  }
private def getExplicitBuilderConstructor(constrs: Array[Constructor[_]], className: String): Constructor[_] = {
val explicitConstrs = constrs.filter(isExplicitBuilder)
explicitConstrs match {
case Array(constr) => constr
case a if a.length > 1 =>
throw AmbiguousServiceBuilderDefinition(className)
case Array() =>
throw UnableToResolveServiceBuilderException(className)
}
}
private def isExplicitBuilder(constructor: Constructor[_]): Boolean =
constructor.getAnnotation(classOf[ServiceBuilder]) != null
private def toMethodHandle(constructor: Constructor[_]): MethodHandle = {
val lookup = MethodHandles.lookup()
lookup.unreflectConstructor(constructor)
}
private def getDependencies(constr: Constructor[_]): Array[DependencyInfo] = {
val params = constr.getParameters
params.map(toDependency)
}
private def toDependency(param: Parameter): DependencyInfo =
DependencyInfo(param.getType, getQualifier(param))
private def getQualifier(param: AnnotatedElement): Option[String] = {
val annotation = param.getAnnotation(classOf[Qualified])
if (annotation != null) {
Some(annotation.qualifier())
} else {
None
}
}
private def getCollectiveServices(
declaredServices: Set[ServiceInfo]
): Set[ServiceInfo] = {
val arrayDependencies = declaredServices
.flatMap(s => s.dependencies)
.filter(d => d.`type`.isArray)
arrayDependencies.map(toCollectiveServiceInfo(declaredServices))
}
private def toCollectiveServiceInfo
(declaredServices: Set[ServiceInfo])
(dependencyInfo: DependencyInfo)
: ServiceInfo = {
val qualified = findQualified(dependencyInfo.`type`, dependencyInfo.qualifier, declaredServices)
ServiceInfo(
dependencyInfo.`type`,
dependencyInfo.qualifier,
collectiveServiceBuilder(dependencyInfo, qualified.size),
qualified
)
}
private def findQualified(`type`: Class[_], qualifier: Option[String], services: Set[ServiceInfo]): List[DependencyInfo] = {
val componentType = `type`.getComponentType
services.filter(s => componentType.isAssignableFrom(s.`type`) && qualifier.forall(q => s.qualifier.contains(q)))
.map(toDependencyInfo)
.toList
}
private def toDependencyInfo(serviceInfo: ServiceInfo): DependencyInfo = {
DependencyInfo(serviceInfo.`type`, serviceInfo.qualifier)
}
private def collectiveServiceBuilder(dependencyInfo: DependencyInfo, size: Int): MethodHandle =
  MethodHandles.identity(dependencyInfo.`type`)
    .asCollector(dependencyInfo.`type`, size)
}
object ServiceResolver {
case class NoServiceConstructorFoundException(className: String)
extends RuntimeException(
s"No public constructors found for service at $className"
)
case class AmbiguousServiceBuilderDefinition(className: String)
extends RuntimeException(
s"Multiple constructors annotated with @ServiceBuilder found for service at $className"
)
case class UnableToResolveServiceBuilderException(className: String)
extends RuntimeException(
s"Multiple constructors for service at $className found, but none is annotated with @ServiceBuilder"
)
}
| WarpOrganization/warp | core/src/main/scala/net/warpgame/engine/core/context/loader/service/ServiceResolver.scala | Scala | lgpl-3.0 | 4,805 |
/* __ *\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2002-2010, LAMP/EPFL **
** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\___/_/ |_/____/_/ | | **
** |/ **
\* */
package scala
object Option
{
/** An implicit conversion that converts an option to an iterable value
*/
implicit def option2Iterable[A](xo: Option[A]): Iterable[A] = xo.toList
/** An Option factory which creates Some(value) if the argument is not null,
* and None if it is null.
*
* @param x the value
* @return Some(value) if value != null, None if value == null
*/
def apply[A](x: A): Option[A] = if (x == null) None else Some(x)
}
/** This class represents optional values. Instances of <code>Option</code>
* are either instances of case class <code>Some</code> or the case
* object <code>None</code>.
*
* @author Martin Odersky
* @author Matthias Zenger
* @version 1.1, 16/01/2007
*/
sealed abstract class Option[+A] extends Product {
self =>
/** True if the option is the <code>None</code> value, false otherwise.
*/
def isEmpty: Boolean
/** True if the option is a <code>Some</code>(...), false otherwise.
*/
def isDefined: Boolean = !isEmpty
/** get the value of this option.
* @note The option must be nonEmpty.
* @throws Predef.NoSuchElementException if the option is empty.
*/
def get: A
/** If the option is nonempty return its value,
* otherwise return the result of evaluating a default expression.
*
* @param default the default expression.
*/
def getOrElse[B >: A](default: => B): B =
if (isEmpty) default else this.get
/** The option's value if it is nonempty, or <code>null</code> if it is empty.
* The use of null of course is discouraged, but code written to use Options
* often must interface with code which expects and returns nulls.
*/
def orNull[A1 >: A](implicit ev: Null <:< A1): A1 = this getOrElse null
/** If the option is nonempty, return the result of applying <code>f</code> to its value,
* wrapped in a Some, i.e. <code>Some(f(this.get))</code>.
* Otherwise return <code>None</code>.
*
* @param f the function to apply
*/
def map[B](f: A => B): Option[B] =
if (isEmpty) None else Some(f(this.get))
/** If the option is nonempty, return the result of applying <code>f</code> to its value.
* Otherwise return None.
* @param f the function to apply
*/
def flatMap[B](f: A => Option[B]): Option[B] =
if (isEmpty) None else f(this.get)
/** If the option is nonempty and the given predicate <code>p</code>
* yields <code>false</code> on its value, return <code>None</code>.
* Otherwise return the option value itself.
*
* @param p the predicate used for testing.
*/
def filter(p: A => Boolean): Option[A] =
if (isEmpty || p(this.get)) this else None
/** Necessary to keep Option from being implicitly converted to
* Iterable in for comprehensions.
*/
def withFilter(p: A => Boolean): WithFilter = new WithFilter(p)
/** We need a whole WithFilter class to honor the "doesn't create a new
* collection" contract even though it seems unlikely to matter much in a
* collection with max size 1.
*/
class WithFilter(p: A => Boolean) {
def map[B](f: A => B): Option[B] = self filter p map f
def flatMap[B](f: A => Option[B]): Option[B] = self filter p flatMap f
def foreach[U](f: A => U): Unit = self filter p foreach f
def withFilter(q: A => Boolean): WithFilter = new WithFilter(x => p(x) && q(x))
}
/** If the option is nonempty, p(value), otherwise false.
*
* @param p the predicate to test
*/
def exists(p: A => Boolean): Boolean =
!isEmpty && p(this.get)
/** Apply the given procedure <code>f</code> to the option's value,
* if it is nonempty. Do nothing if it is empty.
*
* @param f the procedure to apply.
*/
def foreach[U](f: A => U) {
if (!isEmpty) f(this.get)
}
/** If the given partial function <code>pf</code> is defined for the
* option's value, apply it to the value. Otherwise, None.
*
* @param pf the partial function.
*/
def collect[B](pf: PartialFunction[A, B]): Option[B] =
if (!isEmpty && pf.isDefinedAt(this.get)) Some(pf(this.get)) else None
/** If the option is nonempty return it,
* otherwise return the result of evaluating an alternative expression.
* @param alternative the alternative expression.
*/
def orElse[B >: A](alternative: => Option[B]): Option[B] =
if (isEmpty) alternative else this
/** A singleton iterator returning the option's value if it is nonempty
* or the empty iterator if the option is empty.
*/
def iterator: Iterator[A] =
if (isEmpty) Iterator.empty else Iterator.single(this.get)
/** A singleton list containing the option's value if it is nonempty
* or the empty list if the option is empty.
*/
def toList: List[A] =
if (isEmpty) List() else List(this.get)
/** An <code>Either</code> that is a <code>Left</code> with the given argument
* <code>left</code> if this is empty, or a <code>Right</code> with the
* option's value if this is nonempty.
*/
def toRight[X](left: => X) =
if (isEmpty) Left(left) else Right(this.get)
/** An <code>Either</code> that is a <code>Right</code> with the given argument
* <code>right</code> if this is empty, or a <code>Left</code> with the
* option's value if this is nonempty.
*/
def toLeft[X](right: => X) =
if (isEmpty) Right(right) else Left(this.get)
}
/** Class <code>Some[A]</code> represents existing values of type
* <code>A</code>.
*
* @author Martin Odersky
* @version 1.0, 16/07/2003
*/
final case class Some[+A](x: A) extends Option[A] {
def isEmpty = false
def get = x
}
/** This case object represents non-existent values.
*
* @author Martin Odersky
* @version 1.0, 16/07/2003
*/
case object None extends Option[Nothing] {
def isEmpty = true
def get = throw new NoSuchElementException("None.get")
}
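// Illustrative only, not part of the original file: a small sketch of how the combinators
// defined above compose, including a guard that goes through withFilter.
object OptionUsageExample {
  def describe(x: Option[Int]): String =
    x.filter(_ > 0).map(n => "positive: " + n).getOrElse("absent or non-positive")
  // a guard in a for comprehension uses withFilter, so no intermediate collection is built
  def positiveSum(a: Option[Int], b: Option[Int]): Option[Int] =
    for (x <- a; y <- b; if x + y > 0) yield x + y
}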
| cran/rkafkajars | java/scala/Option.scala | Scala | apache-2.0 | 6,377 |
package eventstore
package akka
import scala.concurrent.{ ExecutionContext, Future }
import _root_.akka.actor.ActorRef
import _root_.akka.pattern.ask
import _root_.akka.util.Timeout
trait EsTransaction {
def transactionId: Long
def write(events: List[EventData]): Future[Unit]
def commit(): Future[Unit]
}
private[eventstore] object EsTransaction {
implicit def executionContext: ExecutionContext = ExecutionContext.Implicits.global
def start(actor: ActorRef)(implicit timeout: Timeout): Future[EsTransaction] = {
import TransactionActor.{ TransactionId, GetTransactionId }
val future = actor ? GetTransactionId
future.mapTo[TransactionId].map(x => continue(x.value, actor))
}
def continue(transactionId: Long, actor: ActorRef)(implicit timeout: Timeout): EsTransaction =
EsTransactionForActor(transactionId, actor)
}
private[eventstore] final case class EsTransactionForActor(
transactionId: Long, actor: ActorRef)(implicit timeout: Timeout
) extends EsTransaction {
import TransactionActor._
import EsTransaction.executionContext
def write(events: List[EventData]): Future[Unit] = {
val future = actor ? Write(events)
future.map(_ => ())
}
def commit(): Future[Unit] = {
val future = actor ? Commit
future.map(_ => ())
}
}
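// Illustrative only, not part of the original file: a sketch of starting a transaction against
// a TransactionActor, writing a batch of events and committing it.
private[eventstore] object EsTransactionUsageExample {
  import EsTransaction.executionContext
  def writeAndCommit(actor: ActorRef, events: List[EventData])(implicit timeout: Timeout): Future[Unit] =
    EsTransaction.start(actor).flatMap { transaction =>
      transaction.write(events).flatMap(_ => transaction.commit())
    }
}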
| EventStore/EventStore.JVM | client/src/main/scala/eventstore/akka/EsTransaction.scala | Scala | bsd-3-clause | 1,293 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.catnap
import cats.effect.IO
import monix.execution.Scheduler
import monix.execution.schedulers.SchedulerService
import scala.concurrent.duration._
abstract class ConcurrentQueueJVMSuite(parallelism: Int) extends BaseConcurrentQueueSuite[SchedulerService] {
def setup(): SchedulerService =
Scheduler.computation(
name = s"concurrent-queue-par-$parallelism",
parallelism = parallelism
)
def tearDown(env: SchedulerService): Unit = {
env.shutdown()
assert(env.awaitTermination(30.seconds), "env.awaitTermination")
}
def testIO(name: String, times: Int = 1)(f: Scheduler => IO[Unit]): Unit = {
def repeatTest(test: IO[Unit], n: Int): IO[Unit] =
if (n > 0) test.flatMap(_ => repeatTest(test, n - 1))
else IO.unit
testAsync(name) { implicit ec =>
repeatTest(f(ec).timeout(60.second), times).unsafeToFuture()
}
}
}
object ConcurrentQueueJVMParallelism8Suite extends ConcurrentQueueJVMSuite(8)
object ConcurrentQueueJVMParallelism4Suite extends ConcurrentQueueJVMSuite(4)
object ConcurrentQueueJVMParallelism1Suite extends ConcurrentQueueJVMSuite(1)
| monixio/monix | monix-catnap/jvm/src/test/scala/monix/catnap/ConcurrentQueueJVMSuite.scala | Scala | apache-2.0 | 1,807 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.torch
import com.intel.analytics.bigdl.nn._
import com.intel.analytics.bigdl.optim.{L2Regularizer, SGD}
import com.intel.analytics.bigdl.tensor.{Storage, Tensor}
import com.intel.analytics.bigdl.utils.RandomGenerator._
import com.intel.analytics.bigdl.utils.T
import scala.util.Random
@com.intel.analytics.bigdl.tags.Serial
class SpatialDilatedConvolutionSpec extends TorchSpec {
"SpatialDilatedConvolution L2 regularizer" should "works correctly" in {
import com.intel.analytics.bigdl.numeric.NumericDouble
torchCheck()
val nInputPlane = 1
val nOutputPlane = 1
val kW = 2
val kH = 2
val dW = 1
val dH = 1
val padW = 0
val padH = 0
val inputData = Array(
1.0, 2, 3,
4, 5, 6,
7, 8, 9
)
val state1 = T("learningRate" -> 0.1, "learningRateDecay" -> 5e-7,
"weightDecay" -> 0.1, "momentum" -> 0.002)
val state2 = T("learningRate" -> 0.1, "learningRateDecay" -> 5e-7,
"weightDecay" -> 0.0, "momentum" -> 0.002)
val inputN = 5
val outputN = 2
val batchSize = 5
val criterion = new MSECriterion[Double]
val input = Tensor[Double](Storage(inputData), 1, Array(1, 3, 3))
val labels = Tensor[Double](4).rand()
val model1 = Sequential()
.add(new SpatialDilatedConvolution[Double](nInputPlane, nOutputPlane,
kW, kH, dW, dH, padW, padH))
.add(Sigmoid())
val (weights1, grad1) = model1.getParameters()
val model2 = Sequential()
.add(new SpatialDilatedConvolution[Double](nInputPlane, nOutputPlane,
kW, kH, dW, dH, padW, padH,
wRegularizer = L2Regularizer(0.1), bRegularizer = L2Regularizer(0.1)))
.add(Sigmoid())
val (weights2, grad2) = model2.getParameters()
weights2.copy(weights1.clone())
grad2.copy(grad1.clone())
val sgd = new SGD[Double]
def feval1(x: Tensor[Double]): (Double, Tensor[Double]) = {
val output = model1.forward(input).toTensor[Double]
val _loss = criterion.forward(output, labels)
model1.zeroGradParameters()
val gradInput = criterion.backward(output, labels)
model1.backward(input, gradInput)
(_loss, grad1)
}
def feval2(x: Tensor[Double]): (Double, Tensor[Double]) = {
val output = model2.forward(input).toTensor[Double]
val _loss = criterion.forward(output, labels)
model2.zeroGradParameters()
val gradInput = criterion.backward(output, labels)
model2.backward(input, gradInput)
(_loss, grad2)
}
var loss1: Array[Double] = null
for (i <- 1 to 100) {
loss1 = sgd.optimize(feval1, weights1, state1)._2
println(s"${i}-th loss = ${loss1(0)}")
}
var loss2: Array[Double] = null
for (i <- 1 to 100) {
loss2 = sgd.optimize(feval2, weights2, state2)._2
println(s"${i}-th loss = ${loss2(0)}")
}
weights1 should be(weights2)
loss1 should be(loss2)
}
"A SpatialDilatedConvolution" should "generate correct output" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val nInputPlane = 3
val nOutputPlane = 6
val kW = 3
val kH = 3
val dW = 1
val dH = 1
val padW = 2
val padH = 2
val layer = new SpatialDilatedConvolution[Double](nInputPlane, nOutputPlane,
kW, kH, dW, dH, padW, padH)
Random.setSeed(seed)
val input = Tensor[Double](3, 3, 6, 6).apply1(e => Random.nextDouble())
val output = layer.updateOutput(input)
val code = "torch.manualSeed(" + seed + ")\n" +
"layer = nn.SpatialDilatedConvolution(3, 6, 3, 3, 1, 1, 2, 2)\n" +
"weight = layer.weight\n" +
"bias = layer.bias \n" +
"output = layer:forward(input) "
val (luaTime, torchResult) = TH.run(code, Map("input" -> input),
Array("weight", "bias", "output"))
val luaWeight = torchResult("weight").asInstanceOf[Tensor[Double]]
val luaBias = torchResult("bias").asInstanceOf[Tensor[Double]]
val luaOutput = torchResult("output").asInstanceOf[Tensor[Double]]
val weight = layer.weight
val bias = layer.bias
weight should be(luaWeight)
bias should be(luaBias)
output should be(luaOutput)
}
"A SpatialDilatedConvolution" should "generate correct output and grad" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val nInputPlane = 3
val nOutputPlane = 6
val kW = 3
val kH = 3
val dW = 1
val dH = 1
val padW = 2
val padH = 2
val layer = new SpatialDilatedConvolution[Double](nInputPlane, nOutputPlane,
kW, kH, dW, dH, padW, padH)
val model = new Sequential[Double]()
model.add(layer)
Random.setSeed(3)
val input = Tensor[Double](3, 3, 6, 6).apply1(e => Random.nextDouble())
val output = model.updateOutput(input).toTensor[Double]
val gradOutput = Tensor[Double]().resizeAs(output).apply1(e => Random.nextDouble())
val gradInput = model.backward(input, gradOutput)
val code = "torch.manualSeed(" + seed + ")\n" +
"""layer = nn.SpatialDilatedConvolution(3, 6, 3, 3, 1, 1, 2, 2)
model = nn.Sequential()
model:add(layer)
weight = layer.weight
bias = layer.bias
model:zeroGradParameters()
output = model:forward(input)
gradInput = model:backward(input, gradOutput)
gradBias = layer.gradBias
gradWeight = layer.gradWeight
"""
val (luaTime, torchResult) = TH.run(code,
Map("input" -> input, "gradOutput" -> gradOutput),
Array("weight", "bias", "output", "gradInput", "gradBias", "gradWeight")
)
val luaWeight = torchResult("weight").asInstanceOf[Tensor[Double]]
val luaBias = torchResult("bias").asInstanceOf[Tensor[Double]]
val luaOutput = torchResult("output").asInstanceOf[Tensor[Double]]
val luaGradInput = torchResult("gradInput").asInstanceOf[Tensor[Double]]
val luaGradBias = torchResult("gradBias").asInstanceOf[Tensor[Double]]
val luaGradWeight = torchResult("gradWeight").asInstanceOf[Tensor[Double]]
val weight = layer.weight
val bias = layer.bias
weight should be(luaWeight)
bias should be(luaBias)
output should be(luaOutput)
gradInput should be(luaGradInput)
luaGradBias should be (layer.gradBias)
luaGradWeight should be (layer.gradWeight)
}
"A SpatialDilatedConvolution" should "generate correct output and grad with 3D input" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val nInputPlane = 3
val nOutputPlane = 6
val kW = 3
val kH = 3
val dW = 2
val dH = 2
val padW = 1
val padH = 1
val layer = new SpatialDilatedConvolution[Double](nInputPlane, nOutputPlane,
kW, kH, dW, dH, padW, padH)
val model = new Sequential[Double]()
model.add(layer)
Random.setSeed(3)
val input = Tensor[Double](3, 6, 6).apply1(e => Random.nextDouble())
val output = model.updateOutput(input).toTensor[Double]
val gradOutput = Tensor[Double]().resizeAs(output).apply1(e => Random.nextDouble())
val gradInput = model.backward(input, gradOutput)
val code = "torch.manualSeed(" + seed + ")\n" +
"""layer = nn.SpatialDilatedConvolution(3, 6, 3, 3, 2, 2, 1, 1)
model = nn.Sequential()
model:add(layer)
weight = layer.weight
bias = layer.bias
model:zeroGradParameters()
output = model:forward(input)
gradInput = model:backward(input, gradOutput)
gradBias = layer.gradBias
gradWeight = layer.gradWeight
"""
val (luaTime, torchResult) = TH.run(code,
Map("input" -> input, "gradOutput" -> gradOutput),
Array("weight", "bias", "output", "gradInput", "gradBias", "gradWeight")
)
val luaWeight = torchResult("weight").asInstanceOf[Tensor[Double]]
val luaBias = torchResult("bias").asInstanceOf[Tensor[Double]]
val luaOutput = torchResult("output").asInstanceOf[Tensor[Double]]
val luaGradInput = torchResult("gradInput").asInstanceOf[Tensor[Double]]
val luaGradBias = torchResult("gradBias").asInstanceOf[Tensor[Double]]
val luaGradWeight = torchResult("gradWeight").asInstanceOf[Tensor[Double]]
val weight = layer.weight
val bias = layer.bias
weight should be(luaWeight)
bias should be(luaBias)
output should be(luaOutput)
gradInput should be(luaGradInput)
luaGradBias should be (layer.gradBias)
luaGradWeight should be (layer.gradWeight)
}
"A SpatialDilatedConvolution multiple forward backward" should
"generate correct output and grad" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val nInputPlane = 3
val nOutputPlane = 6
val kW = 3
val kH = 3
val dW = 1
val dH = 1
val padW = 2
val padH = 2
val diaW = 2
val diaH = 2
val layer = new SpatialDilatedConvolution[Double](nInputPlane, nOutputPlane,
kW, kH, dW, dH, padW, padH, diaW, diaH)
val model = new Sequential[Double]()
model.add(layer)
Random.setSeed(3)
val input = Tensor[Double](3, 3, 6, 6).apply1(e => Random.nextDouble())
val output = model.updateOutput(input).toTensor[Double]
val gradOutput = Tensor[Double]().resizeAs(output).apply1(e => Random.nextDouble())
model.zeroGradParameters()
val gradInput = model.backward(input, gradOutput)
val output2 = model.forward(input).toTensor[Double]
model.zeroGradParameters()
val gradInput2 = model.backward(input, gradOutput)
val code = "torch.manualSeed(" + seed + ")\n" +
"""layer = nn.SpatialDilatedConvolution(3, 6, 3, 3, 1, 1, 2, 2, 2, 2)
model = nn.Sequential()
model:add(layer)
weight = layer.weight
bias = layer.bias
model:zeroGradParameters()
output = model:forward(input)
gradInput = model:backward(input, gradOutput)
gradBias = layer.gradBias
gradWeight = layer.gradWeight
model:zeroGradParameters()
output2 = model:forward(input)
gradInput2 = model:backward(input, gradOutput)
gradBias2 = layer.gradBias
gradWeight2 = layer.gradWeight
"""
val (luaTime, torchResult) = TH.run(code,
Map("input" -> input, "gradOutput" -> gradOutput),
Array("weight", "bias", "output", "gradInput", "gradBias", "gradWeight",
"output2", "gradInput2", "gradBias2", "gradWeight2")
)
val luaWeight = torchResult("weight").asInstanceOf[Tensor[Double]]
val luaBias = torchResult("bias").asInstanceOf[Tensor[Double]]
val luaOutput = torchResult("output").asInstanceOf[Tensor[Double]]
val luaGradInput = torchResult("gradInput").asInstanceOf[Tensor[Double]]
val luaGradBias = torchResult("gradBias").asInstanceOf[Tensor[Double]]
val luaGradWeight = torchResult("gradWeight").asInstanceOf[Tensor[Double]]
val luaOutput2 = torchResult("output2").asInstanceOf[Tensor[Double]]
val luaGradInput2 = torchResult("gradInput2").asInstanceOf[Tensor[Double]]
val luaGradBias2 = torchResult("gradBias2").asInstanceOf[Tensor[Double]]
val luaGradWeight2 = torchResult("gradWeight2").asInstanceOf[Tensor[Double]]
val weight = layer.weight
val bias = layer.bias
weight should be(luaWeight)
bias should be(luaBias)
output2 should be(luaOutput2)
gradInput2 should be(luaGradInput2)
luaGradBias2 should be (layer.gradBias)
luaGradWeight2 should be (layer.gradWeight)
}
}
| jenniew/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/torch/SpatialDilatedConvolutionSpec.scala | Scala | apache-2.0 | 11,918 |
/*
* GE.scala
* (FScape)
*
* Copyright (c) 2001-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.fscape
import de.sciss.fscape.graph.impl.GESeq
import de.sciss.fscape.graph.{ConstantD, ConstantI, ConstantL}
import scala.language.implicitConversions
object GE {
trait Lazy extends Lazy.Expander[UGenInLike] with GE
implicit def fromInt (x: Int ): ConstantI = new ConstantI(x)
implicit def fromDouble(x: Double): ConstantD = new ConstantD(x)
implicit def fromLong (x: Long ): ConstantL = new ConstantL(x)
implicit def fromSeq(xs: scala.Seq[GE]): GE = xs match {
case scala.Seq(x) => x
case _ => GESeq(xs.toIndexedSeq)
}
}
/** The main trait used in an FScape graph, a graph element, abbreviated as `GE`.
*
* A lot of operations on `GE` are defined separately in `GEOps1` and `GEOps2`
*
* @see [[GEOps1]]
* @see [[GEOps2]]
*/
trait GE extends Product {
private[fscape] def expand(implicit b: UGenGraph.Builder): UGenInLike
}
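// Illustrative only, not part of the original file: a sketch showing the implicit conversions
// from the companion object above in action (assumes constants and GESeq are themselves GEs).
object GEUsageExample {
  val amp: GE = 0.5                      // lifted via GE.fromDouble
  val channels: GE = Seq[GE](0.25, 0.75) // lifted via GE.fromSeq into a GESeq
}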
| Sciss/FScape-next | core/shared/src/main/scala/de/sciss/fscape/GE.scala | Scala | agpl-3.0 | 1,176 |
package cas.analysis.subject.components
case class ID(value: String) extends Component
| kell18/CAS | src/main/scala/cas/analysis/subject/components/ID.scala | Scala | gpl-2.0 | 88 |
package org.qbproject.schema
import org.specs2.mutable.Specification
import play.api.libs.json._
import play.api.libs.json.Json.toJsFieldJsValueWrapper
import scala.math.BigDecimal.long2bigDecimal
object DSLSpec extends Specification {
import QBSchema._
"DSL" should {
val schema = qbClass("time" -> qbPosixTime)
"have a posix time type" in {
val currentTime = System.currentTimeMillis() / 1000L
val instance = Json.obj("time" -> currentTime)
QBValidator.validate(schema)(instance).get \ "time" must beEqualTo(JsDefined(JsNumber(currentTime)))
}
"not validate posix time instances with a double value set" in {
val instance = Json.obj("time" -> 11.11)
QBValidator.validate(schema)(instance) must beAnInstanceOf[JsError]
}
"not validate posix time instances with a negative value set" in {
val instance = Json.obj("time" -> -1000)
QBValidator.validate(schema)(instance) must beAnInstanceOf[JsError]
}
}
}
| edgarmueller/qbproject | qbschema/src/test/scala/org/qbproject/schema/DSLSpec.scala | Scala | apache-2.0 | 987 |
/*
* Copyright (c) 2015,
* Ilya Sergey, Christopher Earl, Matthew Might and David Van Horn
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the project "Reachability" nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.ucombinator.lambdajs.cfa.pdcfa
import org.ucombinator.gc.GCInterface
/**
* @author ilya
*/
trait LambdaJSGarbageCollector extends GCInterface {
def rootAddr(c: ControlState, frames: Kont) = {
throw new Exception("Asbtract GC for LambdaJS CFA2 is not yet implemented")
}
def gc(c: ControlState, frames: Kont) = {
throw new Exception("Asbtract GC for LambdaJS CFA2 is not yet implemented")
}
def shouldGC = false
def printGCDebug = false
}
| ilyasergey/reachability | src/org/ucombinator/lambdajs/cfa/pdcfa/LambdaJSGarbageCollector.scala | Scala | bsd-3-clause | 2,110 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.scaladsl.api.broker
import akka.Done
import akka.stream.scaladsl.{ Flow, Source }
import scala.concurrent.Future
/**
* A Subscriber for consuming messages from a message broker.
*/
trait Subscriber[Payload] {
/**
* Returns a copy of this subscriber with the passed group id.
*
* @param groupId The group id to assign to this subscriber.
* @return A copy of this subscriber with the passed group id.
*/
def withGroupId(groupId: String): Subscriber[Payload]
/**
* Returns this subscriber, but message payloads are wrapped
* in [[Message]] instances to allow accessing any metadata
* associated with the message.
*
* @return A copy of this subscriber.
*/
def withMetadata: Subscriber[Message[Payload]]
/**
* Returns a stream of messages with at-most-once delivery semantics.
*
* If a failure occurs (e.g., an exception is thrown), the user is responsible
* for deciding how to recover from it (e.g., restarting the stream, aborting, ...).
*/
def atMostOnceSource: Source[Payload, _]
/**
* Applies the passed `flow` to the messages processed by this subscriber. Messages are delivered to the passed
* `flow` at least once.
*
* If a failure occurs (e.g., an exception is thrown), the stream may be automatically restarted starting with the
* message that caused the failure.
*
* Whether the stream is automatically restarted depends on the Lagom message broker implementation in use. If the
* Kafka Lagom message broker module is being used, then by default the stream is automatically restarted when a
* failure occurs.
*
* The `flow` may pull more elements from upstream but it must emit exactly one `Done` message for each message that
* it receives. It must also emit them in the same order that the messages were received. This means that the `flow`
* must not filter or collect a subset of the messages, instead it must split the messages into separate streams and
* map those that would have been dropped to `Done`.
*
* @param flow The flow to apply to each received message.
* @return A `Future` that will be completed if the `flow` completes.
*/
def atLeastOnce(flow: Flow[Payload, Done, _]): Future[Done]
}
object Subscriber {
/**
* Subscribers with the same group id belong to the same subscriber group.
* Conceptually you can think of a subscriber group as being a single logical
* subscriber that happens to be made up of multiple processes.
*
* Subscriber groups allow a pool of processes to divide the work of consuming
* and processing records. These processes can either be running on the same
* machine or, as is more likely, they can be distributed over many machines
* to provide scalability and fault tolerance for processing.
*/
trait GroupId {
def groupId: String
}
}
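// Hedged usage sketch (not part of the original file): one way a hypothetical
// `subscriber: Subscriber[String]` could be consumed with at-least-once semantics.
// As documented on atLeastOnce above, the flow maps every message to exactly one Done,
// in order, and never filters messages out; the group id "example-group" is made up.
private[broker] object SubscriberUsageSketch {
  def consume(subscriber: Subscriber[String]): Future[Done] = {
    val handler: Flow[String, Done, _] = Flow[String].map { message =>
      // process `message` here (side effects, writes, ...), then acknowledge it with Done
      Done
    }
    subscriber.withGroupId("example-group").atLeastOnce(handler)
  }
}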
|
rstento/lagom
|
service/scaladsl/api/src/main/scala/com/lightbend/lagom/scaladsl/api/broker/Subscriber.scala
|
Scala
|
apache-2.0
| 2,957
|
package ul
trait GetArgs extends Props {
// Parse an array of arguments (name=value, or a bare name meaning "true") and place the results in props.
def argsParse(args: Array[String], sep:String = "=") {
for (a <- args) {
val as = a.split(sep)
if (as.length <= 2) {
argFind(as(0)) match {
case Some(attr) =>
try {
props.updateString(attr.tag, if (as.length == 2) as(1) else "true")
} catch { case _:Throwable => }
case None =>
}
}
}
}
// Find the property attribute whose (possibly ';'-separated) name list contains the given name.
def argFind(name:String, sep:String = ";"):Option[PropAttr] =
props.attrs.find( _.name.split(sep).contains(name))
// Generate help text: one "name [type = value] - description" line per property.
def argsHelp:String = {
val s = new StringBuilder
for (p <- props.attrs) {
s.append(p.name).append(" [").append(props.typeStr(p.tag)).append(" = ").append(props.get(p.tag)).append("] - ").append(p.descr).append("\n")
}
s.toString
}
}
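// Hedged usage illustration (not part of the original file); `AppProps` is a made-up object
// that provides a concrete Props implementation and mixes in GetArgs:
//
//   object AppProps extends /* some Props implementation */ GetArgs
//   AppProps.argsParse(Array("verbose=true", "port=8080", "debug"))  // bare "debug" becomes "true"
//   println(AppProps.argsHelp)                                       // one line per property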
|
edartuz/meerkat
|
repo/src/ul/GetArgs.scala
|
Scala
|
mit
| 1,077
|
package io.udash.auth
trait UserCtx {
def has(permission: Permission): Boolean
def isAuthenticated: Boolean
}
object UserCtx {
trait Unauthenticated extends UserCtx {
override def has(permission: Permission): Boolean = false
override def isAuthenticated: Boolean = false
}
}
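// Hedged sketch (not part of the original file): a minimal authenticated context backed by an
// explicit set of granted permissions; `Permission` is the trait referenced above.
private final class ExampleAuthenticatedCtx(granted: Set[Permission]) extends UserCtx {
  override def has(permission: Permission): Boolean = granted.contains(permission)
  override def isAuthenticated: Boolean = true
}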
|
UdashFramework/udash-core
|
auth/src/main/scala/io/udash/auth/UserCtx.scala
|
Scala
|
apache-2.0
| 292
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification
import scala.collection.mutable
import breeze.linalg.{DenseVector => BDV}
import breeze.optimize.{CachedDiffFunction, DiffFunction, LBFGS => BreezeLBFGS, OWLQN => BreezeOWLQN}
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkException
import org.apache.spark.annotation.{Experimental, Since}
import org.apache.spark.internal.Logging
import org.apache.spark.ml.feature.Instance
import org.apache.spark.ml.linalg._
import org.apache.spark.ml.linalg.BLAS._
import org.apache.spark.ml.param._
import org.apache.spark.ml.param.shared._
import org.apache.spark.ml.util._
import org.apache.spark.mllib.evaluation.BinaryClassificationMetrics
import org.apache.spark.mllib.linalg.VectorImplicits._
import org.apache.spark.mllib.stat.MultivariateOnlineSummarizer
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row}
import org.apache.spark.sql.functions.{col, lit}
import org.apache.spark.sql.types.DoubleType
import org.apache.spark.storage.StorageLevel
/**
* Params for logistic regression.
*/
private[classification] trait LogisticRegressionParams extends ProbabilisticClassifierParams
with HasRegParam with HasElasticNetParam with HasMaxIter with HasFitIntercept with HasTol
with HasStandardization with HasWeightCol with HasThreshold {
/**
* Set threshold in binary classification, in range [0, 1].
*
* If the estimated probability of class label 1 is > threshold, then predict 1, else 0.
* A high threshold encourages the model to predict 0 more often;
* a low threshold encourages the model to predict 1 more often.
*
* Note: Calling this with threshold p is equivalent to calling `setThresholds(Array(1-p, p))`.
* When [[setThreshold()]] is called, any user-set value for [[thresholds]] will be cleared.
* If both [[threshold]] and [[thresholds]] are set in a ParamMap, then they must be
* equivalent.
*
* Default is 0.5.
* @group setParam
*/
def setThreshold(value: Double): this.type = {
if (isSet(thresholds)) clear(thresholds)
set(threshold, value)
}
/**
* Get threshold for binary classification.
*
* If [[thresholds]] is set with length 2 (i.e., binary classification),
* this returns the equivalent threshold: {{{1 / (1 + thresholds(0) / thresholds(1))}}}.
* Otherwise, returns [[threshold]] if set, or its default value if unset.
*
* @group getParam
* @throws IllegalArgumentException if [[thresholds]] is set to an array of length other than 2.
*/
override def getThreshold: Double = {
checkThresholdConsistency()
if (isSet(thresholds)) {
val ts = $(thresholds)
require(ts.length == 2, "Logistic Regression getThreshold only applies to" +
" binary classification, but thresholds has length != 2. thresholds: " + ts.mkString(","))
1.0 / (1.0 + ts(0) / ts(1))
} else {
$(threshold)
}
}
/**
* Set thresholds in multiclass (or binary) classification to adjust the probability of
* predicting each class. Array must have length equal to the number of classes, with values >= 0.
* The class with largest value p/t is predicted, where p is the original probability of that
* class and t is the class' threshold.
*
* Note: When [[setThresholds()]] is called, any user-set value for [[threshold]] will be cleared.
* If both [[threshold]] and [[thresholds]] are set in a ParamMap, then they must be
* equivalent.
*
* @group setParam
*/
def setThresholds(value: Array[Double]): this.type = {
if (isSet(threshold)) clear(threshold)
set(thresholds, value)
}
/**
* Get thresholds for binary or multiclass classification.
*
* If [[thresholds]] is set, return its value.
* Otherwise, if [[threshold]] is set, return the equivalent thresholds for binary
* classification: (1-threshold, threshold).
* If neither are set, throw an exception.
*
* @group getParam
*/
override def getThresholds: Array[Double] = {
checkThresholdConsistency()
if (!isSet(thresholds) && isSet(threshold)) {
val t = $(threshold)
Array(1-t, t)
} else {
$(thresholds)
}
}
/**
* If [[threshold]] and [[thresholds]] are both set, ensures they are consistent.
* @throws IllegalArgumentException if [[threshold]] and [[thresholds]] are not equivalent
*/
protected def checkThresholdConsistency(): Unit = {
if (isSet(threshold) && isSet(thresholds)) {
val ts = $(thresholds)
require(ts.length == 2, "Logistic Regression found inconsistent values for threshold and" +
s" thresholds. Param threshold is set (${$(threshold)}), indicating binary" +
s" classification, but Param thresholds is set with length ${ts.length}." +
" Clear one Param value to fix this problem.")
val t = 1.0 / (1.0 + ts(0) / ts(1))
require(math.abs($(threshold) - t) < 1E-5, "Logistic Regression getThreshold found" +
s" inconsistent values for threshold (${$(threshold)}) and thresholds (equivalent to $t)")
}
}
override def validateParams(): Unit = {
checkThresholdConsistency()
}
}
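/*
 * Hedged sketch (not part of the original Spark source): a worked check of the
 * threshold/thresholds equivalence documented above. setThresholds(Array(1 - p, p)) and
 * setThreshold(p) describe the same binary decision rule, and getThreshold recovers
 * p = 1 / (1 + t(0) / t(1)); e.g. Array(0.2, 0.8) gives 1 / (1 + 0.25) = 0.8.
 */
private object ThresholdEquivalenceSketch {
  def demo(): Unit = {
    val lr = new LogisticRegression().setThresholds(Array(0.2, 0.8))
    assert(math.abs(lr.getThreshold - 0.8) < 1e-9)
    lr.setThreshold(0.8) // clears thresholds; getThresholds then yields (1 - 0.8, 0.8)
    assert(math.abs(lr.getThresholds(1) - 0.8) < 1e-9)
  }
}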
/**
* Logistic regression.
* Currently, this class only supports binary classification. It will support multiclass
* in the future.
*/
@Since("1.2.0")
class LogisticRegression @Since("1.2.0") (
@Since("1.4.0") override val uid: String)
extends ProbabilisticClassifier[Vector, LogisticRegression, LogisticRegressionModel]
with LogisticRegressionParams with DefaultParamsWritable with Logging {
@Since("1.4.0")
def this() = this(Identifiable.randomUID("logreg"))
/**
* Set the regularization parameter.
* Default is 0.0.
* @group setParam
*/
@Since("1.2.0")
def setRegParam(value: Double): this.type = set(regParam, value)
setDefault(regParam -> 0.0)
/**
* Set the ElasticNet mixing parameter.
* For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.
* For 0 < alpha < 1, the penalty is a combination of L1 and L2.
* Default is 0.0 which is an L2 penalty.
* @group setParam
*/
@Since("1.4.0")
def setElasticNetParam(value: Double): this.type = set(elasticNetParam, value)
setDefault(elasticNetParam -> 0.0)
/**
* Set the maximum number of iterations.
* Default is 100.
* @group setParam
*/
@Since("1.2.0")
def setMaxIter(value: Int): this.type = set(maxIter, value)
setDefault(maxIter -> 100)
/**
* Set the convergence tolerance of iterations.
* Smaller value will lead to higher accuracy with the cost of more iterations.
* Default is 1E-6.
* @group setParam
*/
@Since("1.4.0")
def setTol(value: Double): this.type = set(tol, value)
setDefault(tol -> 1E-6)
/**
* Whether to fit an intercept term.
* Default is true.
* @group setParam
*/
@Since("1.4.0")
def setFitIntercept(value: Boolean): this.type = set(fitIntercept, value)
setDefault(fitIntercept -> true)
/**
* Whether to standardize the training features before fitting the model.
* The coefficients of the models will always be returned on the original scale,
* so it will be transparent for users. Note that with or without standardization,
* the models should always converge to the same solution when no regularization
* is applied. In R's GLMNET package, the default behavior is true as well.
* Default is true.
* @group setParam
*/
@Since("1.5.0")
def setStandardization(value: Boolean): this.type = set(standardization, value)
setDefault(standardization -> true)
@Since("1.5.0")
override def setThreshold(value: Double): this.type = super.setThreshold(value)
@Since("1.5.0")
override def getThreshold: Double = super.getThreshold
/**
* Whether to over-/under-sample training instances according to the given weights in weightCol.
* If not set or empty String, all instances are treated equally (weight 1.0).
* Default is not set, so all instances have weight one.
* @group setParam
*/
@Since("1.6.0")
def setWeightCol(value: String): this.type = set(weightCol, value)
@Since("1.5.0")
override def setThresholds(value: Array[Double]): this.type = super.setThresholds(value)
@Since("1.5.0")
override def getThresholds: Array[Double] = super.getThresholds
private var optInitialModel: Option[LogisticRegressionModel] = None
/** @group setParam */
private[spark] def setInitialModel(model: LogisticRegressionModel): this.type = {
this.optInitialModel = Some(model)
this
}
override protected[spark] def train(dataset: Dataset[_]): LogisticRegressionModel = {
val handlePersistence = dataset.rdd.getStorageLevel == StorageLevel.NONE
train(dataset, handlePersistence)
}
protected[spark] def train(dataset: Dataset[_], handlePersistence: Boolean):
LogisticRegressionModel = {
val w = if (!isDefined(weightCol) || $(weightCol).isEmpty) lit(1.0) else col($(weightCol))
val instances: RDD[Instance] =
dataset.select(col($(labelCol)).cast(DoubleType), w, col($(featuresCol))).rdd.map {
case Row(label: Double, weight: Double, features: Vector) =>
Instance(label, weight, features)
}
if (handlePersistence) instances.persist(StorageLevel.MEMORY_AND_DISK)
val instr = Instrumentation.create(this, instances)
instr.logParams(regParam, elasticNetParam, standardization, threshold,
maxIter, tol, fitIntercept)
val (summarizer, labelSummarizer) = {
val seqOp = (c: (MultivariateOnlineSummarizer, MultiClassSummarizer),
instance: Instance) =>
(c._1.add(instance.features, instance.weight), c._2.add(instance.label, instance.weight))
val combOp = (c1: (MultivariateOnlineSummarizer, MultiClassSummarizer),
c2: (MultivariateOnlineSummarizer, MultiClassSummarizer)) =>
(c1._1.merge(c2._1), c1._2.merge(c2._2))
instances.treeAggregate(
new MultivariateOnlineSummarizer, new MultiClassSummarizer)(seqOp, combOp)
}
val histogram = labelSummarizer.histogram
val numInvalid = labelSummarizer.countInvalid
val numClasses = histogram.length
val numFeatures = summarizer.mean.size
if (isDefined(thresholds)) {
require($(thresholds).length == numClasses, this.getClass.getSimpleName +
".train() called with non-matching numClasses and thresholds.length." +
s" numClasses=$numClasses, but thresholds has length ${$(thresholds).length}")
}
instr.logNumClasses(numClasses)
instr.logNumFeatures(numFeatures)
val (coefficients, intercept, objectiveHistory) = {
if (numInvalid != 0) {
val msg = s"Classification labels should be in {0 to ${numClasses - 1} " +
s"Found $numInvalid invalid labels."
logError(msg)
throw new SparkException(msg)
}
if (numClasses > 2) {
val msg = s"Currently, LogisticRegression with ElasticNet in ML package only supports " +
s"binary classification. Found $numClasses in the input dataset."
logError(msg)
throw new SparkException(msg)
} else if ($(fitIntercept) && numClasses == 2 && histogram(0) == 0.0) {
logWarning(s"All labels are one and fitIntercept=true, so the coefficients will be " +
s"zeros and the intercept will be positive infinity; as a result, " +
s"training is not needed.")
(Vectors.sparse(numFeatures, Seq()), Double.PositiveInfinity, Array.empty[Double])
} else if ($(fitIntercept) && numClasses == 1) {
logWarning(s"All labels are zero and fitIntercept=true, so the coefficients will be " +
s"zeros and the intercept will be negative infinity; as a result, " +
s"training is not needed.")
(Vectors.sparse(numFeatures, Seq()), Double.NegativeInfinity, Array.empty[Double])
} else {
if (!$(fitIntercept) && numClasses == 2 && histogram(0) == 0.0) {
logWarning(s"All labels are one and fitIntercept=false. It's a dangerous ground, " +
s"so the algorithm may not converge.")
} else if (!$(fitIntercept) && numClasses == 1) {
logWarning(s"All labels are zero and fitIntercept=false. It's a dangerous ground, " +
s"so the algorithm may not converge.")
}
val featuresMean = summarizer.mean.toArray
val featuresStd = summarizer.variance.toArray.map(math.sqrt)
if (!$(fitIntercept) && (0 until numFeatures).exists { i =>
featuresStd(i) == 0.0 && featuresMean(i) != 0.0 }) {
logWarning("Fitting LogisticRegressionModel without intercept on dataset with " +
"constant nonzero column, Spark MLlib outputs zero coefficients for constant " +
"nonzero columns. This behavior is the same as R glmnet but different from LIBSVM.")
}
val regParamL1 = $(elasticNetParam) * $(regParam)
val regParamL2 = (1.0 - $(elasticNetParam)) * $(regParam)
val costFun = new LogisticCostFun(instances, numClasses, $(fitIntercept),
$(standardization), featuresStd, featuresMean, regParamL2)
val optimizer = if ($(elasticNetParam) == 0.0 || $(regParam) == 0.0) {
new BreezeLBFGS[BDV[Double]]($(maxIter), 10, $(tol))
} else {
val standardizationParam = $(standardization)
def regParamL1Fun = (index: Int) => {
// Remove the L1 penalization on the intercept
if (index == numFeatures) {
0.0
} else {
if (standardizationParam) {
regParamL1
} else {
// If `standardization` is false, we still standardize the data
// to improve the rate of convergence; as a result, we have to
// perform this reverse standardization by penalizing each component
// differently to get effectively the same objective function when
// the training dataset is not standardized.
if (featuresStd(index) != 0.0) regParamL1 / featuresStd(index) else 0.0
}
}
}
new BreezeOWLQN[Int, BDV[Double]]($(maxIter), 10, regParamL1Fun, $(tol))
}
val initialCoefficientsWithIntercept =
Vectors.zeros(if ($(fitIntercept)) numFeatures + 1 else numFeatures)
if (optInitialModel.isDefined && optInitialModel.get.coefficients.size != numFeatures) {
val vecSize = optInitialModel.get.coefficients.size
logWarning(
s"Initial coefficients will be ignored!! As its size $vecSize did not match the " +
s"expected size $numFeatures")
}
if (optInitialModel.isDefined && optInitialModel.get.coefficients.size == numFeatures) {
val initialCoefficientsWithInterceptArray = initialCoefficientsWithIntercept.toArray
optInitialModel.get.coefficients.foreachActive { case (index, value) =>
initialCoefficientsWithInterceptArray(index) = value
}
if ($(fitIntercept)) {
initialCoefficientsWithInterceptArray(numFeatures) = optInitialModel.get.intercept
}
} else if ($(fitIntercept)) {
/*
For binary logistic regression, when we initialize the coefficients as zeros,
it will converge faster if we initialize the intercept such that
it follows the distribution of the labels.
{{{
P(0) = 1 / (1 + \\exp(b)), and
P(1) = \\exp(b) / (1 + \\exp(b))
}}}, hence
{{{
b = \\log{P(1) / P(0)} = \\log{count_1 / count_0}
}}}
*/
initialCoefficientsWithIntercept.toArray(numFeatures) = math.log(
histogram(1) / histogram(0))
}
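// Worked example of the initialization above (hedged, not part of the original source): with
// histogram = Array(10.0, 30.0), i.e. 10 negatives and 30 positives, the intercept starts at
// log(30 / 10) ≈ 1.0986, which already gives P(1) = 30 / 40 = 0.75 before any coefficient is fit.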
val states = optimizer.iterations(new CachedDiffFunction(costFun),
initialCoefficientsWithIntercept.asBreeze.toDenseVector)
/*
Note that in logistic regression, the objective history (loss + regularization)
is the log-likelihood, which is invariant under feature standardization. As a result,
the objective history from the optimizer is the same as the one in the original space.
*/
val arrayBuilder = mutable.ArrayBuilder.make[Double]
var state: optimizer.State = null
while (states.hasNext) {
state = states.next()
arrayBuilder += state.adjustedValue
}
if (state == null) {
val msg = s"${optimizer.getClass.getName} failed."
logError(msg)
throw new SparkException(msg)
}
if (!state.actuallyConverged) {
logWarning("LogisticRegression training finished but the result " +
s"is not converged because: ${state.convergedReason.get.reason}")
}
/*
The coefficients are trained in the scaled space; we're converting them back to
the original space.
Note that the intercept in scaled space and original space is the same;
as a result, no scaling is needed.
*/
val rawCoefficients = state.x.toArray.clone()
var i = 0
while (i < numFeatures) {
rawCoefficients(i) *= { if (featuresStd(i) != 0.0) 1.0 / featuresStd(i) else 0.0 }
i += 1
}
if ($(fitIntercept)) {
(Vectors.dense(rawCoefficients.dropRight(1)).compressed, rawCoefficients.last,
arrayBuilder.result())
} else {
(Vectors.dense(rawCoefficients).compressed, 0.0, arrayBuilder.result())
}
}
}
if (handlePersistence) instances.unpersist()
val model = copyValues(new LogisticRegressionModel(uid, coefficients, intercept))
val (summaryModel, probabilityColName) = model.findSummaryModelAndProbabilityCol()
val logRegSummary = new BinaryLogisticRegressionTrainingSummary(
summaryModel.transform(dataset),
probabilityColName,
$(labelCol),
$(featuresCol),
objectiveHistory)
val m = model.setSummary(logRegSummary)
instr.logSuccess(m)
m
}
@Since("1.4.0")
override def copy(extra: ParamMap): LogisticRegression = defaultCopy(extra)
}
@Since("1.6.0")
object LogisticRegression extends DefaultParamsReadable[LogisticRegression] {
@Since("1.6.0")
override def load(path: String): LogisticRegression = super.load(path)
}
/**
* Model produced by [[LogisticRegression]].
*/
@Since("1.4.0")
class LogisticRegressionModel private[spark] (
@Since("1.4.0") override val uid: String,
@Since("2.0.0") val coefficients: Vector,
@Since("1.3.0") val intercept: Double)
extends ProbabilisticClassificationModel[Vector, LogisticRegressionModel]
with LogisticRegressionParams with MLWritable {
@Since("1.5.0")
override def setThreshold(value: Double): this.type = super.setThreshold(value)
@Since("1.5.0")
override def getThreshold: Double = super.getThreshold
@Since("1.5.0")
override def setThresholds(value: Array[Double]): this.type = super.setThresholds(value)
@Since("1.5.0")
override def getThresholds: Array[Double] = super.getThresholds
/** Margin (rawPrediction) for class label 1. For binary classification only. */
private val margin: Vector => Double = (features) => {
BLAS.dot(features, coefficients) + intercept
}
/** Score (probability) for class label 1. For binary classification only. */
private val score: Vector => Double = (features) => {
val m = margin(features)
1.0 / (1.0 + math.exp(-m))
}
@Since("1.6.0")
override val numFeatures: Int = coefficients.size
@Since("1.3.0")
override val numClasses: Int = 2
private var trainingSummary: Option[LogisticRegressionTrainingSummary] = None
/**
* Gets summary of model on training set. An exception is
* thrown if `trainingSummary == None`.
*/
@Since("1.5.0")
def summary: LogisticRegressionTrainingSummary = trainingSummary.getOrElse {
throw new SparkException("No training summary available for this LogisticRegressionModel")
}
/**
* If the probability column is set, returns the current model and probability column;
* otherwise generates a new column and sets it as the probability column on a new copy
* of the current model.
*/
private[classification] def findSummaryModelAndProbabilityCol():
(LogisticRegressionModel, String) = {
$(probabilityCol) match {
case "" =>
val probabilityColName = "probability_" + java.util.UUID.randomUUID.toString
(copy(ParamMap.empty).setProbabilityCol(probabilityColName), probabilityColName)
case p => (this, p)
}
}
private[classification] def setSummary(
summary: LogisticRegressionTrainingSummary): this.type = {
this.trainingSummary = Some(summary)
this
}
/** Indicates whether a training summary exists for this model instance. */
@Since("1.5.0")
def hasSummary: Boolean = trainingSummary.isDefined
/**
* Evaluates the model on a test dataset.
* @param dataset Test dataset to evaluate model on.
*/
@Since("2.0.0")
def evaluate(dataset: Dataset[_]): LogisticRegressionSummary = {
// Handle possible missing or invalid prediction columns
val (summaryModel, probabilityColName) = findSummaryModelAndProbabilityCol()
new BinaryLogisticRegressionSummary(summaryModel.transform(dataset),
probabilityColName, $(labelCol), $(featuresCol))
}
/**
* Predict label for the given feature vector.
* The behavior of this can be adjusted using [[thresholds]].
*/
override protected def predict(features: Vector): Double = {
// Note: We should use getThreshold instead of $(threshold) since getThreshold is overridden.
if (score(features) > getThreshold) 1 else 0
}
override protected def raw2probabilityInPlace(rawPrediction: Vector): Vector = {
rawPrediction match {
case dv: DenseVector =>
var i = 0
val size = dv.size
while (i < size) {
dv.values(i) = 1.0 / (1.0 + math.exp(-dv.values(i)))
i += 1
}
dv
case sv: SparseVector =>
throw new RuntimeException("Unexpected error in LogisticRegressionModel:" +
" raw2probabilitiesInPlace encountered SparseVector")
}
}
override protected def predictRaw(features: Vector): Vector = {
val m = margin(features)
Vectors.dense(-m, m)
}
@Since("1.4.0")
override def copy(extra: ParamMap): LogisticRegressionModel = {
val newModel = copyValues(new LogisticRegressionModel(uid, coefficients, intercept), extra)
if (trainingSummary.isDefined) newModel.setSummary(trainingSummary.get)
newModel.setParent(parent)
}
override protected def raw2prediction(rawPrediction: Vector): Double = {
// Note: We should use getThreshold instead of $(threshold) since getThreshold is overridden.
val t = getThreshold
val rawThreshold = if (t == 0.0) {
Double.NegativeInfinity
} else if (t == 1.0) {
Double.PositiveInfinity
} else {
math.log(t / (1.0 - t))
}
if (rawPrediction(1) > rawThreshold) 1 else 0
}
override protected def probability2prediction(probability: Vector): Double = {
// Note: We should use getThreshold instead of $(threshold) since getThreshold is overridden.
if (probability(1) > getThreshold) 1 else 0
}
/**
* Returns a [[org.apache.spark.ml.util.MLWriter]] instance for this ML instance.
*
* For [[LogisticRegressionModel]], this does NOT currently save the training [[summary]].
* An option to save [[summary]] may be added in the future.
*
* This also does not save the [[parent]] currently.
*/
@Since("1.6.0")
override def write: MLWriter = new LogisticRegressionModel.LogisticRegressionModelWriter(this)
}
@Since("1.6.0")
object LogisticRegressionModel extends MLReadable[LogisticRegressionModel] {
@Since("1.6.0")
override def read: MLReader[LogisticRegressionModel] = new LogisticRegressionModelReader
@Since("1.6.0")
override def load(path: String): LogisticRegressionModel = super.load(path)
/** [[MLWriter]] instance for [[LogisticRegressionModel]] */
private[LogisticRegressionModel]
class LogisticRegressionModelWriter(instance: LogisticRegressionModel)
extends MLWriter with Logging {
private case class Data(
numClasses: Int,
numFeatures: Int,
intercept: Double,
coefficients: Vector)
override protected def saveImpl(path: String): Unit = {
// Save metadata and Params
DefaultParamsWriter.saveMetadata(instance, path, sc)
// Save model data: numClasses, numFeatures, intercept, coefficients
val data = Data(instance.numClasses, instance.numFeatures, instance.intercept,
instance.coefficients)
val dataPath = new Path(path, "data").toString
sparkSession.createDataFrame(Seq(data)).repartition(1).write.parquet(dataPath)
}
}
private class LogisticRegressionModelReader
extends MLReader[LogisticRegressionModel] {
/** Checked against metadata when loading model */
private val className = classOf[LogisticRegressionModel].getName
override def load(path: String): LogisticRegressionModel = {
val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
val dataPath = new Path(path, "data").toString
val data = sparkSession.read.format("parquet").load(dataPath)
// We will need numClasses, numFeatures in the future for multinomial logreg support.
val Row(numClasses: Int, numFeatures: Int, intercept: Double, coefficients: Vector) =
MLUtils.convertVectorColumnsToML(data, "coefficients")
.select("numClasses", "numFeatures", "intercept", "coefficients")
.head()
val model = new LogisticRegressionModel(metadata.uid, coefficients, intercept)
DefaultParamsReader.getAndSetParams(model, metadata)
model
}
}
}
/**
* MultiClassSummarizer computes the number of distinct labels and corresponding counts,
* and validates the data to see if the labels used for k class multi-label classification
* are in the range of {0, 1, ..., k - 1} in an online fashion.
*
* Two MultiClassSummarizer objects can be merged together to have a statistical summary of the
* corresponding joint dataset.
*/
private[classification] class MultiClassSummarizer extends Serializable {
// The first element of the value in distinctMap is the actual number of instances,
// and the second element is the sum of the weights.
private val distinctMap = new mutable.HashMap[Int, (Long, Double)]
private var totalInvalidCnt: Long = 0L
/**
* Add a new label into this MultiClassSummarizer, and update the distinct map.
* @param label The label for this data point.
* @param weight The weight of this instance.
* @return This MultiClassSummarizer
*/
def add(label: Double, weight: Double = 1.0): this.type = {
require(weight >= 0.0, s"instance weight, $weight has to be >= 0.0")
if (weight == 0.0) return this
if (label - label.toInt != 0.0 || label < 0) {
totalInvalidCnt += 1
this
}
else {
val (counts: Long, weightSum: Double) = distinctMap.getOrElse(label.toInt, (0L, 0.0))
distinctMap.put(label.toInt, (counts + 1L, weightSum + weight))
this
}
}
/**
* Merge another MultiClassSummarizer, and update the distinct map.
* (Note that it will merge the smaller distinct map into the larger one using in-place
* merging, so either `this` or the `other` object will be modified and returned.)
*
* @param other The other MultiClassSummarizer to be merged.
* @return The merged MultiClassSummarizer object.
*/
def merge(other: MultiClassSummarizer): MultiClassSummarizer = {
val (largeMap, smallMap) = if (this.distinctMap.size > other.distinctMap.size) {
(this, other)
} else {
(other, this)
}
smallMap.distinctMap.foreach {
case (key, value) =>
val (counts: Long, weightSum: Double) = largeMap.distinctMap.getOrElse(key, (0L, 0.0))
largeMap.distinctMap.put(key, (counts + value._1, weightSum + value._2))
}
largeMap.totalInvalidCnt += smallMap.totalInvalidCnt
largeMap
}
/** @return The total invalid input counts. */
def countInvalid: Long = totalInvalidCnt
/** @return The number of distinct labels in the input dataset. */
def numClasses: Int = if (distinctMap.isEmpty) 0 else distinctMap.keySet.max + 1
/** @return The weightSum of each label in the input dataset. */
def histogram: Array[Double] = {
val result = Array.ofDim[Double](numClasses)
var i = 0
val len = result.length
while (i < len) {
result(i) = distinctMap.getOrElse(i, (0L, 0.0))._2
i += 1
}
result
}
}
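// Hedged usage sketch (not part of the original Spark source): adding labels 0.0, 1.0, 1.0 with
// unit weights gives numClasses = 2 and histogram = Array(1.0, 2.0); a non-integer label such as
// 1.5 only increments countInvalid.
private object MultiClassSummarizerSketch {
  def demo(): Unit = {
    val s = new MultiClassSummarizer
    s.add(0.0).add(1.0).add(1.0).add(1.5)
    assert(s.numClasses == 2 && s.countInvalid == 1L)
    assert(s.histogram.sameElements(Array(1.0, 2.0)))
  }
}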
/**
* Abstraction for multinomial Logistic Regression Training results.
* Currently, the training summary ignores the training weights except
* for the objective trace.
*/
sealed trait LogisticRegressionTrainingSummary extends LogisticRegressionSummary {
/** objective function (scaled loss + regularization) at each iteration. */
def objectiveHistory: Array[Double]
/** Number of training iterations until termination */
def totalIterations: Int = objectiveHistory.length
}
/**
* Abstraction for Logistic Regression Results for a given model.
*/
sealed trait LogisticRegressionSummary extends Serializable {
/** Dataframe output by the model's `transform` method. */
def predictions: DataFrame
/** Field in "predictions" which gives the probability of each class as a vector. */
def probabilityCol: String
/** Field in "predictions" which gives the true label of each instance (if available). */
def labelCol: String
/** Field in "predictions" which gives the features of each instance as a vector. */
def featuresCol: String
}
/**
* :: Experimental ::
* Logistic regression training results.
*
* @param predictions dataframe output by the model's `transform` method.
* @param probabilityCol field in "predictions" which gives the probability of
* each class as a vector.
* @param labelCol field in "predictions" which gives the true label of each instance.
* @param featuresCol field in "predictions" which gives the features of each instance as a vector.
* @param objectiveHistory objective function (scaled loss + regularization) at each iteration.
*/
@Experimental
@Since("1.5.0")
class BinaryLogisticRegressionTrainingSummary private[classification] (
predictions: DataFrame,
probabilityCol: String,
labelCol: String,
featuresCol: String,
@Since("1.5.0") val objectiveHistory: Array[Double])
extends BinaryLogisticRegressionSummary(predictions, probabilityCol, labelCol, featuresCol)
with LogisticRegressionTrainingSummary {
}
/**
* :: Experimental ::
* Binary Logistic regression results for a given model.
*
* @param predictions dataframe output by the model's `transform` method.
* @param probabilityCol field in "predictions" which gives the probability of
* each class as a vector.
* @param labelCol field in "predictions" which gives the true label of each instance.
* @param featuresCol field in "predictions" which gives the features of each instance as a vector.
*/
@Experimental
@Since("1.5.0")
class BinaryLogisticRegressionSummary private[classification] (
@Since("1.5.0") @transient override val predictions: DataFrame,
@Since("1.5.0") override val probabilityCol: String,
@Since("1.5.0") override val labelCol: String,
@Since("1.6.0") override val featuresCol: String) extends LogisticRegressionSummary {
private val sparkSession = predictions.sparkSession
import sparkSession.implicits._
/**
* Returns a BinaryClassificationMetrics object.
*/
// TODO: Allow the user to vary the number of bins using a setBins method in
// BinaryClassificationMetrics. For now the default is set to 100.
@transient private val binaryMetrics = new BinaryClassificationMetrics(
predictions.select(col(probabilityCol), col(labelCol).cast(DoubleType)).rdd.map {
case Row(score: Vector, label: Double) => (score(1), label)
}, 100
)
/**
* Returns the receiver operating characteristic (ROC) curve,
* which is a Dataframe having two fields (FPR, TPR)
* with (0.0, 0.0) prepended and (1.0, 1.0) appended to it.
* See http://en.wikipedia.org/wiki/Receiver_operating_characteristic
*
* Note: This ignores instance weights (setting all to 1.0) from `LogisticRegression.weightCol`.
* This will change in later Spark versions.
*/
@Since("1.5.0")
@transient lazy val roc: DataFrame = binaryMetrics.roc().toDF("FPR", "TPR")
/**
* Computes the area under the receiver operating characteristic (ROC) curve.
*
* Note: This ignores instance weights (setting all to 1.0) from `LogisticRegression.weightCol`.
* This will change in later Spark versions.
*/
@Since("1.5.0")
lazy val areaUnderROC: Double = binaryMetrics.areaUnderROC()
/**
* Returns the precision-recall curve, which is a Dataframe containing
* two fields (recall, precision), with (0.0, 1.0) prepended to it.
*
* Note: This ignores instance weights (setting all to 1.0) from `LogisticRegression.weightCol`.
* This will change in later Spark versions.
*/
@Since("1.5.0")
@transient lazy val pr: DataFrame = binaryMetrics.pr().toDF("recall", "precision")
/**
* Returns a dataframe with two fields (threshold, F-Measure) curve with beta = 1.0.
*
* Note: This ignores instance weights (setting all to 1.0) from `LogisticRegression.weightCol`.
* This will change in later Spark versions.
*/
@Since("1.5.0")
@transient lazy val fMeasureByThreshold: DataFrame = {
binaryMetrics.fMeasureByThreshold().toDF("threshold", "F-Measure")
}
/**
* Returns a dataframe with two fields (threshold, precision) curve.
* Every distinct probability obtained by transforming the dataset is used
* as a threshold when calculating the precision.
*
* Note: This ignores instance weights (setting all to 1.0) from `LogisticRegression.weightCol`.
* This will change in later Spark versions.
*/
@Since("1.5.0")
@transient lazy val precisionByThreshold: DataFrame = {
binaryMetrics.precisionByThreshold().toDF("threshold", "precision")
}
/**
* Returns a dataframe with two fields (threshold, recall) curve.
* Every distinct probability obtained by transforming the dataset is used
* as a threshold when calculating the recall.
*
* Note: This ignores instance weights (setting all to 1.0) from `LogisticRegression.weightCol`.
* This will change in later Spark versions.
*/
@Since("1.5.0")
@transient lazy val recallByThreshold: DataFrame = {
binaryMetrics.recallByThreshold().toDF("threshold", "recall")
}
}
/**
* LogisticAggregator computes the gradient and loss for the binary logistic loss function, as used
* in binary classification, for instances in sparse or dense vectors, in an online fashion.
*
* Note that multinomial logistic loss is not supported yet!
*
* Two LogisticAggregator objects can be merged together to have a summary of the loss and gradient of
* the corresponding joint dataset.
*
* @param numClasses the number of possible outcomes for k classes classification problem in
* Multinomial Logistic Regression.
* @param fitIntercept Whether to fit an intercept term.
*/
private class LogisticAggregator(
private val numFeatures: Int,
numClasses: Int,
fitIntercept: Boolean) extends Serializable {
private var weightSum = 0.0
private var lossSum = 0.0
private val gradientSumArray =
Array.ofDim[Double](if (fitIntercept) numFeatures + 1 else numFeatures)
/**
* Add a new training instance to this LogisticAggregator, and update the loss and gradient
* of the objective function.
*
* @param instance The instance of data point to be added.
* @param coefficients The coefficients corresponding to the features.
* @param featuresStd The standard deviation values of the features.
* @return This LogisticAggregator object.
*/
def add(
instance: Instance,
coefficients: Vector,
featuresStd: Array[Double]): this.type = {
instance match { case Instance(label, weight, features) =>
require(numFeatures == features.size, s"Dimensions mismatch when adding new instance." +
s" Expecting $numFeatures but got ${features.size}.")
require(weight >= 0.0, s"instance weight, $weight has to be >= 0.0")
if (weight == 0.0) return this
val coefficientsArray = coefficients match {
case dv: DenseVector => dv.values
case _ =>
throw new IllegalArgumentException(
s"coefficients only supports dense vector but got type ${coefficients.getClass}.")
}
val localGradientSumArray = gradientSumArray
numClasses match {
case 2 =>
// For Binary Logistic Regression.
val margin = - {
var sum = 0.0
features.foreachActive { (index, value) =>
if (featuresStd(index) != 0.0 && value != 0.0) {
sum += coefficientsArray(index) * (value / featuresStd(index))
}
}
sum + {
if (fitIntercept) coefficientsArray(numFeatures) else 0.0
}
}
val multiplier = weight * (1.0 / (1.0 + math.exp(margin)) - label)
features.foreachActive { (index, value) =>
if (featuresStd(index) != 0.0 && value != 0.0) {
localGradientSumArray(index) += multiplier * (value / featuresStd(index))
}
}
if (fitIntercept) {
localGradientSumArray(numFeatures) += multiplier
}
if (label > 0) {
// The following is equivalent to log(1 + exp(margin)) but more numerically stable.
lossSum += weight * MLUtils.log1pExp(margin)
} else {
lossSum += weight * (MLUtils.log1pExp(margin) - margin)
}
case _ =>
throw new NotImplementedError("LogisticRegression with ElasticNet in ML package " +
"only supports binary classification for now.")
}
weightSum += weight
this
}
}
/**
* Merge another LogisticAggregator, and update the loss and gradient
* of the objective function.
* (Note that it's in place merging; as a result, `this` object will be modified.)
*
* @param other The other LogisticAggregator to be merged.
* @return This LogisticAggregator object.
*/
def merge(other: LogisticAggregator): this.type = {
require(numFeatures == other.numFeatures, s"Dimensions mismatch when merging with another " +
s"LeastSquaresAggregator. Expecting $numFeatures but got ${other.numFeatures}.")
if (other.weightSum != 0.0) {
weightSum += other.weightSum
lossSum += other.lossSum
var i = 0
val localThisGradientSumArray = this.gradientSumArray
val localOtherGradientSumArray = other.gradientSumArray
val len = localThisGradientSumArray.length
while (i < len) {
localThisGradientSumArray(i) += localOtherGradientSumArray(i)
i += 1
}
}
this
}
def loss: Double = {
require(weightSum > 0.0, s"The effective number of instances should be " +
s"greater than 0.0, but $weightSum.")
lossSum / weightSum
}
def gradient: Vector = {
require(weightSum > 0.0, s"The effective number of instances should be " +
s"greater than 0.0, but $weightSum.")
val result = Vectors.dense(gradientSumArray.clone())
scal(1.0 / weightSum, result)
result
}
}
/**
* LogisticCostFun implements Breeze's DiffFunction[T] for a multinomial logistic loss function,
* as used in multi-class classification (it is also used in binary logistic regression).
* It returns the loss and gradient with L2 regularization at a particular point (coefficients).
* It's used in Breeze's convex optimization routines.
*/
private class LogisticCostFun(
instances: RDD[Instance],
numClasses: Int,
fitIntercept: Boolean,
standardization: Boolean,
featuresStd: Array[Double],
featuresMean: Array[Double],
regParamL2: Double) extends DiffFunction[BDV[Double]] {
override def calculate(coefficients: BDV[Double]): (Double, BDV[Double]) = {
val numFeatures = featuresStd.length
val coeffs = Vectors.fromBreeze(coefficients)
val n = coeffs.size
val localFeaturesStd = featuresStd
val logisticAggregator = {
val seqOp = (c: LogisticAggregator, instance: Instance) =>
c.add(instance, coeffs, localFeaturesStd)
val combOp = (c1: LogisticAggregator, c2: LogisticAggregator) => c1.merge(c2)
instances.treeAggregate(
new LogisticAggregator(numFeatures, numClasses, fitIntercept)
)(seqOp, combOp)
}
val totalGradientArray = logisticAggregator.gradient.toArray
// regVal is the sum of coefficients squares excluding intercept for L2 regularization.
val regVal = if (regParamL2 == 0.0) {
0.0
} else {
var sum = 0.0
coeffs.foreachActive { (index, value) =>
// If `fitIntercept` is true, the last term which is intercept doesn't
// contribute to the regularization.
if (index != numFeatures) {
// The following code will compute the loss of the regularization; also
// the gradient of the regularization, and add back to totalGradientArray.
sum += {
if (standardization) {
totalGradientArray(index) += regParamL2 * value
value * value
} else {
if (featuresStd(index) != 0.0) {
// If `standardization` is false, we still standardize the data
// to improve the rate of convergence; as a result, we have to
// perform this reverse standardization by penalizing each component
// differently to get effectively the same objective function when
// the training dataset is not standardized.
val temp = value / (featuresStd(index) * featuresStd(index))
totalGradientArray(index) += regParamL2 * temp
value * temp
} else {
0.0
}
}
}
}
}
0.5 * regParamL2 * sum
}
(logisticAggregator.loss + regVal, new BDV(totalGradientArray))
}
}
|
gioenn/xSpark
|
mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
|
Scala
|
apache-2.0
| 43,476
|
/*
* Licensed to The Apereo Foundation under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* The Apereo Foundation licenses this file to you under the Apache License,
* Version 2.0, (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tle.web.api.item
import com.tle.beans.item.ItemId
import com.tle.legacy.LegacyGuice
import com.tle.web.sections.{SectionId, SectionInfo, SectionNode, SectionUtils}
import com.tle.web.sections.equella.AbstractScalaSection
import com.tle.web.sections.generic.DefaultSectionTree
import com.tle.web.selection.{SelectedResource, SelectedResourceKey}
import com.tle.web.template.RenderNewTemplate
import io.swagger.annotations.Api
import javax.servlet.http.{HttpServletRequest, HttpServletResponse}
import javax.ws.rs.core.{Context, Response}
import javax.ws.rs.{POST, Path, PathParam, QueryParam}
import scala.collection.JavaConverters._
case class SelectionKey(uuid: String,
version: Int,
`type`: String,
attachmentUuid: Option[String],
folderId: Option[String],
url: Option[String])
case class ResourceSelection(key: SelectionKey, title: String)
@Path("selection/")
@Api("Selection session")
class SelectionApi {
import SelectionApi._
@POST
@Path("{sessid}/return")
def returnSelections(@QueryParam("integid") integid: String,
@PathParam("sessid") sessid: String,
@Context request: HttpServletRequest,
@Context response: HttpServletResponse): Response = {
LegacyGuice.userSessionService.reenableSessionUse()
val info = setupSession(sessid, Option(integid), request, response)
val sessionData = selectionService.getCurrentSession(info)
val integ = integrationService.getIntegrationInterface(info)
val ok = integ.select(info, sessionData)
val rc = info.getRootRenderContext
info.setAttribute(RenderNewTemplate.DisableNewUI, true)
val output = SectionUtils.renderToString(rc, SectionUtils.renderSection(rc, "temp"))
Response.ok(output).build()
}
@POST
@Path("{sessid}/add")
def addResource(@PathParam("sessid") sessid: String,
@Context request: HttpServletRequest,
@Context response: HttpServletResponse,
resource: ResourceSelection): Response = {
LegacyGuice.userSessionService.reenableSessionUse()
val info = setupSession(sessid, None, request, response)
val res = new SelectedResource(toSRK(resource.key))
res.setTitle(resource.title)
selectionService.addSelectedResource(info, res, false)
Response.ok().build()
}
def toSRK(resKey: SelectionKey): SelectedResourceKey = {
val key = new SelectedResourceKey(new ItemId(resKey.uuid, resKey.version), null)
key.setType(resKey.`type`.charAt(0))
resKey.folderId.foreach(key.setFolderId)
resKey.attachmentUuid.foreach(key.setAttachmentUuid)
resKey.url.foreach(key.setUrl)
key
}
@POST
@Path("{sessid}/remove")
def removeResource(@PathParam("sessid") sessid: String,
@Context request: HttpServletRequest,
@Context response: HttpServletResponse,
resKey: SelectionKey): Response = {
LegacyGuice.userSessionService.reenableSessionUse()
val info = setupSession(sessid, None, request, response)
selectionService.removeSelectedResource(info, toSRK(resKey))
Response.ok().build()
}
}
object SelectionApi {
lazy val integrationService = LegacyGuice.integrationService.get()
lazy val selectionService = LegacyGuice.selectionService.get()
val blankTree =
new DefaultSectionTree(LegacyGuice.treeRegistry, new SectionNode("", new AbstractScalaSection {
override type M = Int
override def newModel: SectionInfo => Int = _ => 1
}))
def setupSession(sessid: String,
integid: Option[String],
request: HttpServletRequest,
response: HttpServletResponse) = {
val paramMap = Iterable(Some("_sl.stateId" -> Array(sessid)),
integid.map("_int.id" -> Array(_))).flatten.toMap
val info = LegacyGuice.sectionsController.createInfo(blankTree,
"/",
request,
response,
null,
paramMap.asJava,
null)
info.fireBeforeEvents()
info
}
}
|
equella/Equella
|
Source/Plugins/Core/com.equella.core/scalasrc/com/tle/web/api/item/SelectionApi.scala
|
Scala
|
apache-2.0
| 5,315
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spark.streaming.dstream
import spark.Logging
import spark.storage.StorageLevel
import spark.streaming.{Time, DStreamCheckpointData, StreamingContext}
import java.util.Properties
import java.util.concurrent.Executors
import kafka.consumer._
import kafka.message.{Message, MessageSet, MessageAndMetadata}
import kafka.serializer.Decoder
import kafka.utils.{Utils, ZKGroupTopicDirs}
import kafka.utils.ZkUtils._
import kafka.utils.ZKStringSerializer
import org.I0Itec.zkclient._
import scala.collection.Map
import scala.collection.mutable.HashMap
import scala.collection.JavaConversions._
/**
* Input stream that pulls messages from a Kafka Broker.
*
* @param kafkaParams Map of Kafka configuration parameters. See: http://kafka.apache.org/configuration.html
* @param topics Map of (topic_name -> numPartitions) to consume. Each partition is consumed
* in its own thread.
* @param storageLevel RDD storage level.
*/
private[streaming]
class KafkaInputDStream[T: ClassManifest, D <: Decoder[_]: Manifest](
@transient ssc_ : StreamingContext,
kafkaParams: Map[String, String],
topics: Map[String, Int],
storageLevel: StorageLevel
) extends NetworkInputDStream[T](ssc_ ) with Logging {
def getReceiver(): NetworkReceiver[T] = {
new KafkaReceiver[T, D](kafkaParams, topics, storageLevel)
.asInstanceOf[NetworkReceiver[T]]
}
}
private[streaming]
class KafkaReceiver[T: ClassManifest, D <: Decoder[_]: Manifest](
kafkaParams: Map[String, String],
topics: Map[String, Int],
storageLevel: StorageLevel
) extends NetworkReceiver[Any] {
// Handles pushing data into the BlockManager
lazy protected val blockGenerator = new BlockGenerator(storageLevel)
// Connection to Kafka
var consumerConnector : ConsumerConnector = null
def onStop() {
blockGenerator.stop()
}
def onStart() {
blockGenerator.start()
// In case we are using multiple Threads to handle Kafka Messages
val executorPool = Executors.newFixedThreadPool(topics.values.reduce(_ + _))
logInfo("Starting Kafka Consumer Stream with group: " + kafkaParams("groupid"))
// Kafka connection properties
val props = new Properties()
kafkaParams.foreach(param => props.put(param._1, param._2))
// Create the connection to the cluster
logInfo("Connecting to Zookeper: " + kafkaParams("zk.connect"))
val consumerConfig = new ConsumerConfig(props)
consumerConnector = Consumer.create(consumerConfig)
logInfo("Connected to " + kafkaParams("zk.connect"))
// When autooffset.reset is defined, it is our responsibility to try and whack the
// consumer group zk node.
if (kafkaParams.contains("autooffset.reset")) {
tryZookeeperConsumerGroupCleanup(kafkaParams("zk.connect"), kafkaParams("groupid"))
}
// Create threads for each topic/message stream we are listening to
val decoder = manifest[D].erasure.newInstance.asInstanceOf[Decoder[T]]
val topicMessageStreams = consumerConnector.createMessageStreams(topics, decoder)
// Start the messages handler for each partition
topicMessageStreams.values.foreach { streams =>
streams.foreach { stream => executorPool.submit(new MessageHandler(stream)) }
}
}
// Handles Kafka Messages
private class MessageHandler[T: ClassManifest](stream: KafkaStream[T]) extends Runnable {
def run() {
logInfo("Starting MessageHandler.")
for (msgAndMetadata <- stream) {
blockGenerator += msgAndMetadata.message
}
}
}
// It is our responsibility to delete the consumer group when specifying autooffset.reset. This is because
// Kafka 0.7.2 only honors this param when the group is not in zookeeper.
//
// The Kafka high-level consumer doesn't currently expose setting offsets; this is a trick copied from Kafka's
// ConsoleConsumer. See code related to 'autooffset.reset' when it is set to 'smallest'/'largest':
// https://github.com/apache/kafka/blob/0.7.2/core/src/main/scala/kafka/consumer/ConsoleConsumer.scala
private def tryZookeeperConsumerGroupCleanup(zkUrl: String, groupId: String) {
try {
val dir = "/consumers/" + groupId
logInfo("Cleaning up temporary zookeeper data under " + dir + ".")
val zk = new ZkClient(zkUrl, 30*1000, 30*1000, ZKStringSerializer)
zk.deleteRecursive(dir)
zk.close()
} catch {
case _: Throwable => // swallow
}
}
}
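// Hedged usage illustration (not part of the original file): typical parameter maps for this
// receiver, matching the keys read in onStart(); here the topic "events" would be consumed by
// two threads, and the values are placeholders.
//
//   val kafkaParams = Map("zk.connect" -> "localhost:2181", "groupid" -> "example-group")
//   val topics      = Map("events" -> 2)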
|
wgpshashank/spark
|
streaming/src/main/scala/spark/streaming/dstream/KafkaInputDStream.scala
|
Scala
|
apache-2.0
| 5,201
|
/**
* Copyright 2015 Adrian Hurtado (adrianhurt)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views.html.b3
package object horizontal {
import play.twirl.api.Html
import play.api.mvc.{ Call, RequestHeader }
import play.api.i18n.Messages
import views.html.helper._
import views.html.bs.Args.{ inner, isTrue }
/**
* Declares the class for the Horizontal FieldConstructor.
* It needs the column widths for the corresponding Bootstrap3 form-group
*/
case class HorizontalFieldConstructor(colLabel: String, colInput: String, val withFeedbackIcons: Boolean = false) extends B3FieldConstructor {
/* The equivalent offset if label is not present (ex: colLabel = "col-md-2" => colOffset = "col-md-offset-2") */
val colOffset: String = {
val chunks = colLabel.split("-")
(chunks.init :+ "offset" :+ chunks.last).mkString("-")
}
/* Define the class of the corresponding form */
val formClass = "form-horizontal"
/* Renders the corresponding template of the field constructor */
def apply(fieldInfo: B3FieldInfo, inputHtml: Html)(implicit messages: Messages) = bsFieldConstructor(fieldInfo, inputHtml, colLabel, colOffset, colInput)(this, messages)
/* Renders the corresponding template of the form group */
def apply(contentHtml: Html, argsMap: Map[Symbol, Any])(implicit messages: Messages) = bsFormGroup(contentHtml, argsMap, colLabel, colOffset, colInput)(messages)
}
/**
* Returns a new HorizontalFieldConstructor to use for specific forms or scopes (don't use it as a default one).
* If a default B3FieldConstructor and a specific HorizontalFieldConstructor are within the same scope, the more
* specific will be chosen.
*/
def fieldConstructorSpecific(colLabel: String, colInput: String, withFeedbackIcons: Boolean = false): HorizontalFieldConstructor =
new HorizontalFieldConstructor(colLabel, colInput, withFeedbackIcons)
/**
* Returns it as a B3FieldConstructor to use it as default within a template
*/
def fieldConstructor(colLabel: String, colInput: String, withFeedbackIcons: Boolean = false): B3FieldConstructor =
fieldConstructorSpecific(colLabel, colInput, withFeedbackIcons)
/**
* **********************************************************************************************************************************
* SHORTCUT HELPERS
* *********************************************************************************************************************************
*/
def form(action: Call, colLabel: String, colInput: String, args: (Symbol, Any)*)(body: HorizontalFieldConstructor => Html) = {
val hfc = fieldConstructorSpecific(colLabel, colInput, withFeedbackIcons = isTrue(args, '_feedbackIcons))
views.html.b3.form(action, inner(args): _*)(body(hfc))(hfc)
}
def formCSRF(action: Call, colLabel: String, colInput: String, args: (Symbol, Any)*)(body: HorizontalFieldConstructor => Html)(implicit request: RequestHeader) = {
val hfc = fieldConstructorSpecific(colLabel, colInput, withFeedbackIcons = isTrue(args, '_feedbackIcons))
views.html.b3.formCSRF(action, inner(args): _*)(body(hfc))(hfc, request)
}
}
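// Hedged illustration (not part of the original file) of the colOffset derivation above:
// "col-md-2" is split on "-" and "offset" is spliced in before the last chunk, yielding
// "col-md-offset-2". For example:
//
//   b3.horizontal.fieldConstructorSpecific("col-md-2", "col-md-10").colOffset  // "col-md-offset-2"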
|
adrianhurt/play-bootstrap3
|
play25-bootstrap3/module/app/views/b3/horizontal/package.scala
|
Scala
|
apache-2.0
| 3,692
|
package org.chipmunk.value
import java.sql.{ Date => SQLDate }
import scala.language.implicitConversions
import org.joda.time.DateTime
object Date {
implicit def sqlDateAsPersistentDate(date: SQLDate): Date = new Date(date)
}
class Date(date: SQLDate) {
def asJoda: DateTime = new DateTime(date.getTime)
}
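// Hedged usage sketch (not part of the original file): with the implicit conversion in scope,
// a java.sql.Date can be read directly as a Joda DateTime.
private object DateUsageSketch {
  import Date.sqlDateAsPersistentDate
  def toJoda(sqlDate: SQLDate): DateTime = sqlDate.asJoda
}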
|
kpjjpk/chipmunk
|
src/main/scala/org/chipmunk/value/Date.scala
|
Scala
|
mit
| 315
|
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.serving.core.constants
object ActorsConstant {
val UnitVersion = 1
}
|
Frannie-Ludmilla/sparta
|
serving-core/src/main/scala/com/stratio/sparta/serving/core/constants/ActorsConstant.scala
|
Scala
|
apache-2.0
| 722
|
package com.twitter.finagle.loadbalancer
import com.twitter.util.{Duration, Stopwatch}
import scala.util.Random
import scala.io.Source
private object FailureProfile {
val rng: Random = new Random("seed".hashCode)
/** Create a failure profile using this instantiation as the start time */
def apply(): FailureProfile = new FailureProfile(Stopwatch.start())
/** Fulfill all requests successfully */
val alwaysSucceed: () => Boolean = () => false
/** Fail all requests */
val alwaysFail: () => Boolean = () => true
/**
* Creates a failure profile from a file where each line is a boolean
* representing a response success or failure. Each request then draws one of
* these responses uniformly at random.
*/
def fromFile(path: java.net.URL): () => Boolean = {
val responses = Source.fromURL(path).getLines.toIndexedSeq.map { line: String =>
line.toBoolean
}
() =>
responses(rng.nextInt(responses.size))
}
}
/**
* Creates a profile to determine whether the next incoming request should be
* a success or failure based on the elapsed time.
*/
private class FailureProfile(elapsed: () => Duration) {
/** Successfully fulfill requests within the healthy period but fail all else. */
def failAfter(healthyPeriod: Duration): () => Boolean = () => {
val timeElapsed = elapsed()
timeElapsed >= healthyPeriod
}
/** Fail all requests within min and max, inclusive. Succeed otherwise. */
def failWithin(min: Duration, max: Duration): () => Boolean = () => {
val timeElapsed = elapsed()
timeElapsed >= min && timeElapsed <= max
}
}
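// A hypothetical usage sketch (not part of the benchmark sources). FailureProfile is
// package-private, so this only compiles from within com.twitter.finagle.loadbalancer.
private object FailureProfileSketch {
  def demo(): Unit = {
    val profile = FailureProfile() // the stopwatch starts now
    val shouldFail = profile.failAfter(Duration.fromSeconds(5))
    val outcome = if (shouldFail()) "fail" else "succeed" // depends on elapsed time
    println(s"a request issued now would $outcome")
  }
}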
|
luciferous/finagle
|
finagle-benchmark/src/main/scala/com/twitter/finagle/loadbalancer/FailureProfile.scala
|
Scala
|
apache-2.0
| 1,707
|
package org.infinispan.spark.test
import org.infinispan.client.hotrod.configuration.ConfigurationBuilder
import org.infinispan.client.hotrod.{RemoteCache, RemoteCacheManager}
import org.infinispan.spark.config.ConnectorConfiguration
import org.infinispan.spark.rdd.RemoteCacheManagerBuilder
import org.jboss.dmr.scala.ModelNode
import org.scalatest.{BeforeAndAfterAll, DoNotDiscover, Suite}
/**
* Trait to be mixed-in by tests that require a reference to a RemoteCache
*
* @author gustavonalle
*/
sealed trait RemoteTest {
protected def getRemoteCache[K, V]: RemoteCache[K, V] = remoteCacheManager.getCache(getCacheName)
protected lazy val remoteCacheManager = RemoteCacheManagerBuilder.create(getConfiguration)
def getCacheName: String = getClass.getName
def getCacheConfig: Option[ModelNode] = None
def getServerPort: Int
def withFilters(): List[FilterDef] = List.empty
def getConfiguration = {
val config = new ConnectorConfiguration()
val port = getServerPort
config.setServerList(Seq("localhost", port).mkString(":"))
config.setCacheName(getCacheName)
}
}
/**
* Traits to be mixed-in for a single server with a custom cache
*/
@DoNotDiscover
trait SingleServer extends RemoteTest with BeforeAndAfterAll {
this: Suite =>
val node: SingleNode
override def getServerPort = node.getServerPort
override protected def beforeAll(): Unit = {
node.start()
withFilters().foreach(node.addFilter)
node.createCache(getCacheName, getCacheConfig)
getRemoteCache.clear()
super.beforeAll()
}
override protected def afterAll(): Unit = {
withFilters().foreach(node.removeFilter)
super.afterAll()
}
}
@DoNotDiscover
trait SingleStandardServer extends SingleServer {
this: Suite =>
override val node = SingleStandardNode
}
@DoNotDiscover
trait SingleSecureServer extends SingleServer {
this: Suite =>
val KeyStore = getClass.getResource("/keystore_client.jks").getFile
val TrustStore = getClass.getResource("/truststore_client.jks").getFile
val StorePassword = "secret".toCharArray
override val node = SingleSecureNode
override protected lazy val remoteCacheManager = new RemoteCacheManager(
new ConfigurationBuilder().addServer().host("localhost").port(getServerPort)
.security().ssl().enable()
.keyStoreFileName(KeyStore)
.keyStorePassword(StorePassword)
.trustStoreFileName(TrustStore)
.trustStorePassword(StorePassword)
.build
)
override def getConfiguration = {
val configuration = super.getConfiguration
configuration
.addHotRodClientProperty("infinispan.client.hotrod.use_ssl", "true")
.addHotRodClientProperty("infinispan.client.hotrod.key_store_file_name", KeyStore)
.addHotRodClientProperty("infinispan.client.hotrod.trust_store_file_name", TrustStore)
.addHotRodClientProperty("infinispan.client.hotrod.key_store_password", "secret")
.addHotRodClientProperty("infinispan.client.hotrod.trust_store_password", "secret")
}
}
trait MultipleServers extends RemoteTest with BeforeAndAfterAll {
this: Suite =>
def getCacheType: CacheType.Value
override def getServerPort = Cluster.getFirstServerPort
override protected def beforeAll(): Unit = {
Cluster.start()
withFilters().foreach(Cluster.addFilter)
Cluster.createCache(getCacheName, getCacheType, getCacheConfig)
getRemoteCache.clear()
super.beforeAll()
}
override protected def afterAll(): Unit = {
withFilters().foreach(Cluster.removeFilter)
super.afterAll()
}
}
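// A hypothetical suite sketch (not part of this file) showing the intended mix-in usage:
// SingleStandardServer supplies the node and lifecycle, and getRemoteCache yields a
// cache scoped to the suite's class name. FunSuite is only an example base class.
@DoNotDiscover
class ExampleCacheSuite extends org.scalatest.FunSuite with SingleStandardServer {
  test("put and get through the remote cache") {
    val cache = getRemoteCache[String, String]
    cache.put("key", "value")
    assert(cache.get("key") == "value")
  }
}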
|
galderz/infinispan-spark
|
src/test/scala/org/infinispan/spark/test/RemoteTest.scala
|
Scala
|
apache-2.0
| 3,663
|
/*
* Copyright 2013 Sanjin Sehic
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package at.ac.tuwien.infosys
package amber
package akka
package origin
class RemoteBuilderSpec extends Spec("RemoteBuilderSpec")
with BuilderComponent.Remote
with amber.origin.BuilderBehaviors.Remote {
override type Origin[+A] = Origin.Remote[A]
override protected type Configuration = BuilderComponent.Remote.Configuration
override protected object configuration extends Configuration {
override val local = RemoteBuilderSpec.this.system
}
"akka.Remote.OriginBuilder" should {
behave like aBuilder
}
}
|
tuwiendsg/CAPA
|
akka/test/scala/origin/RemoteBuilderSpec.scala
|
Scala
|
apache-2.0
| 1,172
|
package uk.gov.gds.ier.model
case class PostalOrProxyVote (
typeVote: WaysToVoteType,
postalVoteOption: Option[Boolean],
deliveryMethod: Option[PostalVoteDeliveryMethod]) {
def toApiMap = {
val voteMap = postalVoteOption match {
case Some(pvote) => typeVote match {
case WaysToVoteType.ByPost => Map("pvote" -> pvote.toString)
case WaysToVoteType.ByProxy => Map("proxyvote" -> pvote.toString)
case _ => Map.empty
}
case _ => Map.empty
}
val emailMap = deliveryMethod.flatMap(_.emailAddress) match {
case Some(email) => typeVote match {
case WaysToVoteType.ByPost => Map("pvoteemail" -> email)
case WaysToVoteType.ByProxy => Map("proxyvoteemail" -> email)
case _ => Map.empty
}
case _ => Map.empty
}
voteMap ++ emailMap
}
}
object PostalOrProxyVote extends ModelMapping {
import playMappings._
def mapping = playMappings.mapping(
keys.voteType.key -> text,
keys.optIn.key -> optional(boolean),
keys.deliveryMethod.key -> optional(PostalVoteDeliveryMethod.mapping)
) (
(voteType, postalVoteOption, deliveryMethod) => PostalOrProxyVote(
WaysToVoteType.parse(voteType),
postalVoteOption,
deliveryMethod
)
) (
postalVote => Some(
postalVote.typeVote.name,
postalVote.postalVoteOption,
postalVote.deliveryMethod
)
)
}
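// A minimal sketch (not part of this file) of what toApiMap produces: with a postal-vote
// opt-in and no delivery method, only the "pvote" key is emitted.
object PostalOrProxyVoteSketch {
  val postalOptIn = PostalOrProxyVote(
    typeVote = WaysToVoteType.ByPost,
    postalVoteOption = Some(true),
    deliveryMethod = None)
  def example = postalOptIn.toApiMap // == Map("pvote" -> "true")
}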
|
alphagov/ier-frontend
|
app/uk/gov/gds/ier/model/PostalOrProxyVote.scala
|
Scala
|
mit
| 1,411
|
package ch.ninecode.cim
import org.apache.commons.logging.{Log, LogFactory}
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce.InputSplit
import org.apache.hadoop.mapreduce.RecordReader
import org.apache.hadoop.mapreduce.TaskAttemptContext
import org.apache.hadoop.mapreduce.lib.input.FileSplit
import ch.ninecode.model.Element
class CIMRecordReader (debug: Boolean = false) extends RecordReader[String, Element]
{
val log: Log = LogFactory.getLog(getClass)
var cim: CHIM = _
def initialize (genericSplit: InputSplit, context: TaskAttemptContext): Unit =
{
if (debug)
{
log.info("initialize")
log.info(s"genericSplit: ${genericSplit.toString}")
log.info(s"context: ${context.getTaskAttemptID.toString}")
}
val job = context.getConfiguration
val split = genericSplit.asInstanceOf[FileSplit]
val start = split.getStart
val bytes = split.getLength
val file = split.getPath
// open the file and seek to the start of the split
val fs = file.getFileSystem(job)
val in = fs.open(file)
val end = start + bytes
val available = fs.getFileStatus(file).getLen
val extra = if (available > end) Math.min(CHIM.OVERREAD.toLong, available - end) else 0L
// ToDo: may need to handle block sizes bigger than 2GB - what happens for size > 2^31?
val size = (bytes + extra).toInt
val buffer = new Array[Byte](size)
in.readFully(start, buffer)
val low =
if (0 == start)
// strip any BOM(Byte Order Mark) i.e. 0xEF,0xBB,0xBF
            if ((size >= 3) && ((buffer(0) & 0xff) == 0xef) && ((buffer(1) & 0xff) == 0xbb) && ((buffer(2) & 0xff) == 0xbf))
3
else
0
else
0
val first =
if (0 != start)
{
// skip to next UTF-8 non-continuation byte (high order bit zero)
// by advancing past at most 4 bytes
var i = 0
if ((buffer(low + i) & 0xc0) != 0xc0) // check for the start of a UTF-8 character
while (0 != (buffer(low + i) & 0x80) && (i < Math.min(4, size)))
i += 1
low + i
}
else
low
val xml = Text.decode(buffer, first, size - first)
val len = if (0 == extra) xml.length else Text.decode(buffer, first, (size - first - extra).toInt).length
// ToDo: using first here is approximate,
// the real character count would require reading the complete file
// from 0 to (start + first) and converting to characters
if (debug)
log.debug(s"XML text starting at byte offset ${start + first} of length $len characters begins with: ${xml.substring(0, 120)}")
CIMContext.DEBUG = debug
cim = new CHIM(xml, first, first + len, start, start + bytes)
}
def close (): Unit =
{
if (debug)
{
log.info("close")
for (error <- cim.context.errors)
log.error(error)
}
cim = null
}
def getCurrentKey: String = cim.value.id
def getCurrentValue: Element = cim.value
def getProgress: Float = cim.progress()
def nextKeyValue (): Boolean = cim.parse_one()
}
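// A hypothetical driver sketch (not part of this file): reading a single split directly
// with the record reader. The path and split boundaries are made up, and the
// TaskAttemptContextImpl/TaskAttemptID construction assumes the stock Hadoop mapreduce API.
object CIMRecordReaderSketch
{
    import org.apache.hadoop.conf.Configuration
    import org.apache.hadoop.fs.Path
    import org.apache.hadoop.mapreduce.TaskAttemptID
    import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
    def readSplit (file: String, start: Long, length: Long): Unit =
    {
        val split = new FileSplit(new Path(file), start, length, Array.empty[String])
        val context = new TaskAttemptContextImpl(new Configuration(), new TaskAttemptID())
        val reader = new CIMRecordReader()
        reader.initialize(split, context)
        while (reader.nextKeyValue())
            println(s"${reader.getCurrentKey} => ${reader.getCurrentValue.getClass.getSimpleName}")
        reader.close()
    }
}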
|
derrickoswald/CIMScala
|
CIMReader/src/main/scala/ch/ninecode/cim/CIMRecordReader.scala
|
Scala
|
mit
| 3,399
|
package com.rklaehn.interval
import org.scalatest.FunSuite
import spire.implicits._
import spire.math.Interval
class IntervalMapTest extends FunSuite {
test("IntervalMap[Int, Bool]") {
val a = IntervalMap.FromBool.above(1, true)
val b = IntervalMap.FromBool.below(1, true)
val c = a | b
val d = ~a
assert(a.entries.toSeq === Seq(Interval.atOrBelow(1) -> false, Interval.above(1) -> true))
assert(c.entries.head === Interval.below(1) -> true)
assert(c.entries.last === Interval.above(1) -> true)
assert(d.entries.head === Interval.atOrBelow(1) -> true)
assert((a & b) == IntervalMap.FromBool.zero[Int, Boolean])
}
test("equalsWrongType") {
assert(IntervalMap.FromBool.zero[Int, Boolean] != "foo")
}
test("apply") {
val t = IntervalMap.FromBool.above(0, true) | IntervalMap.FromBool.below(-100, true)
assert(t(0) == false)
assert(t.below(-1) == false)
assert(t.at(-1) == false)
assert(t.above(-1) == false)
assert(t.below(0) == false)
assert(t.at(0) == false)
assert(t.above(0) == true)
assert(t.below(1) == true)
assert(t.at(1) == true)
assert(t.above(1) == true)
val u = IntervalMap.FromBool.above(0, true)
assert(u(0) == false)
assert(u.below(-1) == false)
assert(u.at(-1) == false)
assert(u.above(-1) == false)
assert(u.below(0) == false)
assert(u.at(0) == false)
assert(u.above(0) == true)
assert(u.below(1) == true)
assert(u.at(1) == true)
assert(u.above(1) == true)
}
test("step") {
assert(IntervalMap.step2(0, 0, 0, 0, 1, 1, 1) == IntervalMap.step1(0, 1, 1, 1))
assert(IntervalMap.step2(0, 0, 1, 1, 1, 1, 1) == IntervalMap.step1(0, 0, 1, 1))
}
test("FromMonoid") {
import IntervalMap.FromMonoid._
val z = empty[Int, String]
assert(below(0, "") == z)
assert(atOrBelow(0, "") == z)
assert(point(0, "") == z)
assert(hole(0, "") == z)
assert(above(0, "") == z)
assert(atOrAbove(0, "") == z)
assert(apply(Interval(0, 1), "") == z)
assert(apply(Interval.empty[Int], "x") == z)
assert(apply[Int, String]() == z)
assert(apply(Interval(0, 2), "x") == apply(Interval(0, 1) -> "x", Interval.openLower(1, 2) -> "x"))
assert(hole(0, "x").at(0) == "")
}
test("FromBool") {
import IntervalMap.FromBool._
val z = zero[Int, Boolean]
val o = one[Int, Boolean]
assert(below(0, false) == z)
assert(atOrBelow(0, false) == z)
assert(point(0, false) == z)
assert(hole(0, false) == z)
assert(above(0, false) == z)
assert(atOrAbove(0, false) == z)
assert(~o == z)
assert(apply(Interval.empty[Int], true) == z)
assert(hole(0, true) == ~point(0, true))
}
}
|
rklaehn/intervalset
|
src/test/scala/com/rklaehn/interval/IntervalMapTest.scala
|
Scala
|
apache-2.0
| 2,703
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloudera.spark.cloud
import java.io.{Closeable, EOFException, File, FileNotFoundException, IOException}
import java.net.URL
import java.nio.charset.Charset
import scala.collection.JavaConverters._
import scala.language.postfixOps
import scala.reflect.ClassTag
import com.cloudera.spark.cloud.s3.S3ACommitterConstants._
import com.cloudera.spark.cloud.utils.{HConf, TimeOperations}
import com.cloudera.spark.cloud.GeneralCommitterConstants._
import com.fasterxml.jackson.databind.JsonNode
import org.apache.commons.io.IOUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, FileUtil, LocatedFileStatus, Path, PathFilter, RemoteIterator, StorageStatistics}
import org.apache.hadoop.io.{NullWritable, Text}
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat
import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.{PairRDDFunctions, RDD}
import org.apache.spark.sql._
/**
* Trait for operations to aid object store integration.
*/
trait ObjectStoreOperations extends Logging /*with CloudTestKeys*/ with
TimeOperations with HConf {
def saveTextFile[T](rdd: RDD[T], path: Path): Unit = {
rdd.saveAsTextFile(path.toString)
}
/**
   * Save this RDD as a text file, using string representations of elements,
   * via the Hadoop mapreduce API rather than the older mapred API.
   *
   * There's a bit of convolutedness here, as this supports writing to any Hadoop FS
   * rather than the default one in the configuration; this is addressed by passing in a
   * configuration. Spark makes a bit too much of the RDD private, so the key
   * and value types of the RDD need to be restated
*
* @param rdd RDD to save
* @param path path
* @param conf config
* @tparam T type of RDD
*/
def saveAsNewTextFile[T](rdd: RDD[T],
path: Path,
conf: Configuration): Unit = {
val textRdd = rdd.mapPartitions { iter =>
val text = new Text()
iter.map { x =>
text.set(x.toString)
(NullWritable.get(), text)
}
}
val pairOps = new PairRDDFunctions(textRdd)(
implicitly[ClassTag[NullWritable]],
implicitly[ClassTag[Text]],
null)
pairOps.saveAsNewAPIHadoopFile(
path.toUri.toString,
classOf[NullWritable],
classOf[Text],
classOf[TextOutputFormat[NullWritable, Text]],
conf)
}
/**
* Take a predicate, generate a path filter from it.
*
* @param filterPredicate predicate
* @return a filter which uses the predicate to decide whether to accept a file or not
*/
def pathFilter(filterPredicate: Path => Boolean): PathFilter = {
// ignore IDEA if it suggests simplifying this...it won't compile.
new PathFilter {
def accept(path: Path): Boolean = filterPredicate(path)
}
}
/**
* List files in a filesystem.
* @param fs filesystem
* @param path path to list
* @param recursive flag to request recursive listing
* @return a sequence of all files (but not directories) underneath the path.
*/
def listFiles(fs: FileSystem, path: Path, recursive: Boolean): Seq[LocatedFileStatus] = {
remoteIteratorSequence(fs.listFiles(path, recursive))
}
/**
   * Take the output of a remote iterator and convert it to a scala sequence. Network
* IO may take place during the operation, and changes to a remote FS may result in
* a sequence which is not consistent with any single state of the FS.
* @param source source
* @tparam T type of source
* @return a sequence
*/
def remoteIteratorSequence[T](source: RemoteIterator[T]): Seq[T] = {
new RemoteOutputIterator[T](source).toSeq
}
/**
* Put a string to the destination.
* @param path path
* @param conf configuration to use when requesting the filesystem
* @param body string body
*/
def put(
path: Path,
conf: Configuration,
body: String): Unit = {
put(path.getFileSystem(conf), path, body)
}
/**
* Put a string to the destination.
*
* @param fs dest FS
* @param path path to file
* @param body string body
*/
def put(
fs: FileSystem,
path: Path,
body: String): Unit = {
val out = fs.create(path, true)
try {
IOUtils.write(body, out, "UTF-8")
} finally {
closeQuietly(out)
}
}
/**
* Get a file from a path in the default charset.
*
* @param fs filesystem
* @param path path to file
* @return the contents of the path
*/
def get(fs: FileSystem, path: Path): String = {
val in = fs.open(path)
try {
      val s = IOUtils.toString(in, Charset.defaultCharset())
in.close()
s
} finally {
closeQuietly(in)
}
}
def closeQuietly(c: Closeable) = {
if (c != null) {
try {
c.close();
} catch {
case e: Exception =>
logDebug("When closing", e)
}
}
}
/**
* Get a file from a path in the default charset.
* This is here to ease spark-shell use
* @param p path string.
* @param conf configuration for the FS
* @return the contents of the path
*/
def get(p: String, conf: Configuration): String = {
val path = new Path(p)
get(path.getFileSystem(conf), path)
}
/**
* Load a file as JSON; fail fast if the file is 0 bytes long
*
* @param fs filesystem
* @param path path to file
* @return the contents of the path as a JSON node
*/
def loadJson(fs: FileSystem, path: Path): JsonNode = {
val status = fs.getFileStatus(path)
if (status.getLen == 0) {
throw new EOFException("Empty File: " + path)
}
import com.fasterxml.jackson.databind.ObjectMapper
new ObjectMapper().readTree(get(fs, path))
}
/**
* Save a dataframe in a specific format.
*
* @param df dataframe
* @param dest destination path
* @param format format
* @return the path the DF was saved to
*/
def saveDF(
df: DataFrame,
dest: Path,
format: String): Path = {
logDuration(s"write to $dest in format $format") {
df.write.format(format).save(dest.toString)
}
dest
}
/**
* Load a dataframe.
* @param spark spark session
* @param source source path
* @param srcFormat format
* @return the loaded dataframe
*/
def loadDF(
spark: SparkSession,
source: Path,
srcFormat: String,
opts: Map[String, String] = Map()): DataFrame = {
val reader = spark.read
reader.options(opts)
reader.format(srcFormat)
.load(source.toUri.toString)
}
/**
* Take a dotted classname and return the resource
* @param classname classname to look for
* @return the resource for the .class
*/
def classnameToResource(classname: String): String = {
classname.replace('.','/') + ".class"
}
/**
* Get a resource URL or None, if the resource wasn't found
* @param resource resource to look for
* @return the URL, if any
*/
def resourceURL(resource: String): Option[URL] = {
Option(this.getClass.getClassLoader.getResource(resource))
}
/**
* Create the JVM's temp dir, and return its path.
* @return the temp directory
*/
def createTmpDir(): File = {
val tmp = new File(System.getProperty("java.io.tmpdir"))
tmp.mkdirs()
tmp
}
/**
   * Create a temporary warehouse directory for those tests which need one.
* @return a warehouse directory
*/
def createWarehouseDir(): File = {
tempDir("warehouse", ".db")
}
/**
   * Create a unique temporary directory path with the given name prefix and suffix.
   * @return the directory path (the placeholder file is deleted so callers can create it)
*/
def tempDir(name: String, suffix: String): File = {
val dir = File.createTempFile(name, suffix, createTmpDir())
dir.delete()
dir
}
/**
* Get a sorted list of the FS statistics.
*/
def getStorageStatistics(fs: FileSystem): List[StorageStatistics.LongStatistic] = {
fs.getStorageStatistics.getLongStatistics.asScala.toList
.sortWith((left, right) => left.getName > right.getName)
}
/**
* Dump the storage stats; logs nothing if there are none
*/
def dumpFileSystemStatistics(stats: StorageStatistics) : Unit = {
for (entry <- stats.getLongStatistics.asScala) {
logInfo(s" ${entry.getName} = ${entry.getValue}")
}
}
/*
def dumpFileSystemStatistics(): Unit = {
}
*/
/**
* Copy a file across filesystems, through the local machine.
* There's no attempt to optimise the operation if the
* src and dest files are on the same FS.
* @param src source file
* @param dest destination
* @param conf config for the FS binding
* @param overwrite should the dest be overwritten?
*/
def copyFile(
src: Path,
dest: Path,
conf: Configuration,
overwrite: Boolean): Boolean = {
val srcFS = src.getFileSystem(conf)
val sourceStatus = srcFS.getFileStatus(src)
require(sourceStatus.isFile, s"Not a file $src")
if (!overwrite) {
val destFS = dest.getFileSystem(conf)
try {
val destStatus = destFS.getFileStatus(dest)
if (destStatus.isFile) {
logInfo(s"Destinaion $dest exists")
return false
}
} catch {
case _: FileNotFoundException =>
}
}
val sizeKB = sourceStatus.getLen / 1024
logInfo(s"Copying $src to $dest (${sizeKB} KB)")
val (_, time) = durationOf {
FileUtil.copy(srcFS,
sourceStatus,
dest.getFileSystem(conf),
dest,
false, overwrite, conf)
}
val durationS = time / (1e9)
logInfo(s"Copy Duration = $durationS seconds")
val bandwidth = sizeKB / durationS
logInfo(s"Effective copy bandwidth = $bandwidth KiB/s")
true
}
/**
* Write text to a file
* @param fs filesystem
* @param p path
* @param t text, if "" writes an empty file
*/
def write(fs: FileSystem, p: Path, t: String): Unit = {
val out = fs.create(p, true)
try {
if (!t.isEmpty) {
out.write(t.getBytes())
}
} finally {
out.close()
}
}
/**
* Read from the FS up to the length; there is no retry after the first read
* @param fs filesystem
* @param p path
* @param maxLen max buffer size
* @return the data read
*/
def read(fs: FileSystem, p: Path, maxLen: Int = 1024): String = {
val in = fs.open(p)
val buffer = new Array[Byte](maxLen)
val len = in.read(buffer)
new String(buffer, 0, len)
}
/**
* Recursive delete.
*
* @param fs filesystem
* @param path path to delete
   * @return the result of the delete
*/
protected def rm(
fs: FileSystem,
path: Path): Boolean = {
try {
fs.delete(path, true)
} catch {
case e: IOException =>
throw new IOException(s"Failed to delete $path on $fs $e", e)
}
}
/**
* Set the base spark/Hadoop/ORC/parquet options to be used in examples.
* Also patches spark.master to local, unless already set.
*
* @param sparkConf spark configuration to patch
* @param randomIO is the IO expected to be random access?
*/
protected def applyObjectStoreConfigurationOptions(
sparkConf: SparkConf,
randomIO: Boolean): Unit = {
// commit with v2 algorithm
sparkConf.setAll(ObjectStoreConfigurations.RW_TEST_OPTIONS)
if (!sparkConf.contains("spark.master")) {
sparkConf.set("spark.master", "local")
}
}
protected def verifyConfigurationOption(sparkConf: SparkConf,
key: String, expected: String): Unit = {
val v = sparkConf.get(key)
require(v == expected,
s"value of configuration option $key is '$v'; expected '$expected'")
}
protected def verifyConfigurationOptions(sparkConf: SparkConf,
settings: Traversable[(String, String)]): Unit = {
settings.foreach(t => verifyConfigurationOption(sparkConf, t._1, t._2))
}
val Parquet = "parquet"
val Csv = "csv"
val Orc = "orc"
/**
* Write a dataset.
*
   * @param destFS destination filesystem
   * @param dest destination path
   * @param source source DS
   * @param summary summary text to include in the log
   * @param format format
   * @param parted should the DS be partitioned by year & month?
   * @param committer name of committer to use in s3a options
   * @param conflict conflict policy to set in config
   * @param extraOps extra options to pass to the committer/context
   * @tparam T type of the dataset elements
   * @return the time taken by the write
*/
def writeDataset[T](
@transient destFS: FileSystem,
@transient dest: Path,
source: Dataset[T],
summary: String = "",
format: String = Orc,
parted: Boolean = true,
committer: String = PARTITIONED,
conflict: String = CONFLICT_MODE_FAIL,
extraOps: Map[String, String] = Map()): Long = {
val text =
s"$summary + committer=$committer format $format partitioning: $parted" +
s" conflict=$conflict"
val (_, t) = logDuration2(s"write to $dest: $text") {
val writer = source.write
if (parted) {
writer.partitionBy("year", "month")
}
writer.mode(SaveMode.Append)
extraOps.foreach(t => writer.option(t._1, t._2))
writer.option(S3A_COMMITTER_NAME, committer)
writer.option(CONFLICT_MODE, conflict)
writer
.format(format)
.save(dest.toUri.toString)
}
t
}
}
/**
* Iterator over remote output.
* @param source source iterator
* @tparam T type of response
*/
class RemoteOutputIterator[T](private val source: RemoteIterator[T]) extends Iterator[T] {
def hasNext: Boolean = source.hasNext
def next: T = source.next()
}
/**
* A referenceable instance
*/
object ObjectStoreOperations extends ObjectStoreOperations {
}
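// A small usage sketch (not part of this file): the companion object above is the
// "referenceable instance", so the helpers can be called without mixing in the trait.
// The s3a URI below is hypothetical.
object ObjectStoreOperationsSketch {
  def roundTrip(conf: Configuration): String = {
    val path = new Path("s3a://example-bucket/tmp/hello.txt")
    ObjectStoreOperations.put(path, conf, "hello object store")
    ObjectStoreOperations.get(path.toString, conf)
  }
}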
/**
* An object store configurations to play with.
*/
object ObjectStoreConfigurations extends HConf {
/**
* General spark options
*/
val GENERAL_SPARK_OPTIONS: Map[String, String] = Map(
"spark.ui.enabled" -> "false",
"spark.driver.allowMultipleContexts" -> "true"
)
/**
* Options for ORC.
*/
val ORC_OPTIONS: Map[String, String] = Map(
"spark.hadoop.orc.splits.include.file.footer" -> "true",
"spark.hadoop.orc.cache.stripe.details.size" -> "1000",
"spark.hadoop.orc.filterPushdown" -> "true")
/**
* Options for Parquet.
*/
val PARQUET_OPTIONS: Map[String, String] = Map(
"spark.sql.parquet.mergeSchema" -> "false",
"spark.sql.parquet.filterPushdown" -> "true"
)
val ALL_READ_OPTIONS: Map[String, String] =
GENERAL_SPARK_OPTIONS ++ ORC_OPTIONS ++ PARQUET_OPTIONS
/**
* Options for file output committer: algorithm 2 & skip cleanup.
*/
val FILE_COMMITTER_OPTIONS: Map[String, String] = Map(
hkey(FILEOUTPUTCOMMITTER_ALGORITHM_VERSION) -> "2",
hkey(FILEOUTPUTCOMMITTER_CLEANUP_SKIPPED) -> "true")
/**
* Options for committer setup.
* 1. Set the commit algorithm to 3 to force failures if the classic
* committer was ever somehow picked up.
* 2. Switch parquet to the parquet committer subclass which will
* then bind to the factory committer.
   * 3. Set spark.sql.sources.commitProtocolClass to PathOutputCommitProtocol
*/
val COMMITTER_OPTIONS: Map[String, String] = Map(
"spark.sql.parquet.output.committer.class" ->
BINDING_PARQUET_OUTPUT_COMMITTER_CLASS,
"spark.sql.sources.commitProtocolClass" ->
GeneralCommitterConstants.PATH_OUTPUT_COMMITTER_NAME,
ABFS_SCHEME_COMMITTER_FACTORY ->
MANIFEST_COMMITTER_FACTORY,
"mapreduce.manifest.committer.validate.output" -> "true"
)
/**
* Extra options for testing with hive.
*/
val HIVE_TEST_SETUP_OPTIONS: Map[String, String] = Map(
"spark.ui.enabled" -> "false",
"spark.sql.test" -> "",
"spark.sql.codegen.fallback" -> "true",
"spark.unsafe.exceptionOnMemoryLeak" -> "true",
"spark.sql.shuffle.partitions" -> "5",
"spark.sql.hive.metastore.barrierPrefixes" ->
"org.apache.spark.sql.hive.execution.PairSerDe",
"spark.sql.hive.metastore.sharedPrefixes" ->
"com.amazonaws."
)
/**
* Everything needed for tests.
*/
val RW_TEST_OPTIONS: Map[String, String] =
ALL_READ_OPTIONS ++ COMMITTER_OPTIONS ++ HIVE_TEST_SETUP_OPTIONS
/**
* Set the options defined in [[COMMITTER_OPTIONS]] on the
* spark context.
*
* Warning: this is purely experimental.
*
* @param sparkConf spark configuration to bind.
*/
def bind(sparkConf: SparkConf): Unit = {
sparkConf.setAll(COMMITTER_OPTIONS)
}
}
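// A minimal sketch (not part of this file) of wiring these option maps into a SparkConf,
// mirroring what applyObjectStoreConfigurationOptions does in the trait above.
object ObjectStoreConfigurationsSketch {
  def testConf(): SparkConf = {
    val sparkConf = new SparkConf(loadDefaults = false)
    sparkConf.setAll(ObjectStoreConfigurations.RW_TEST_OPTIONS) // read + committer + hive test options
    if (!sparkConf.contains("spark.master")) {
      sparkConf.set("spark.master", "local")
    }
    sparkConf
  }
}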
|
hortonworks-spark/cloud-integration
|
spark-cloud-integration/src/main/scala/com/cloudera/spark/cloud/ObjectStoreOperations.scala
|
Scala
|
apache-2.0
| 17,237
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.heron.streamlet.scala
/**
* A Streamlet is a (potentially unbounded) ordered collection of tuples.
* The StreamletBase class contains basic information of a Streamlet
* such as name and partition count without the connection functions
* such as map() and filter().
*/
trait StreamletBase[R] {
/**
* Sets the name of the Streamlet.
*
* @param sName The name given by the user for this Streamlet
* @return Returns back the Streamlet with changed name
*/
def setName(sName: String): StreamletBase[R]
/**
* Gets the name of the Streamlet.
*
* @return Returns the name of the Streamlet
*/
def getName: String
/**
* Sets the number of partitions of the streamlet
*
* @param numPartitions The user assigned number of partitions
* @return Returns back the Streamlet with changed number of partitions
*/
def setNumPartitions(numPartitions: Int): StreamletBase[R]
/**
* Gets the number of partitions of this Streamlet.
*
* @return the number of partitions of this Streamlet
*/
def getNumPartitions: Int
}
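/**
 * A minimal sketch (not part of the Heron sources) of a StreamletBase implementation,
 * shown only to illustrate the name / partition-count contract described above.
 */
case class NamedStreamletSketch[R](name: String = "unnamed", partitions: Int = 1)
    extends StreamletBase[R] {
  override def setName(sName: String): StreamletBase[R] = copy(name = sName)
  override def getName: String = name
  override def setNumPartitions(numPartitions: Int): StreamletBase[R] = copy(partitions = numPartitions)
  override def getNumPartitions: Int = partitions
}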
|
twitter/heron
|
heron/api/src/scala/org/apache/heron/streamlet/scala/StreamletBase.scala
|
Scala
|
apache-2.0
| 1,921
|
package org.junit.runners.model
class FrameworkMethod {
// Dummy for classOf[...]
}
|
nicolasstucki/scala-js-junit
|
runtime/src/main/scala/org/junit/runners/model/FrameworkMethod.scala
|
Scala
|
bsd-3-clause
| 87
|
package chapter.twentyone
object ExerciseNine extends App {
}
|
deekim/impatient-scala
|
src/main/scala/chapter/twentyone/ExerciseNine.scala
|
Scala
|
apache-2.0
| 65
|
package uk.gov.dvla.vehicles.presentation.common.model
final case class DisposeModel(vehicleMake: String,
vehicleModel: String,
dealerName: String,
dealerAddress: AddressModel,
transactionId: Option[String] = None,
registrationNumber: String)
|
dvla/vehicles-presentation-common
|
app/uk/gov/dvla/vehicles/presentation/common/model/DisposeModel.scala
|
Scala
|
mit
| 394
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.mesos
import java.io.File
import java.util.{Collections, Date, List => JList}
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import org.apache.mesos.{Scheduler, SchedulerDriver}
import org.apache.mesos.Protos.{TaskState => MesosTaskState, _}
import org.apache.mesos.Protos.Environment.Variable
import org.apache.mesos.Protos.TaskStatus.Reason
import org.apache.spark.{SecurityManager, SparkConf, SparkException, TaskState}
import org.apache.spark.deploy.mesos.MesosDriverDescription
import org.apache.spark.deploy.rest.{CreateSubmissionResponse, KillSubmissionResponse, SubmissionStatusResponse}
import org.apache.spark.metrics.MetricsSystem
import org.apache.spark.util.Utils
/**
* Tracks the current state of a Mesos Task that runs a Spark driver.
* @param driverDescription Submitted driver description from
* [[org.apache.spark.deploy.rest.mesos.MesosRestServer]]
* @param taskId Mesos TaskID generated for the task
* @param slaveId Slave ID that the task is assigned to
* @param mesosTaskStatus The last known task status update.
* @param startDate The date the task was launched
* @param finishDate The date the task finished
* @param frameworkId Mesos framework ID the task registers with
*/
private[spark] class MesosClusterSubmissionState(
val driverDescription: MesosDriverDescription,
val taskId: TaskID,
val slaveId: SlaveID,
var mesosTaskStatus: Option[TaskStatus],
var startDate: Date,
var finishDate: Option[Date],
val frameworkId: String)
extends Serializable {
def copy(): MesosClusterSubmissionState = {
new MesosClusterSubmissionState(
driverDescription, taskId, slaveId, mesosTaskStatus, startDate, finishDate, frameworkId)
}
}
/**
* Tracks the retry state of a driver, which includes the next time it should be scheduled
* and necessary information to do exponential backoff.
* This class is not thread-safe, and we expect the caller to handle synchronizing state.
*
* @param lastFailureStatus Last Task status when it failed.
* @param retries Number of times it has been retried.
* @param nextRetry Time at which it should be retried next
 * @param waitTime The amount of time the driver is scheduled to wait until the next retry.
*/
private[spark] class MesosClusterRetryState(
val lastFailureStatus: TaskStatus,
val retries: Int,
val nextRetry: Date,
val waitTime: Int) extends Serializable {
def copy(): MesosClusterRetryState =
new MesosClusterRetryState(lastFailureStatus, retries, nextRetry, waitTime)
}
/**
* The full state of the cluster scheduler, currently being used for displaying
* information on the UI.
*
* @param frameworkId Mesos Framework id for the cluster scheduler.
* @param masterUrl The Mesos master url
* @param queuedDrivers All drivers queued to be launched
* @param launchedDrivers All launched or running drivers
* @param finishedDrivers All terminated drivers
* @param pendingRetryDrivers All drivers pending to be retried
*/
private[spark] class MesosClusterSchedulerState(
val frameworkId: String,
val masterUrl: Option[String],
val queuedDrivers: Iterable[MesosDriverDescription],
val launchedDrivers: Iterable[MesosClusterSubmissionState],
val finishedDrivers: Iterable[MesosClusterSubmissionState],
val pendingRetryDrivers: Iterable[MesosDriverDescription])
/**
* The full state of a Mesos driver, that is being used to display driver information on the UI.
*/
private[spark] class MesosDriverState(
val state: String,
val description: MesosDriverDescription,
val submissionState: Option[MesosClusterSubmissionState] = None)
/**
* A Mesos scheduler that is responsible for launching submitted Spark drivers in cluster mode
* as Mesos tasks in a Mesos cluster.
 * All drivers are launched asynchronously by the framework, and each will eventually be launched
 * by one of the slaves in the cluster. The results of the driver will be stored in the slave's task
 * sandbox, which is accessible by visiting the Mesos UI.
 * This scheduler supports recovery by persisting all its state and performing task reconciliation
 * on recovery, which gets the latest state for all the drivers from the Mesos master.
*/
private[spark] class MesosClusterScheduler(
engineFactory: MesosClusterPersistenceEngineFactory,
conf: SparkConf)
extends Scheduler with MesosSchedulerUtils {
var frameworkUrl: String = _
private val metricsSystem =
MetricsSystem.createMetricsSystem("mesos_cluster", conf, new SecurityManager(conf))
private val master = conf.get("spark.master")
private val appName = conf.get("spark.app.name")
private val queuedCapacity = conf.getInt("spark.mesos.maxDrivers", 200)
private val retainedDrivers = conf.getInt("spark.mesos.retainedDrivers", 200)
private val maxRetryWaitTime = conf.getInt("spark.mesos.cluster.retry.wait.max", 60) // 1 minute
private val schedulerState = engineFactory.createEngine("scheduler")
private val stateLock = new Object()
private val finishedDrivers =
new mutable.ArrayBuffer[MesosClusterSubmissionState](retainedDrivers)
private var frameworkId: String = null
// Holds all the launched drivers and current launch state, keyed by driver id.
private val launchedDrivers = new mutable.HashMap[String, MesosClusterSubmissionState]()
// Holds a map of driver id to expected slave id that is passed to Mesos for reconciliation.
  // All drivers that are loaded after failover are added here, as we need to get the latest
// state of the tasks from Mesos.
private val pendingRecover = new mutable.HashMap[String, SlaveID]()
  // Stores all the submitted drivers that haven't been launched.
private val queuedDrivers = new ArrayBuffer[MesosDriverDescription]()
// All supervised drivers that are waiting to retry after termination.
private val pendingRetryDrivers = new ArrayBuffer[MesosDriverDescription]()
private val queuedDriversState = engineFactory.createEngine("driverQueue")
private val launchedDriversState = engineFactory.createEngine("launchedDrivers")
private val pendingRetryDriversState = engineFactory.createEngine("retryList")
  // Flag to mark if the scheduler is ready to take requests, which is not until the scheduler
  // is registered with the Mesos master.
@volatile protected var ready = false
private var masterInfo: Option[MasterInfo] = None
def submitDriver(desc: MesosDriverDescription): CreateSubmissionResponse = {
val c = new CreateSubmissionResponse
if (!ready) {
c.success = false
c.message = "Scheduler is not ready to take requests"
return c
}
stateLock.synchronized {
if (isQueueFull()) {
c.success = false
c.message = "Already reached maximum submission size"
return c
}
c.submissionId = desc.submissionId
queuedDriversState.persist(desc.submissionId, desc)
queuedDrivers += desc
c.success = true
}
c
}
def killDriver(submissionId: String): KillSubmissionResponse = {
val k = new KillSubmissionResponse
if (!ready) {
k.success = false
k.message = "Scheduler is not ready to take requests"
return k
}
k.submissionId = submissionId
stateLock.synchronized {
// We look for the requested driver in the following places:
// 1. Check if submission is running or launched.
// 2. Check if it's still queued.
// 3. Check if it's in the retry list.
// 4. Check if it has already completed.
if (launchedDrivers.contains(submissionId)) {
val task = launchedDrivers(submissionId)
mesosDriver.killTask(task.taskId)
k.success = true
k.message = "Killing running driver"
} else if (removeFromQueuedDrivers(submissionId)) {
k.success = true
k.message = "Removed driver while it's still pending"
} else if (removeFromPendingRetryDrivers(submissionId)) {
k.success = true
k.message = "Removed driver while it's being retried"
} else if (finishedDrivers.exists(_.driverDescription.submissionId.equals(submissionId))) {
k.success = false
k.message = "Driver already terminated"
} else {
k.success = false
k.message = "Cannot find driver"
}
}
k
}
def getDriverStatus(submissionId: String): SubmissionStatusResponse = {
val s = new SubmissionStatusResponse
if (!ready) {
s.success = false
s.message = "Scheduler is not ready to take requests"
return s
}
s.submissionId = submissionId
stateLock.synchronized {
if (queuedDrivers.exists(_.submissionId.equals(submissionId))) {
s.success = true
s.driverState = "QUEUED"
} else if (launchedDrivers.contains(submissionId)) {
s.success = true
s.driverState = "RUNNING"
launchedDrivers(submissionId).mesosTaskStatus.foreach(state => s.message = state.toString)
} else if (finishedDrivers.exists(_.driverDescription.submissionId.equals(submissionId))) {
s.success = true
s.driverState = "FINISHED"
finishedDrivers
.find(d => d.driverDescription.submissionId.equals(submissionId)).get.mesosTaskStatus
.foreach(state => s.message = state.toString)
} else if (pendingRetryDrivers.exists(_.submissionId.equals(submissionId))) {
val status = pendingRetryDrivers.find(_.submissionId.equals(submissionId))
.get.retryState.get.lastFailureStatus
s.success = true
s.driverState = "RETRYING"
s.message = status.toString
} else {
s.success = false
s.driverState = "NOT_FOUND"
}
}
s
}
/**
* Gets the driver state to be displayed on the Web UI.
*/
def getDriverState(submissionId: String): Option[MesosDriverState] = {
stateLock.synchronized {
queuedDrivers.find(_.submissionId.equals(submissionId))
.map(d => new MesosDriverState("QUEUED", d))
.orElse(launchedDrivers.get(submissionId)
.map(d => new MesosDriverState("RUNNING", d.driverDescription, Some(d))))
.orElse(finishedDrivers.find(_.driverDescription.submissionId.equals(submissionId))
.map(d => new MesosDriverState("FINISHED", d.driverDescription, Some(d))))
.orElse(pendingRetryDrivers.find(_.submissionId.equals(submissionId))
.map(d => new MesosDriverState("RETRYING", d)))
}
}
  private def isQueueFull(): Boolean = queuedDrivers.size >= queuedCapacity
/**
* Recover scheduler state that is persisted.
   * We still need to do task reconciliation to stay up to date with the latest task states,
   * as they might have changed while the scheduler was failing over.
*/
private def recoverState(): Unit = {
stateLock.synchronized {
launchedDriversState.fetchAll[MesosClusterSubmissionState]().foreach { state =>
launchedDrivers(state.taskId.getValue) = state
pendingRecover(state.taskId.getValue) = state.slaveId
}
queuedDriversState.fetchAll[MesosDriverDescription]().foreach(d => queuedDrivers += d)
      // There is a potential timing issue where a queued driver might have been launched
      // but the scheduler shut down before the queued driver could be removed
      // from the queue. We try to mitigate this issue by walking through all queued drivers
      // and removing those that are already launched.
queuedDrivers
.filter(d => launchedDrivers.contains(d.submissionId))
.foreach(d => removeFromQueuedDrivers(d.submissionId))
pendingRetryDriversState.fetchAll[MesosDriverDescription]()
.foreach(s => pendingRetryDrivers += s)
// TODO: Consider storing finished drivers so we can show them on the UI after
// failover. For now we clear the history on each recovery.
finishedDrivers.clear()
}
}
/**
   * Starts the cluster scheduler and waits until the scheduler is registered.
   * This also marks the scheduler as ready for requests.
*/
def start(): Unit = {
    // TODO: Implement leader election to make sure only one framework is running in the cluster.
val fwId = schedulerState.fetch[String]("frameworkId")
fwId.foreach { id =>
frameworkId = id
}
recoverState()
metricsSystem.registerSource(new MesosClusterSchedulerSource(this))
metricsSystem.start()
val driver = createSchedulerDriver(
master,
MesosClusterScheduler.this,
Utils.getCurrentUserName(),
appName,
conf,
Some(frameworkUrl),
Some(true),
Some(Integer.MAX_VALUE),
fwId)
startScheduler(driver)
ready = true
}
def stop(): Unit = {
ready = false
metricsSystem.report()
metricsSystem.stop()
mesosDriver.stop(true)
}
override def registered(
driver: SchedulerDriver,
newFrameworkId: FrameworkID,
masterInfo: MasterInfo): Unit = {
logInfo("Registered as framework ID " + newFrameworkId.getValue)
if (newFrameworkId.getValue != frameworkId) {
frameworkId = newFrameworkId.getValue
schedulerState.persist("frameworkId", frameworkId)
}
markRegistered()
stateLock.synchronized {
this.masterInfo = Some(masterInfo)
if (!pendingRecover.isEmpty) {
// Start task reconciliation if we need to recover.
val statuses = pendingRecover.collect {
case (taskId, slaveId) =>
val newStatus = TaskStatus.newBuilder()
.setTaskId(TaskID.newBuilder().setValue(taskId).build())
.setSlaveId(slaveId)
.setState(MesosTaskState.TASK_STAGING)
.build()
launchedDrivers.get(taskId).map(_.mesosTaskStatus.getOrElse(newStatus))
.getOrElse(newStatus)
}
// TODO: Page the status updates to avoid trying to reconcile
// a large amount of tasks at once.
driver.reconcileTasks(statuses.toSeq.asJava)
}
}
}
private def getDriverExecutorURI(desc: MesosDriverDescription): Option[String] = {
desc.conf.getOption("spark.executor.uri")
.orElse(desc.command.environment.get("SPARK_EXECUTOR_URI"))
}
private def getDriverFrameworkID(desc: MesosDriverDescription): String = {
s"${frameworkId}-${desc.submissionId}"
}
private def adjust[A, B](m: collection.Map[A, B], k: A, default: B)(f: B => B) = {
m.updated(k, f(m.getOrElse(k, default)))
}
private def getDriverEnvironment(desc: MesosDriverDescription): Environment = {
// TODO(mgummelt): Don't do this here. This should be passed as a --conf
val commandEnv = adjust(desc.command.environment, "SPARK_SUBMIT_OPTS", "")(
v => s"$v -Dspark.mesos.driver.frameworkId=${getDriverFrameworkID(desc)}"
)
val env = desc.conf.getAllWithPrefix("spark.mesos.driverEnv.") ++ commandEnv
val envBuilder = Environment.newBuilder()
env.foreach { case (k, v) =>
envBuilder.addVariables(Variable.newBuilder().setName(k).setValue(v))
}
envBuilder.build()
}
private def getDriverUris(desc: MesosDriverDescription): List[CommandInfo.URI] = {
val confUris = List(conf.getOption("spark.mesos.uris"),
desc.conf.getOption("spark.mesos.uris"),
desc.conf.getOption("spark.submit.pyFiles")).flatMap(
_.map(_.split(",").map(_.trim))
).flatten
val jarUrl = desc.jarUrl.stripPrefix("file:").stripPrefix("local:")
((jarUrl :: confUris) ++ getDriverExecutorURI(desc).toList).map(uri =>
CommandInfo.URI.newBuilder().setValue(uri.trim()).build())
}
private def getDriverCommandValue(desc: MesosDriverDescription): String = {
val dockerDefined = desc.conf.contains("spark.mesos.executor.docker.image")
val executorUri = getDriverExecutorURI(desc)
// Gets the path to run spark-submit, and the path to the Mesos sandbox.
val (executable, sandboxPath) = if (dockerDefined) {
// Application jar is automatically downloaded in the mounted sandbox by Mesos,
// and the path to the mounted volume is stored in $MESOS_SANDBOX env variable.
("./bin/spark-submit", "$MESOS_SANDBOX")
} else if (executorUri.isDefined) {
val folderBasename = executorUri.get.split('/').last.split('.').head
val entries = conf.getOption("spark.executor.extraLibraryPath")
.map(path => Seq(path) ++ desc.command.libraryPathEntries)
.getOrElse(desc.command.libraryPathEntries)
val prefixEnv = if (!entries.isEmpty) Utils.libraryPathEnvPrefix(entries) else ""
val cmdExecutable = s"cd $folderBasename*; $prefixEnv bin/spark-submit"
// Sandbox path points to the parent folder as we chdir into the folderBasename.
(cmdExecutable, "..")
} else {
val executorSparkHome = desc.conf.getOption("spark.mesos.executor.home")
.orElse(conf.getOption("spark.home"))
.orElse(Option(System.getenv("SPARK_HOME")))
.getOrElse {
throw new SparkException("Executor Spark home `spark.mesos.executor.home` is not set!")
}
val cmdExecutable = new File(executorSparkHome, "./bin/spark-submit").getPath
// Sandbox points to the current directory by default with Mesos.
(cmdExecutable, ".")
}
val cmdOptions = generateCmdOption(desc, sandboxPath).mkString(" ")
val primaryResource = new File(sandboxPath, desc.jarUrl.split("/").last).toString()
val appArguments = desc.command.arguments.mkString(" ")
s"$executable $cmdOptions $primaryResource $appArguments"
}
private def buildDriverCommand(desc: MesosDriverDescription): CommandInfo = {
val builder = CommandInfo.newBuilder()
builder.setValue(getDriverCommandValue(desc))
builder.setEnvironment(getDriverEnvironment(desc))
builder.addAllUris(getDriverUris(desc).asJava)
builder.build()
}
private def generateCmdOption(desc: MesosDriverDescription, sandboxPath: String): Seq[String] = {
var options = Seq(
"--name", desc.conf.get("spark.app.name"),
"--master", s"mesos://${conf.get("spark.master")}",
"--driver-cores", desc.cores.toString,
"--driver-memory", s"${desc.mem}M")
// Assume empty main class means we're running python
if (!desc.command.mainClass.equals("")) {
options ++= Seq("--class", desc.command.mainClass)
}
desc.conf.getOption("spark.executor.memory").foreach { v =>
options ++= Seq("--executor-memory", v)
}
desc.conf.getOption("spark.cores.max").foreach { v =>
options ++= Seq("--total-executor-cores", v)
}
desc.conf.getOption("spark.submit.pyFiles").foreach { pyFiles =>
val formattedFiles = pyFiles.split(",")
.map { path => new File(sandboxPath, path.split("/").last).toString() }
.mkString(",")
options ++= Seq("--py-files", formattedFiles)
}
// --conf
val replicatedOptionsBlacklist = Set(
"spark.jars", // Avoids duplicate classes in classpath
"spark.submit.deployMode", // this would be set to `cluster`, but we need client
"spark.master" // this contains the address of the dispatcher, not master
)
val defaultConf = conf.getAllWithPrefix("spark.mesos.dispatcher.driverDefault.").toMap
val driverConf = desc.conf.getAll
.filter { case (key, _) => !replicatedOptionsBlacklist.contains(key) }
.toMap
(defaultConf ++ driverConf).foreach { case (key, value) =>
options ++= Seq("--conf", s"$key=${shellEscape(value)}") }
options
}
/**
* Escape args for Unix-like shells, unless already quoted by the user.
* Based on: http://www.gnu.org/software/bash/manual/html_node/Double-Quotes.html
* and http://www.grymoire.com/Unix/Quote.html
*
* @param value argument
* @return escaped argument
*/
private[scheduler] def shellEscape(value: String): String = {
val WrappedInQuotes = """^(".+"|'.+')$""".r
    val ShellSpecialChars = (""".*([ '<>&|\?\*;!#\\(\)"$`]).*""").r
value match {
case WrappedInQuotes(c) => value // The user quoted his args, don't touch it!
      case ShellSpecialChars(c) => "\"" + value.replaceAll("""(["`\$\\])""", """\\$1""") + "\""
case _: String => value // Don't touch harmless strings
}
}
private class ResourceOffer(
val offerId: OfferID,
val slaveId: SlaveID,
var resources: JList[Resource]) {
override def toString(): String = {
s"Offer id: ${offerId}, resources: ${resources}"
}
}
private def createTaskInfo(desc: MesosDriverDescription, offer: ResourceOffer): TaskInfo = {
val taskId = TaskID.newBuilder().setValue(desc.submissionId).build()
val (remainingResources, cpuResourcesToUse) =
partitionResources(offer.resources, "cpus", desc.cores)
val (finalResources, memResourcesToUse) =
partitionResources(remainingResources.asJava, "mem", desc.mem)
offer.resources = finalResources.asJava
val appName = desc.conf.get("spark.app.name")
val taskInfo = TaskInfo.newBuilder()
.setTaskId(taskId)
.setName(s"Driver for ${appName}")
.setSlaveId(offer.slaveId)
.setCommand(buildDriverCommand(desc))
.addAllResources(cpuResourcesToUse.asJava)
.addAllResources(memResourcesToUse.asJava)
desc.conf.getOption("spark.mesos.executor.docker.image").foreach { image =>
MesosSchedulerBackendUtil.setupContainerBuilderDockerInfo(image,
desc.conf,
taskInfo.getContainerBuilder)
}
taskInfo.build
}
/**
   * This method takes all the possible candidates and attempts to schedule them with Mesos offers.
   * Every time a new task is scheduled, the afterLaunchCallback is called to perform post-scheduling
   * logic on each task.
*/
private def scheduleTasks(
candidates: Seq[MesosDriverDescription],
afterLaunchCallback: (String) => Boolean,
currentOffers: List[ResourceOffer],
tasks: mutable.HashMap[OfferID, ArrayBuffer[TaskInfo]]): Unit = {
for (submission <- candidates) {
val driverCpu = submission.cores
val driverMem = submission.mem
logTrace(s"Finding offer to launch driver with cpu: $driverCpu, mem: $driverMem")
val offerOption = currentOffers.find { o =>
getResource(o.resources, "cpus") >= driverCpu &&
getResource(o.resources, "mem") >= driverMem
}
if (offerOption.isEmpty) {
logDebug(s"Unable to find offer to launch driver id: ${submission.submissionId}, " +
s"cpu: $driverCpu, mem: $driverMem")
} else {
val offer = offerOption.get
val queuedTasks = tasks.getOrElseUpdate(offer.offerId, new ArrayBuffer[TaskInfo])
val task = createTaskInfo(submission, offer)
queuedTasks += task
logTrace(s"Using offer ${offer.offerId.getValue} to launch driver " +
submission.submissionId)
val newState = new MesosClusterSubmissionState(submission, task.getTaskId, offer.slaveId,
None, new Date(), None, getDriverFrameworkID(submission))
launchedDrivers(submission.submissionId) = newState
launchedDriversState.persist(submission.submissionId, newState)
afterLaunchCallback(submission.submissionId)
}
}
}
override def resourceOffers(driver: SchedulerDriver, offers: JList[Offer]): Unit = {
logTrace(s"Received offers from Mesos: \\n${offers.asScala.mkString("\\n")}")
val tasks = new mutable.HashMap[OfferID, ArrayBuffer[TaskInfo]]()
val currentTime = new Date()
val currentOffers = offers.asScala.map {
o => new ResourceOffer(o.getId, o.getSlaveId, o.getResourcesList)
}.toList
stateLock.synchronized {
// We first schedule all the supervised drivers that are ready to retry.
// This list will be empty if none of the drivers are marked as supervise.
val driversToRetry = pendingRetryDrivers.filter { d =>
d.retryState.get.nextRetry.before(currentTime)
}
scheduleTasks(
copyBuffer(driversToRetry),
removeFromPendingRetryDrivers,
currentOffers,
tasks)
// Then we walk through the queued drivers and try to schedule them.
scheduleTasks(
copyBuffer(queuedDrivers),
removeFromQueuedDrivers,
currentOffers,
tasks)
}
tasks.foreach { case (offerId, taskInfos) =>
driver.launchTasks(Collections.singleton(offerId), taskInfos.asJava)
}
for (o <- currentOffers if !tasks.contains(o.offerId)) {
driver.declineOffer(o.offerId)
}
}
private def copyBuffer(
buffer: ArrayBuffer[MesosDriverDescription]): ArrayBuffer[MesosDriverDescription] = {
val newBuffer = new ArrayBuffer[MesosDriverDescription](buffer.size)
buffer.copyToBuffer(newBuffer)
newBuffer
}
def getSchedulerState(): MesosClusterSchedulerState = {
stateLock.synchronized {
new MesosClusterSchedulerState(
frameworkId,
masterInfo.map(m => s"http://${m.getIp}:${m.getPort}"),
copyBuffer(queuedDrivers),
launchedDrivers.values.map(_.copy()).toList,
finishedDrivers.map(_.copy()).toList,
copyBuffer(pendingRetryDrivers))
}
}
override def offerRescinded(driver: SchedulerDriver, offerId: OfferID): Unit = {}
override def disconnected(driver: SchedulerDriver): Unit = {}
override def reregistered(driver: SchedulerDriver, masterInfo: MasterInfo): Unit = {
logInfo(s"Framework re-registered with master ${masterInfo.getId}")
}
override def slaveLost(driver: SchedulerDriver, slaveId: SlaveID): Unit = {}
override def error(driver: SchedulerDriver, error: String): Unit = {
logError("Error received: " + error)
markErr()
}
/**
   * Check if the task state is a recoverable state from which we can relaunch the task.
   * Task states like TASK_ERROR are not relaunchable since the task wasn't able
   * to be validated by Mesos.
*/
private def shouldRelaunch(state: MesosTaskState): Boolean = {
state == MesosTaskState.TASK_FAILED ||
state == MesosTaskState.TASK_KILLED ||
state == MesosTaskState.TASK_LOST
}
override def statusUpdate(driver: SchedulerDriver, status: TaskStatus): Unit = {
val taskId = status.getTaskId.getValue
stateLock.synchronized {
if (launchedDrivers.contains(taskId)) {
if (status.getReason == Reason.REASON_RECONCILIATION &&
!pendingRecover.contains(taskId)) {
// Task has already received update and no longer requires reconciliation.
return
}
val state = launchedDrivers(taskId)
// Check if the driver is supervise enabled and can be relaunched.
if (state.driverDescription.supervise && shouldRelaunch(status.getState)) {
removeFromLaunchedDrivers(taskId)
state.finishDate = Some(new Date())
val retryState: Option[MesosClusterRetryState] = state.driverDescription.retryState
val (retries, waitTimeSec) = retryState
.map { rs => (rs.retries + 1, Math.min(maxRetryWaitTime, rs.waitTime * 2)) }
.getOrElse{ (1, 1) }
val nextRetry = new Date(new Date().getTime + waitTimeSec * 1000L)
val newDriverDescription = state.driverDescription.copy(
retryState = Some(new MesosClusterRetryState(status, retries, nextRetry, waitTimeSec)))
pendingRetryDrivers += newDriverDescription
pendingRetryDriversState.persist(taskId, newDriverDescription)
} else if (TaskState.isFinished(mesosToTaskState(status.getState))) {
removeFromLaunchedDrivers(taskId)
state.finishDate = Some(new Date())
if (finishedDrivers.size >= retainedDrivers) {
val toRemove = math.max(retainedDrivers / 10, 1)
finishedDrivers.trimStart(toRemove)
}
finishedDrivers += state
}
state.mesosTaskStatus = Option(status)
} else {
logError(s"Unable to find driver $taskId in status update")
}
}
}
override def frameworkMessage(
driver: SchedulerDriver,
executorId: ExecutorID,
slaveId: SlaveID,
message: Array[Byte]): Unit = {}
override def executorLost(
driver: SchedulerDriver,
executorId: ExecutorID,
slaveId: SlaveID,
status: Int): Unit = {}
private def removeFromQueuedDrivers(id: String): Boolean = {
val index = queuedDrivers.indexWhere(_.submissionId.equals(id))
if (index != -1) {
queuedDrivers.remove(index)
queuedDriversState.expunge(id)
true
} else {
false
}
}
private def removeFromLaunchedDrivers(id: String): Boolean = {
if (launchedDrivers.remove(id).isDefined) {
launchedDriversState.expunge(id)
true
} else {
false
}
}
private def removeFromPendingRetryDrivers(id: String): Boolean = {
val index = pendingRetryDrivers.indexWhere(_.submissionId.equals(id))
if (index != -1) {
pendingRetryDrivers.remove(index)
pendingRetryDriversState.expunge(id)
true
} else {
false
}
}
def getQueuedDriversSize: Int = queuedDrivers.size
def getLaunchedDriversSize: Int = launchedDrivers.size
def getPendingRetryDriversSize: Int = pendingRetryDrivers.size
}
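/**
 * A small sketch (not part of the Spark sources) of the supervise/retry backoff arithmetic
 * used in statusUpdate above: the wait time starts at one second, doubles on every retry,
 * and is capped by spark.mesos.cluster.retry.wait.max (60 seconds here).
 */
private[spark] object RetryBackoffSketch {
  private val maxRetryWaitTime = 60
  /** Given the previous (retries, waitTimeSec) pair, compute the next one. */
  def nextRetryState(previous: Option[(Int, Int)]): (Int, Int) =
    previous
      .map { case (retries, waitTime) => (retries + 1, math.min(maxRetryWaitTime, waitTime * 2)) }
      .getOrElse((1, 1))
  /** First n backoff states: (1,1), (2,2), (3,4), (4,8), ... capped at 60 seconds. */
  def schedule(n: Int): Seq[(Int, Int)] =
    Iterator.iterate(nextRetryState(None))(s => nextRetryState(Some(s))).take(n).toSeq
}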
|
likithkailas/StreamingSystems
|
mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterScheduler.scala
|
Scala
|
apache-2.0
| 30,007
|
/*
* Copyright (C) 2014 - 2017 Contributors as noted in the AUTHORS.md file
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.wegtam.tensei.agent
import java.time.LocalDateTime
import com.wegtam.tensei.agent.adt.ParserDataContainer
import net.openhft.hashing.LongHashFunction
import org.github.jamm.MemoryMeter
object ParserDataContainerTimestampMemoryUsage {
def main(args: Array[String]): Unit = {
val meter = new MemoryMeter()
val hasher = LongHashFunction.xx()
val builder = Vector.newBuilder[ParserDataContainer]
println(
s"Going to create a Vector with $SAMPLE_SIZE ParserDataContainer elements using Timestamp."
)
println(
"Each ParserDataContainer will contain data, elementId, dfasdlId, sequenceRowCounter and dataElementHash."
)
val baseTime = LocalDateTime.now()
for (cnt <- 1L to SAMPLE_SIZE) {
builder += ParserDataContainer(
data = java.sql.Timestamp.valueOf(baseTime.plusMinutes(cnt)),
elementId = "AN-ID",
dfasdlId = Option("DFASDL-ID"),
sequenceRowCounter = cnt,
dataElementHash = Option(hasher.hashBytes(s"Something-$cnt".getBytes("UTF-8")))
)
if (cnt % 250000 == 0)
println(s"\\tCreated $cnt elements.")
}
println("Calling .result() on VectorBuilder.")
val r = builder.result()
println("Checking length of generated collection.")
require(r.length.toLong == SAMPLE_SIZE)
println("Measuring memory usage. This may take a while.")
val bytes = meter.measureDeep(r)
val mBytes = "%.2f".format(bytes.toDouble / 1024 / 1024)
println(s"Vector allocates $bytes bytes ($mBytes MB).")
}
}
|
Tensei-Data/tensei-agent
|
benchmarks/src/main/scala/com/wegtam/tensei/agent/ParserDataContainerTimestampMemoryUsage.scala
|
Scala
|
agpl-3.0
| 2,298
|
package gapt.provers.spass
import gapt.examples.CountingEquivalence
import gapt.expr._
import gapt.expr.formula.Bottom
import gapt.expr.formula.Top
import gapt.expr.formula.fol.{ naive, thresholds }
import gapt.proofs.context.mutable.MutableContext
import gapt.proofs.resolution.{ AvatarComponent, AvatarNegNonGroundComp, ResolutionToLKProof }
import gapt.proofs.{ Clause, HOLSequent, Sequent, SequentMatchers }
import gapt.utils.SatMatchers
import org.specs2.mutable.Specification
class SpassTest extends Specification with SequentMatchers with SatMatchers {
args( skipAll = !SPASS.isInstalled )
"SPASS" should {
"prove identity" in {
val s = Sequent() :+ hof"k=k"
SPASS.getLKProof( s ) must beLike {
case Some( p ) => p.endSequent must beMultiSetEqual( s )
}
}
"prove { A or B :- -(-A and -B) }" in {
val s = hof"A | B" +: Sequent() :+ hof"-(-A & -B)"
SPASS.getLKProof( s ) must beLike {
case Some( p ) => p.endSequent must beMultiSetEqual( s )
}
}
"handle quantified antecedents" in {
val seq = hof"!x 0+x=x" +: hof"!x!y s(x)+y=s(x+y)" +: Sequent() :+ hof"s(0)+s(s(0)) = s(s(s(0)))"
SPASS.getLKProof( seq ) must beLike {
case Some( p ) => p.endSequent must beMultiSetEqual( seq )
}
}
"prove top" in { SPASS.getLKProof( HOLSequent( Seq(), Seq( Top() ) ) ) must beSome }
"not prove bottom" in { SPASS.getLKProof( HOLSequent( Seq(), Seq( Bottom() ) ) ) must beNone }
"not refute top" in { SPASS.getLKProof( HOLSequent( Seq( Top() ), Seq() ) ) must beNone }
"refute bottom" in { SPASS.getLKProof( HOLSequent( Seq( Bottom() ), Seq() ) ) must beSome }
"ground sequents" in {
val seq = hof"x=y" +: Sequent() :+ hof"y=x"
SPASS.getLKProof( seq ) must beLike {
case Some( p ) => p.endSequent must beMultiSetEqual( seq )
}
}
"treat variables in sequents as constants" in {
val seq = hof"P(x)" +: Sequent() :+ hof"P(c)"
SPASS getExpansionProof seq must beNone
}
"handle weird sequents" in {
val cnf = Set( Clause(), hoa"a" +: Clause() )
SPASS.getResolutionProof( cnf ) must beSome
}
"large cnf" in {
SPASS getExpansionProof CountingEquivalence( 3 ) must beLike { case Some( p ) => p.deep must beValidSequent }
}
"bug with quantified splitting" in {
SPASS getExpansionProof CountingEquivalence( 2 ) must beLike { case Some( p ) => p.deep must beValidSequent }
}
"bug with ground parts in quantified splits" in {
val Some( res ) = SPASS.getResolutionProof( CountingEquivalence( 1 ) )
res.subProofs.collect { case AvatarComponent( c: AvatarNegNonGroundComp ) => c } must not( beEmpty )
ResolutionToLKProof( res )
ok
}
"splitting definitions" in {
val formula = CountingEquivalence( 2 )
implicit val ctx: MutableContext = MutableContext.guess( formula )
val Some( proof ) = SPASS.getResolutionProof( formula )
ctx.check( proof )
ok
}
}
}
|
gapt/gapt
|
tests/src/test/scala/gapt/provers/spass/SpassTest.scala
|
Scala
|
gpl-3.0
| 3,035
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.joins
import java.util.{HashMap => JavaHashMap}
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.metric.LongSQLMetric
import org.apache.spark.util.collection.CompactBuffer
@DeveloperApi
trait HashOuterJoin {
self: SparkPlan =>
val leftKeys: Seq[Expression]
val rightKeys: Seq[Expression]
val joinType: JoinType
val condition: Option[Expression]
val left: SparkPlan
val right: SparkPlan
override def output: Seq[Attribute] = {
joinType match {
case LeftOuter =>
left.output ++ right.output.map(_.withNullability(true))
case RightOuter =>
left.output.map(_.withNullability(true)) ++ right.output
case FullOuter =>
left.output.map(_.withNullability(true)) ++ right.output.map(_.withNullability(true))
case x =>
throw new IllegalArgumentException(s"HashOuterJoin should not take $x as the JoinType")
}
}
protected[this] lazy val (buildPlan, streamedPlan) = joinType match {
case RightOuter => (left, right)
case LeftOuter => (right, left)
case x =>
throw new IllegalArgumentException(
s"HashOuterJoin should not take $x as the JoinType")
}
protected[this] lazy val (buildKeys, streamedKeys) = joinType match {
case RightOuter => (leftKeys, rightKeys)
case LeftOuter => (rightKeys, leftKeys)
case x =>
throw new IllegalArgumentException(
s"HashOuterJoin should not take $x as the JoinType")
}
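// Tungsten (unsafe) rows are used only when codegen is enabled, the join is not FULL OUTER,
// and both the build keys and the output schema are supported by UnsafeProjection.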
protected[this] def isUnsafeMode: Boolean = {
(self.codegenEnabled && self.unsafeEnabled && joinType != FullOuter
&& UnsafeProjection.canSupport(buildKeys)
&& UnsafeProjection.canSupport(self.schema))
}
override def outputsUnsafeRows: Boolean = isUnsafeMode
override def canProcessUnsafeRows: Boolean = isUnsafeMode
override def canProcessSafeRows: Boolean = !isUnsafeMode
protected def buildKeyGenerator: Projection =
if (isUnsafeMode) {
UnsafeProjection.create(buildKeys, buildPlan.output)
} else {
newMutableProjection(buildKeys, buildPlan.output)()
}
protected[this] def streamedKeyGenerator: Projection = {
if (isUnsafeMode) {
UnsafeProjection.create(streamedKeys, streamedPlan.output)
} else {
newProjection(streamedKeys, streamedPlan.output)
}
}
protected[this] def resultProjection: InternalRow => InternalRow = {
if (isUnsafeMode) {
UnsafeProjection.create(self.schema)
} else {
identity[InternalRow]
}
}
@transient private[this] lazy val DUMMY_LIST = CompactBuffer[InternalRow](null)
@transient protected[this] lazy val EMPTY_LIST = CompactBuffer[InternalRow]()
@transient private[this] lazy val leftNullRow = new GenericInternalRow(left.output.length)
@transient private[this] lazy val rightNullRow = new GenericInternalRow(right.output.length)
@transient private[this] lazy val boundCondition =
newPredicate(condition.getOrElse(Literal(true)), left.output ++ right.output)
// TODO we need to rewrite all of the iterators with our own implementation instead of the Scala
// iterator, for performance purposes.
protected[this] def leftOuterIterator(
key: InternalRow,
joinedRow: JoinedRow,
rightIter: Iterable[InternalRow],
resultProjection: InternalRow => InternalRow,
numOutputRows: LongSQLMetric): Iterator[InternalRow] = {
val ret: Iterable[InternalRow] = {
if (!key.anyNull) {
val temp = if (rightIter != null) {
rightIter.collect {
case r if boundCondition(joinedRow.withRight(r)) => {
numOutputRows += 1
resultProjection(joinedRow).copy()
}
}
} else {
List.empty
}
if (temp.isEmpty) {
numOutputRows += 1
resultProjection(joinedRow.withRight(rightNullRow)) :: Nil
} else {
temp
}
} else {
numOutputRows += 1
resultProjection(joinedRow.withRight(rightNullRow)) :: Nil
}
}
ret.iterator
}
protected[this] def rightOuterIterator(
key: InternalRow,
leftIter: Iterable[InternalRow],
joinedRow: JoinedRow,
resultProjection: InternalRow => InternalRow,
numOutputRows: LongSQLMetric): Iterator[InternalRow] = {
val ret: Iterable[InternalRow] = {
if (!key.anyNull) {
val temp = if (leftIter != null) {
leftIter.collect {
case l if boundCondition(joinedRow.withLeft(l)) => {
numOutputRows += 1
resultProjection(joinedRow).copy()
}
}
} else {
List.empty
}
if (temp.isEmpty) {
numOutputRows += 1
resultProjection(joinedRow.withLeft(leftNullRow)) :: Nil
} else {
temp
}
} else {
numOutputRows += 1
resultProjection(joinedRow.withLeft(leftNullRow)) :: Nil
}
}
ret.iterator
}
protected[this] def fullOuterIterator(
key: InternalRow, leftIter: Iterable[InternalRow], rightIter: Iterable[InternalRow],
joinedRow: JoinedRow, numOutputRows: LongSQLMetric): Iterator[InternalRow] = {
if (!key.anyNull) {
// Store the positions of records in right, if one of its associated rows satisfies
// the join condition.
val rightMatchedSet = scala.collection.mutable.Set[Int]()
leftIter.iterator.flatMap[InternalRow] { l =>
joinedRow.withLeft(l)
var matched = false
rightIter.zipWithIndex.collect {
// 1. For those matched records (satisfying the join condition) with both sides filled,
// append them directly.
case (r, idx) if boundCondition(joinedRow.withRight(r)) =>
numOutputRows += 1
matched = true
// if the row satisfies the join condition, add its index into the matched set
rightMatchedSet.add(idx)
joinedRow.copy()
} ++ DUMMY_LIST.filter(_ => !matched).map( _ => {
// 2. For those unmatched records in left, append additional records with empty right.
// DUMMY_LIST.filter(_ => !matched) is a tricky way to add an additional row,
// as we don't know whether we need to append it until we finish iterating over
// all of the records on the right side.
// If we didn't get any proper row, then append a single row with an empty right side.
numOutputRows += 1
joinedRow.withRight(rightNullRow).copy()
})
} ++ rightIter.zipWithIndex.collect {
// 3. For those unmatched records in right, append additional records with empty left.
// Re-visit the records in right and append an additional row with an empty left side
// if the record's index is not in the matched set.
case (r, idx) if !rightMatchedSet.contains(idx) =>
numOutputRows += 1
joinedRow(leftNullRow, r).copy()
}
} else {
leftIter.iterator.map[InternalRow] { l =>
numOutputRows += 1
joinedRow(l, rightNullRow).copy()
} ++ rightIter.iterator.map[InternalRow] { r =>
numOutputRows += 1
joinedRow(leftNullRow, r).copy()
}
}
}
// This is only used by FullOuter
protected[this] def buildHashTable(
iter: Iterator[InternalRow],
numIterRows: LongSQLMetric,
keyGenerator: Projection): JavaHashMap[InternalRow, CompactBuffer[InternalRow]] = {
val hashTable = new JavaHashMap[InternalRow, CompactBuffer[InternalRow]]()
while (iter.hasNext) {
val currentRow = iter.next()
numIterRows += 1
val rowKey = keyGenerator(currentRow)
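// Group rows by their join key; the key is copied because the projection may reuse its output row.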
var existingMatchList = hashTable.get(rowKey)
if (existingMatchList == null) {
existingMatchList = new CompactBuffer[InternalRow]()
hashTable.put(rowKey.copy(), existingMatchList)
}
existingMatchList += currentRow.copy()
}
hashTable
}
}
|
tophua/spark1.52
|
sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashOuterJoin.scala
|
Scala
|
apache-2.0
| 9,414
|
package com.twitter.algebird.statistics
import com.twitter.algebird.CheckProperties
import org.scalacheck.Arbitrary
import org.scalacheck.Gen._
import org.scalatest.{ Matchers, _ }
class StatisticsRingLaws extends CheckProperties with Matchers {
import com.twitter.algebird.BaseProperties._
val statsRing = new StatisticsRing[Int]
val gen = for (v <- choose(0, 1 << 30)) yield v
property("StatisticsRing is a Ring") {
ringLaws[Int](statsRing, Arbitrary(gen))
}
}
class StatisticsMonoidLaws extends CheckProperties with Matchers {
import com.twitter.algebird.BaseProperties._
val statsMonoid = new StatisticsMonoid[Int]
val gen = for (v <- choose(0, 1 << 14)) yield v
property("StatisticsMonoid is a Monoid") {
monoidLaws[Int](statsMonoid, Arbitrary(gen))
}
}
class StatisticsTest extends WordSpec with Matchers {
// the test framework garbles the exceptions :/
lazy val statsMonoid = new StatisticsMonoid[Int]
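// Exercise the monoid up front so the call-count assertions below see fixed totals.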
try {
for (i <- 1 to 2) statsMonoid.zero
for (i <- 1 to 3) statsMonoid.plus(i, i)
for (i <- 1 to 3000) statsMonoid.sum(for (v <- 1 to i) yield v)
for (i <- 1 to 2000) statsMonoid.sumOption(for (v <- 1 to i) yield v)
} catch {
case e: Exception => {
e.printStackTrace()
throw e
}
}
"StatisticsMonoid" should {
"count zero calls" in {
assert(statsMonoid.getZeroCallCount == 2)
}
"count plus calls" in {
assert(statsMonoid.getPlusCallCount == 3)
}
"count sum calls" in {
assert(statsMonoid.getSumCallCount == 3000)
assert(statsMonoid.getSumCallTime > 0L)
statsMonoid.toString.contains("sum calls: <1: 0, <2: 1, <4: 2, <8: 4, <16: 8, <32: 16, <64: 32, <128: 64, <256: 128, <512: 256, >: 2489, avg=1500.5 count=3000")
}
"count sumOption calls" in {
assert(statsMonoid.getSumOptionCallCount == 2000)
assert(statsMonoid.getSumOptionCallTime > 0L)
statsMonoid.toString.contains("sumOption calls: <1: 0, <2: 1, <4: 2, <8: 4, <16: 8, <32: 16, <64: 32, <128: 64, <256: 128, <512: 256, >: 1489, avg=1000.5 count=2000")
}
}
}
|
nvoron23/algebird
|
algebird-test/src/test/scala/com/twitter/algebird/statistics/StatisticsTests.scala
|
Scala
|
apache-2.0
| 2,100
|
/* sbt -- Simple Build Tool
* Copyright 2008, 2009 Mark Harrah
*/
package sbt
import jline.{ConsoleReader, History}
import java.io.{File, InputStream, PrintWriter}
import complete.Parser
import java.util.concurrent.atomic.AtomicBoolean
abstract class JLine extends LineReader
{
protected[this] val handleCONT: Boolean
protected[this] val reader: ConsoleReader
/** Is the input stream at EOF? Compensates for absent EOF detection in JLine's UnsupportedTerminal. */
protected[this] val inputEof = new AtomicBoolean(false)
protected[this] val historyPath: Option[File]
def readLine(prompt: String, mask: Option[Char] = None) = JLine.withJLine { unsynchronizedReadLine(prompt, mask) }
private[this] def unsynchronizedReadLine(prompt: String, mask: Option[Char]) =
readLineWithHistory(prompt, mask) match
{
case null => None
case x => Some(x.trim)
}
private[this] def readLineWithHistory(prompt: String, mask: Option[Char]): String =
historyPath match
{
case None => readLineDirect(prompt, mask)
case Some(file) =>
val h = reader.getHistory
JLine.loadHistory(h, file)
try { readLineDirect(prompt, mask) }
finally { JLine.saveHistory(h, file) }
}
private[this] def readLineDirect(prompt: String, mask: Option[Char]): String =
if(handleCONT)
Signals.withHandler(() => resume(), signal = Signals.CONT)( () => readLineDirectRaw(prompt, mask) )
else
readLineDirectRaw(prompt, mask)
private[this] def readLineDirectRaw(prompt: String, mask: Option[Char]): String =
{
val newprompt = handleMultilinePrompt(prompt)
val line = mask match {
case Some(m) => reader.readLine(newprompt, m)
case None => reader.readLine(newprompt)
}
if (inputEof.get) null else line
}
private[this] def handleMultilinePrompt(prompt: String): String = {
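// Print every line of a multi-line prompt except the last ourselves; JLine renders only the final line.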
val lines = """\\r?\\n""".r.split(prompt)
lines.size match {
case 0 | 1 => prompt
case _ => reader.printString(lines.init.mkString("\n") + "\n"); lines.last;
}
}
private[this] def resume()
{
jline.Terminal.resetTerminal
JLine.terminal.disableEcho()
reader.drawLine()
reader.flushConsole()
}
}
private object JLine
{
// When calling this, ensure that enableEcho has been or will be called.
// getTerminal will initialize the terminal to disable echo.
private def terminal = jline.Terminal.getTerminal
private def withTerminal[T](f: jline.Terminal => T): T =
synchronized
{
val t = terminal
t.synchronized { f(t) }
}
/** For accessing the JLine Terminal object.
* This ensures synchronized access as well as re-enabling echo after getting the Terminal. */
def usingTerminal[T](f: jline.Terminal => T): T =
withTerminal { t =>
t.enableEcho()
f(t)
}
def createReader() =
usingTerminal { t =>
val cr = new ConsoleReader
cr.setBellEnabled(false)
cr
}
def withJLine[T](action: => T): T =
withTerminal { t =>
t.disableEcho()
try { action }
finally { t.enableEcho() }
}
private[sbt] def loadHistory(h: History, file: File)
{
h.setMaxSize(MaxHistorySize)
if(file.isFile) IO.reader(file)( h.load )
}
private[sbt] def saveHistory(h: History, file: File): Unit =
Using.fileWriter()(file) { writer =>
val out = new PrintWriter(writer, false)
h.setOutput(out)
h.flushBuffer()
out.close()
h.setOutput(null)
}
def simple(historyPath: Option[File], handleCONT: Boolean = HandleCONT): SimpleReader = new SimpleReader(historyPath, handleCONT)
val MaxHistorySize = 500
val HandleCONT = !java.lang.Boolean.getBoolean("sbt.disable.cont") && Signals.supported(Signals.CONT)
}
trait LineReader
{
def readLine(prompt: String, mask: Option[Char] = None): Option[String]
}
final class FullReader(val historyPath: Option[File], complete: Parser[_], val handleCONT: Boolean = JLine.HandleCONT) extends JLine
{
protected[this] val reader =
{
val cr = new ConsoleReader
if (!cr.getTerminal.isSupported) {
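// JLine's UnsupportedTerminal never reports EOF, so wrap the input stream and flag EOF ourselves.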
val input = cr.getInput
cr.setInput(new InputStream {
def read(): Int = {
val c = input.read()
if (c == -1) inputEof.set(true)
c
}
})
}
cr.setBellEnabled(false)
sbt.complete.JLineCompletion.installCustomCompletor(cr, complete)
cr
}
}
class SimpleReader private[sbt] (val historyPath: Option[File], val handleCONT: Boolean) extends JLine
{
protected[this] val reader = JLine.createReader()
}
object SimpleReader extends SimpleReader(None, JLine.HandleCONT)
|
olove/xsbt
|
util/complete/src/main/scala/sbt/LineReader.scala
|
Scala
|
bsd-3-clause
| 4,401
|
package org.jetbrains.plugins.scala.lang.completion.postfix.templates
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.lang.completion.postfix.templates.selector.{SelectorConditions, AncestorSelector}
import org.jetbrains.plugins.scala.lang.completion.postfix.templates.selector.SelectorType._
/**
* @author Roman.Shein
* @since 13.09.2015.
*/
class ScalaPrintlnPostfixTemplate(val alias: String = "sout") extends ScalaStringBasedPostfixTemplate(alias, "println(expr)",
new AncestorSelector(SelectorConditions.ANY_EXPR, Topmost)) {
override def getTemplateString(element: PsiElement): String = "println($expr$)"
}
|
loskutov/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/completion/postfix/templates/ScalaPrintlnPostfixTemplate.scala
|
Scala
|
apache-2.0
| 645
|
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn.ops
import com.google.protobuf.ByteString
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.T
import com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest
import org.scalatest.{FlatSpec, Matchers}
class SubstrSpec extends FlatSpec with Matchers {
"Substr operation" should "works correctly" in {
import com.intel.analytics.bigdl.utils.tf.TFTensorNumeric.NumericByteString
val data = Tensor.scalar(ByteString.copyFromUtf8("abc"))
val pos = Tensor.scalar(0)
val len = Tensor.scalar(2)
val expectOutput = Tensor.scalar(ByteString.copyFromUtf8("ab"))
val output = Substr().forward(T(data, pos, len))
output should be(expectOutput)
}
}
class SubstrSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
import com.intel.analytics.bigdl.utils.tf.TFTensorNumeric.NumericByteString
val subStr = Substr[Float]().setName("subStr")
val input = T(Tensor.scalar[ByteString](ByteString.copyFromUtf8("HelloBigDL")),
Tensor.scalar[Int](0), Tensor.scalar[Int](5))
runSerializationTest(subStr, input)
}
}
|
yiheng/BigDL
|
spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/ops/SubstrSpec.scala
|
Scala
|
apache-2.0
| 1,757
|
package chess
final case class LagTracker(
quotaGain: Centis,
quota: Centis,
quotaMax: Centis,
lagEstimator: DecayingRecorder,
uncompStats: Stats = Stats.empty,
lagStats: Stats = Stats.empty,
// We can remove compEst fields after tuning estimate.
compEstSqErr: Int = 0,
compEstOvers: Centis = Centis(0),
compEstimate: Option[Centis] = None
) {
def onMove(lag: Centis) = {
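// Compensate at most the currently available quota; the remainder of the lag stays uncompensated.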
val comp = lag atMost quota
val uncomped = lag - comp
val ceDiff = compEstimate.getOrElse(Centis(1)) - comp
(
comp,
copy(
quota = (quota + quotaGain - comp) atMost quotaMax,
uncompStats = {
// start recording after first uncomp.
if (uncomped == Centis(0) && uncompStats.samples == 0) uncompStats
else uncompStats record uncomped.centis.toFloat
},
lagStats = lagStats record (lag atMost Centis(2000)).centis.toFloat,
compEstSqErr = compEstSqErr + ceDiff.centis * ceDiff.centis,
compEstOvers = compEstOvers + ceDiff.nonNeg
).recordLag(lag)
)
}
def recordLag(lag: Centis) = {
val e = lagEstimator.record((lag atMost quotaMax).centis.toFloat)
copy(
lagEstimator = e,
compEstimate = Option(Centis(e.mean - .8f * e.deviation).nonNeg atMost quota)
)
}
def moves = lagStats.samples
def lagMean: Option[Centis] = moves > 0 option Centis(lagStats.mean)
def compEstStdErr: Option[Float] =
moves > 2 option Math.sqrt(compEstSqErr).toFloat / (moves - 2)
def compAvg: Option[Centis] = totalComp / moves
def totalComp: Centis = totalLag - totalUncomped
def totalLag: Centis = Centis(lagStats.total)
def totalUncomped: Centis = Centis(uncompStats.total)
def withFrameLag(frameLag: Centis, clock: Clock.Config) = copy(
quotaGain = LagTracker.maxQuotaGainFor(clock).atMost {
frameLag + LagTracker.estimatedCpuLag
}
)
}
object LagTracker {
private val estimatedCpuLag = Centis(5)
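// Longer clocks get a larger per-move compensation quota: 1s for games of 3 minutes or more,
// down to 0.2s for 15 seconds or less.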
def maxQuotaGainFor(config: Clock.Config) = Centis(config.estimateTotalSeconds match {
case i if i >= 180 => 100
case i if i <= 15 => 20
case i if i <= 30 => 35
case i => i / 4 + 30
})
def init(config: Clock.Config) = {
val quotaGain = maxQuotaGainFor(config)
LagTracker(
quotaGain = quotaGain,
quota = quotaGain * 3,
quotaMax = quotaGain * 7,
lagEstimator = EmptyDecayingStats(deviation = 4f, decay = 0.85f)
)
}
}
|
ornicar/scalachess
|
src/main/scala/LagTracker.scala
|
Scala
|
mit
| 2,471
|
package com.sksamuel.avro4s.schema
import com.sksamuel.avro4s.AvroSchema
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
class NamespaceSchemaTest extends AnyFunSuite with Matchers {
test("use package name for top level class") {
val expected = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream("/top_level_class_namespace.json"))
val schema = AvroSchema[Tau]
schema.toString(true) shouldBe expected.toString(true)
}
test("use package name without .package for classes defined in the package object") {
val expected = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream("/top_level_package_object_namespace.json"))
val schema = AvroSchema[Sigma]
schema.toString(true) shouldBe expected.toString(true)
}
test("use namespace of object for classes defined inside an object") {
val expected = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream("/top_level_object_namespace.json"))
val schema = AvroSchema[A]
schema.toString(true) shouldBe expected.toString(true)
}
test("local classes should use the namespace of their parent object package") {
case class NamespaceTestFoo(inner: String)
val expected = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream("/local_class_namespace.json"))
val schema = AvroSchema[NamespaceTestFoo]
schema.toString(true) shouldBe expected.toString(true)
}
}
case class Tau(a: String, b: Boolean)
case class A(inner: A.Inner)
object A {
final case class Inner(s: String)
}
|
sksamuel/avro4s
|
avro4s-core/src/test/scala/com/sksamuel/avro4s/schema/NamespaceSchemaTest.scala
|
Scala
|
apache-2.0
| 1,596
|
package org.jetbrains.plugins.scala
package codeInspection.recursion
import com.intellij.codeInspection.ProblemsHolder
import com.intellij.openapi.project.Project
import org.jetbrains.plugins.scala.codeInspection.{AbstractFixOnPsiElement, AbstractInspection}
import org.jetbrains.plugins.scala.lang.psi.api.statements.{RecursionType, ScAnnotationsHolder, ScFunctionDefinition}
/**
* Pavel Fatin
*/
class NoTailRecursionAnnotationInspection extends AbstractInspection("No tail recursion annotation") {
def actionFor(holder: ProblemsHolder) = {
case f: ScFunctionDefinition if f.canBeTailRecursive && !f.hasTailRecursionAnnotation &&
f.recursionType == RecursionType.TailRecursion =>
holder.registerProblem(f.nameId, getDisplayName, new AddAnnotationQuickFix(f))
}
class AddAnnotationQuickFix(holder: ScAnnotationsHolder)
extends AbstractFixOnPsiElement("Add @tailrec annotation", holder) {
def doApplyFix(project: Project) {
getElement.addAnnotation("scala.annotation.tailrec")
}
}
}
|
triggerNZ/intellij-scala
|
src/org/jetbrains/plugins/scala/codeInspection/recursion/NoTailRecursionAnnotationInspection.scala
|
Scala
|
apache-2.0
| 1,043
|
package org.apache.spark.ml.classification
import breeze.linalg.{Vector => BV}
import org.apache.spark.SparkException
import org.apache.spark.ml.Estimator
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.util.{Identifiable, SchemaUtils}
import org.apache.spark.ml.{Estimator, Model, Predictor}
import org.apache.spark.mllib.linalg._
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{ArrayType, DoubleType, StructType}
import org.apache.spark.sql.{DataFrame, Dataset, Row}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.mllib.rdd.MLPairRDDFunctions._
import org.apache.spark.mllib.knn.KNNUtils
import org.apache.spark.ml.Predictor
import org.apache.spark.ml.util.MetadataUtils
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.annotation.Since
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.ml.clustering.KMeans
import scala.collection.mutable.{ArrayBuffer, LinkedHashMap, PriorityQueue}
import scala.collection.{Map => MAP}
import scala.collection.mutable.{Map => muMap}
class KNN(override val uid: String) extends Predictor[Vector, KNN, KNNModel] {
def this() = this(Identifiable.randomUID("knn"))
override def copy(extra: ParamMap): KNN = defaultCopy(extra)
var clusterPoints = muMap[Vector, ArrayBuffer[VectorWithNorm]]()
var clusterRadius = muMap[Vector, Double]()
var clusterCenters: Array[Vector] = null
var k: Int = 1
def setK(value: Int): this.type = {
k = value
this
}
override def train(dataset: DataFrame): KNNModel = {
val instances = extractLabeledPoints(dataset).map {
case LabeledPoint(label: Double, features: Vector) => (label, features)
}
val handlePersistence = dataset.rdd.getStorageLevel == StorageLevel.NONE
if (handlePersistence) instances.persist(StorageLevel.MEMORY_AND_DISK)
val labelSchema = dataset.schema("label")
val computeNumClasses: () => Int = () => {
val Row(maxLabelIndex: Double) = dataset.agg(max(col("label").cast(DoubleType))).head()
maxLabelIndex.toInt + 1
}
val numClasses = MetadataUtils.getNumClasses(labelSchema).fold(computeNumClasses())(identity)
val points = instances.map{
case (label, features) => (label, new VectorWithNorm(features))
}
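// Cluster the training points with k-means so prediction can restrict the search to nearby clusters.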
var kVal = Math.min(50, Math.sqrt(dataset.count()/2).toInt)
if(kVal>1)
{
val kMeans = new KMeans().setK(kVal)
val kMeansModel = kMeans.fit(dataset)
clusterCenters = kMeansModel.clusterCenters
for(i <- 0 to clusterCenters.size-1) {
clusterPoints.put(clusterCenters(i), new ArrayBuffer[VectorWithNorm]())
clusterRadius.put(clusterCenters(i), Double.MaxValue)
}
}
else {
val kMeans = new KMeans().setK(2)
val kMeansModel = kMeans.fit(dataset)
clusterCenters = kMeansModel.clusterCenters
clusterPoints.put(clusterCenters(0), new ArrayBuffer[VectorWithNorm]())
clusterRadius.put(clusterCenters(0), Double.MaxValue)
}
// Map every point to its respective cluster center
val intermediateRDD = points.map {
case (label, point) =>
var dist:Double = Double.MaxValue
var curr_dist:Double = 0.0
var curr_center:Vector = null
for(value <- clusterCenters) {
curr_dist = point.fastSquaredDistance(new VectorWithNorm(value))
if(dist > curr_dist) {
dist = curr_dist
curr_center = value
}
}
((point, label), (curr_center, dist))
}
val clusterPointsRDD = intermediateRDD.map {
case (pointWithLabel, (curr_center, dist)) =>
(curr_center, pointWithLabel)
}.groupByKey()
val clusterRadiusRDD = intermediateRDD.map {
case (point, value) =>
value
}.topByKey(1)
.map{
case (curr_center, distArray) =>
(curr_center, distArray(0))
}
new KNNModel(uid, k, numClasses, clusterCenters, clusterPointsRDD.asInstanceOf[RDD[(Vector, MAP[VectorWithNorm, Double])]], clusterRadiusRDD)
}
}
class KNNModel(override val uid: String,
val k: Int,
val _numClasses: Int,
val clusterCenters: Array[Vector],
val clusterPointsRDD: RDD[(Vector, MAP[VectorWithNorm, Double])],
val clusterRadiusRDD: RDD[(Vector, Double)])
extends ProbabilisticClassificationModel[Vector, KNNModel] {
override def numClasses: Int = _numClasses
override def transform(dataset: DataFrame): DataFrame = {
val clusterPoints : MAP[Vector, MAP[VectorWithNorm, Double]] = clusterPointsRDD.collectAsMap()
val clusterRadius : MAP[Vector, Double] = clusterRadiusRDD.collectAsMap()
var inputPointsWithClusters = muMap[VectorWithNorm, ArrayBuffer[Vector]]()
val features = dataset.select($(featuresCol))
.map {
r => new VectorWithNorm(r.getAs[Vector](0))
}
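// For each query point: pick its closest cluster (radius-adjusted), collect candidate clusters that
// could hold closer points, and keep the k nearest labels in a bounded priority queue before voting.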
val merged = features.zipWithUniqueId().map {
case (point, i) =>
var dist:Double = Double.MaxValue
var curr_dist:Double = 0.0
var center:Vector = null
var maxRadius:Double = 0.0
for(value <- clusterCenters) {
curr_dist = point.fastSquaredDistance(new VectorWithNorm(value))
maxRadius = clusterRadius.get(value).get
if(dist > (curr_dist + maxRadius)) {
dist = curr_dist + maxRadius
center = value
}
}
val radius = clusterRadius.get(center).get
var clustersMatched = ArrayBuffer[Vector]()
inputPointsWithClusters.put(point, new ArrayBuffer[Vector]())
if(curr_dist > clusterRadius.get(center).get)
for((k, v) <- clusterRadius) {
if(dist + radius >= point.fastSquaredDistance(new VectorWithNorm(k)) - v)
clustersMatched += k
}
class HeapData(var dist: Double, var label: Double) extends Ordered[HeapData] {
def compare(that: HeapData): Int = (this.dist) compare (that.dist)
}
val orderingHeap: Ordering[HeapData] = Ordering.by(e => e.dist)
val Q = PriorityQueue[HeapData]()
for (eachCenter <- clustersMatched) {
for((eachClusterPoint, label) <- clusterPoints.get(eachCenter).get) {
val dist = point.fastSquaredDistance(eachClusterPoint)
var hd = new HeapData(dist , label)
if(Q.size < k) {
Q += hd
}
else if (dist < Q.head.dist) {
Q.dequeue()
Q += hd
}
}
}
var predictMap = muMap[Double, Int]()
for(i <- 0 to Q.size-1) {
var temp = Q.dequeue()
if(predictMap.contains(temp.label)) {
predictMap.put(temp.label, predictMap.get(temp.label).get + 1)
}
else {
predictMap.put(temp.label, 1)
}
}
var count: Int = Int.MinValue
var predictedLabel:Double = 0.0
for((key, value) <- predictMap) {
if(count < value) {
count = value
predictedLabel = key
}
}
var vector = new Array[Double](k)
for(i <- 0 to Q.size-1) {
vector(i) += Q.dequeue().label
}
val values = new ArrayBuffer[Any]
val rawPrediction = Vectors.dense(vector)
lazy val probability = raw2probability(rawPrediction)
lazy val prediction = predictedLabel
if ($(predictionCol).nonEmpty) {
values.append(prediction)
}
(i, values.toSeq)
}
dataset.sqlContext.createDataFrame(
dataset.rdd.zipWithUniqueId().map { case (row, i) => (i, row) }
.join(merged)
.map {
case (i, (row, values)) => Row.fromSeq(row.toSeq ++ values)
},
transformSchema(dataset.schema)
)
}
override def transformSchema(schema: StructType): StructType = {
var transformed = schema
if ($(predictionCol).nonEmpty) {
transformed = SchemaUtils.appendColumn(transformed, $(predictionCol), DoubleType)
}
transformed
}
override def copy(extra: ParamMap): KNNModel = {
val copied = new KNNModel(uid, k, numClasses, clusterCenters, clusterPointsRDD, clusterRadiusRDD)
copyValues(copied, extra).setParent(parent)
}
override protected def raw2probabilityInPlace(rawPrediction: Vector): Vector = {
rawPrediction match {
case dv: DenseVector =>
var i = 0
val size = dv.size
var sum = 0.0
while (i < size) {
sum += dv.values(i)
i += 1
}
i = 0
while (i < size) {
dv.values(i) /= sum
i += 1
}
dv
case sv: SparseVector =>
throw new RuntimeException("Unexpected error in KNNClassificationModel:" +
" raw2probabilitiesInPlace encountered SparseVector")
}
}
override protected def predictRaw(features: Vector): Vector = {
throw new SparkException("predictRaw function should not be called directly since kNN prediction is done in distributed fashion. Use transform instead.")
}
}
/**
 * VectorWithNorm can use a more efficient algorithm to calculate the distance
*/
case class VectorWithNorm(vector: Vector, norm: Double) {
def this(vector: Vector) = this(vector, Vectors.norm(vector, 2))
def this(vector: BV[Double]) = this(Vectors.fromBreeze(vector))
def fastSquaredDistance(v: VectorWithNorm): Double = {
KNNUtils.fastSquaredDistance(vector, norm, v.vector, v.norm)
}
def fastDistance(v: VectorWithNorm): Double = math.sqrt(fastSquaredDistance(v))
}
/**
* VectorWithNorm plus auxiliary row information
*/
case class RowWithVector(vector: VectorWithNorm, row: Row) {
def this(vector: Vector, row: Row) = this(new VectorWithNorm(vector), row)
}
|
arun4av/Spark_KNN
|
spark-knn/src/main/scala/org/apache/spark/ml/classification/KNN.scala
|
Scala
|
apache-2.0
| 9,975
|
package geotrellis.test.singleband.file
import geotrellis.config.Dataset
import geotrellis.raster.Tile
import geotrellis.spark._
import geotrellis.spark.io._
import geotrellis.test.FileTest
import geotrellis.test.singleband.load.S3Load
import geotrellis.vector.ProjectedExtent
import geotrellis.util.S3Support
import org.apache.spark.SparkContext
abstract class S3IngestTest(dataset: Dataset) extends FileTest[ProjectedExtent, SpatialKey, Tile](dataset) with S3Support with S3Load
object S3IngestTest {
def apply(implicit dataset: Dataset, _sc: SparkContext) = new S3IngestTest(dataset) {
@transient implicit val sc = _sc
}
}
|
geotrellis/geotrellis-integration-tests-tool
|
src/main/scala/geotrellis/test/singleband/file/S3IngestTest.scala
|
Scala
|
apache-2.0
| 638
|
object Solution {
def movs(arr:Array[Long]) = {
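// Slide the split point toward the heavier half until both sums are equal, then recurse
// on each half and keep the larger split count.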
def go(mid:Int,lefts:Long,rights:Long,mov:Int,lo:Int,hi:Int,le:Int):Int = {
if(lo > hi) return mov
if(lefts < rights && mid < hi) {
if(le == -1) return mov
go(mid+1,lefts+arr(mid),rights-arr(mid),mov,lo,hi,1)
} else if(lefts > rights && mid > 0) {
if(le == 1) return mov
go(mid-1,lefts-arr(mid-1),rights+arr(mid-1),mov,lo,hi,-1)
} else if(lefts == rights) {
val newhil = mid - 1
val newmidl = (lo + newhil) / 2
val newrightsl = (for(i <- newmidl to newhil) yield arr(i)).sum
val newleftsl = lefts - newrightsl
val msleft = go(newmidl,newleftsl,newrightsl,mov+1,lo,newhil,0)
val newlor = mid+1
val newmidr = (newlor + hi) / 2
val newrightsr = (for(i <- newmidr to hi) yield arr(i)).sum
val newleftsr = rights - newrightsr
val msright = go(newmidr,newleftsr,newrightsr,mov+1,newlor,hi,0)
math.max(msleft,msright)
} else mov
}
val mid = arr.length / 2
val lefts = (for(i <- 0 until mid) yield arr(i)).sum
val rights = (for(i <- mid until arr.length) yield arr(i)).sum
go(mid,lefts,rights,0, 0,arr.length-1,0)
}
def main(args:Array[String]) = {
val it = io.Source.stdin.getLines
val n = it.next.toInt
for(_ <- 0 until n) {
val sz = it.next.toInt
val arr = it.next.split(" ").map(_.toLong)
if(arr.sum > 0)
println(movs(arr))
else println(arr.length - 1)
}
}
}
|
marcos-sb/hacker-rank
|
algorithms/dynamic-programming/nikita-and-the-game/Solution.scala
|
Scala
|
apache-2.0
| 1,534
|
package main.scala.projectEulerScala
import scala.collection.mutable
// Answer: 4782 (index of the first 1000-digit Fibonacci number)
object P25_1000DigitFibonacciNumber {
var memo = mutable.Map[Int, BigInt]()
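// Memoised Fibonacci over BigInt, since 1000-digit values overflow primitive types.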
def f(n: Int): BigInt = {
if (n <= 2)
return 1
else if (memo.contains(n))
return memo(n)
val result: BigInt = f(n - 1) + f(n - 2)
memo(n) = result
result
}
def main(args: Array[String]) {
var len: Int = 0
var n: Int = 4
while (len < 1000) {
n += 1
len = f(n).toString.length
}
println(n + " " + len)
}
}
|
rck109d/projectEuler
|
src/main/scala/projectEulerScala/P25_1000DigitFibonacciNumber.scala
|
Scala
|
lgpl-3.0
| 535
|
package dsentric.contracts
import dsentric.failure.ClosedContractFailure
import dsentric.{DObject, Dsentric, PathEnd}
import org.scalatest.EitherValues
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
class ContractLensSpec extends AnyFunSpec with Matchers with EitherValues {
import Dsentric._
import dsentric.codecs.std.DCodecs._
import dsentric.Implicits._
object ClosedContract extends Contract {
val expected = \[String]
val maybe = \?[Int]
}
object OpenContract extends Contract with Open {
val expected = \[String]
val maybe = \?[Int]
}
describe("$verify") {
describe("Closed for additional properties") {
it("Should return empty list if no additional properties") {
val base = DObject("expected" := "value")
ClosedContract.$verify(base) shouldBe Symbol("right")
}
it("Should return ClosedContractFailure if additional properties") {
val base = DObject("expected" := "value", "additional" := 1)
ClosedContract.$verify(base).left.value should contain (ClosedContractFailure(ClosedContract, PathEnd, "additional"))
}
}
describe("Additional properties") {
it("Should return empty list if additional properties") {
val base = DObject("expected" := "value", "additional" := 1)
OpenContract.$verify(base) shouldBe Symbol("right")
}
}
}
// describe("$get") {
// describe("Closed for additional properties") {
// it("Should return object if no additional properties") {
// val base = DObject("expected" := "value")
// ClosedContract.$get(base).value shouldBe base
// }
// it("Should fail with ClosedContractFailure if additional properties") {
// val base = DObject("expected" := "value", "additional" := 1)
// ClosedContract.$get(base).left.value should contain(ClosedContractFailure(ClosedContract, PathEnd, "additional"))
// }
// }
// describe("Additional properties") {
// it("Should return object if additional properties") {
// val base = DObject("expected" := "value", "additional" := 1)
// OpenContract.$get(base).value shouldBe base
// }
// }
// }
}
|
HigherState/dsentric
|
maps/src/test/scala/dsentric/contracts/ContractLensSpec.scala
|
Scala
|
apache-2.0
| 2,221
|
package sgl.android.util
import sgl.util.JsonProvider
import org.json._
import scala.language.implicitConversions
trait AndroidJsonProvider extends JsonProvider {
object AndroidJson extends Json {
type JValue = Any
override def parse(raw: String): JValue = new JSONTokener(raw).nextValue()
class AndroidRichJsonAst(v: Any) extends RichJsonAst {
override def \ (field: String): JValue = v match {
case (o: JSONObject) => {
val r = o.opt(field)
if(r == null) JNothing else r
}
case _ => JNothing
}
}
override implicit def richJsonAst(ast: JValue): RichJsonAst = new AndroidRichJsonAst(ast)
object AndroidJNothing
type JNothing = AndroidJNothing.type
override val JNothing: JNothing = AndroidJNothing
type JNull = JSONObject.NULL.type
override val JNull: JNull = JSONObject.NULL
object AndroidJString extends JStringCompanion {
override def unapply(ast: JValue): Option[String] = ast match {
case (s: java.lang.String) => Some(s)
case _ => None
}
}
type JString = String
override val JString: JStringCompanion = AndroidJString
object AndroidJNumber extends JNumberCompanion {
override def unapply(ast: JValue): Option[Double] = ast match {
case (d: java.lang.Double) => Some(d)
case (f: java.lang.Float) => Some(f.toDouble)
case (i: java.lang.Integer) => Some(i.toDouble)
case (l: java.lang.Long) => Some(l.toDouble)
case _ => None
}
}
type JNumber = Double
override val JNumber: JNumberCompanion = AndroidJNumber
object AndroidJBoolean extends JBooleanCompanion {
override def unapply(ast: JValue): Option[Boolean] = ast match {
case (b: java.lang.Boolean) => Some(b)
case (b: Boolean) => Some(b)
case _ => None
}
}
type JBoolean = Boolean
override val JBoolean: JBooleanCompanion = AndroidJBoolean
object AndroidJObject extends JObjectCompanion {
override def unapply(ast: JValue): Option[List[JField]] = ast match {
case (o: JSONObject) => {
val buffy = new scala.collection.mutable.ListBuffer[(String, Any)]
val it = o.keys()
while(it.hasNext) {
val k = it.next()
buffy.append((k, o.get(k)))
}
Some(buffy.toList)
}
case _ => None
}
}
type JObject = JSONObject
override val JObject: JObjectCompanion = AndroidJObject
object AndroidJArray extends JArrayCompanion {
override def unapply(ast: JValue): Option[List[JValue]] = ast match {
case (a: JSONArray) => Some((0 until a.length()).map(i => a.get(i)).toList)
case _ => None
}
}
type JArray = JSONArray
override val JArray: JArrayCompanion = AndroidJArray
//type JField = (String, JValue)
}
override val Json: Json = AndroidJson
}
|
regb/scala-game-library
|
android/core/src/main/scala/sgl/android/util/AndroidJsonProvider.scala
|
Scala
|
mit
| 2,939
|
package es.uvigo.ei.sing.sds
package controller
import scala.concurrent.Future
import play.api.Play
import play.api.libs.json.{ Json, JsValue }
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import play.api.mvc._
import entity._
import annotator._
object AnnotatorController extends Controller with Authorization {
lazy val annotator = SDSSettings.annotator
def annotateOne(id: Article.ID): Action[AnyContent] =
AuthorizedAction(parse.anyContent) { _ => _ =>
annotator ! Annotate(id)
Accepted
}
def annotate: Action[JsValue] =
AuthorizedAction(parse.json) { _ => request =>
(request.body \\ "ids").validate[Set[Article.ID]].fold(
errors => BadRequest(Json.obj("err" -> errors.mkString("\\n"))),
ids => { ids.foreach(id => annotator ! Annotate(id)); Accepted }
)
}
}
|
agjacome/smart-drug-search
|
src/main/scala/controller/AnnotatorController.scala
|
Scala
|
mit
| 857
|
/*
* Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.mvc
import java.io.IOException
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import akka.util.ByteString
import org.specs2.mutable.Specification
import play.core.test.{ FakeHeaders, FakeRequest }
import scala.concurrent.Await
import scala.concurrent.duration.Duration
class MultipartBodyParserSpec extends Specification {
"Multipart body parser" should {
implicit val system = ActorSystem()
implicit val executionContext = system.dispatcher
implicit val materializer = ActorMaterializer()
val playBodyParsers = PlayBodyParsers(
tfc = new InMemoryTemporaryFileCreator(10))
"return an error if temporary file creation fails" in {
val fileSize = 100
val boundary = "-----------------------------14568445977970839651285587160"
val header =
s"--$boundary\r\n" +
"Content-Disposition: form-data; name=\"uploadedfile\"; filename=\"uploadedfile.txt\"\r\n" +
"Content-Type: application/octet-stream\r\n" +
"\r\n"
val content = Array.ofDim[Byte](fileSize)
val footer =
"\r\n" +
"\r\n" +
s"--$boundary--\r\n"
val body = Source(
ByteString(header) ::
ByteString(content) ::
ByteString(footer) ::
Nil)
val bodySize = header.length + fileSize + footer.length
val request = FakeRequest(
method = "POST",
uri = "/x",
headers = FakeHeaders(Seq(
"Content-Type" -> s"multipart/form-data; boundary=$boundary",
"Content-Length" -> bodySize.toString)),
body = body)
val response = playBodyParsers.multipartFormData.apply(request).run(body)
Await.result(response, Duration.Inf) must throwA[IOException]
}
}
}
|
Shenker93/playframework
|
framework/src/play/src/test/scala/play/api/mvc/MultipartBodyParserSpec.scala
|
Scala
|
apache-2.0
| 1,899
|
package org.powlab.jeye.decode.corrector
import org.junit.Assert._
import org.junit.Test
import org.powlab.jeye.decode.TestUtils.makeNopOpcode
import org.powlab.jeye.decode.TestUtils.makeOpcode
import org.powlab.jeye.decode.graph.OpcodeNode
import org.powlab.jeye.decode.corrector.TryCatchFinallyCorrector.isSameNode
import org.powlab.jeye.core.Opcodes
class TryCatchFinallyCorrectorTest {
/**
 * Test that two instructions are identical
*/
@Test
def testIsSame {
assertFalse(isSameNode(makeNopOpcode, null))
assertFalse(isSameNode(null, makeNopOpcode))
assertFalse(isSameNode(makeNopOpcode, makeOpcode(Opcodes.OPCODE_AALOAD)))
assertTrue(isSameNode(null, null))
assertTrue(isSameNode(makeNopOpcode, makeNopOpcode))
assertTrue(isSameNode(makeOpcode(Opcodes.OPCODE_AALOAD, Array(0, 1, 2)), makeOpcode(Opcodes.OPCODE_AALOAD, Array(0, 1, 2))))
}
}
|
powlab/jeye
|
src/test/scala/org/powlab/jeye/decode/corrector/TryCatchFinallyCorrectorTest.scala
|
Scala
|
apache-2.0
| 911
|
package dhg.ccg.tag.learn
import org.junit.Test
import dhg.util.CollectionUtil._
import dhg.util.StringUtil._
import org.junit.Assert._
import dhg.ccg.tag._
/*
* TODO: Test with unrestricted tag dictionary
*/
class SupHmmDistributionTests {
@Test
def test_tr_unsmoothed() {
val sentences = Vector(
"the|D dog|N walks|V",
"the|D man|N walks|V the|D dog|N",
"the|D man|N runs|V")
.map(_.lsplit(" ").map(_.split('|').toTuple2))
val tagdict = SimpleTagDictionary(Map(
"the" -> Set("D"),
"man" -> Set("N", "V"),
"walks" -> Set("V")),
"<S>", "<S>", "<E>", "<E>")
/* C(t1,t2)
* S D N V E total
* S 3 | 3
* D 4 | 4
* N 3 1 | 4
* V 1 2 | 3
* E
*/
val tr = new UnsmoothedTransitionDistributioner()(sentences, tagdict)
assertEquals(0.0, tr("<S>", "<S>"), 1e-5)
assertEquals(3 / 3.0, tr("D", "<S>"), 1e-5)
assertEquals(0.0, tr("N", "<S>"), 1e-5)
assertEquals(0.0, tr("V", "<S>"), 1e-5)
assertEquals(0.0, tr("default", "<S>"), 1e-5)
assertEquals(0.0, tr("<E>", "<S>"), 1e-5)
assertEquals(0.0, tr("<S>", "D"), 1e-5)
assertEquals(0.0, tr("D", "D"), 1e-5)
assertEquals(4 / 4.0, tr("N", "D"), 1e-5)
assertEquals(0.0, tr("V", "D"), 1e-5)
assertEquals(0.0, tr("default", "D"), 1e-5)
assertEquals(0.0, tr("<E>", "D"), 1e-5)
assertEquals(0.0, tr("<S>", "N"), 1e-5)
assertEquals(0.0, tr("D", "N"), 1e-5)
assertEquals(0.0, tr("N", "N"), 1e-5)
assertEquals(3 / 4.0, tr("V", "N"), 1e-5)
assertEquals(0.0, tr("default", "N"), 1e-5)
assertEquals(1 / 4.0, tr("<E>", "N"), 1e-5)
assertEquals(0.0, tr("<S>", "V"), 1e-5)
assertEquals(1 / 3.0, tr("D", "V"), 1e-5)
assertEquals(0.0, tr("N", "V"), 1e-5)
assertEquals(0.0, tr("V", "V"), 1e-5)
assertEquals(0.0, tr("default", "V"), 1e-5)
assertEquals(2 / 3.0, tr("<E>", "V"), 1e-5)
assertEquals(0.0, tr("<S>", "default"), 1e-5)
assertEquals(0.0, tr("D", "default"), 1e-5)
assertEquals(0.0, tr("N", "default"), 1e-5)
assertEquals(0.0, tr("V", "default"), 1e-5)
assertEquals(0.0, tr("default", "default"), 1e-5)
assertEquals(0.0, tr("default", "<E>"), 1e-5)
assertEquals(0.0, tr("<S>", "<E>"), 1e-5)
assertEquals(0.0, tr("D", "<E>"), 1e-5)
assertEquals(0.0, tr("N", "<E>"), 1e-5)
assertEquals(0.0, tr("V", "<E>"), 1e-5)
assertEquals(0.0, tr("default", "<E>"), 1e-5)
assertEquals(0.0, tr("<E>", "<E>"), 1e-5)
}
@Test
def test_em_unsmoothed() {
val sentences = Vector(
"a|D cat|N chases|V the|D walks|N",
"the|D dog|N walks|V",
"the|D man|N walks|V the|D dog|N",
"the|D man|N runs|V",
"the|N bird|V walks|D")
.map(_.lsplit(" ").map(_.split('|').toTuple2))
val tagdict = SimpleTagDictionary(Map(
"the" -> Set("D"),
"a" -> Set("D"),
"every" -> Set("D"),
"some" -> Set("D"),
"man" -> Set("N", "V"),
"cat" -> Set("N"),
"bird" -> Set("N"),
"fox" -> Set("N"),
"walks" -> Set("N", "V"),
"flies" -> Set("N", "V")),
"<S>", "<S>", "<E>", "<E>")
/* Words not in TD:
* chases
* dog
* runs
*/
/* C(t,w)
* D N V
* a 1
* cat 1
* chases 1
* the 5
* dog 2
* walks 1 2
* man 2
* runs 1
* - - -
* total 6 6 4
*/
val em = new UnsmoothedEmissionDistributioner()(sentences, tagdict)
assertEquals(1.0, em("<S>", "<S>"), 1e-5)
assertEquals(0.0, em("the", "<S>"), 1e-5)
assertEquals(0.0, em("a", "<S>"), 1e-5)
assertEquals(0.0, em("cat", "<S>"), 1e-5)
assertEquals(0.0, em("man", "<S>"), 1e-5)
assertEquals(0.0, em("walks", "<S>"), 1e-5)
assertEquals(0.0, em("dog", "<S>"), 1e-5)
assertEquals(0.0, em("runs", "<S>"), 1e-5)
assertEquals(0.0, em("chases", "<S>"), 1e-5)
assertEquals(0.0, em("default", "<S>"), 1e-5)
assertEquals(0.0, em("<E>", "<S>"), 1e-5)
assertEquals(0.0, em("<S>", "D"), 1e-5)
assertEquals(5 / 6.0, em("the", "D"), 1e-5)
assertEquals(1 / 6.0, em("a", "D"), 1e-5)
assertEquals(0.0, em("cat", "D"), 1e-5)
assertEquals(0.0, em("man", "D"), 1e-5)
assertEquals(0.0, em("walks", "D"), 1e-5)
assertEquals(0.0, em("dog", "D"), 1e-5)
assertEquals(0.0, em("runs", "D"), 1e-5)
assertEquals(0.0, em("chases", "D"), 1e-5)
assertEquals(0.0, em("default", "D"), 1e-5)
assertEquals(0.0, em("<E>", "D"), 1e-5)
assertEquals(0.0, em("<S>", "N"), 1e-5)
assertEquals(0.0, em("the", "N"), 1e-5)
assertEquals(0.0, em("a", "N"), 1e-5)
assertEquals(1 / 6.0, em("cat", "N"), 1e-5)
assertEquals(2 / 6.0, em("man", "N"), 1e-5)
assertEquals(1 / 6.0, em("walks", "N"), 1e-5)
assertEquals(2 / 6.0, em("dog", "N"), 1e-5)
assertEquals(0.0, em("runs", "N"), 1e-5)
assertEquals(0.0, em("chases", "N"), 1e-5)
assertEquals(0.0, em("default", "N"), 1e-5)
assertEquals(0.0, em("<E>", "N"), 1e-5)
assertEquals(0.0, em("<S>", "V"), 1e-5)
assertEquals(0.0, em("the", "V"), 1e-5)
assertEquals(0.0, em("a", "V"), 1e-5)
assertEquals(0.0, em("cat", "V"), 1e-5)
assertEquals(0.0, em("man", "V"), 1e-5)
assertEquals(2 / 4.0, em("walks", "V"), 1e-5)
assertEquals(0.0, em("dog", "V"), 1e-5)
assertEquals(1 / 4.0, em("runs", "V"), 1e-5)
assertEquals(1 / 4.0, em("chases", "V"), 1e-5)
assertEquals(0.0, em("default", "V"), 1e-5)
assertEquals(0.0, em("<E>", "V"), 1e-5)
assertEquals(0.0, em("<S>", "default"), 1e-5)
assertEquals(0.0, em("the", "default"), 1e-5)
assertEquals(0.0, em("a", "default"), 1e-5)
assertEquals(0.0, em("cat", "default"), 1e-5)
assertEquals(0.0, em("man", "default"), 1e-5)
assertEquals(0.0, em("walks", "default"), 1e-5)
assertEquals(0.0, em("dog", "default"), 1e-5)
assertEquals(0.0, em("runs", "default"), 1e-5)
assertEquals(0.0, em("chases", "default"), 1e-5)
assertEquals(0.0, em("default", "default"), 1e-5)
assertEquals(0.0, em("<E>", "default"), 1e-5)
assertEquals(0.0, em("<S>", "<E>"), 1e-5)
assertEquals(0.0, em("the", "<E>"), 1e-5)
assertEquals(0.0, em("a", "<E>"), 1e-5)
assertEquals(0.0, em("cat", "<E>"), 1e-5)
assertEquals(0.0, em("man", "<E>"), 1e-5)
assertEquals(0.0, em("walks", "<E>"), 1e-5)
assertEquals(0.0, em("dog", "<E>"), 1e-5)
assertEquals(0.0, em("runs", "<E>"), 1e-5)
assertEquals(0.0, em("chases", "<E>"), 1e-5)
assertEquals(0.0, em("default", "<E>"), 1e-5)
assertEquals(1.0, em("<E>", "<E>"), 1e-5)
}
@Test
def test_tr_addLambda() {
val sentences = Vector(
"the|D dog|N walks|V",
"the|D man|N walks|V the|D dog|N",
"the|D man|N runs|V")
.map(_.lsplit(" ").map(_.split('|').toTuple2))
val tagdict = SimpleTagDictionary(Map(
"the" -> Set("D"),
"man" -> Set("N", "V"),
"walks" -> Set("V")),
"<S>", "<S>", "<E>", "<E>")
/* C(t1,t2)
* S D N V E total
* S 3 | 3
* D 4 | 4
* N 3 1 | 4
* V 1 2 | 3
* E
*/
val tr = new AddLambdaTransitionDistributioner(0.2)(sentences, tagdict)
assertEquals(0.0, tr("<S>", "<S>"), 1e-5)
assertEquals((3 + 0.2) / (3 + 3 * 0.2), tr("D", "<S>"), 1e-5)
assertEquals((0 + 0.2) / (3 + 3 * 0.2), tr("N", "<S>"), 1e-5)
assertEquals((0 + 0.2) / (3 + 3 * 0.2), tr("V", "<S>"), 1e-5)
assertEquals((0 + 0.2) / (3 + 3 * 0.2), tr("default", "<S>"), 1e-5)
assertEquals(0.0, tr("<E>", "<S>"), 1e-5)
assertEquals(0.0, tr("<S>", "D"), 1e-5)
assertEquals((0 + 0.2) / (4 + 4 * 0.2), tr("D", "D"), 1e-5)
assertEquals((4 + 0.2) / (4 + 4 * 0.2), tr("N", "D"), 1e-5)
assertEquals((0 + 0.2) / (4 + 4 * 0.2), tr("V", "D"), 1e-5)
assertEquals((0 + 0.2) / (4 + 4 * 0.2), tr("default", "D"), 1e-5)
assertEquals((0 + 0.2) / (4 + 4 * 0.2), tr("<E>", "D"), 1e-5)
assertEquals(0.0, tr("<S>", "N"), 1e-5)
assertEquals((0 + 0.2) / (4 + 4 * 0.2), tr("D", "N"), 1e-5)
assertEquals((0 + 0.2) / (4 + 4 * 0.2), tr("N", "N"), 1e-5)
assertEquals((3 + 0.2) / (4 + 4 * 0.2), tr("V", "N"), 1e-5)
assertEquals((0 + 0.2) / (4 + 4 * 0.2), tr("default", "N"), 1e-5)
assertEquals((1 + 0.2) / (4 + 4 * 0.2), tr("<E>", "N"), 1e-5)
assertEquals(0.0, tr("<S>", "V"), 1e-5)
assertEquals((1 + 0.2) / (3 + 4 * 0.2), tr("D", "V"), 1e-5)
assertEquals((0 + 0.2) / (3 + 4 * 0.2), tr("N", "V"), 1e-5)
assertEquals((0 + 0.2) / (3 + 4 * 0.2), tr("V", "V"), 1e-5)
assertEquals((0 + 0.2) / (3 + 4 * 0.2), tr("default", "V"), 1e-5)
assertEquals((2 + 0.2) / (3 + 4 * 0.2), tr("<E>", "V"), 1e-5)
assertEquals(0.0, tr("<S>", "default"), 1e-5)
assertEquals((0 + 0.2) / (0 + 4 * 0.2), tr("D", "default"), 1e-5)
assertEquals((0 + 0.2) / (0 + 4 * 0.2), tr("N", "default"), 1e-5)
assertEquals((0 + 0.2) / (0 + 4 * 0.2), tr("V", "default"), 1e-5)
assertEquals((0 + 0.2) / (0 + 4 * 0.2), tr("default", "default"), 1e-5)
assertEquals((0 + 0.2) / (0 + 4 * 0.2), tr("<E>", "default"), 1e-5)
assertEquals(0.0, tr("<S>", "<E>"), 1e-5)
assertEquals(0.0, tr("D", "<E>"), 1e-5)
assertEquals(0.0, tr("N", "<E>"), 1e-5)
assertEquals(0.0, tr("V", "<E>"), 1e-5)
assertEquals(0.0, tr("default", "<E>"), 1e-5)
assertEquals(0.0, tr("<E>", "<E>"), 1e-5)
}
@Test
def test_em_addLambda() {
val sentences = Vector(
"a|D cat|N chases|V the|D walks|N",
"the|D dog|N walks|V",
"the|D man|N walks|V the|D dog|N",
"the|D man|N runs|V",
"the|N bird|V walks|D")
.map(_.lsplit(" ").map(_.split('|').toTuple2))
val tagdict = SimpleTagDictionary(Map(
"the" -> Set("D"),
"a" -> Set("D"),
"big" -> Set("A"),
"good" -> Set("A", "N"),
"every" -> Set("D"),
"some" -> Set("D"),
"man" -> Set("N", "V"),
"cat" -> Set("N"),
"bird" -> Set("N"),
"fox" -> Set("N"),
"walks" -> Set("N", "V"),
"flies" -> Set("N", "V")),
"<S>", "<S>", "<E>", "<E>")
/* Words not in TD:
* chases
* dog
* runs
*/
/* C(t,w)
* D N V
* a 1
* cat 1
* chases 1
* the 5
* dog 2
* walks 1 2
* man 2
* runs 1
* - - -
* total 6 6 4
*/
val em = new AddLambdaEmissionDistributioner(0.2)(sentences, tagdict)
assertEquals(1.0, em("<S>", "<S>"), 1e-5)
assertEquals(0.0, em("a", "<S>"), 1e-5)
assertEquals(0.0, em("bird", "<S>"), 1e-5)
assertEquals(0.0, em("cat", "<S>"), 1e-5)
assertEquals(0.0, em("chases", "<S>"), 1e-5)
assertEquals(0.0, em("dog", "<S>"), 1e-5)
assertEquals(0.0, em("every", "<S>"), 1e-5)
assertEquals(0.0, em("flies", "<S>"), 1e-5)
assertEquals(0.0, em("fox", "<S>"), 1e-5)
assertEquals(0.0, em("man", "<S>"), 1e-5)
assertEquals(0.0, em("runs", "<S>"), 1e-5)
assertEquals(0.0, em("some", "<S>"), 1e-5)
assertEquals(0.0, em("the", "<S>"), 1e-5)
assertEquals(0.0, em("walks", "<S>"), 1e-5)
assertEquals(0.0, em("big", "<S>"), 1e-5)
assertEquals(0.0, em("good", "<S>"), 1e-5)
assertEquals(0.0, em("default", "<S>"), 1e-5)
assertEquals(0.0, em("<E>", "<S>"), 1e-5)
assertEquals(0.0, em("<S>", "D"), 1e-5)
assertEquals((1 + 0.2) / (6 + 7 * 0.2), em("a", "D"), 1e-5)
assertEquals(0.0, em("bird", "D"), 1e-5)
assertEquals(0.0, em("cat", "D"), 1e-5)
assertEquals((0 + 0.2) / (6 + 7 * 0.2), em("chases", "D"), 1e-5)
assertEquals((0 + 0.2) / (6 + 7 * 0.2), em("dog", "D"), 1e-5)
assertEquals((0 + 0.2) / (6 + 7 * 0.2), em("every", "D"), 1e-5)
assertEquals(0.0, em("flies", "D"), 1e-5)
assertEquals(0.0, em("fox", "D"), 1e-5)
assertEquals(0.0, em("man", "D"), 1e-5)
assertEquals((0 + 0.2) / (6 + 7 * 0.2), em("runs", "D"), 1e-5)
assertEquals((0 + 0.2) / (6 + 7 * 0.2), em("some", "D"), 1e-5)
assertEquals((5 + 0.2) / (6 + 7 * 0.2), em("the", "D"), 1e-5)
assertEquals(0.0, em("walks", "D"), 1e-5)
assertEquals(0.0, em("big", "D"), 1e-5)
assertEquals(0.0, em("good", "D"), 1e-5)
assertEquals((0 + 0.2) / (6 + 7 * 0.2), em("default", "D"), 1e-5)
assertEquals(0.0, em("<E>", "D"), 1e-5)
assertEquals(0.0, em("<S>", "N"), 1e-5)
assertEquals(0.0, em("a", "N"), 1e-5)
assertEquals((0 + 0.2) / (6 + 10 * 0.2), em("bird", "N"), 1e-5)
assertEquals((1 + 0.2) / (6 + 10 * 0.2), em("cat", "N"), 1e-5)
assertEquals((0 + 0.2) / (6 + 10 * 0.2), em("chases", "N"), 1e-5)
assertEquals((2 + 0.2) / (6 + 10 * 0.2), em("dog", "N"), 1e-5)
assertEquals(0.0, em("every", "N"), 1e-5)
assertEquals((0 + 0.2) / (6 + 10 * 0.2), em("flies", "N"), 1e-5)
assertEquals((0 + 0.2) / (6 + 10 * 0.2), em("fox", "N"), 1e-5)
assertEquals((2 + 0.2) / (6 + 10 * 0.2), em("man", "N"), 1e-5)
assertEquals((0 + 0.2) / (6 + 10 * 0.2), em("runs", "N"), 1e-5)
assertEquals(0.0, em("some", "N"), 1e-5)
assertEquals(0.0, em("the", "N"), 1e-5)
assertEquals((1 + 0.2) / (6 + 10 * 0.2), em("walks", "N"), 1e-5)
assertEquals(0.0, em("big", "N"), 1e-5)
assertEquals((0 + 0.2) / (6 + 10 * 0.2), em("good", "N"), 1e-5)
assertEquals((0 + 0.2) / (6 + 10 * 0.2), em("default", "N"), 1e-5)
assertEquals(0.0, em("<E>", "N"), 1e-5)
assertEquals(0.0, em("<S>", "V"), 1e-5)
assertEquals(0.0, em("a", "V"), 1e-5)
assertEquals(0.0, em("bird", "V"), 1e-5)
assertEquals(0.0, em("cat", "V"), 1e-5)
assertEquals((1 + 0.2) / (4 + 6 * 0.2), em("chases", "V"), 1e-5)
assertEquals((0 + 0.2) / (4 + 6 * 0.2), em("dog", "V"), 1e-5)
assertEquals(0.0, em("every", "V"), 1e-5)
assertEquals((0 + 0.2) / (4 + 6 * 0.2), em("flies", "V"), 1e-5)
assertEquals(0.0, em("fox", "V"), 1e-5)
assertEquals((0 + 0.2) / (4 + 6 * 0.2), em("man", "V"), 1e-5)
assertEquals((1 + 0.2) / (4 + 6 * 0.2), em("runs", "V"), 1e-5)
assertEquals(0.0, em("some", "V"), 1e-5)
assertEquals(0.0, em("the", "V"), 1e-5)
assertEquals((2 + 0.2) / (4 + 6 * 0.2), em("walks", "V"), 1e-5)
assertEquals(0.0, em("big", "V"), 1e-5)
assertEquals(0.0, em("good", "V"), 1e-5)
assertEquals((0 + 0.2) / (4 + 6 * 0.2), em("default", "V"), 1e-5)
assertEquals(0.0, em("<E>", "V"), 1e-5)
assertEquals(0.0, em("<S>", "A"), 1e-5)
assertEquals(0.0, em("a", "A"), 1e-5)
assertEquals(0.0, em("bird", "A"), 1e-5)
assertEquals(0.0, em("cat", "A"), 1e-5)
assertEquals((0 + 0.2) / (0 + 5 * 0.2), em("chases", "A"), 1e-5)
assertEquals((0 + 0.2) / (0 + 5 * 0.2), em("dog", "A"), 1e-5)
assertEquals(0.0, em("every", "A"), 1e-5)
assertEquals(0.0, em("flies", "A"), 1e-5)
assertEquals(0.0, em("fox", "A"), 1e-5)
assertEquals(0.0, em("man", "A"), 1e-5)
assertEquals((0 + 0.2) / (0 + 5 * 0.2), em("runs", "A"), 1e-5)
assertEquals(0.0, em("some", "A"), 1e-5)
assertEquals(0.0, em("the", "A"), 1e-5)
assertEquals(0.0, em("walks", "A"), 1e-5)
assertEquals((0 + 0.2) / (0 + 5 * 0.2), em("big", "A"), 1e-5)
assertEquals((0 + 0.2) / (0 + 5 * 0.2), em("good", "A"), 1e-5)
assertEquals((0 + 0.2) / (0 + 5 * 0.2), em("default", "A"), 1e-5)
assertEquals(0.0, em("<E>", "A"), 1e-5)
assertEquals(0.0, em("<S>", "default"), 1e-5)
assertEquals(0.0, em("a", "default"), 1e-5)
assertEquals(0.0, em("bird", "default"), 1e-5)
assertEquals(0.0, em("cat", "default"), 1e-5)
assertEquals((0 + 0.2) / (0 + 3 * 0.2), em("chases", "default"), 1e-5)
assertEquals((0 + 0.2) / (0 + 3 * 0.2), em("dog", "default"), 1e-5)
assertEquals(0.0, em("every", "default"), 1e-5)
assertEquals(0.0, em("flies", "default"), 1e-5)
assertEquals(0.0, em("fox", "default"), 1e-5)
assertEquals(0.0, em("man", "default"), 1e-5)
assertEquals((0 + 0.2) / (0 + 3 * 0.2), em("runs", "default"), 1e-5)
assertEquals(0.0, em("some", "default"), 1e-5)
assertEquals(0.0, em("the", "default"), 1e-5)
assertEquals(0.0, em("walks", "default"), 1e-5)
assertEquals(0.0, em("big", "default"), 1e-5)
assertEquals(0.0, em("good", "default"), 1e-5)
assertEquals((0 + 0.2) / (0 + 3 * 0.2), em("default", "default"), 1e-5)
assertEquals(0.0, em("<E>", "default"), 1e-5)
assertEquals(0.0, em("<S>", "<E>"), 1e-5)
assertEquals(0.0, em("a", "<E>"), 1e-5)
assertEquals(0.0, em("bird", "<E>"), 1e-5)
assertEquals(0.0, em("cat", "<E>"), 1e-5)
assertEquals(0.0, em("chases", "<E>"), 1e-5)
assertEquals(0.0, em("dog", "<E>"), 1e-5)
assertEquals(0.0, em("every", "<E>"), 1e-5)
assertEquals(0.0, em("flies", "<E>"), 1e-5)
assertEquals(0.0, em("fox", "<E>"), 1e-5)
assertEquals(0.0, em("man", "<E>"), 1e-5)
assertEquals(0.0, em("runs", "<E>"), 1e-5)
assertEquals(0.0, em("some", "<E>"), 1e-5)
assertEquals(0.0, em("the", "<E>"), 1e-5)
assertEquals(0.0, em("walks", "<E>"), 1e-5)
assertEquals(0.0, em("big", "<E>"), 1e-5)
assertEquals(0.0, em("good", "<E>"), 1e-5)
assertEquals(0.0, em("default", "<E>"), 1e-5)
assertEquals(1.0, em("<E>", "<E>"), 1e-5)
}
}
|
dhgarrette/2014-ccg-supertagging
|
src/test/scala/dhg/ccg/tag/learn/SupHmmDistributionerTests.scala
|
Scala
|
apache-2.0
| 17,211
|
/*******************************************************************************
Copyright (c) 2012-2014, S-Core, KAIST.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.cfg
import _root_.java.util.{List => JList}
import scala.collection.mutable.{Set => MSet}
import scala.collection.mutable.{HashSet => MHashSet}
import scala.collection.immutable.HashSet
import kr.ac.kaist.jsaf.useful.HasAt
import kr.ac.kaist.jsaf.exceptions.StaticError
import kr.ac.kaist.jsaf.nodes._
import kr.ac.kaist.jsaf.scala_src.nodes._
import kr.ac.kaist.jsaf.scala_src.useful.ErrorLog
import kr.ac.kaist.jsaf.scala_src.useful.Lists._
class CapturedVariableCollector {
/* Error handling
* The signal function collects errors during the disambiguation phase.
* To collect multiple errors,
* we should return a dummy value after signaling an error.
*/
val errors: ErrorLog = new ErrorLog
def signal(msg:String, hasAt:HasAt) = errors.signal(msg, hasAt)
def signal(hasAt:HasAt, msg:String) = errors.signal(msg, hasAt)
def signal(error: StaticError) = errors.signal(error)
def getErrors(): JList[StaticError] = toJavaList(errors.errors)
val captured: MSet[String] = MHashSet()
def collect(ir: IRRoot): MSet[String] = {
ir match {
case SIRRoot(info, fds, vds, stmts) =>
fds.foreach(checkFunDecl)
checkStmts(stmts, HashSet[String]())
}
captured
}
private def checkId(id: IRId, locals: Set[String]) = id match {
case SIRUserId(_, _, uniqueName, false, _) =>
if (!locals.contains(uniqueName)) {
captured.add(uniqueName)
}
case SIRUserId(_, _, _, true, _) => ()
case SIRTmpId(_, _, _, _) => ()
}
private def checkFunDecl(fd: IRFunDecl): Unit = fd match {
case SIRFunDecl(_, func) => checkFunctional(func)
}
private def checkFunctional(func: IRFunctional): Unit = func match {
case SIRFunctional(_,name,params,args,fds,vds,body) =>
val locals = namesOfArgs(args) ++ namesOfFunDecls(fds) ++ namesOfVars(vds)
fds.foreach(checkFunDecl)
checkStmts(body, locals)
}
private def namesOfFunDecls(fds: List[IRFunDecl]): Set[String] = {
fds.foldLeft(HashSet[String]())((set, fd) => set + fd.getFtn.getName.getUniqueName)
}
// flatten IRSeq
private def flatten(stmts: List[IRStmt]): List[IRStmt] =
stmts.foldRight(List[IRStmt]())((s, l) =>
if (s.isInstanceOf[IRSeq])
toList(s.asInstanceOf[IRSeq].getStmts) ++ l
else List(s) ++ l)
private def namesOfArgs(loads: List[IRStmt]): Set[String] = {
// for Concolic testing
// arguments may not be a list of IRExprStmts
// because concolic testing uses compiler.IRSimplifier
// to move IRBin, IRUn, and IRLoad out of IRExpr
/*
loads.asInstanceOf[List[IRExprStmt]].foldLeft(HashSet[String]())((set, load) =>
set + load.getLhs.getUniqueName)
*/
flatten(loads).foldLeft(HashSet[String]())((set, load) =>
if (load.isInstanceOf[IRExprStmt]) {
val name = load.asInstanceOf[IRExprStmt].getLhs
if (name.isInstanceOf[IRUserId] ||
name.getOriginalName.startsWith("<>arguments"))
set + load.asInstanceOf[IRExprStmt].getLhs.getUniqueName
else set
} else set)
}
private def namesOfVars(vds: List[IRVarStmt]): Set[String] = {
vds.foldLeft(HashSet[String]())((set, vd) => set + vd.getLhs.getUniqueName)
}
private def checkStmts(stmts: List[IRStmt], locals: Set[String]): Unit = {
stmts.foreach(stmt => checkStmt(stmt, locals))
}
private def checkStmt(stmt: IRStmt, locals: Set[String]): Unit = stmt match {
case SIRNoOp(irinfo, desc) => ()
case SIRStmtUnit(irinfo, stmts) => checkStmts(stmts, locals)
case SIRSeq(irinfo, stmts) => checkStmts(stmts, locals)
case vd:IRVarStmt =>
signal("IRVarStmt should have been hoisted.", vd)
case fd:IRFunDecl =>
signal("IRFunDecl should have been hoisted.", fd)
case SIRFunExpr(irinfo, lhs, func) =>
checkId(lhs, locals)
checkFunctional(func)
case SIRObject(irinfo, lhs, members, proto) =>
checkId(lhs, locals)
members.foreach((m) => checkMember(m, locals))
proto match {
case Some(p) => checkId(p, locals)
case None => ()
}
case SIRTry(irinfo, body, name, catchB, finallyB) =>
checkStmt(body, locals)
(name, catchB) match {
case (Some(x), Some(stmt)) => checkStmt(stmt, locals + x.getUniqueName)
case (None, None) => ()
case _ => signal("Wrong IRTryStmt.", stmt)
}
finallyB match {
case Some(stmt) => checkStmt(stmt, locals)
case None => ()
}
case SIRArgs(irinfo, lhs, elements) =>
checkId(lhs, locals)
checkExprOptList(elements, locals)
case SIRArray(irinfo, lhs, elements) =>
checkId(lhs, locals)
checkExprOptList(elements, locals)
case SIRArrayNumber(irinfo, lhs, elements) => checkId(lhs, locals)
case SIRBreak(irinfo, label) => ()
case SIRInternalCall(irinfo, lhs, fun, arg1, arg2) =>
checkId(lhs, locals)
checkId(fun, locals)
checkExpr(arg1, locals)
checkExprOpt(arg2, locals)
case SIRCall(irinfo, lhs, fun, thisB, args) =>
checkId(lhs, locals)
checkId(fun, locals)
checkId(thisB, locals)
checkId(args, locals)
case SIRNew(irinfo, lhs, cons, args) if (args.length == 2) =>
checkId(lhs, locals)
checkId(cons, locals)
checkId(args(0), locals)
checkId(args(1), locals)
case c@SIRNew(irinfo, lhs, fun, args) =>
signal("IRNew should have two elements in args.", c)
case SIRDelete(irinfo, lhs, id) =>
checkId(lhs, locals)
checkId(id, locals)
case SIRDeleteProp(irinfo, lhs, obj, index) =>
checkId(lhs, locals)
checkId(obj, locals)
checkExpr(index, locals)
case SIRExprStmt(irinfo, lhs, expr, _) =>
checkId(lhs, locals)
checkExpr(expr, locals)
case SIRIf(irinfo, cond, trueblock, falseblock) =>
checkExpr(cond, locals)
checkStmt(trueblock, locals)
falseblock match {
case Some(block) => checkStmt(block, locals)
case None => ()
}
case SIRLabelStmt(irinfo, label, stmt) => checkStmt(stmt, locals)
case SIRReturn(irinfo, expr) => checkExprOpt(expr, locals)
case SIRStore(irinfo, obj, index, rhs) =>
checkId(obj, locals)
checkExpr(index, locals)
checkExpr(rhs, locals)
case SIRThrow(irinfo, expr) => checkExpr(expr, locals)
case SIRWhile(irinfo, cond, body) =>
checkExpr(cond, locals)
checkStmt(body, locals)
case _ => {
System.err.println("* Warning: following IR statement is ignored: "+ stmt)
}
}
private def checkMember(mem: IRMember, locals: Set[String]): Unit = {
mem match {
case SIRField(irinfo, prop, expr) => checkExpr(expr, locals)
case getOrSet =>
signal("IRGetProp, IRSetProp is not supported.", getOrSet)
Unit
}
}
private def checkExprOptList(list: List[Option[IRExpr]], locals: Set[String]): Unit = {
list.foreach(exprOpt => checkExprOpt(exprOpt, locals))
}
private def checkExprOpt(exprOpt: Option[IRExpr], locals: Set[String]): Unit = exprOpt match {
case Some(expr) => checkExpr(expr, locals)
case None => ()
}
private def checkExpr(expr: IRExpr, locals: Set[String]): Unit = expr match {
case SIRLoad(_, obj, index) =>
checkId(obj, locals)
checkExpr(index, locals)
case SIRBin(_, first, op, second) =>
checkExpr(first, locals)
checkExpr(second, locals)
case SIRUn(_, op, expr) => checkExpr(expr, locals)
case id:IRId => checkId(id, locals)
case _:IRThis => ()
case _:IRNumber => ()
case _:IRString => ()
case _:IRBool => ()
case _:IRNull => ()
}
}
|
darkrsw/safe
|
src/main/scala/kr/ac/kaist/jsaf/analysis/cfg/CapturedVariableCollector.scala
|
Scala
|
bsd-3-clause
| 8,666
|
/*
Copyright 2013 Stephen K Samuel
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.sksamuel.scrimage.filter
import com.sksamuel.scrimage.BufferedOpFilter
/** @author Stephen Samuel */
class ChromeFilter(amount: Float, exposure: Float) extends BufferedOpFilter {
val op = new thirdparty.jhlabs.image.ChromeFilter()
op.setAmount(amount)
op.setExposure(exposure)
}
object ChromeFilter {
def apply(): ChromeFilter = apply(0.5f, 1.0f)
def apply(amount: Float, exposure: Float): ChromeFilter = new ChromeFilter(amount, exposure)
}
|
carlosFattor/scrimage
|
scrimage-filters/src/main/scala/com/sksamuel/scrimage/filter/ChromeFilter.scala
|
Scala
|
apache-2.0
| 1,059
|
package org.scalaide.core.internal.hyperlink
import org.eclipse.jface.text.IRegion
import org.eclipse.jface.text.ITextViewer
import org.eclipse.jface.text.hyperlink.AbstractHyperlinkDetector
import org.eclipse.jface.text.hyperlink.IHyperlink
import org.eclipse.ui.texteditor.ITextEditor
import org.scalaide.core.compiler.InteractiveCompilationUnit
import org.scalaide.util.eclipse.EditorUtils
import scala.concurrent.ExecutionContext.Implicits._
import scala.concurrent.Future
import scala.concurrent.Await
import scala.concurrent.duration._
import java.util.concurrent.TimeoutException
import org.scalaide.core.IScalaPlugin
import org.scalaide.logging.HasLogger
import org.eclipse.ui.texteditor.ITextEditor
abstract class BaseHyperlinkDetector extends AbstractHyperlinkDetector with HasLogger {
val TIMEOUT = if (IScalaPlugin().noTimeoutMode) Duration.Inf else 500.millis
final override def detectHyperlinks(viewer: ITextViewer, currentSelection: IRegion, canShowMultipleHyperlinks: Boolean): Array[IHyperlink] = {
val textEditor = getAdapter(classOf[ITextEditor]).asInstanceOf[ITextEditor]
detectHyperlinks(textEditor, currentSelection, canShowMultipleHyperlinks)
}
/** The Eclipse platform calls this method on the UI thread, so we do our best, but return null
   * after the TIMEOUT duration. 500 ms is enough to locate most hyperlinks, but it may time out for
* the first request on a fresh project.
*
* That seems a better experience than freezing the editor for an undetermined amount of time.
*/
final def detectHyperlinks(textEditor: ITextEditor, currentSelection: IRegion, canShowMultipleHyperlinks: Boolean): Array[IHyperlink] = {
if (textEditor == null) null // can be null if generated through ScalaPreviewerFactory
else {
EditorUtils.getEditorCompilationUnit(textEditor) match {
case Some(scu) =>
val hyperlinks = Future { runDetectionStrategy(scu, textEditor, currentSelection) }
try {
Await.result(hyperlinks, TIMEOUT) match {
// I know you will be tempted to remove this, but don't do it, JDT expects null when no hyperlinks are found.
case Nil => null
case links =>
if (canShowMultipleHyperlinks) links.toArray
else Array(links.head)
}
} catch {
case _: TimeoutException =>
eclipseLog.info("Timeout while resolving hyperlink in " + scu.file + " at: " + currentSelection)
null
}
case None => null
}
}
}
protected def runDetectionStrategy(scu: InteractiveCompilationUnit, textEditor: ITextEditor, currentSelection: IRegion): List[IHyperlink]
}
|
scala-ide/scala-ide
|
org.scala-ide.sdt.core/src/org/scalaide/core/internal/hyperlink/BaseHyperlinkDetector.scala
|
Scala
|
bsd-3-clause
| 2,721
|
def a(using i: => Int ) = () // ok
def b(using => Int ) = () // ok
|
dotty-staging/dotty
|
tests/pos/i11997.scala
|
Scala
|
apache-2.0
| 71
|
package com.scalableQuality.quick.core.valueMapping
case object Trim extends ValueMapperFunction {
override def apply(value: Option[String]) = value.map(_.trim)
def apply(shouldInclude: Boolean): List[ValueMapperFunction] =
if (shouldInclude) List(Trim) else Nil
}
|
MouslihAbdelhakim/Quick
|
src/main/scala/com/scalableQuality/quick/core/valueMapping/Trim.scala
|
Scala
|
apache-2.0
| 275
|
package rere.ql.types
trait PrimaryKey {
type Reql <: ReqlDatum
type Scala
}
object PrimaryKey {
type Int = PrimaryKey {
type Reql = ReqlInteger
type Scala = scala.Int
}
type Long = PrimaryKey {
type Reql = ReqlInteger
type Scala = scala.Long
}
type String = PrimaryKey {
type Reql = ReqlString
type Scala = java.lang.String
}
type UUID = PrimaryKey {
type Reql = ReqlUUID
type Scala = java.util.UUID
}
type Time = PrimaryKey {
type Reql = ReqlTime
type Scala = java.time.ZonedDateTime
}
}
|
pbaun/rere
|
modules/ql/src/main/scala/rere/ql/types/PrimaryKey.scala
|
Scala
|
apache-2.0
| 559
|
package pl.writeonly.son2.vaadin.complexes
import com.vaadin.ui.TextArea
import pl.writeonly.son2.vaadin.util.UIUtil
class ComplexIO {
val input = inputTextArea
val output = ComplexIO.outputLabel
def inputTextArea: TextArea = ComplexIO.inputTextArea("Input json:")
}
object ComplexIO extends UIUtil {
}
|
writeonly/scalare
|
scalare-adin/src/main/scala/pl/writeonly/son2/vaadin/complexes/ComplexIO.scala
|
Scala
|
artistic-2.0
| 314
|
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.actions
import com.google.inject.ImplementedBy
import config.AppConfig
import javax.inject.{Inject, Singleton}
import play.api.mvc.Results._
import play.api.mvc._
import uk.gov.hmrc.auth.core._
import uk.gov.hmrc.http.HeaderCarrier
import uk.gov.hmrc.play.http.HeaderCarrierConverter
import scala.concurrent.{ExecutionContext, Future}
@Singleton
class MinimalAuthActionImpl @Inject()(
override val authConnector: AuthConnector,
val parser: BodyParsers.Default,
config: AppConfig
)(implicit val executionContext: ExecutionContext)
extends MinimalAuthAction with AuthorisedFunctions {
override def invokeBlock[A](request: Request[A], block: Request[A] => Future[Result]): Future[Result] = {
implicit val hc: HeaderCarrier =
HeaderCarrierConverter.fromRequestAndSession(request, request.session)
authorised(ConfidenceLevel.L50) {
block(request)
} recover {
case ex: NoActiveSession =>
Redirect(config.authSignIn, Map("continue" -> Seq(config.loginCallbackUrl), "origin" -> Seq("pbik-frontend")))
}
}
}
@ImplementedBy(classOf[MinimalAuthActionImpl])
trait MinimalAuthAction extends ActionBuilder[Request, AnyContent] with ActionFunction[Request, Request]
|
hmrc/pbik-frontend
|
app/controllers/actions/MinimalAuthAction.scala
|
Scala
|
apache-2.0
| 1,838
|
package org.openapitools.client.model
case class PipelineBranchesitemlatestRun (
_durationInMillis: Option[Integer],
_estimatedDurationInMillis: Option[Integer],
_enQueueTime: Option[String],
_endTime: Option[String],
_id: Option[String],
_organization: Option[String],
_pipeline: Option[String],
_result: Option[String],
_runSummary: Option[String],
_startTime: Option[String],
_state: Option[String],
_type: Option[String],
_commitId: Option[String],
_class: Option[String]
)
object PipelineBranchesitemlatestRun {
def toStringBody(var_durationInMillis: Object, var_estimatedDurationInMillis: Object, var_enQueueTime: Object, var_endTime: Object, var_id: Object, var_organization: Object, var_pipeline: Object, var_result: Object, var_runSummary: Object, var_startTime: Object, var_state: Object, var_type: Object, var_commitId: Object, var_class: Object) =
s"""
| {
| "durationInMillis":$var_durationInMillis,"estimatedDurationInMillis":$var_estimatedDurationInMillis,"enQueueTime":$var_enQueueTime,"endTime":$var_endTime,"id":$var_id,"organization":$var_organization,"pipeline":$var_pipeline,"result":$var_result,"runSummary":$var_runSummary,"startTime":$var_startTime,"state":$var_state,"type":$var_type,"commitId":$var_commitId,"class":$var_class
| }
""".stripMargin
}
|
cliffano/swaggy-jenkins
|
clients/scala-gatling/generated/src/gatling/scala/org/openapitools/client/model/PipelineBranchesitemlatestRun.scala
|
Scala
|
mit
| 1,378
|
/*
* Copyright 2001-2009 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
/**
* Trait implemented by exception types that can append a new clue to the end
* of the exception's detail message.
*
* <p>
* This trait facilitates the <code>withClue</code> construct provided by trait
* <code>Assertions</code>. This construct enables extra information (or "clues") to
* be included in the detail message of a thrown exception. Although both
* <code>assert</code> and <code>expect</code> provide a way for a clue to be
* included directly, <code>intercept</code> and ScalaTest matcher expressions
* do not. Here's an example of clues provided directly in <code>assert</code>:
* </p>
*
* <pre>
* assert(1 + 1 === 3, "this is a clue")
* </pre>
*
* <p>
* and in <code>expect</code>:
* </p>
*
* <pre>
* expect(3, "this is a clue") { 1 + 1 }
* </pre>
*
* <p>
* The exceptions thrown by the previous two statements will include the clue
 * string, <code>"this is a clue"</code>, in the exception's detail message.
* To get the same clue in the detail message of an exception thrown
* by a failed <code>intercept</code> call requires using <code>withClue</code>:
* </p>
*
* <pre>
* withClue("this is a clue") {
* intercept[IndexOutOfBoundsException] {
* "hi".charAt(-1)
* }
* }
* </pre>
*
* <p>
* Similarly, to get a clue in the exception resulting from an exception arising out
* of a ScalaTest matcher expression, you need to use <code>withClue</code>. Here's
* an example:
* </p>
*
* <pre>
* withClue("this is a clue") {
* 1 + 1 should be === 3
* }
* </pre>
*
* <p>
* Exception types that mix in this trait have an <code>appendClue</code> method, which
* returns an exception identical to itself, except with the result of invoking
* <code>toString</code> on the passed <code>clue</code> appended to the exception's
* detail message, separated by a newline character (or replacing the detail message
* if it was previously <code>null</code>).
* </p>
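 *
 * <p>
 * A minimal sketch of the resulting detail message, assuming an exception <code>e</code>
 * that mixes in this trait and whose detail message is <code>"1 did not equal 2"</code>:
 * </p>
 *
 * <pre>
 * e.appendClue("this is a clue").getMessage
 * // "1 did not equal 2\nthis is a clue"
 * </pre>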
*/
private[scalatest] trait AppendClueMethod[T <: Throwable] { this: Throwable =>
/**
* Returns an instance of this exception's class, identical to this exception,
* except with the string resulting from invoking <code>toString</code> on
* the passed <code>clue</code> appended to its detail message, separated by
* a newline character. Or, if this exception's detail message is <code>null</code>,
* returns an instance of this exception's class that is identical to this exception,
* except with the detail message equal to the result of invoking <code>toString</code>
* on the passed <code>clue</code>.
*
* <p>
* Implementations of this method may either mutate this exception or return
* a new instance with the revised detail message. If the result of invoking
* <code>toString</code> on the passed <code>clue</code> is null or, after being
* trimmed, an empty string, this method returns this exception unchanged.
* </p>
*
* @param clue An object whose <code>toString</code> method returns a message to append
* to this exception's detail message.
*/
def appendClue(clue: Any): T
}
|
kevinwright/scalatest
|
src/main/scala/org/scalatest/AppendClueMethod.scala
|
Scala
|
apache-2.0
| 3,708
|
package sclib.io
import java.nio.file.{Path, Paths}
import sclib.ops.all._
import scala.util.Try
/**
* working with files and directories
*
* ===get a file / directory handle===
*
* to get a file handle, use any of the following functions:
* - [[fs.file(name:String)* file(name: String): Try[FSFile]]]
 * - [[fs.file(parent:sclib\.io\.fs\.FSDir,name:String)* file(parent: FSDir, name: String): Try[FSFile]]]
 * - [[fs.file(path:java\.nio\.file\.Path)* file(path: Path): Try[FSFile]]]
*
* for a directory handle:
* - [[fs.dir(name:String)* dir(name: String): Try[FSDir]]]
 * - [[fs.dir(parent:sclib\.io\.fs\.FSDir,name:String)* dir(parent: FSDir, name: String): Try[FSDir]]]
 * - [[fs.dir(path:java\.nio\.file\.Path)* dir(path: Path): Try[FSDir]]]
*
*
* ======example:======
* {{{
* scala> import sclib.io.fs._
* scala> file("/path/file-name")
* res0: scala.util.Try[FSFile] = Success(FSFile(/path/file-name))
* scala> dir("/path/dir-name")
* res1: scala.util.Try[FSDir] = Success(FSDir(/path/dir-name))
* }}}
*
*
 * these functions return a `Failure` if the requested entry exists but has the wrong type.
* {{{
* scala> import sclib.io.fs._
* scala> file("a-file").flatMap(_.create)
* res0: scala.util.Try[FSFile] = Success(FSFile(a-file))
* scala> dir("a-file")
* res1: scala.util.Try[FSDir] = Failure(java.lang.Exception: 'a-file' is a file)
* scala> file("a-file").flatMap(_.delete)
* res2: scala.util.Try[Unit] = Success(())
* }}}
*
*
* ===work with a handle===
*
 * all functions which can throw an exception are wrapped in a `Try`, so they are easy to compose.
* {{{
* scala> import sclib.io.fs._
* scala> for {
* | fh <- file("file-name")
* | _ <- fh.write("file content")
* | c <- fh.slurp
* | _ <- fh.delete
* | } yield c
* res0: scala.util.Try[String] = Success(file content)
* }}}
*
* @see [[sclib.io.fs.FSFile]]
* @see [[sclib.io.fs.FSDir]]
*/
package object fs {
/**
* initialize a file-handle from a given path
*
   * if the given path exists and is a directory, a failure is returned.
*
* ''this doesn't create the file - use [[FSEntry.create(n:*]] / [[FSFile.createTemp]] / any write method to create it''
*/
def file(path: Path): Try[FSFile] = {
val f = new FSFile(path)
if (f.exists && f.isDirectory) s"'${path}' is a directory".failure
else f.success
}
/**
* initialize a file-handle from a given path
*
   * if the given path exists and is a directory, a failure is returned.
*
* ''this doesn't create the file - use [[FSEntry.create(n:*]] / [[FSFile.createTemp]] / any write method to create it''
*/
def file(name: String): Try[FSFile] = file(Paths.get(name))
/**
* initialize a file-handle from a given path
*
   * if the given path exists and is a directory, a failure is returned.
*
* ''this doesn't create the file - use [[FSEntry.create(n:*]] / [[FSFile.createTemp]] / any write method to create it''
*/
def file(parent: FSDir, name: String): Try[FSFile] = file(Paths.get(parent.path.toString, name))
/**
* initialize a directory-handle from a given path
*
   * if the given path exists and is a file, a failure is returned.
*
* ''this doesn't create the directory - use [[FSEntry.create(n:*]] / [[FSDir.createTemp]] to create it''
*/
def dir(path: Path): Try[FSDir] = {
val d = new FSDir(path)
if (d.exists && d.isRegularFile) s"'${path}' is a file".failure
else d.success
}
/**
* initialize a directory-handle from a given path
*
   * if the given path exists and is a file, a failure is returned.
*
* ''this doesn't create the directory - use [[FSEntry.create(n:*]] / [[FSDir.createTemp]] to create it''
*/
def dir(name: String): Try[FSDir] = dir(Paths.get(name))
/**
* initialize a directory-handle from a given path
*
   * if the given path exists and is a file, a failure is returned.
*
* ''this doesn't create the directory - use [[FSEntry.create(n:*]] / [[FSDir.createTemp]] to create it''
*/
def dir(parent: FSDir, name: String): Try[FSDir] = dir(Paths.get(parent.path.toString, name))
/**
* Any FSEntry Implementation
*/
type FSEntryImpl = FSEntry[A] forSome { type A <: FSEntry[A] }
}
|
j-keck/sclib
|
src/main/scala/sclib/io/fs/package.scala
|
Scala
|
mit
| 4,400
|
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v3
import uk.gov.hmrc.ct.box.ValidatableBox._
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.ct600.v3.retriever.RepaymentsBoxRetriever
case class B940(value: Option[String]) extends CtBoxIdentifier("building society reference")
with CtOptionalString with Input with ValidatableBox[RepaymentsBoxRetriever] {
override def validate(boxRetriever: RepaymentsBoxRetriever): Set[CtValidation] = {
validateOptionalStringByLength("B940", this, 2, 18) ++
validateOptionalStringByRegex("B940", this, ValidNonForeignLessRestrictiveCharacters)
}
}
|
liquidarmour/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/ct600/v3/B940.scala
|
Scala
|
apache-2.0
| 1,191
|
import com.darrenxyli.krypton.libs.{BaderBetweennessCentrality}
import org.apache.spark.hyperx.HypergraphLoader
import org.apache.spark.{SparkConf, SparkContext}
import scala.compat.Platform.currentTime
object Krypton {
def main(args: Array[String]) {
    val logFile = args(0)
    val partitionNum = args(1).toInt
val conf = new SparkConf().setAppName("Krypton")
val sc = new SparkContext(conf)
val g = HypergraphLoader.hyperedgeListFile(sc, logFile, " ", false, partitionNum)
val executionStart: Long = currentTime
BaderBetweennessCentrality.run(g).collect()
val total = currentTime - executionStart
println("[total " + total + "ms]")
Thread sleep 10000
sc.stop()
}
}
|
cube2matrix/Krypton
|
Krypton/src/main/scala/Krypton.scala
|
Scala
|
gpl-2.0
| 761
|
package frdomain.ch8
package cqrs
package memrepo
import org.joda.time.DateTime
import scalaz._
import Scalaz._
import scalaz.concurrent.Task
import Task._
import collection.concurrent.TrieMap
import common._
import spray.json._
import JSONProtocols._
trait Event[A] {
def at: DateTime
}
case class Opened(no: String, name: String, openingDate: Option[DateTime], at: DateTime = today) extends Event[Account]
case class Closed(no: String, closeDate: Option[DateTime], at: DateTime = today) extends Event[Account]
case class Debited(no: String, amount: Amount, at: DateTime = today) extends Event[Account]
case class Credited(no: String, amount: Amount, at: DateTime = today) extends Event[Account]
object Event {
val eventLog = TrieMap[String, List[Event[_]]]()
val eventLogJson = TrieMap[String, List[String]]()
def updateState(e: Event[_], initial: Map[String, Account]) = e match {
case o @ Opened(no, name, odate, _) =>
initial + (no -> Account(no, name, odate.get))
case c @ Closed(no, cdate, _) =>
initial + (no -> initial(no).copy(dateOfClosing = Some(cdate.getOrElse(today))))
case d @ Debited(no, amount, _) =>
val a = initial(no)
initial + (no -> a.copy(balance = Balance(a.balance.amount - amount)))
case r @ Credited(no, amount, _) =>
val a = initial(no)
initial + (no -> a.copy(balance = Balance(a.balance.amount + amount)))
}
  def events(no: String): Error \/ List[Event[_]] = {
val currentList = eventLog.getOrElse(no, Nil)
if (currentList.isEmpty) s"Account $no does not exist".left
else currentList.right
}
  def snapshot(es: List[Event[_]]): String \/ Map[String, Account] =
es.reverse.foldLeft(Map.empty[String, Account]) { (a, e) => updateState(e, a) }.right
  def snapshotFromJson(es: List[String]): String \/ Map[String, Account] =
es.reverse.foldLeft(Map.empty[String, Account]) { (a, e) => updateState(e.parseJson.convertTo[Event[_]], a) }.right
}
object Commands extends Commands {
import Event._
  def closed(a: Account): Error \/ Account =
if (a.dateOfClosing isDefined) s"Account ${a.no} is closed".left
else a.right
  def beforeOpeningDate(a: Account, cd: Option[DateTime]): Error \/ Account =
if (a.dateOfOpening isBefore cd.getOrElse(today))
s"Cannot close at a date earlier than opening date ${a.dateOfOpening}".left
else a.right
  def sufficientFundsToDebit(a: Account, amount: Amount): Error \/ Account =
if (a.balance.amount < amount) s"insufficient fund to debit $amount from ${a.no}".left
else a.right
def validateClose(no: String, cd: Option[DateTime]) = for {
l <- events(no)
s <- snapshot(l)
a <- closed(s(no))
_ <- beforeOpeningDate(a, cd)
} yield s
def validateDebit(no: String, amount: Amount) = for {
l <- events(no)
s <- snapshot(l)
a <- closed(s(no))
_ <- sufficientFundsToDebit(a, amount)
} yield s
def validateCredit(no: String) = for {
l <- events(no)
s <- snapshot(l)
_ <- closed(s(no))
} yield s
def validateOpen(no: String) =
eventLog.get(no)
.map { _ => s"Account with no = $no already exists".left }
.getOrElse(no.right)
def handleCommand[A](e: Event[A]): Task[A] = e match {
case o @ Opened(no, name, odate, _) => validateOpen(no).fold(
err => fail(new RuntimeException(err)),
_ => now {
val a = Account(no, name, odate.get)
eventLog += (no -> List(o))
eventLogJson += (no -> List(OpenedFormat.write(o).toString))
a
}
)
case c @ Closed(no, cdate, _) => validateClose(no, cdate).fold(
err => fail(new RuntimeException(err)),
currentState => now {
eventLog += (no -> (c :: eventLog.getOrElse(no, Nil)))
eventLogJson += (no -> (ClosedFormat.write(c).toString :: eventLogJson.getOrElse(no, Nil)))
updateState(c, currentState)(no)
}
)
case d @ Debited(no, amount, _) => validateDebit(no, amount).fold(
err => fail(new RuntimeException(err)),
currentState => now {
eventLog += (no -> (d :: eventLog.getOrElse(no, Nil)))
eventLogJson += (no -> (DebitedFormat.write(d).toString :: eventLogJson.getOrElse(no, Nil)))
updateState(d, currentState)(no)
}
)
case r @ Credited(no, amount, _) => validateCredit(no).fold(
err => fail(new RuntimeException(err)),
currentState => now {
eventLog += (no -> (r :: eventLog.getOrElse(no, Nil)))
eventLogJson += (no -> (CreditedFormat.write(r).toString :: eventLogJson.getOrElse(no, Nil)))
updateState(r, currentState)(no)
}
)
}
}
trait Commands {
import Event._
import scala.language.implicitConversions
type Command[A] = Free[Event, A]
private implicit def liftEvent[Next](event: Event[Next]): Command[Next] = Free.liftF(event)
def open(no: String, name: String, openingDate: Option[DateTime]): Command[Account] = Opened(no, name, openingDate, today)
def close(no: String, closeDate: Option[DateTime]): Command[Account] = Closed(no, closeDate, today)
def debit(no: String, amount: Amount): Command[Account] = Debited(no, amount, today)
def credit(no: String, amount: Amount): Command[Account] = Credited(no, amount, today)
}
object RepositoryBackedInterpreter {
import Commands._
val step: Event ~> Task = new (Event ~> Task) {
override def apply[A](action: Event[A]): Task[A] = handleCommand(action)
}
def apply[A](action: Command[A]): Task[A] = action.foldMap(step)
}
object Scripts extends Commands {
def transfer(from: String, to: String, amount: Amount): Command[Unit] = for {
_ <- debit(from, amount)
_ <- credit(to, amount)
} yield ()
val composite =
for {
a <- open("a-123", "debasish ghosh", Some(today))
_ <- credit(a.no, 10000)
_ <- credit(a.no, 30000)
d <- debit(a.no, 23000)
} yield d
val compositeFail =
for {
a <- open("a-124", "debasish ghosh", Some(today))
_ <- credit(a.no, 10000)
_ <- credit(a.no, 30000)
d <- debit(a.no, 50000)
} yield d
}
object Projections {
import Event._
def balance(no: String) = for {
l <- events(no)
s <- snapshot(l)
} yield s(no).balance
}
|
debasishg/frdomain
|
src/main/scala/frdomain/ch8/cqrs/memrepo/AccountService.scala
|
Scala
|
apache-2.0
| 6,260
|
package io.eels.component.hive
import org.apache.hadoop.hive.ql.io.orc.{OrcInputFormat, OrcOutputFormat, OrcSerde}
import org.apache.hadoop.hive.ql.io.parquet.{MapredParquetInputFormat, MapredParquetOutputFormat}
import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe
trait HiveFormat {
def serde: String
def inputFormat: String
def outputFormat: String
}
object HiveFormat {
object Text extends HiveFormat {
override def serde: String = "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"
override def inputFormat: String = "org.apache.hadoop.mapred.TextInputFormat"
override def outputFormat: String = "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"
}
object Parquet extends HiveFormat {
override val serde: String = classOf[ParquetHiveSerDe].getCanonicalName
override val inputFormat: String = classOf[MapredParquetInputFormat].getCanonicalName
override val outputFormat: String = classOf[MapredParquetOutputFormat].getCanonicalName
}
object Avro extends HiveFormat {
override def serde: String = "org.apache.hadoop.hive.serde2.avro.AvroSerDe"
override def inputFormat: String = "org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat"
override def outputFormat: String = "org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat"
}
object Orc extends HiveFormat {
override val serde: String = classOf[OrcSerde].getCanonicalName
override val inputFormat: String = classOf[OrcInputFormat].getCanonicalName
override val outputFormat: String = classOf[OrcOutputFormat].getCanonicalName
}
def apply(format: String): HiveFormat = format match {
case "avro" => HiveFormat.Avro
case "orc" => HiveFormat.Orc
case "parquet" => HiveFormat.Parquet
case "text" => HiveFormat.Text
case _ => throw new UnsupportedOperationException(s"Unknown hive input format $format")
}
def fromInputFormat(inputFormat: String): HiveFormat = inputFormat match {
case "org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat" => Parquet
case "org.apache.hadoop.mapred.TextInputFormat" => Text
case "org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat" => Avro
case "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat" => Orc
case _ => throw new UnsupportedOperationException(s"Input format not known $inputFormat")
}
}
|
stheppi/eel
|
eel-hive/src/main/scala/io/eels/component/hive/HiveFormat.scala
|
Scala
|
apache-2.0
| 2,361
|
package actors.routing.minutes
import actors.routing.minutes.MinutesActorLike.{ManifestLookup, ManifestsUpdate}
import actors.persistent.QueueLikeActor.UpdatedMillis
import drt.shared.CrunchApi.MillisSinceEpoch
import drt.shared.dates.UtcDate
import passengersplits.parsing.VoyageManifestParser.{VoyageManifest, VoyageManifests}
import scala.concurrent.{ExecutionContextExecutor, Future}
case class MockManifestsLookup() {
implicit val ec: ExecutionContextExecutor = scala.concurrent.ExecutionContext.global
var paramsLookup: List[(UtcDate, Option[MillisSinceEpoch])] = List()
var paramsUpdate: List[(UtcDate, VoyageManifests)] = List()
def lookup(mockData: VoyageManifests = VoyageManifests.empty): ManifestLookup = {
val byDay: Map[UtcDate, VoyageManifests] = mockData.manifests
.groupBy { vm: VoyageManifest => vm.scheduleArrivalDateTime.map(_.toUtcDate) }
.collect { case (Some(date), vms) => date -> VoyageManifests(vms) }
(d: UtcDate, pit: Option[MillisSinceEpoch]) => {
paramsLookup = paramsLookup :+ ((d, pit))
Future(byDay(d))
}
}
def update: ManifestsUpdate = (date: UtcDate, manifests: VoyageManifests) => {
paramsUpdate = paramsUpdate :+ ((date, manifests))
Future(UpdatedMillis(manifests.manifests.map(_.scheduled.millisSinceEpoch)))
}
}
|
UKHomeOffice/drt-scalajs-spa-exploration
|
server/src/test/scala/actors/routing/minutes/MockManifestsLookup.scala
|
Scala
|
apache-2.0
| 1,318
|
object Test {
def main(args: Array[String]): Unit = {
J_2.main(args)
}
}
|
som-snytt/dotty
|
tests/run/t3452a/S_3.scala
|
Scala
|
apache-2.0
| 81
|
package Yosemite.framework
private[Yosemite] object DataType extends Enumeration {
type DataType = Value
val FAKE, INMEMORY, ONDISK = Value
}
private[Yosemite] case class DataIdentifier(
dataId: String,
coflowId: String)
private[Yosemite] class FlowDescription(
val id: String, // Expected to be unique within the coflow
val coflowId: String, // Must be a valid coflow
val dataType: DataType.DataType, // http://www.scala-lang.org/node/7661
val sizeInBytes: Long,
val maxReceivers: Int, // Upper-bound on the number of receivers (how long to keep it around?)
val originHost: String,
var originCommPort: Int)
extends Serializable {
val dataId = DataIdentifier(id, coflowId)
val user = System.getProperty("user.name", "<unknown>")
override def toString: String = "FlowDescription(" + id + ":" + dataType + ":" + coflowId +
" # " + sizeInBytes + " Bytes)"
def updateCommPort(commPort: Int) {
originCommPort = commPort
}
}
private[Yosemite] class FileDescription(
val id_ : String, // Expected to be unique within the coflow
val pathToFile: String,
val cId_ : String, // Must be a valid coflow
val dataType_ : DataType.DataType,
val offset: Long,
val size_ : Long,
val maxR_ : Int,
val originHost_ : String,
val originCommPort_ : Int)
extends FlowDescription(id_, cId_, dataType_, size_, maxR_, originHost_, originCommPort_) {
override def toString: String = "FileDescription(" + id + "[" + pathToFile + "]:" + dataType +
":" + coflowId + " # " + sizeInBytes + " Bytes)"
}
private[Yosemite] class ObjectDescription(
val id_ : String, // Expected to be unique within the coflow
val className: String,
val cId_ : String, // Must be a valid coflow
val dataType_ : DataType.DataType,
val serializedSize: Long,
val maxR_ : Int,
val originHost_ : String,
val origCommPort_ : Int)
extends FlowDescription(id_, cId_, dataType_, serializedSize, maxR_, originHost_, origCommPort_) {
override def toString: String = "ObjectDescription(" + id + "[" + className + "]:" + dataType +
":" + coflowId + " # " + sizeInBytes + " Bytes)"
}
|
zhanghan1990/Yosemite
|
core/src/main/scala/Yosemite/framework/DataDescription.scala
|
Scala
|
apache-2.0
| 3,079
|
package com.twitter.finagle.httpx
import com.twitter.finagle.httpx.netty.Bijections
import com.twitter.finagle.netty3.{ChannelBufferBuf, BufChannelBuffer}
import com.twitter.util.Base64StringEncoder
import com.twitter.io.Buf
import java.net.URL
import org.jboss.netty.buffer.{ChannelBuffer, ChannelBuffers}
import org.jboss.netty.handler.codec.http.multipart.{DefaultHttpDataFactory, HttpPostRequestEncoder, HttpDataFactory}
import org.jboss.netty.handler.codec.http.{HttpRequest, HttpHeaders, HttpVersion, HttpMethod, DefaultHttpRequest}
import scala.annotation.implicitNotFound
import scala.collection.JavaConversions._
import scala.collection.mutable.ListBuffer
import Bijections._
/*
* HTML form element.
*/
sealed abstract class FormElement
/*
* HTML form simple input field.
*/
case class SimpleElement(name: String, content: String) extends FormElement
/*
* HTML form file input field.
*/
case class FileElement(name: String, content: Buf, contentType: Option[String] = None,
filename: Option[String] = None) extends FormElement
/**
* Provides a class for building [[org.jboss.netty.handler.codec.http.HttpRequest]]s.
 * The main class to use is [[com.twitter.finagle.httpx.RequestBuilder]], like so
*
* {{{
* val getRequest = RequestBuilder()
* .setHeader(HttpHeaders.Names.USER_AGENT, "MyBot")
* .setHeader(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE)
* .url(new URL("http://www.example.com"))
* .buildGet()
* }}}
*
* The `RequestBuilder` requires the definition of `url`. In Scala,
 * this is statically type checked, and in Java the lack of
 * a url causes a runtime error.
*
 * The `buildGet`, `buildHead`, `buildPut`, and `buildPost` methods use an implicit argument
 * to statically typecheck the builder (to ensure completeness, see above).
 * The Java compiler cannot provide such an implicit, so we provide separate
* functions in Java to accomplish this. Thus, the Java code for the
* above is
*
* {{{
* HttpRequest getRequest =
* RequestBuilder.safeBuildGet(
* RequestBuilder.create()
* .setHeader(HttpHeaders.Names.USER_AGENT, "MyBot")
* .setHeader(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE)
* .url(new URL("http://www.example.com")))
* }}}
*
 * Overall, RequestBuilder is pretty barebones. It does provide certain protocol-level support
 * for more involved requests. For example, it supports easy creation of POST requests that submit
 * multipart web forms with `buildMultipartPost` and default form posts with `buildFormPost`.
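 *
 * A sketch of a default form post (the builder needs both a url and at least one
 * form element before `buildFormPost` compiles):
 *
 * {{{
 * val formPost = RequestBuilder()
 *   .url(new URL("http://www.example.com/login"))
 *   .addFormElement("user" -> "alice", "pass" -> "secret")
 *   .buildFormPost(multipart = false)
 * }}}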
*/
/**
* Factory for [[com.twitter.finagle.httpx.RequestBuilder]] instances
*/
object RequestBuilder {
@implicitNotFound("Http RequestBuilder is not correctly configured: HasUrl (exp: Yes): ${HasUrl}, HasForm (exp: Nothing) ${HasForm}.")
private trait RequestEvidence[HasUrl, HasForm]
private object RequestEvidence {
implicit object FullyConfigured extends RequestEvidence[RequestConfig.Yes, Nothing]
}
@implicitNotFound("Http RequestBuilder is not correctly configured for form post: HasUrl (exp: Yes): ${HasUrl}, HasForm (exp: Yes): ${HasForm}.")
private trait PostRequestEvidence[HasUrl, HasForm]
private object PostRequestEvidence {
implicit object FullyConfigured extends PostRequestEvidence[RequestConfig.Yes, RequestConfig.Yes]
}
type Complete = RequestBuilder[RequestConfig.Yes, Nothing]
type CompleteForm = RequestBuilder[RequestConfig.Yes, RequestConfig.Yes]
def apply() = new RequestBuilder()
/**
* Used for Java access.
*/
def create() = apply()
/**
* Provides a typesafe `build` with content for Java.
*/
def safeBuild(builder: Complete, method: Method, content: Option[Buf]): Request =
builder.build(method, content)(RequestEvidence.FullyConfigured)
/**
* Provides a typesafe `buildGet` for Java.
*/
def safeBuildGet(builder: Complete): Request =
builder.buildGet()(RequestEvidence.FullyConfigured)
/**
* Provides a typesafe `buildHead` for Java.
*/
def safeBuildHead(builder: Complete): Request =
builder.buildHead()(RequestEvidence.FullyConfigured)
/**
* Provides a typesafe `buildDelete` for Java.
*/
def safeBuildDelete(builder: Complete): Request =
builder.buildDelete()(RequestEvidence.FullyConfigured)
/**
* Provides a typesafe `buildPut` for Java.
*/
def safeBuildPut(builder: Complete, content: Buf): Request =
builder.buildPut(content)(RequestEvidence.FullyConfigured)
/**
* Provides a typesafe `buildPut` for Java.
*/
@deprecated("Typo, use safeBuildPut instead", "5.3.7")
def safeBuidlPut(builder: Complete, content: Buf): Request =
safeBuildPut(builder, content)
/**
* Provides a typesafe `buildPost` for Java.
*/
def safeBuildPost(builder: Complete, content: Buf): Request =
builder.buildPost(content)(RequestEvidence.FullyConfigured)
/**
* Provides a typesafe `buildFormPost` for Java.
*/
def safeBuildFormPost(builder: CompleteForm, multipart: Boolean): Request =
builder.buildFormPost(multipart)(PostRequestEvidence.FullyConfigured)
}
object RequestConfig {
sealed abstract trait Yes
type FullySpecifiedConfig = RequestConfig[Yes, Nothing]
type FullySpecifiedConfigForm = RequestConfig[Yes, Yes]
}
private[httpx] final case class RequestConfig[HasUrl, HasForm](
url: Option[URL] = None,
headers: Map[String, Seq[String]] = Map.empty,
formElements: Seq[FormElement] = Nil,
version: Version = Version.Http11,
proxied: Boolean = false
)
class RequestBuilder[HasUrl, HasForm] private[httpx](
config: RequestConfig[HasUrl, HasForm]
) {
import RequestConfig._
type This = RequestBuilder[HasUrl, HasForm]
private[this] val SCHEME_WHITELIST = Seq("http","https")
private[httpx] def this() = this(RequestConfig())
/*
* Specify url as String
*/
def url(u: String): RequestBuilder[Yes, HasForm] = url(new java.net.URL(u))
/**
* Specify the url to request. Sets the HOST header and possibly
* the Authorization header using the authority portion of the URL.
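   *
   * For instance, `url(new URL("http://user:pass@www.example.com/page"))` would set a
   * `Host: www.example.com` header and an `Authorization` header of
   * `"Basic " + base64("user:pass")`.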
*/
def url(u: URL): RequestBuilder[Yes, HasForm] = {
require(SCHEME_WHITELIST.contains(u.getProtocol), "url must be http(s)")
val uri = u.toURI
val host = uri.getHost.toLowerCase
val hostValue =
if (u.getPort == -1 || u.getDefaultPort == u.getPort)
host
else
"%s:%d".format(host, u.getPort)
val withHost = config.headers.updated(HttpHeaders.Names.HOST, Seq(hostValue))
val userInfo = uri.getUserInfo
val updated =
if (userInfo == null || userInfo.isEmpty)
withHost
else {
val auth = "Basic " + Base64StringEncoder.encode(userInfo.getBytes)
withHost.updated(HttpHeaders.Names.AUTHORIZATION, Seq(auth))
}
new RequestBuilder(config.copy(url = Some(u), headers = updated))
}
/*
* Add simple form name/value pairs. In this mode, this RequestBuilder will only
* be able to generate a multipart/form POST request.
*/
def addFormElement(kv: (String, String)*): RequestBuilder[HasUrl, Yes] = {
val elems = config.formElements
val updated = kv.foldLeft(elems) { case (es, (k, v)) => es :+ new SimpleElement(k, v) }
new RequestBuilder(config.copy(formElements = updated))
}
/*
* Add a FormElement to a request. In this mode, this RequestBuilder will only
* be able to generate a multipart/form POST request.
*/
def add(elem: FormElement): RequestBuilder[HasUrl, Yes] = {
val elems = config.formElements
val updated = elems ++ Seq(elem)
new RequestBuilder(config.copy(formElements = updated))
}
/*
* Add a group of FormElements to a request. In this mode, this RequestBuilder will only
* be able to generate a multipart/form POST request.
*/
def add(elems: Seq[FormElement]): RequestBuilder[HasUrl, Yes] = {
val first = this.add(elems.head)
elems.tail.foldLeft(first) { (b, elem) => b.add(elem) }
}
/**
* Declare the HTTP protocol version be HTTP/1.0
*/
def http10(): This =
new RequestBuilder(config.copy(version = Version.Http10))
/**
* Set a new header with the specified name and value.
*/
def setHeader(name: String, value: String): This = {
val updated = config.headers.updated(name, Seq(value))
new RequestBuilder(config.copy(headers = updated))
}
/**
* Set a new header with the specified name and values.
*/
def setHeader(name: String, values: Seq[String]): This = {
val updated = config.headers.updated(name, values)
new RequestBuilder(config.copy(headers = updated))
}
/**
* Set a new header with the specified name and values.
*
* Java convenience variant.
*/
def setHeader(name: String, values: java.lang.Iterable[String]): This = {
setHeader(name, values.toSeq)
}
/**
* Add a new header with the specified name and value.
*/
def addHeader(name: String, value: String): This = {
val values = config.headers.get(name).getOrElse(Seq())
val updated = config.headers.updated(
name, values ++ Seq(value))
new RequestBuilder(config.copy(headers = updated))
}
/**
* Add group of headers expressed as a Map
*/
def addHeaders(headers: Map[String, String]): This = {
headers.foldLeft(this) { case (b, (k, v)) => b.addHeader(k, v) }
}
/**
* Declare the request will be proxied. Results in using the
* absolute URI in the request line.
*/
def proxied(): This = proxied(None)
/**
* Declare the request will be proxied. Results in using the
* absolute URI in the request line and setting the Proxy-Authorization
* header using the provided {{ProxyCredentials}}.
*/
def proxied(credentials: ProxyCredentials): This = proxied(Some(credentials))
/**
* Declare the request will be proxied. Results in using the
* absolute URI in the request line and optionally setting the
* Proxy-Authorization header using the provided {{ProxyCredentials}}.
*/
def proxied(credentials: Option[ProxyCredentials]): This = {
val headers: Map[String,Seq[String]] = credentials map { creds =>
config.headers.updated(HttpHeaders.Names.PROXY_AUTHORIZATION, Seq(creds.basicAuthorization))
} getOrElse config.headers
new RequestBuilder(config.copy(headers = headers, proxied = true))
}
/**
* Construct an HTTP request with a specified method.
*/
def build(method: Method, content: Option[Buf])(
implicit HTTP_REQUEST_BUILDER_IS_NOT_FULLY_SPECIFIED: RequestBuilder.RequestEvidence[HasUrl, HasForm]
): Request = {
content match {
case Some(content) => withContent(method, content)
case None => withoutContent(method)
}
}
/**
* Construct an HTTP GET request.
*/
def buildGet()(
implicit HTTP_REQUEST_BUILDER_IS_NOT_FULLY_SPECIFIED: RequestBuilder.RequestEvidence[HasUrl, HasForm]
): Request = withoutContent(Method.Get)
/**
* Construct an HTTP HEAD request.
*/
def buildHead()(
implicit HTTP_REQUEST_BUILDER_IS_NOT_FULLY_SPECIFIED: RequestBuilder.RequestEvidence[HasUrl, HasForm]
): Request = withoutContent(Method.Head)
/**
* Construct an HTTP DELETE request.
*/
def buildDelete()(
implicit HTTP_REQUEST_BUILDER_IS_NOT_FULLY_SPECIFIED: RequestBuilder.RequestEvidence[HasUrl, HasForm]
): Request = withoutContent(Method.Delete)
/**
* Construct an HTTP POST request.
*/
def buildPost(content: Buf)(
implicit HTTP_REQUEST_BUILDER_IS_NOT_FULLY_SPECIFIED: RequestBuilder.RequestEvidence[HasUrl, HasForm]
): Request = withContent(Method.Post, content)
/**
* Construct an HTTP PUT request.
*/
def buildPut(content: Buf)(
implicit HTTP_REQUEST_BUILDER_IS_NOT_FULLY_SPECIFIED: RequestBuilder.RequestEvidence[HasUrl, HasForm]
): Request = withContent(Method.Put, content)
/**
* Construct a form post request.
*/
def buildFormPost(multipart: Boolean = false) (
implicit HTTP_REQUEST_BUILDER_IS_NOT_FULLY_SPECIFIED: RequestBuilder.PostRequestEvidence[HasUrl, HasForm]
): Request = {
val dataFactory = new DefaultHttpDataFactory(false) // we don't use disk
val req = withoutContent(Method.Post)
val encoder = new HttpPostRequestEncoder(dataFactory, req, multipart)
config.formElements.foreach {
case FileElement(name, content, contentType, filename) =>
HttpPostRequestEncoderEx.addBodyFileUpload(encoder, dataFactory, req)(name, filename.getOrElse(""), BufChannelBuffer(content), contentType.getOrElse(null), false)
case SimpleElement(name, value) =>
encoder.addBodyAttribute(name, value)
}
val encodedReq = encoder.finalizeRequest()
if (encodedReq.isChunked) {
val encodings = encodedReq.headers.getAll(HttpHeaders.Names.TRANSFER_ENCODING)
encodings.remove(HttpHeaders.Values.CHUNKED)
if (encodings.isEmpty)
encodedReq.headers.remove(HttpHeaders.Names.TRANSFER_ENCODING)
else
encodedReq.headers.set(HttpHeaders.Names.TRANSFER_ENCODING, encodings)
val chunks = new ListBuffer[ChannelBuffer]
while (encoder.hasNextChunk) {
chunks += encoder.nextChunk().getContent()
}
encodedReq.setContent(ChannelBuffers.wrappedBuffer(chunks:_*))
}
from(encodedReq)
}
// absoluteURI if proxied, otherwise relativeURI
private[this] def resource(): String = {
val url = config.url.get
if (config.proxied) {
return url.toString
} else {
val builder = new StringBuilder()
val path = url.getPath
if (path == null || path.isEmpty)
builder.append("/")
else
builder.append(path)
val query = url.getQuery
if (query != null && !query.isEmpty)
builder.append("?%s".format(query))
builder.toString
}
}
private[httpx] def withoutContent(method: Method): Request =
Request(method, resource, config.version, config.headers)
private[httpx] def withContent(method: Method, content: Buf): Request = {
require(content != null)
val req = withoutContent(method)
req.content = content
req.headers.set(HttpHeaders.Names.CONTENT_LENGTH, content.length.toString)
req
}
}
/**
* Add a missing method to HttpPostRequestEncoder to allow specifying a ChannelBuffer directly as
* content of a file. This logic should eventually move to netty.
*/
private object HttpPostRequestEncoderEx {
//TODO: HttpPostBodyUtil not accessible from netty 3.5.0.Final jar
// This HttpPostBodyUtil simulates what we need.
object HttpPostBodyUtil {
val DEFAULT_TEXT_CONTENT_TYPE = "text/plain"
val DEFAULT_BINARY_CONTENT_TYPE = "application/octet-stream"
object TransferEncodingMechanism {
val BINARY = "binary"
val BIT7 = "7bit"
}
}
/*
* allow specifying post body as ChannelBuffer, the logic is adapted from netty code.
*/
def addBodyFileUpload(encoder: HttpPostRequestEncoder, factory: HttpDataFactory, request: HttpRequest)
(name: String, filename: String, content: ChannelBuffer, contentType: String, isText: Boolean) {
require(name != null)
require(filename != null)
require(content != null)
val scontentType =
if (contentType == null) {
if (isText) {
HttpPostBodyUtil.DEFAULT_TEXT_CONTENT_TYPE
} else {
HttpPostBodyUtil.DEFAULT_BINARY_CONTENT_TYPE
}
} else {
contentType
}
val contentTransferEncoding =
if (!isText) {
HttpPostBodyUtil.TransferEncodingMechanism.BINARY
} else {
HttpPostBodyUtil.TransferEncodingMechanism.BIT7
}
val fileUpload = factory.createFileUpload(request, name, filename, scontentType, contentTransferEncoding, null, content.readableBytes)
fileUpload.setContent(content)
encoder.addBodyHttpData(fileUpload)
}
}
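// Hypothetical usage sketch (added for clarity, not part of this file): building a form post
// through the public builder API. Only FileElement/SimpleElement and buildFormPost appear
// above; the `url` and `addFormElement` combinators are assumed from finagle's RequestBuilder
// and may differ in this httpx variant.
//
//   val formPost: Request =
//     RequestBuilder()
//       .url("http://example.com/upload")
//       .addFormElement("name" -> "value")
//       .buildFormPost(multipart = false)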
|
yancl/finagle-6.22.0
|
finagle-httpx/src/main/scala/com/twitter/finagle/httpx/RequestBuilder.scala
|
Scala
|
apache-2.0
| 15,829
|
package controllers.priv
import com.google.inject.Inject
import uk.gov.dvla.vehicles.presentation.common.clientsidesession.ClientSideSessionFactory
import utils.helpers.Config
class SuppressedV5C @Inject()()(implicit clientSideSessionFactory: ClientSideSessionFactory,
config: Config)
extends controllers.SuppressedV5C with PrivateKeeperController {
protected override val sellAnotherVehicleTarget = routes.SuppressedV5C.sellAnotherVehicle()
protected override val finishTarget = routes.SuppressedV5C.finish()
protected override val lookupAnotherVehicle = Redirect(routes.VehicleLookup.present())
protected override val onFinish = Redirect(controllers.routes.BeforeYouStart.present())
}
|
dvla/vehicles-online
|
app/controllers/priv/SuppressedV5C.scala
|
Scala
|
mit
| 732
|
package dotty.tools.benchmarks.tuples
import org.openjdk.jmh.annotations._
@State(Scope.Thread)
class Zip {
@Param(Array("0"))
var size: Int = _
var tuple1: Tuple = _
var tuple2: Tuple = _
var array1: Array[Object] = _
var array2: Array[Object] = _
@Setup
def setup(): Unit = {
tuple1 = Tuple()
tuple2 = Tuple()
for (i <- 1 to size) {
tuple1 = "el" *: tuple1
tuple2 = "em" *: tuple2
}
array1 = Array.fill(size)("el")
array2 = Array.fill(size)("em")
}
@Benchmark
def tupleZip(): Tuple = {
runtime.Tuples.zip(tuple1, tuple2)
}
@Benchmark
def arrayZip(): Array[(Object, Object)] = {
array1.zip(array2)
}
}
|
lampepfl/dotty
|
bench-run/src/main/scala/dotty/tools/benchmarks/tuples/Zip.scala
|
Scala
|
apache-2.0
| 684
|
package org.jetbrains.plugins.scala
package lang
package psi
package api
package base
import org.jetbrains.plugins.scala.lang.psi.api.statements.params._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.light.ScPrimaryConstructorWrapper
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api.TypeParameterType
import org.jetbrains.plugins.scala.lang.psi.types.api.designator._
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.ScMethodType
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaNamesUtil
import org.jetbrains.plugins.scala.macroAnnotations.{Cached, CachedInUserData, ModCount}
import scala.collection.mutable
/**
* @author Alexander Podkhalyuzin
* Date: 07.03.2008
*/
trait ScPrimaryConstructor extends ScMember with ScMethodLike {
def hasMalformedSignature: Boolean = parameterList.clauses.exists {
_.parameters.dropRight(1).exists(_.isRepeatedParameter)
}
/**
* @return whether this primary constructor has an access modifier
*/
def hasModifier: Boolean
def getClassNameText: String
def parameterList: ScParameters
def parameters : Seq[ScClassParameter] = parameterList.clauses.flatMap(_.unsafeClassParameters)
override def containingClass: ScTypeDefinition = getParent.asInstanceOf[ScTypeDefinition]
/**
* Returns only those parameters which are also members of the class (i.e. declared as val or var).
*/
def valueParameters: Seq[ScClassParameter] = parameters.filter((p: ScClassParameter) => p.isVal || p.isVar)
/**
* All classes must have one non-implicit parameter list. If this is not declared in the code,
* it is assumed by the compiler.
*
* In addition, view and context bounds generate an additional implicit parameter section.
*/
@CachedInUserData(this, ModCount.getBlockModificationCount)
def effectiveParameterClauses: Seq[ScParameterClause] = {
def emptyParameterList: ScParameterClause =
ScalaPsiElementFactory.createEmptyClassParamClauseWithContext(parameterList)
val clausesWithInitialEmpty = parameterList.clauses match {
case Seq() => Seq(emptyParameterList)
case Seq(clause) if clause.isImplicit => Seq(emptyParameterList, clause)
case clauses => clauses
}
clausesWithInitialEmpty ++
ScalaPsiUtil.syntheticParamClause(containingClass, parameterList, isClassParameter = true)()
}
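// Illustrative note (an assumption added for clarity, not part of the original trait): for a
// declaration such as
//   class C[T: Ordering](x: T)
// the compiler desugars the context bound into an extra implicit clause,
//   class C[T](x: T)(implicit evidence$1: Ordering[T])
// so effectiveParameterClauses reports two clauses even though only one is written, and a
// class declared with no parameter list at all still yields one empty clause.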
def effectiveFirstParameterSection: Seq[ScClassParameter] = effectiveParameterClauses.head.unsafeClassParameters
def methodType(result: Option[ScType]): ScType = {
val parameters: ScParameters = parameterList
val clauses = parameters.clauses
val returnType: ScType = result.getOrElse({
val clazz = getParent.asInstanceOf[ScTypeDefinition]
val typeParameters = clazz.typeParameters
val parentClazz = ScalaPsiUtil.getPlaceTd(clazz)
val designatorType: ScType =
if (parentClazz != null)
ScProjectionType(ScThisType(parentClazz), clazz)
else ScDesignatorType(clazz)
if (typeParameters.isEmpty) designatorType
else {
ScParameterizedType(designatorType, typeParameters.map(TypeParameterType(_)))
}
})
if (clauses.isEmpty) return ScMethodType(returnType, Seq.empty, false)
val res = clauses.foldRight[ScType](returnType){(clause: ScParameterClause, tp: ScType) =>
ScMethodType(tp, clause.getSmartParameters, clause.isImplicit)
}
res.asInstanceOf[ScMethodType]
}
def getParamByName(name: String, clausePosition: Int = -1): Option[ScParameter] = {
clausePosition match {
case -1 =>
for (param <- parameters if ScalaNamesUtil.equivalent(param.name, name)) return Some(param)
None
case i if i < 0 => None
case i if i >= effectiveParameterClauses.length => None
case i =>
val clause: ScParameterClause = effectiveParameterClauses.apply(i)
for (param <- clause.parameters if ScalaNamesUtil.equivalent(param.name, name)) return Some(param)
None
}
}
@Cached(ModCount.getBlockModificationCount, this)
def getFunctionWrappers: Seq[ScPrimaryConstructorWrapper] = {
val buffer = mutable.ArrayBuffer.empty[ScPrimaryConstructorWrapper]
buffer += new ScPrimaryConstructorWrapper(this)
for {
first <- parameterList.clauses.headOption
if first.hasRepeatedParam
if hasAnnotation("scala.annotation.varargs")
} {
buffer += new ScPrimaryConstructorWrapper(this, isJavaVarargs = true)
}
val params = parameters
for (i <- params.indices if params(i).baseDefaultParam) {
buffer += new ScPrimaryConstructorWrapper(this, forDefault = Some(i + 1))
}
buffer
}
}
object ScPrimaryConstructor {
object ofClass {
def unapply(pc: ScPrimaryConstructor): Option[ScClass] = {
pc.containingClass match {
case c: ScClass => Some(c)
case _ => None
}
}
}
}
|
jastice/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/api/base/ScPrimaryConstructor.scala
|
Scala
|
apache-2.0
| 4,990
|
package sampler.abc.actor.children
import org.scalatest.FreeSpecLike
import akka.testkit.TestKit
import akka.actor.ActorSystem
class BroadcastActorTest extends TestKit(ActorSystem("ABC-test")) with FreeSpecLike {
"TODO" in fail("TODO")
}
|
tearne/Sampler
|
sampler-abc/src/test/scala/sampler/abc/actor/children/BroadcastActorTest.scala
|
Scala
|
apache-2.0
| 241
|
package thangiee.riotapi.currentgame
case class Participant(
bot: Boolean = false,
championId: Long = 0,
masteries: List[Mastery] = Nil,
profileIconId: Long = 0,
runes: List[Rune] = Nil,
spell1Id: Long = 0,
spell2Id: Long = 0,
summonerId: Long = 0,
summonerName: String = "",
teamId: Long = 0
)
|
Thangiee/Riot-API-Scala
|
src/main/scala/thangiee/riotapi/currentgame/Participant.scala
|
Scala
|
mit
| 319
|
package akashic.storage.admin
object Error {
trait t
case class Exception(e: t) extends RuntimeException
case class NotFound() extends t
def interpret(e: t): (Int, String) = e match {
case NotFound() => (404, "account not found")
case _ => (500, "unknown error")
}
def failWith(e: t) = throw Exception(e)
}
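// Hypothetical usage sketch (added for clarity, not part of the original file): callers raise
// admin errors with failWith and translate them back into an HTTP status/message pair with
// interpret. The lookupAccount helper below is invented for illustration.
object ErrorUsageExample {
  def lookupAccount(id: String): String =
    if (id.isEmpty) Error.failWith(Error.NotFound()) else s"account-$id"

  def handle(id: String): (Int, String) =
    try { (200, lookupAccount(id)) }
    catch { case Error.Exception(e) => Error.interpret(e) }
}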
|
akiradeveloper/akashic-storage
|
src/main/scala/akashic/storage/admin/Error.scala
|
Scala
|
apache-2.0
| 328
|
package org.danielnixon.progressive
import org.danielnixon.progressive.facades.virtualdom._
import shared.Wart
import scala.scalajs.js
@SuppressWarnings(Array(Wart.AsInstanceOf))
object Global {
val virtualDom: VirtualDom = js.Dynamic.global.virtualDom.asInstanceOf[VirtualDom]
val vdomParser: VDomParser = js.Dynamic.global.vdomParser.asInstanceOf[VDomParser]
}
|
danielnixon/progressive
|
client/src/main/scala/org/danielnixon/progressive/Global.scala
|
Scala
|
gpl-3.0
| 370
|
package actors.queues
import actors.DateRange
import drt.shared.dates.{DateLike, LocalDate, UtcDate}
import org.specs2.mutable.Specification
import services.SDate
import services.graphstages.Crunch
class DateRangeSpec extends Specification {
"Concerning BST dates" >> {
"Given a start date of 2020-05-01T00:00+1 (2020-04-30T23:00) and an end date the same" >> {
"When I ask for the UTC date range" >> {
"I should get 2020-04-30" >> {
val date = SDate("2020-05-01T00:00:00+01:00")
val range: Seq[DateLike] = DateRange.utcDateRange(date, date)
range === Seq(UtcDate(2020, 4, 30))
}
}
}
"Given a start date of 2020-05-01T00:00+1 (2020-04-30T23:00) and an end date the same" >> {
"When I ask for the Local date range" >> {
"I should get 2020-05-01" >> {
val date = SDate("2020-05-01T00:00:00+01:00")
val range: Seq[DateLike] = DateRange.localDateRange(date, date)
range === Seq(LocalDate(2020, 5, 1))
}
}
}
"Given a start and end date that span two UTC dates but one BST date" >> {
"When I ask for a UTC Date range" >> {
"I should get back both UTC Dates in the range" >> {
val date1 = SDate("2020-04-02T00:00", Crunch.europeLondonTimeZone)
val date2 = SDate("2020-04-02T02:00", Crunch.europeLondonTimeZone)
val range: Seq[DateLike] = DateRange.utcDateRange(date1, date2)
range === Seq(UtcDate(2020, 4, 1), UtcDate(2020, 4, 2))
}
}
}
}
"Concerning UTC dates" >> {
"Given a start date of 2020-01-01T00:00 and an end date the same" >> {
"When I ask for the local date range" >> {
"I should get just 2020-01-01" >> {
val date = SDate("2020-01-01T00:00:00")
val range: Seq[DateLike] = DateRange.utcDateRange(date, date)
range === Seq(UtcDate(2020, 1, 1))
}
}
}
"Given a start date of 2020-05-01T00:00 and an end date the same" >> {
"When I ask for the local date range" >> {
"I should get just 2020-01-01" >> {
val date = SDate("2020-01-01T00:00")
val range: Seq[DateLike] = DateRange.localDateRange(date, date)
range === Seq(LocalDate(2020, 1, 1))
}
}
}
}
"Given a date range that spans two dates but less than 24 hours" >> {
"When I ask for the local date range" >> {
"I should get 2 dates back in the range" >> {
val date1 = SDate("2020-01-01T12:00")
val date2 = SDate("2020-01-02T10:00")
val range: Seq[DateLike] = DateRange.localDateRange(date1, date2)
range === Seq(LocalDate(2020, 1, 1), LocalDate(2020, 1, 2))
}
}
}
}
|
UKHomeOffice/drt-scalajs-spa-exploration
|
server/src/test/scala/actors/queues/DateRangeSpec.scala
|
Scala
|
apache-2.0
| 2,737
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.utils
import org.apache.flink.api.common.typeinfo.BasicTypeInfo.{DOUBLE_TYPE_INFO, INT_TYPE_INFO, STRING_TYPE_INFO}
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.internal.TableEnvironmentImpl
import org.apache.flink.table.api.scala.{StreamTableEnvironment, _}
import org.apache.flink.table.api.{EnvironmentSettings, TableEnvironment}
import org.apache.flink.table.planner.plan.`trait`.{MiniBatchInterval, MiniBatchMode}
import org.apache.flink.table.planner.runtime.utils.BatchTableEnvUtil
import org.apache.flink.table.planner.runtime.utils.BatchTestBase.row
import org.apache.flink.table.planner.utils.TableTestUtil
import org.apache.calcite.sql.SqlExplainLevel
import org.junit.Assert.assertEquals
import org.junit.{Before, Test}
import scala.collection.Seq
class FlinkRelOptUtilTest {
var tableEnv: TableEnvironment = _
@Before
def before(): Unit = {
val settings = EnvironmentSettings.newInstance().useBlinkPlanner().build()
val tEnv = TableEnvironmentImpl.create(settings)
BatchTableEnvUtil.registerCollection(
tEnv,
"MyTable",
Seq(row("Mike", 1, 12.3, "Smith")),
new RowTypeInfo(STRING_TYPE_INFO, INT_TYPE_INFO, DOUBLE_TYPE_INFO, STRING_TYPE_INFO),
"first, id, score, last")
tableEnv = tEnv
}
@Test
def testToString(): Unit = {
val env = StreamExecutionEnvironment.createLocalEnvironment()
val tableEnv = StreamTableEnvironment.create(env, TableTestUtil.STREAM_SETTING)
val table = env.fromElements[(Int, Long, String)]().toTable(tableEnv, 'a, 'b, 'c)
tableEnv.registerTable("MyTable", table)
val sqlQuery =
"""
|WITH t1 AS (SELECT a, c FROM MyTable WHERE b > 50),
| t2 AS (SELECT a * 2 AS a, c FROM MyTable WHERE b < 50)
|
|SELECT * FROM t1 JOIN t2 ON t1.a = t2.a
""".stripMargin
val result = tableEnv.sqlQuery(sqlQuery)
val rel = TableTestUtil.toRelNode(result)
val expected1 =
"""
|LogicalProject(a=[$0], c=[$1], a0=[$2], c0=[$3])
|+- LogicalJoin(condition=[=($0, $2)], joinType=[inner])
| :- LogicalProject(a=[$0], c=[$2])
| : +- LogicalFilter(condition=[>($1, 50)])
| : +- LogicalTableScan(table=[[default_catalog, default_database, MyTable]])
| +- LogicalProject(a=[*($0, 2)], c=[$2])
| +- LogicalFilter(condition=[<($1, 50)])
| +- LogicalTableScan(table=[[default_catalog, default_database, MyTable]])
""".stripMargin
assertEquals(expected1.trim, FlinkRelOptUtil.toString(rel).trim)
val expected2 =
"""
|LogicalProject
|+- LogicalJoin
| :- LogicalProject
| : +- LogicalFilter
| : +- LogicalTableScan
| +- LogicalProject
| +- LogicalFilter
| +- LogicalTableScan
""".stripMargin
assertEquals(expected2.trim, FlinkRelOptUtil.toString(rel, SqlExplainLevel.NO_ATTRIBUTES).trim)
}
@Test
def testGetDigestWithDynamicFunction(): Unit = {
val table = tableEnv.sqlQuery(
"""
|(SELECT id AS random FROM MyTable ORDER BY rand() LIMIT 1)
|INTERSECT
|(SELECT id AS random FROM MyTable ORDER BY rand() LIMIT 1)
|INTERSECT
|(SELECT id AS random FROM MyTable ORDER BY rand() LIMIT 1)
""".stripMargin)
val rel = TableTestUtil.toRelNode(table)
val expected = TableTestUtil.readFromResource("/digest/testGetDigestWithDynamicFunction.out")
assertEquals(expected, FlinkRelOptUtil.getDigest(rel))
}
@Test
def testGetDigestWithDynamicFunctionView(): Unit = {
val view = tableEnv.sqlQuery("SELECT id AS random FROM MyTable ORDER BY rand() LIMIT 1")
tableEnv.registerTable("MyView", view)
val table = tableEnv.sqlQuery(
"""
|(SELECT * FROM MyView)
|INTERSECT
|(SELECT * FROM MyView)
|INTERSECT
|(SELECT * FROM MyView)
""".stripMargin)
val rel = TableTestUtil.toRelNode(table).accept(new ExpandTableScanShuttle())
val expected = TableTestUtil.readFromResource(
"/digest/testGetDigestWithDynamicFunctionView.out")
assertEquals(expected, FlinkRelOptUtil.getDigest(rel))
}
@Test
def testMergeRowTimeAndNone(): Unit = {
val none = MiniBatchInterval.NONE
val rowtime = MiniBatchInterval(1000L, MiniBatchMode.RowTime)
val mergedResult = FlinkRelOptUtil.mergeMiniBatchInterval(none, rowtime)
assertEquals(rowtime, mergedResult)
}
@Test
def testMergeProcTimeAndNone(): Unit = {
val none = MiniBatchInterval.NONE
val proctime = MiniBatchInterval(1000L, MiniBatchMode.ProcTime)
val mergedResult = FlinkRelOptUtil.mergeMiniBatchInterval(none, proctime)
assertEquals(proctime, mergedResult)
}
@Test
def testMergeRowTimeTAndProcTime1(): Unit = {
val rowtime = MiniBatchInterval(4000L, MiniBatchMode.RowTime)
val proctime = MiniBatchInterval(1000L, MiniBatchMode.ProcTime)
val mergedResult = FlinkRelOptUtil.mergeMiniBatchInterval(rowtime, proctime)
assertEquals(rowtime, mergedResult)
}
@Test
def testMergeRowTimeTAndProcTime2(): Unit = {
val rowtime = MiniBatchInterval(0L, MiniBatchMode.RowTime)
val proctime = MiniBatchInterval(1000L, MiniBatchMode.ProcTime)
val mergedResult = FlinkRelOptUtil.mergeMiniBatchInterval(rowtime, proctime)
assertEquals(MiniBatchInterval(1000L, MiniBatchMode.RowTime), mergedResult)
}
@Test
def testMergeRowTimeAndRowtime(): Unit = {
val rowtime1 = MiniBatchInterval(3000L, MiniBatchMode.RowTime)
val rowtime2 = MiniBatchInterval(5000L, MiniBatchMode.RowTime)
val mergedResult = FlinkRelOptUtil.mergeMiniBatchInterval(rowtime1, rowtime2)
assertEquals(MiniBatchInterval(1000L, MiniBatchMode.RowTime), mergedResult)
}
@Test
def testMergeWithNoneMiniBatch(): Unit = {
assertEquals(MiniBatchInterval.NO_MINIBATCH,
FlinkRelOptUtil.mergeMiniBatchInterval(
MiniBatchInterval.NO_MINIBATCH, MiniBatchInterval.NONE))
assertEquals(MiniBatchInterval.NO_MINIBATCH,
FlinkRelOptUtil.mergeMiniBatchInterval(
MiniBatchInterval.NONE, MiniBatchInterval.NO_MINIBATCH))
assertEquals(MiniBatchInterval.NO_MINIBATCH,
FlinkRelOptUtil.mergeMiniBatchInterval(
MiniBatchInterval.NO_MINIBATCH, MiniBatchInterval.NO_MINIBATCH))
val rowtime = MiniBatchInterval(3000L, MiniBatchMode.RowTime)
assertEquals(MiniBatchInterval.NO_MINIBATCH,
FlinkRelOptUtil.mergeMiniBatchInterval(MiniBatchInterval.NO_MINIBATCH, rowtime))
assertEquals(MiniBatchInterval.NO_MINIBATCH,
FlinkRelOptUtil.mergeMiniBatchInterval(rowtime, MiniBatchInterval.NO_MINIBATCH))
val proctime = MiniBatchInterval(1000L, MiniBatchMode.ProcTime)
assertEquals(MiniBatchInterval.NO_MINIBATCH,
FlinkRelOptUtil.mergeMiniBatchInterval(MiniBatchInterval.NO_MINIBATCH, proctime))
assertEquals(MiniBatchInterval.NO_MINIBATCH,
FlinkRelOptUtil.mergeMiniBatchInterval(proctime, MiniBatchInterval.NO_MINIBATCH))
}
}
|
gyfora/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/utils/FlinkRelOptUtilTest.scala
|
Scala
|
apache-2.0
| 7,998
|
/**
* Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.api.crypto
import javax.inject.Inject
/**
* Specifies encoding/decoding of authenticator data.
*/
trait AuthenticatorEncoder {
/**
* Encodes a string.
*
* @param data The data to encode.
* @return The encoded data.
*/
def encode(data: String): String
/**
* Decodes a string.
*
* @param data The data to decode.
* @return The decoded data.
*/
def decode(data: String): String
}
/**
* Authenticator encoder implementation based on Base64.
*/
class Base64AuthenticatorEncoder extends AuthenticatorEncoder {
override def encode(data: String): String = Base64.encode(data)
override def decode(data: String): String = Base64.decode(data)
}
/**
* Authenticator encoder implementation based on the [[Crypter]].
*
* @param crypter The crypter instance to use for the encoder.
*/
class CrypterAuthenticatorEncoder @Inject() (crypter: Crypter) extends AuthenticatorEncoder {
override def encode(data: String): String = crypter.encrypt(data)
override def decode(data: String): String = crypter.decrypt(data)
}
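// Illustrative sketch (added for clarity, not part of the library): every AuthenticatorEncoder
// is expected to round-trip, i.e. decode(encode(x)) == x.
object AuthenticatorEncoderExample {
  def roundTrips(encoder: AuthenticatorEncoder, data: String): Boolean =
    encoder.decode(encoder.encode(data)) == data

  // e.g. roundTrips(new Base64AuthenticatorEncoder, "authenticator payload")
}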
|
mohiva/play-silhouette
|
silhouette/app/com/mohiva/play/silhouette/api/crypto/AuthenticatorEncoder.scala
|
Scala
|
apache-2.0
| 1,725
|
package akka.dispatch.verification
import akka.actor.{Cell, ActorRef, ActorSystem, Props}
import akka.dispatch.Envelope
import scala.collection.mutable.ListBuffer
import scala.collection.generic.Growable
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.HashMap
import scala.collection.mutable.HashSet
import scala.collection.mutable.SynchronizedQueue
import scala.collection.mutable.Queue
import scala.transient
// Internal api
case class UniqueMsgSend(m: MsgSend, id: Int) extends Event
case class UniqueMsgEvent(m: MsgEvent, id: Int) extends Event
case class UniqueTimerDelivery(t: TimerDelivery, id: Int) extends Event
case class EventTrace(val events: SynchronizedQueue[Event], var original_externals: Seq[ExternalEvent]) extends Growable[Event] with Iterable[Event] {
def this() = this(new SynchronizedQueue[Event], null)
def this(original_externals: Seq[ExternalEvent]) = this(new SynchronizedQueue[Event], original_externals)
@transient
var lastNonMetaEvent: Event = null
override def hashCode = this.events.hashCode
override def equals(other: Any) : Boolean = other match {
case that: EventTrace => this.events == that.events
case _ => false
}
// Optional: if you have the original external events, that helps us with
// filtering.
def setOriginalExternalEvents(_original_externals: Seq[ExternalEvent]) = {
//println("Setting originalExternalEvents: " + _original_externals.size)
original_externals = _original_externals
}
def copy() : EventTrace = {
assume(original_externals != null)
assume(!events.isEmpty)
assume(!original_externals.isEmpty)
// I can't figure out the type signature for SynchronizedQueue's ++ operator, so we
// do it in two separate steps here
val copy = new SynchronizedQueue[Event]
copy ++= events
return new EventTrace(copy,
new Queue[ExternalEvent] ++ original_externals)
}
// The difference between EventTrace.events and EventTrace.getEvents is that
// we hide UniqueMsgSend/Events here
// TODO(cs): this is a dangerous API; easy to mix this up with .events...
def getEvents() : Seq[Event] = {
return getEvents(events)
}
// Return any MsgSend events that were never delivered, i.e. they were
// sitting in the buffer at the end of the execution.
def getPendingMsgSends(): Set[MsgSend] = {
val deliveredIds = events.flatMap {
case UniqueMsgEvent(m, id) => Some(id)
case _ => None
}.toSet
return events.flatMap {
case UniqueMsgSend(m, id) if !(deliveredIds contains id) =>
Some(m)
case _ => None
}.toSet
}
def length = events.length
private[this] def getEvents(_events: Seq[Event]): Seq[Event] = {
return _events.map(e =>
e match {
case UniqueMsgSend(m, id) => m
case UniqueMsgEvent(m, id) => m
case UniqueTimerDelivery(t: TimerDelivery, id) => t
case i: Event => i
}
)
}
// This method should not be used to append MsgSends or MsgEvents
override def +=(event: Event) : EventTrace.this.type = {
events += event
if (!(MetaEvents.isMetaEvent(event))) {
lastNonMetaEvent = event
}
return this
}
def appendMsgSend(snd: String, rcv: String, msg: Any, id: Int) = {
val m = UniqueMsgSend(MsgSend(snd, rcv, msg), id)
this.+=(m)
}
def appendMsgEvent(pair: (Cell, Envelope), id: Int) = {
val cell = pair._1
val envelope = pair._2
val snd = envelope.sender.path.name
val rcv = cell.self.path.name
val msg = envelope.message
val event = UniqueMsgEvent(MsgEvent(snd, rcv, msg), id)
lastNonMetaEvent = event
this.+=(event)
}
override def clear() : Unit = {
events.clear
}
override def iterator() : Iterator[Event] = {
return getEvents().iterator
}
// Take the result of ProvenanceTracker.pruneConcurrentEvents, and use that
// to filter out any MsgEvents in our events that were pruned.
// TODO(cs): have ProvenanceTracker act directly on us rather than on a
// separate Queue[Unique].
def intersection(uniqs: Queue[Unique], fingerprintFactory: FingerprintFactory) : EventTrace = {
val msgEvents = new Queue[MsgEvent] ++ uniqs flatMap {
case Unique(m: MsgEvent, id) =>
// Filter out the root event
if (id == 0) {
None
} else {
Some(m)
}
case u => throw new IllegalArgumentException("Non MsgEvent:" + u)
}
// first pass: remove any UniqueMsgEvents that don't show up in msgEvents
// track which UniqueMsgEvent ids were pruned
val pruned = new HashSet[Int]
var filtered = events flatMap {
case u @ UniqueMsgEvent(m, id) =>
msgEvents.headOption match {
case Some(msgEvent) =>
val fingerprinted = MsgEvent(m.sender, m.receiver,
fingerprintFactory.fingerprint(m.msg))
if (fingerprinted == msgEvent) {
msgEvents.dequeue
Some(u)
} else {
// u was filtered
pruned += id
None
}
case None =>
// u was filtered
pruned += id
None
}
case t: TimerDelivery =>
throw new UnsupportedOperationException("TimerDelivery not yet supported")
case t: UniqueTimerDelivery =>
throw new UnsupportedOperationException("UniqueTimerDelivery not yet supported")
case m: MsgEvent =>
throw new IllegalStateException("Should be UniqueMsgEvent")
case e => Some(e)
}
// Should always be a strict subsequence
assert(msgEvents.isEmpty)
// second pass: remove any UniqueMsgSends that correspond to
// UniqueMsgEvents that were pruned in the first pass
// TODO(cs): not sure this is actually necessary.
filtered = filtered flatMap {
case u @ UniqueMsgSend(m, id) =>
if (pruned contains id) {
None
} else {
Some(u)
}
case e => Some(e)
}
val filteredQueue = new SynchronizedQueue[Event]
filteredQueue ++= filtered
return new EventTrace(filteredQueue, original_externals)
}
// Ensure that all failure detector messages are pruned from the original trace,
// since we are now in a divergent execution and the failure detector may
// need to respond differently.
def filterFailureDetectorMessages() : EventTrace = {
def fromFD(snd: String, msg: Any) : Boolean = {
if (snd != "deadLetters") {
return false
}
return MessageTypes.fromFailureDetector(msg)
}
val filtered = events.filterNot(e => e match {
case UniqueMsgEvent(m, _) =>
fromFD(m.sender, m.msg) || m.receiver == FailureDetector.fdName
case UniqueMsgSend(m, _) =>
fromFD(m.sender, m.msg) || m.receiver == FailureDetector.fdName
case _ => false
}
)
val filteredQueue = new SynchronizedQueue[Event]
filteredQueue ++= filtered
return new EventTrace(filteredQueue, original_externals)
}
def filterCheckpointMessages(): EventTrace = {
val filtered = events flatMap {
case UniqueMsgEvent(MsgEvent(_, _, CheckpointRequest), _) => None
case UniqueMsgEvent(MsgEvent(_, _, CheckpointReply(_)), _) => None
case UniqueMsgSend(MsgSend(_, _, CheckpointRequest), _) => None
case UniqueMsgSend(MsgSend(_, _, CheckpointReply(_)), _) => None
case e => Some(e)
}
val filteredQueue = new SynchronizedQueue[Event]
filteredQueue ++= filtered
return new EventTrace(filteredQueue, original_externals)
}
// Pre: externals corresponds exactly to our external MsgSend
// events, i.e. subsequenceIntersection(externals) was used to create this
// EventTrace.
// Pre: no checkpoint messages in events
def recomputeExternalMsgSends(externals: Seq[ExternalEvent]): Seq[Event] = {
if (externals == null) {
throw new IllegalStateException("original_externals must not be null")
}
val sends = externals flatMap {
case s: Send => Some(s)
case _ => None
}
if (sends.isEmpty) {
return getEvents
}
val sendsQueue = Queue(sends: _*)
// ---- Check an assertion: ----
val sendsSet = new HashSet[UniqueMsgSend]
events.foreach {
case u @ UniqueMsgSend(MsgSend(snd, receiver, msg), id) =>
if (sendsSet contains u) {
throw new AssertionError("Duplicate UniqueMsgSend " + u + " " + events.mkString("\\n"))
}
sendsSet += u
case _ =>
}
// ----------------------
return getEvents(events map {
case u @ UniqueMsgSend(MsgSend(snd, receiver, msg), id) =>
if (EventTypes.isExternal(u)) {
if (sendsQueue.isEmpty) {
// XXX
// Problem seems to be that some of the Send events that were actually
// sent don't appear in externals. Truncated somehow?
println("events:---")
events.foreach { case e => println(e) }
println("---")
println("externals:---")
externals.foreach { case e => println(e) }
println("---")
throw new IllegalStateException("sendsQueue is empty, yet " + u)
}
val send = sendsQueue.dequeue
val new_msg = send.messageCtor()
UniqueMsgSend(MsgSend(snd, receiver, new_msg), id)
} else {
u
}
case m: MsgSend =>
throw new IllegalArgumentException("Must be UniqueMsgSend")
case e => e
})
}
// Filter all external events in original_trace that aren't in subseq.
// As an optimization, also filter out some internal events that we know a priori
// aren't going to occur in the subsequence execution.
def subsequenceIntersection(subseq: Seq[ExternalEvent],
filterKnownAbsents:Boolean=true) : EventTrace = {
// Walk through all events in original_trace. As we walk through, check if
// the current event corresponds to an external event at the head of subseq. If it does, we
// include that in result, and pop off the head of subseq. Otherwise that external event has been
// pruned and should not be included. All internal events are included in
// result.
// N.B. we deal with external messages separately after this for loop,
// since they're a bit trickier.
var remaining = ListBuffer[ExternalEvent]() ++ subseq.filter {
case Send(_,_) => false
case _ => true
}
var result = new Queue[Event]()
// N.B. it'd be nicer to use a filter() here, but it isn't guaranteed to
// iterate left to right.
for (event <- events) {
if (remaining.isEmpty) {
if (EventTypes.isMessageType(event)) {
result += event
} else if (!EventTypes.isExternal(event) &&
!event.isInstanceOf[ChangeContext]) {
result += event
}
} else {
event match {
case KillEvent(actor1) =>
remaining(0) match {
case Kill(actor2) => {
if (actor1 == actor2) {
result += event
remaining = remaining.tail
}
}
case _ => None
}
case PartitionEvent((a1,b1)) =>
remaining(0) match {
case Partition(a2,b2) => {
if (a1 == a2 && b1 == b2) {
result += event
remaining = remaining.tail
}
}
case _ => None
}
case UnPartitionEvent((a1,b1)) =>
remaining(0) match {
case UnPartition(a2,b2) => {
if (a1 == a2 && b1 == b2) {
result += event
remaining = remaining.tail
}
}
case _ => None
}
case SpawnEvent(_,_,name1,_) =>
remaining(0) match {
case Start(_, name2) => {
if (name1 == name2) {
result += event
remaining = remaining.tail
}
}
case _ => None
}
case c @ CodeBlock(_) =>
if (remaining(0) == c) {
result += event
remaining = remaining.tail
}
case h @ HardKill(_) =>
if (remaining(0) == h) {
result += event
remaining = remaining.tail
}
// We don't currently use ContextSwitches, so prune them to remove
// clutter.
case ChangeContext(_) => None
// Always include all other internal events
case i: Event => result += i
} // close match
} // close else
} // close for
val filtered = filterSends(result, subseq, filterKnownAbsents=filterKnownAbsents)
val filteredQueue = new SynchronizedQueue[Event]
filteredQueue ++= filtered
return new EventTrace(filteredQueue, original_externals)
}
private[this] def filterSends(events: Queue[Event],
subseq: Seq[ExternalEvent],
filterKnownAbsents:Boolean=true) : Queue[Event] = {
// We assume that Send messages are sent in FIFO order, i.e. so that
// the zero-th MsgSend event from deadLetters corresponds to the zero-th Send event.
// Our first task is to infer which Send events have been pruned.
// We need original_externals to disambiguate which events have actually
// been pruned, since it's possible that two MsgEvents or MsgSends at
// different points of the trace are identical in terms of .equals.
if (original_externals == null) {
throw new IllegalArgumentException("Must invoke setOriginalExternalEvents")
}
// We assume that no one has copied the external events, i.e. the ids are
// all the same.
val original_sends = original_externals flatMap {
case s: Send => Some(s)
case _ => None
}
val subseq_sends_lst = subseq flatMap {
case s: Send => Some(s)
case _ => None
}
val subseq_sends = subseq_sends_lst.toSet
// Gather all indexes of original_sends that are not in subseq_sends
val missing_indices = new HashSet[Int] ++
original_sends.zipWithIndex.foldRight(List[Int]())((tuple, lst) =>
if (subseq_sends contains tuple._1) {
lst
} else {
tuple._2 :: lst
}
)
// Now filter out all external MsgSend/MsgEvents whose index is part of missing_indices
var msg_send_idx = -1
val pruned_msg_ids = new HashSet[Int]
// N.B. it'd be nicer to use a filter() here
var remaining = new Queue[Event]()
for (e <- events) {
e match {
case m @ UniqueMsgSend(msgEvent, id) =>
if (EventTypes.isExternal(m)) {
msg_send_idx += 1
if (!(missing_indices contains msg_send_idx)) {
remaining += e
} else {
// We prune this event, and we need to later prune its corresponding
// MsgEvent, if such an event exists.
pruned_msg_ids += id
}
} else {
remaining += e
}
case UniqueMsgEvent(m, id) =>
if (!pruned_msg_ids.contains(id)) {
remaining += e
}
case _ => remaining += e
} // end match
} // end for
if (filterKnownAbsents) {
return filterKnownAbsentInternals(remaining, subseq)
}
return remaining
}
// Remove internal events that we know a priori aren't going to occur for
// this subsequence. In particular, if we pruned a "Start" event for a given
// actor, we know that all messages destined to or coming from that actor
// will not occur in the subsequence execution.
private[this] def filterKnownAbsentInternals(events: Queue[Event],
subseq: Seq[ExternalEvent]) : Queue[Event] = {
var result = new Queue[Event]()
// { actor name -> is the actor currently alive? }
var actorToAlive = new HashMap[String, Boolean] {
// If we haven't seen a Start event, that means it's non-existent.
override def default(key:String) = false
}
actorToAlive("deadLetters") = true
actorToAlive("Timer") = true
// { (sender, receiver) -> can the actors send messages between each other? }
var actorsToPartitioned = new HashMap[(String, String), Boolean]() {
// Somewhat confusing: if we haven't seen a Partition event, that means
// it's reachable (so long as it's alive).
// N.B. because we never partition the external world ("deadLetters")
// from any actors, we always return false if either snd or rcv is
// "deadLetters" or "Timer"
override def default(key:(String, String)) = false
}
// IDs of message sends that we have pruned. Use case: if a message send
// has been pruned, obviously its later message delivery won't ever occur.
var prunedMessageSends = new HashSet[Int]
def messageSendable(snd: String, rcv: String) : Boolean = {
if (!actorToAlive(snd)) {
return false
}
return !actorsToPartitioned((snd, rcv))
}
// TODO(cs): could probably be even more aggressive in pruning than we
// currently are. For example, if we decide that a MsgEvent isn't going to
// be delivered, then we could also prune its prior MsgSend, since it's a
// no-op.
def messageDeliverable(snd: String, rcv: String, id: Int) : Boolean = {
if (!actorToAlive(rcv)) {
return false
}
return !actorsToPartitioned((snd, rcv)) && !prunedMessageSends.contains(id)
}
for (event <- events) {
event match {
case UniqueMsgSend(m, id) =>
if (messageSendable(m.sender, m.receiver)) {
result += event
} else {
prunedMessageSends += id
}
case UniqueTimerDelivery(t, _) =>
if (messageDeliverable(t.sender, t.receiver, -1)) {
result += event
}
case UniqueMsgEvent(m, id) =>
if (messageDeliverable(m.sender, m.receiver, id)) {
result += event
}
case SpawnEvent(_, _, name, _) =>
actorToAlive(name) = true
result += event
case KillEvent(name) =>
actorToAlive(name) = false
result += event
case PartitionEvent((a,b)) =>
actorsToPartitioned((a,b)) = false
result += event
case UnPartitionEvent((a,b)) =>
actorsToPartitioned((a,b)) = true
result += event
case _ =>
result += event
}
}
return result
}
}
// Encapsulates an EventTrace (which is immutable, hence why we need
// this class), along with:
// (i) whether the EventTrace resulted in a violation
// (ii) a mapping from Event to a list of console output messages that were
// emitted as a result of executing that Event. For use with Synoptic.
class MetaEventTrace(val trace: EventTrace) {
var causedViolation: Boolean = false
// Invoked by schedulers to mark whether the violation was triggered.
def setCausedViolation { causedViolation = true }
// Invoked by schedulers to append log messages.
val eventToLogOutput = new HashMap[Event,Queue[String]]
def appendLogOutput(msg: String) {
if (!(eventToLogOutput contains trace.lastNonMetaEvent)) {
eventToLogOutput(trace.lastNonMetaEvent) = new Queue[String]
}
eventToLogOutput(trace.lastNonMetaEvent) += msg
}
// Return an ordered sequence of log output messages emitted by the
// application.
def getOrderedLogOutput: Queue[String] = {
val result = new Queue[String]
trace.events.foreach {
case e =>
if (eventToLogOutput contains e) {
result ++= eventToLogOutput(e)
}
}
return result
}
}
|
NetSys/demi
|
src/main/scala/verification/EventTrace.scala
|
Scala
|
bsd-2-clause
| 19,960
|
/*                     __                                               *\
**     ________ ___   / /  ___     Scala API                            **
**    / __/ __// _ | / /  / _ |    (c) 2002-2013, LAMP/EPFL             **
**  __\ \/ /__/ __ |/ /__/ __ |    http://scala-lang.org/               **
** /____/\___/_/ |_/____/_/ | |                                         **
**                          |/                                          **
\*                                                                      */
package scala
package runtime
import scala.collection.{ Seq, IndexedSeq, TraversableView, AbstractIterator, GenIterable }
import scala.collection.mutable.WrappedArray
import scala.collection.immutable.{ StringLike, NumericRange, List, Stream, Nil, :: }
import scala.collection.generic.{ Sorted, IsTraversableLike }
import scala.reflect.{ ClassTag, classTag }
import scala.util.control.ControlThrowable
import java.lang.{ Class => jClass }
import java.lang.Double.doubleToLongBits
import java.lang.reflect.{ Modifier, Method => JMethod }
/** The object ScalaRunTime provides support methods required by
* the scala runtime. All these methods should be considered
* outside the API and subject to change or removal without notice.
*/
object ScalaRunTime {
def isArray(x: Any, atLevel: Int = 1): Boolean =
x != null && isArrayClass(x.getClass, atLevel)
private def isArrayClass(clazz: jClass[_], atLevel: Int): Boolean =
clazz != null && clazz.isArray && (atLevel == 1 || isArrayClass(clazz.getComponentType, atLevel - 1))
def isValueClass(clazz: jClass[_]) = clazz.isPrimitive()
// includes specialized subclasses and is future-proofed against hypothetical TupleN (for N > 22)
def isTuple(x: Any) = x != null && x.getClass.getName.startsWith("scala.Tuple")
def isAnyVal(x: Any) = x match {
case _: Byte | _: Short | _: Char | _: Int | _: Long | _: Float | _: Double | _: Boolean | _: Unit => true
case _ => false
}
// A helper method to make my life in the pattern matcher a lot easier.
def drop[Repr](coll: Repr, num: Int)(implicit traversable: IsTraversableLike[Repr]): Repr =
traversable conversion coll drop num
/** Return the class object representing an array with element class `clazz`.
*/
def arrayClass(clazz: jClass[_]): jClass[_] = {
// newInstance throws an exception if the erasure is Void.TYPE. see SI-5680
if (clazz == java.lang.Void.TYPE) classOf[Array[Unit]]
else java.lang.reflect.Array.newInstance(clazz, 0).getClass
}
/** Return the class object representing elements in arrays described by a given schematic.
*/
def arrayElementClass(schematic: Any): jClass[_] = schematic match {
case cls: jClass[_] => cls.getComponentType
case tag: ClassTag[_] => tag.runtimeClass
case _ =>
throw new UnsupportedOperationException(s"unsupported schematic $schematic (${schematic.getClass})")
}
/** Return the class object representing an unboxed value type,
* e.g., classOf[int], not classOf[java.lang.Integer]. The compiler
* rewrites expressions like 5.getClass to come here.
*/
def anyValClass[T <: AnyVal : ClassTag](value: T): jClass[T] =
classTag[T].runtimeClass.asInstanceOf[jClass[T]]
/** Retrieve generic array element */
def array_apply(xs: AnyRef, idx: Int): Any = {
xs match {
case x: Array[AnyRef] => x(idx).asInstanceOf[Any]
case x: Array[Int] => x(idx).asInstanceOf[Any]
case x: Array[Double] => x(idx).asInstanceOf[Any]
case x: Array[Long] => x(idx).asInstanceOf[Any]
case x: Array[Float] => x(idx).asInstanceOf[Any]
case x: Array[Char] => x(idx).asInstanceOf[Any]
case x: Array[Byte] => x(idx).asInstanceOf[Any]
case x: Array[Short] => x(idx).asInstanceOf[Any]
case x: Array[Boolean] => x(idx).asInstanceOf[Any]
case null => throw new NullPointerException
}
}
/** update generic array element */
def array_update(xs: AnyRef, idx: Int, value: Any): Unit = {
xs match {
case x: Array[AnyRef] => x(idx) = value.asInstanceOf[AnyRef]
case x: Array[Int] => x(idx) = value.asInstanceOf[Int]
case x: Array[Double] => x(idx) = value.asInstanceOf[Double]
case x: Array[Long] => x(idx) = value.asInstanceOf[Long]
case x: Array[Float] => x(idx) = value.asInstanceOf[Float]
case x: Array[Char] => x(idx) = value.asInstanceOf[Char]
case x: Array[Byte] => x(idx) = value.asInstanceOf[Byte]
case x: Array[Short] => x(idx) = value.asInstanceOf[Short]
case x: Array[Boolean] => x(idx) = value.asInstanceOf[Boolean]
case null => throw new NullPointerException
}
}
/** Get generic array length */
def array_length(xs: AnyRef): Int = xs match {
case x: Array[AnyRef] => x.length
case x: Array[Int] => x.length
case x: Array[Double] => x.length
case x: Array[Long] => x.length
case x: Array[Float] => x.length
case x: Array[Char] => x.length
case x: Array[Byte] => x.length
case x: Array[Short] => x.length
case x: Array[Boolean] => x.length
case null => throw new NullPointerException
}
def array_clone(xs: AnyRef): AnyRef = xs match {
case x: Array[AnyRef] => ArrayRuntime.cloneArray(x)
case x: Array[Int] => ArrayRuntime.cloneArray(x)
case x: Array[Double] => ArrayRuntime.cloneArray(x)
case x: Array[Long] => ArrayRuntime.cloneArray(x)
case x: Array[Float] => ArrayRuntime.cloneArray(x)
case x: Array[Char] => ArrayRuntime.cloneArray(x)
case x: Array[Byte] => ArrayRuntime.cloneArray(x)
case x: Array[Short] => ArrayRuntime.cloneArray(x)
case x: Array[Boolean] => ArrayRuntime.cloneArray(x)
case null => throw new NullPointerException
}
/** Convert an array to an object array.
* Needed to deal with vararg arguments of primitive types that are passed
* to a generic Java vararg parameter T ...
*/
def toObjectArray(src: AnyRef): Array[Object] = src match {
case x: Array[AnyRef] => x
case _ =>
val length = array_length(src)
val dest = new Array[Object](length)
for (i <- 0 until length)
array_update(dest, i, array_apply(src, i))
dest
}
def toArray[T](xs: scala.collection.Seq[T]) = {
val arr = new Array[AnyRef](xs.length)
var i = 0
for (x <- xs) {
arr(i) = x.asInstanceOf[AnyRef]
i += 1
}
arr
}
// Java bug: http://bugs.java.com/bugdatabase/view_bug.do?bug_id=4071957
// More background at ticket #2318.
def ensureAccessible(m: JMethod): JMethod = scala.reflect.ensureAccessible(m)
def checkInitialized[T <: AnyRef](x: T): T =
if (x == null) throw new UninitializedError else x
def _toString(x: Product): String =
x.productIterator.mkString(x.productPrefix + "(", ",", ")")
def _hashCode(x: Product): Int = scala.util.hashing.MurmurHash3.productHash(x)
/** A helper for case classes. */
def typedProductIterator[T](x: Product): Iterator[T] = {
new AbstractIterator[T] {
private var c: Int = 0
private val cmax = x.productArity
def hasNext = c < cmax
def next() = {
val result = x.productElement(c)
c += 1
result.asInstanceOf[T]
}
}
}
/** Fast path equality method for inlining; used when -optimise is set.
*/
@inline def inlinedEquals(x: Object, y: Object): Boolean =
if (x eq y) true
else if (x eq null) false
else if (x.isInstanceOf[java.lang.Number]) BoxesRunTime.equalsNumObject(x.asInstanceOf[java.lang.Number], y)
else if (x.isInstanceOf[java.lang.Character]) BoxesRunTime.equalsCharObject(x.asInstanceOf[java.lang.Character], y)
else x.equals(y)
def _equals(x: Product, y: Any): Boolean = y match {
case y: Product if x.productArity == y.productArity => x.productIterator sameElements y.productIterator
case _ => false
}
// hashcode -----------------------------------------------------------
//
// Note that these are the implementations called by ##, so they
// must not call ## themselves.
def hash(x: Any): Int =
if (x == null) 0
else if (x.isInstanceOf[java.lang.Number]) BoxesRunTime.hashFromNumber(x.asInstanceOf[java.lang.Number])
else x.hashCode
def hash(dv: Double): Int = {
val iv = dv.toInt
if (iv == dv) return iv
val lv = dv.toLong
if (lv == dv) return lv.hashCode
val fv = dv.toFloat
if (fv == dv) fv.hashCode else dv.hashCode
}
def hash(fv: Float): Int = {
val iv = fv.toInt
if (iv == fv) return iv
val lv = fv.toLong
if (lv == fv) hash(lv)
else fv.hashCode
}
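// Descriptive note (added): the Long hash folds the high word into the low word,
// compensating with the low word's sign bit, so a Long within Int range hashes to the
// same value as the corresponding Int, keeping ## consistent across boxed numeric types.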
def hash(lv: Long): Int = {
val low = lv.toInt
val lowSign = low >>> 31
val high = (lv >>> 32).toInt
low ^ (high + lowSign)
}
def hash(x: Number): Int = runtime.BoxesRunTime.hashFromNumber(x)
// The remaining overloads are here for completeness, but the compiler
// inlines these definitions directly so they're not generally used.
def hash(x: Int): Int = x
def hash(x: Short): Int = x.toInt
def hash(x: Byte): Int = x.toInt
def hash(x: Char): Int = x.toInt
def hash(x: Boolean): Int = if (x) true.hashCode else false.hashCode
def hash(x: Unit): Int = 0
/** A helper method for constructing case class equality methods,
* because existential types get in the way of a clean outcome and
* it's performing a series of Any/Any equals comparisons anyway.
* See ticket #2867 for specifics.
*/
def sameElements(xs1: scala.collection.Seq[Any], xs2: scala.collection.Seq[Any]) = xs1 sameElements xs2
/** Given any Scala value, convert it to a String.
*
* The primary motivation for this method is to provide a means for
* correctly obtaining a String representation of a value, while
* avoiding the pitfalls of naively calling toString on said value.
* In particular, it addresses the fact that (a) toString cannot be
* called on null and (b) depending on the apparent type of an
* array, toString may or may not print it in a human-readable form.
*
* @param arg the value to stringify
* @return a string representation of arg.
*/
def stringOf(arg: Any): String = stringOf(arg, scala.Int.MaxValue)
def stringOf(arg: Any, maxElements: Int): String = {
def packageOf(x: AnyRef) = x.getClass.getPackage match {
case null => ""
case p => p.getName
}
def isScalaClass(x: AnyRef) = packageOf(x) startsWith "scala."
def isScalaCompilerClass(x: AnyRef) = packageOf(x) startsWith "scala.tools.nsc."
// We use reflection because the scala.xml package might not be available
def isSubClassOf(potentialSubClass: Class[_], ofClass: String) =
try {
val classLoader = potentialSubClass.getClassLoader
val clazz = Class.forName(ofClass, /*initialize =*/ false, classLoader)
clazz.isAssignableFrom(potentialSubClass)
} catch {
case cnfe: ClassNotFoundException => false
}
def isXmlNode(potentialSubClass: Class[_]) = isSubClassOf(potentialSubClass, "scala.xml.Node")
def isXmlMetaData(potentialSubClass: Class[_]) = isSubClassOf(potentialSubClass, "scala.xml.MetaData")
// When doing our own iteration is dangerous
def useOwnToString(x: Any) = x match {
// Range/NumericRange have a custom toString to avoid walking a gazillion elements
case _: Range | _: NumericRange[_] => true
// Sorted collections do the wrong thing (for us) on iteration - ticket #3493
case _: Sorted[_, _] => true
// StringBuilder(a, b, c) and similar not so attractive
case _: StringLike[_] => true
// Don't want to evaluate any elements in a view
case _: TraversableView[_, _] => true
// Node extends NodeSeq extends Seq[Node] and MetaData extends Iterable[MetaData]
// -> catch those by isXmlNode and isXmlMetaData.
// Don't want to a) traverse infinity or b) be overly helpful with people's custom
// collections which may have useful toString methods - ticket #3710
// or c) print AbstractFiles which are somehow also Iterable[AbstractFile]s.
case x: Traversable[_] => !x.hasDefiniteSize || !isScalaClass(x) || isScalaCompilerClass(x) || isXmlNode(x.getClass) || isXmlMetaData(x.getClass)
// Otherwise, nothing could possibly go wrong
case _ => false
}
// A variation on inner for maps so they print -> instead of bare tuples
def mapInner(arg: Any): String = arg match {
case (k, v) => inner(k) + " -> " + inner(v)
case _ => inner(arg)
}
// Special casing Unit arrays, the value class which uses a reference array type.
def arrayToString(x: AnyRef) = {
if (x.getClass.getComponentType == classOf[BoxedUnit])
0 until (array_length(x) min maxElements) map (_ => "()") mkString ("Array(", ", ", ")")
else
WrappedArray make x take maxElements map inner mkString ("Array(", ", ", ")")
}
// The recursively applied attempt to prettify Array printing.
// Note that iterator is used if possible and foreach is used as a
// last resort, because the parallel collections "foreach" in a
// random order even on sequences.
def inner(arg: Any): String = arg match {
case null => "null"
case "" => "\\"\\""
case x: String => if (x.head.isWhitespace || x.last.isWhitespace) "\\"" + x + "\\"" else x
case x if useOwnToString(x) => x.toString
case x: AnyRef if isArray(x) => arrayToString(x)
case x: scala.collection.Map[_, _] => x.iterator take maxElements map mapInner mkString (x.stringPrefix + "(", ", ", ")")
case x: GenIterable[_] => x.iterator take maxElements map inner mkString (x.stringPrefix + "(", ", ", ")")
case x: Traversable[_] => x take maxElements map inner mkString (x.stringPrefix + "(", ", ", ")")
case x: Product1[_] if isTuple(x) => "(" + inner(x._1) + ",)" // that special trailing comma
case x: Product if isTuple(x) => x.productIterator map inner mkString ("(", ",", ")")
case x => x.toString
}
// The try/catch is defense against iterables which aren't actually designed
// to be iterated, such as some scala.tools.nsc.io.AbstractFile derived classes.
try inner(arg)
catch {
case _: UnsupportedOperationException | _: AssertionError => "" + arg
}
}
/** stringOf formatted for use in a repl result. */
def replStringOf(arg: Any, maxElements: Int): String = {
val s = stringOf(arg, maxElements)
val nl = if (s contains "\n") "\n" else ""
nl + s + "\n"
}
def box[T](clazz: jClass[T]): jClass[_] = clazz match {
case java.lang.Byte.TYPE => classOf[java.lang.Byte]
case java.lang.Short.TYPE => classOf[java.lang.Short]
case java.lang.Character.TYPE => classOf[java.lang.Character]
case java.lang.Integer.TYPE => classOf[java.lang.Integer]
case java.lang.Long.TYPE => classOf[java.lang.Long]
case java.lang.Float.TYPE => classOf[java.lang.Float]
case java.lang.Double.TYPE => classOf[java.lang.Double]
case java.lang.Void.TYPE => classOf[scala.runtime.BoxedUnit]
case java.lang.Boolean.TYPE => classOf[java.lang.Boolean]
case _ => clazz
}
}
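// Illustrative note (added for clarity, not part of the original file): stringOf exists
// because Array#toString comes from java.lang.Object and prints something like
// "[I@1b6d3586", whereas ScalaRunTime.stringOf(Array(1, 2, 3)) renders "Array(1, 2, 3)";
// stringOf also tolerates null, which a bare toString call would not.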
|
scala-js/scala-js
|
scalalib/overrides-2.11/scala/runtime/ScalaRunTime.scala
|
Scala
|
apache-2.0
| 15,545
|
package forimpatient.chapter10
import java.io.{BufferedInputStream, FileInputStream, InputStream}
/**
* Created by Iryna Kharaborkina on 8/8/16.
*
* Solution to the Chapter 10 Exercise 08 'Scala for the Impatient' by Horstmann C.S.
*
* In the java.io library, you add buffering to an input stream with a BufferedInputStream decorator.
* Reimplement buffering as a trait. For simplicity, override the read method.
*/
object Exercise08 extends App {
println("Chapter 10 Exercise 08")
val fis = new FileInputStream("LICENSE") with Buffering
val ar = new Array[Byte](64)
fis.read(ar)
println(ar.map(_.toChar).mkString(""))
fis.read(ar)
println(ar.map(_.toChar).mkString(""))
trait Buffering {
this: InputStream =>
val bis = new BufferedInputStream(this)
override def read(a: Array[Byte]) = bis.read(a)
}
}
|
Kiryna/Scala-for-the-Impatient
|
src/forimpatient/chapter10/Exercise08.scala
|
Scala
|
apache-2.0
| 852
|
/*
* OpenURP, Agile University Resource Planning Solution
*
* Copyright (c) 2014-2015, OpenURP Software.
*
* OpenURP is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* OpenURP is distributed in the hope that it will be useful.
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with OpenURP. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openurp.edu.grade.course.domain
import org.openurp.edu.grade.course.model.CourseGrade
/**
* Grade filter
* @author chaostone
*
*/
trait GradeFilter {
def filter(grades: Seq[CourseGrade]): Seq[CourseGrade]
}
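// Illustrative sketch (added for clarity, not part of OpenURP): filters compose by folding,
// since each GradeFilter maps Seq[CourseGrade] => Seq[CourseGrade].
class ChainedGradeFilter(filters: Seq[GradeFilter]) extends GradeFilter {
  override def filter(grades: Seq[CourseGrade]): Seq[CourseGrade] =
    filters.foldLeft(grades)((acc, f) => f.filter(acc))
}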
|
openurp/edu-core
|
grade/core/src/main/scala/org/openurp/edu/grade/course/domain/GradeFilter.scala
|
Scala
|
gpl-3.0
| 997
|
/*
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.summingbird.planner
import com.twitter.summingbird._
class OnlinePlan[P <: Platform[P], V](tail: Producer[P, V]) {
private type Prod[T] = Producer[P, T]
private type VisitedStore = Set[Prod[_]]
private type CNode = Node[P]
private type CFlatMapNode = FlatMapNode[P]
private val depData = Dependants(tail)
private val forkedNodes = depData.nodes
.filter(depData.fanOut(_).exists(_ > 1)).toSet
private def distinctAddToList[T](l: List[T], n: T): List[T] = if (l.contains(n)) l else (n :: l)
// We don't merge flatMaps or joins with source.
// That is just a heuristic, and in some cases perhaps we should
private def mergableWithSource(dep: Producer[P, _]): Boolean =
dep match {
case NamedProducer(producer, _) => true
case IdentityKeyedProducer(producer) => true
case OptionMappedProducer(producer, _) => true
case Source(_) => true
case AlsoProducer(_, _) => true
// The rest are flatMaps, joins, merges or tails
case FlatMappedProducer(_, _) => false
case KeyFlatMappedProducer(_, _) => false
case LeftJoinedProducer(_, _) => false
case Summer(_, _, _) => false
case WrittenProducer(_, _) => false
case MergedProducer(_, _) => false
}
private def noOpProducer(dep: Producer[P, _]): Boolean =
dep match {
// These are merely planning hint nodes, and don't do any logic
case NamedProducer(_, _) => true
case IdentityKeyedProducer(_) => true
case MergedProducer(_, _) => true
case AlsoProducer(_, _) => true
// All the rest have some direct effect on the plan
case FlatMappedProducer(_, _) => false
case KeyFlatMappedProducer(_, _) => false
case LeftJoinedProducer(_, _) => false
case OptionMappedProducer(_, _) => false
case Source(_) => false
case Summer(_, _, _) => false
case WrittenProducer(_, _) => false
}
private def noOpNode(c: CNode): Boolean = c.members.forall(noOpProducer)
private def hasSummerAsDependantProducer(p: Prod[_]): Boolean =
depData.dependantsOf(p).get.collect { case s: Summer[_, _, _] => s }.headOption.isDefined
private def dependsOnSummerProducer(p: Prod[_]): Boolean =
Producer.dependenciesOf(p).collect { case s: Summer[_, _, _] => s }.headOption.isDefined
/*
* Note that this is transitive: we check on p, then we call this fn
* for all dependencies of p
*/
private def allTransDepsMergeableWithSource(p: Prod[_]): Boolean =
mergableWithSource(p) && Producer.dependenciesOf(p).forall(allTransDepsMergeableWithSource)
/**
* This is the main planning loop that goes bottom-up, planning Producers into CNodes.
* The default empty node is a FlatMapNode. When a node is fully planned, we put it
* in the nodeSet. visited is a Set of all the Producers we have planned.
*/
private def addWithDependencies[T](dependantProducer: Prod[T],
previousBolt: CNode,
nodeSet: List[CNode],
visited: VisitedStore): (List[CNode], VisitedStore) =
if (visited.contains(dependantProducer)) {
(distinctAddToList(nodeSet, previousBolt), visited)
} else {
val currentBolt = previousBolt.add(dependantProducer)
val visitedWithN = visited + dependantProducer
/*
* This is a convenience method to call addWithDependencies with the common arguments.
* It is intended that this reduces the probability that we make the call with the wrong args.
*/
def recurse[U](
producer: Prod[U],
updatedBolt: CNode,
updatedRegistry: List[CNode] = nodeSet,
visited: VisitedStore = visitedWithN): (List[CNode], VisitedStore) = addWithDependencies(producer, updatedBolt, updatedRegistry, visited)
/*
* The purpose of this method is to see if we need to add a new physical node to the graph,
* or if we can continue by adding this producer to the current physical node.
*
* This function acts as a look ahead: rather than depending on the state of the current node, it depends
* on the nodes further along in the dag. That is, conditions for splitting into multiple Nodes are based
* on as-yet unvisited Producers.
*
* Note that currentProducer depends on dep: currentProducer -> dep
*/
def maybeSplitThenRecurse[U, A](currentProducer: Prod[U], dep: Prod[A], activeBolt: CNode = currentBolt): (List[CNode], VisitedStore) = {
/*
* First we enumerate the cases where we need to split; in all other cases we
* don't split
*/
val doSplit = activeBolt match {
/*
* If dep, the next node up the chain, has two dependants, we cannot pull it into this
* node
*/
case _ if (forkedNodes.contains(dep)) => true
/*
* This next rule says: we can pull no-ops down into summer nodes, otherwise
* we split to enable map-side aggregation. If the Semigroup is not commutative,
* it might possibly make sense to pull value flatMap-ing down, but generally
* we want to push things higher up in the Dag, not further down.
*/
case SummerNode(_) if !noOpProducer(dep) => true
/*
* Currently, SummerNodes cannot contain any logic other than the sum itself. So we check whether
* this node has something that is not a no-op, and if the next node will be a summer, we split
* now.
*/
case _ if (!noOpNode(activeBolt) && dependsOnSummerProducer(currentProducer)) => true
/*
* This should possibly be improved, but currently, we force a FlatMapNode just before a
* summer (to handle map-side aggregation). This check is here to prevent us from merging
* this current node all the way up to the source.
*/
case FlatMapNode(_) if hasSummerAsDependantProducer(currentProducer) && allTransDepsMergeableWithSource(dep) => true
/*
* If the current node can't be merged with a source, but the transitive deps can,
* then split now.
*/
case _ if ((!mergableWithSource(currentProducer)) && allTransDepsMergeableWithSource(dep)) => true
case _ => false
}
// Note the currentProducer is *ALREADY* a part of activeBolt
if (doSplit) {
// Note that FlatMapNode is used as the default empty node
recurse(dep, updatedBolt = FlatMapNode(), updatedRegistry = distinctAddToList(nodeSet, activeBolt))
} else {
recurse(dep, updatedBolt = activeBolt)
}
}
/*
* This is a peek ahead when we meet a MergedProducer. We pull the directly depended-on MergedProducers into the same Node,
* but only if that MergedProducer is not a fan-out node.
* This has the effect of pulling all of the merged streams in as siblings rather than just the two.
* From this we return the list of MergedProducers which should be combined into the current Node, and the list of nodes
* on which these depend (the producers passing data into these MergedProducers).
*/
def mergeCollapse[A](p: Prod[A], rootMerge: Boolean = false): (List[Prod[A]], List[Prod[A]]) =
p match {
case MergedProducer(subL, subR) if (!forkedNodes.contains(p) || rootMerge) =>
// TODO support de-duping self merges https://github.com/twitter/summingbird/issues/237
if (subL == subR) sys.error("Online Planner doesn't support both the left and right sides of a join being the same node.")
val (lMergeNodes, lSiblings) = mergeCollapse(subL)
val (rMergeNodes, rSiblings) = mergeCollapse(subR)
(distinctAddToList((lMergeNodes ::: rMergeNodes).distinct, p), (lSiblings ::: rSiblings).distinct)
case _ => (List(), List(p))
}
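// For example (illustrative, hypothetical producers): for a merge of three streams such as
// (a ++ b) ++ c, both MergedProducers collapse into the current Node and the returned
// dependencies to recurse on are a, b and c (assuming the inner merge is not a fork point).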
dependantProducer match {
// Names should have been removed before the planning phase
case NamedProducer(producer, _) => sys.error("Should not try plan a named producer")
// The following are mapping-like operations and all just call maybeSplitThenRecurse
case IdentityKeyedProducer(producer) => maybeSplitThenRecurse(dependantProducer, producer)
case OptionMappedProducer(producer, _) => maybeSplitThenRecurse(dependantProducer, producer)
case FlatMappedProducer(producer, _) => maybeSplitThenRecurse(dependantProducer, producer)
case KeyFlatMappedProducer(producer, _) => maybeSplitThenRecurse(dependantProducer, producer)
case WrittenProducer(producer, _) => maybeSplitThenRecurse(dependantProducer, producer)
case LeftJoinedProducer(producer, _) => maybeSplitThenRecurse(dependantProducer, producer)
// The following are special cases
case Summer(producer, _, _) => maybeSplitThenRecurse(dependantProducer, producer, currentBolt.toSummer)
case AlsoProducer(lProducer, rProducer) =>
val (updatedReg, updatedVisited) = maybeSplitThenRecurse(dependantProducer, rProducer)
recurse(lProducer, FlatMapNode(), updatedReg, updatedVisited)
case Source(spout) => (distinctAddToList(nodeSet, currentBolt.toSource), visitedWithN)
case MergedProducer(l, r) =>
// TODO support de-duping self merges https://github.com/twitter/summingbird/issues/237
if (l == r) throw new Exception("Online Planner doesn't support both the left and right sides of a join being the same node.")
val (otherMergeNodes, dependencies) = mergeCollapse(dependantProducer, rootMerge = true)
val newCurrentBolt = otherMergeNodes.foldLeft(currentBolt)(_.add(_))
val visitedWithOther = otherMergeNodes.foldLeft(visitedWithN) { (visited, n) => visited + n }
// Recurse down all the newly generated dependencies
dependencies.foldLeft((distinctAddToList(nodeSet, newCurrentBolt), visitedWithOther)) {
case ((newNodeSet, newVisited), n) =>
recurse(n, FlatMapNode(), newNodeSet, newVisited)
}
}
}
val (nodeSet, _) = addWithDependencies(tail, FlatMapNode(), List[CNode](), Set())
require(nodeSet.collect { case n @ SourceNode(_) => n }.size > 0, "Valid nodeSet should have at least one source node")
}
object OnlinePlan {
def apply[P <: Platform[P], T](tail: TailProducer[P, T]): Dag[P] = {
val (nameMap, strippedTail) = StripNamedNode(tail)
val planner = new OnlinePlan(strippedTail)
val nodesSet = planner.nodeSet
// The nodes are added in a source -> summer order because of how we do list prepends,
// but it's easier to reason about the laws in a summer -> source manner.
// We also drop all Nodes with no members (this may occur when we visit an already-seen producer and it is the first in that Node).
val reversedNodeSet = nodesSet.filter(_.members.size > 0).foldLeft(List[Node[P]]()) { (nodes, n) => n.reverse :: nodes }
Dag(tail, nameMap, strippedTail, reversedNodeSet)
}
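// Illustrative usage (hypothetical names, not part of the original file):
//   val dag = OnlinePlan(tailProducer)
// where tailProducer: TailProducer[P, T] for some online platform P (e.g. the Storm platform);
// the resulting Dag[P] is what platform-specific topology builders consume.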
}
|
zirpins/summingbird
|
summingbird-online/src/main/scala/com/twitter/summingbird/planner/OnlinePlan.scala
|
Scala
|
apache-2.0
| 11,604
|
package sjs.react.bootstrap
import japgolly.scalajs.react._
import scala.scalajs.js
import japgolly.scalajs.react.vdom.prefix_<^._
object ButtonToolbar /* mixins: BootstrapMixin*/ {
val component = ReactComponentB[Props]("ButtonToolbar")
.render(
(P, C) => {
<.div(^.role := "toolbar", ^.classSet1M(P.className, P.getBsClassSet), C)
}
)
.build
case class Props(className: String = "", bsClass: String = "button-toolbar", bsSize: String = "", bsStyle: String = "") extends BoostrapMixinProps
def apply(className: String = "", bsClass: String = "button-toolbar", bsSize: String = "", bsStyle: String = "",
ref: js.UndefOr[String] = "", key: js.Any = {})(children: ReactNode*) = {
component.set(key, ref)(Props(className = className, bsClass = bsClass, bsSize = bsSize, bsStyle = bsStyle), children)
}
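// Illustrative usage (hypothetical children, not part of the original file):
//   ButtonToolbar(className = "my-toolbar")(<.button("Save"), <.button("Cancel"))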
}
|
aparo/scalajs-react-extra
|
react-bootstrap/src/main/scala/sjs/react/bootstrap/ButtonToolbar.scala
|
Scala
|
apache-2.0
| 866
|
package se.ramn.bottfarmen.util
object Timer extends Logging {
def time[T](message: String, thunk: => T): T = {
val start = System.nanoTime
val result = thunk
val durationMs = (System.nanoTime - start) / 1E6
logger.debug(s"$message, duration: $durationMs ms")
result
}
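// Illustrative usage (hypothetical call site, not part of the original file):
//   val rows = Timer.time("load rows", fetchRows())
// logs e.g. "load rows, duration: 12.3 ms" and returns the result of fetchRows().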
}
|
ramn/bottfarmen
|
common/src/main/scala/util/Timer.scala
|
Scala
|
gpl-3.0
| 297
|
package dotty.tools.dotc
package transform
import MegaPhase._
import core.DenotTransformers._
import core.Symbols._
import core.Contexts._
import core.Flags._
import core.Decorators._
import core.StdNames.nme
import core.Names._
import core.NameKinds.TempResultName
import core.Constants._
import util.Store
import dotty.tools.uncheckedNN
/** This phase translates variables that are captured in closures to
* heap-allocated refs.
*/
class CapturedVars extends MiniPhase with IdentityDenotTransformer:
thisPhase =>
import ast.tpd._
override def phaseName: String = CapturedVars.name
override def description: String = CapturedVars.description
override def runsAfterGroupsOf: Set[String] = Set(LiftTry.name)
// lifting tries changes what variables are considered to be captured
private[this] var Captured: Store.Location[util.ReadOnlySet[Symbol]] = _
private def captured(using Context) = ctx.store(Captured)
override def initContext(ctx: FreshContext): Unit =
Captured = ctx.addLocation(util.ReadOnlySet.empty)
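// Illustrative effect of this phase (sketch, not from the original source): a local
//   var x: Int = 0
// that is captured by a closure is re-typed to a scala.runtime.IntRef; reads become
// x.elem and writes go through x.elem = ... (see transformValDef/transformIdent/transformAssign below).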
private class RefInfo(using Context) {
/** The classes for which a Ref type exists. */
val refClassKeys: collection.Set[Symbol] =
defn.ScalaNumericValueClasses() `union` Set(defn.BooleanClass, defn.ObjectClass)
val refClass: Map[Symbol, Symbol] =
refClassKeys.map(rc => rc -> requiredClass(s"scala.runtime.${rc.name}Ref")).toMap
val volatileRefClass: Map[Symbol, Symbol] =
refClassKeys.map(rc => rc -> requiredClass(s"scala.runtime.Volatile${rc.name}Ref")).toMap
val boxedRefClasses: collection.Set[Symbol] =
refClassKeys.flatMap(k => Set(refClass(k), volatileRefClass(k)))
val objectRefClasses: collection.Set[Symbol] =
Set(refClass(defn.ObjectClass), volatileRefClass(defn.ObjectClass))
}
private var myRefInfo: RefInfo | Null = null
private def refInfo(using Context): RefInfo = {
if (myRefInfo == null) myRefInfo = new RefInfo()
myRefInfo.uncheckedNN
}
private class CollectCaptured extends TreeTraverser {
private val captured = util.HashSet[Symbol]()
def traverse(tree: Tree)(using Context) = tree match {
case id: Ident =>
val sym = id.symbol
if (sym.is(Mutable, butNot = Method) && sym.owner.isTerm) {
val enclMeth = ctx.owner.enclosingMethod
if (sym.enclosingMethod != enclMeth) {
report.log(i"capturing $sym in ${sym.enclosingMethod}, referenced from $enclMeth")
captured += sym
}
}
case _ =>
traverseChildren(tree)
}
def runOver(tree: Tree)(using Context): util.ReadOnlySet[Symbol] = {
traverse(tree)
captured
}
}
override def prepareForUnit(tree: Tree)(using Context): Context = {
val captured = atPhase(thisPhase) {
CollectCaptured().runOver(ctx.compilationUnit.tpdTree)
}
ctx.fresh.updateStore(Captured, captured)
}
/** The {Volatile|}{Int|Double|...|Object}Ref class corresponding to the class `cls`,
* depending on whether the reference should be @volatile
*/
def refClass(cls: Symbol, isVolatile: Boolean)(using Context): Symbol = {
val refMap = if (isVolatile) refInfo.volatileRefClass else refInfo.refClass
if (cls.isClass)
refMap.getOrElse(cls, refMap(defn.ObjectClass))
else refMap(defn.ObjectClass)
}
override def prepareForValDef(vdef: ValDef)(using Context): Context = {
val sym = atPhase(thisPhase)(vdef.symbol)
if (captured contains sym) {
val newd = atPhase(thisPhase)(sym.denot).copySymDenotation(
info = refClass(sym.info.classSymbol, sym.hasAnnotation(defn.VolatileAnnot)).typeRef,
initFlags = sym.flags &~ Mutable)
newd.removeAnnotation(defn.VolatileAnnot)
newd.installAfter(thisPhase)
}
ctx
}
override def transformValDef(vdef: ValDef)(using Context): Tree = {
val vble = vdef.symbol
if (captured.contains(vble)) {
def boxMethod(name: TermName): Tree =
ref(vble.info.classSymbol.companionModule.info.member(name).symbol)
cpy.ValDef(vdef)(
rhs = boxMethod(nme.create).appliedTo(vdef.rhs),
tpt = TypeTree(vble.info).withSpan(vdef.tpt.span))
}
else vdef
}
override def transformIdent(id: Ident)(using Context): Tree = {
val vble = id.symbol
if (captured.contains(vble))
id.select(nme.elem).ensureConforms(atPhase(thisPhase)(vble.denot).info)
else id
}
/** If assignment is to a boxed ref type, e.g.
*
* intRef.elem = expr
*
* rewrite using a temporary var to
*
* val ev$n = expr
* intRef.elem = ev$n
*
* That way, we avoid the problem that `expr` might contain a `try` that would
* run on a non-empty stack (which is illegal under JVM rules). Note that LiftTry
* has already run before, so such `try`s would not be eliminated.
*
* If the ref type lhs is followed by a cast (can be an artifact of nested translation),
* drop the cast.
*
* If the ref type is `ObjectRef` or `VolatileObjectRef`, immediately assign `null`
* to the temporary to make the underlying target of the reference available for
* garbage collection. Nullification is omitted if the `expr` is already `null`.
*
* var ev$n: RHS = expr
* objRef.elem = ev$n
* ev$n = null.asInstanceOf[RHS]
*/
override def transformAssign(tree: Assign)(using Context): Tree =
def absolved: Boolean = tree.rhs match
case Literal(Constant(null)) | Typed(Literal(Constant(null)), _) => true
case _ => false
def recur(lhs: Tree): Tree = lhs match
case TypeApply(Select(qual@Select(_, nme.elem), nme.asInstanceOf_), _) =>
recur(qual)
case Select(_, nme.elem) if refInfo.boxedRefClasses.contains(lhs.symbol.maybeOwner) =>
val tempDef = transformFollowing(SyntheticValDef(TempResultName.fresh(), tree.rhs, flags = Mutable))
val update = cpy.Assign(tree)(lhs, ref(tempDef.symbol))
def reset = cpy.Assign(tree)(ref(tempDef.symbol), nullLiteral.cast(tempDef.symbol.info))
val res = if refInfo.objectRefClasses(lhs.symbol.maybeOwner) && !absolved then reset else unitLiteral
transformFollowing(Block(tempDef :: update :: Nil, res))
case _ =>
tree
recur(tree.lhs)
object CapturedVars:
val name: String = "capturedVars"
val description: String = "represent vars captured by closures as heap objects"
|
dotty-staging/dotty
|
compiler/src/dotty/tools/dotc/transform/CapturedVars.scala
|
Scala
|
apache-2.0
| 6,439
|
//package io.soheila.um.vos.accounts
//
//import java.util.Locale
//
//import io.soheila.um.entities.User
//import io.soheila.um.types.UserRole
//
//case class UserUpdateVO(
// firstName: String,
// middleName: String = "",
// lastName: String,
// email: String,
// avatarURL: Option[String],
// roles: Seq[UserRole.Value],
// activated: Boolean,
// preferredLocale: String = Locale.getDefault.toLanguageTag
//) {
// def toUser(user: User): User = {
// user.copy(
// firstName = Some(firstName),
// middleName = Some(middleName),
// lastName = Some(lastName),
// fullName = Some(firstName + " " + middleName + " " + lastName),
// email = Some(email),
// avatarURL = avatarURL,
// roles = if (roles.nonEmpty) roles.toSet else Set(UserRole.SimpleUser),
// activated = activated
// )
// }
//}
//
|
esfand-r/soheila-um
|
src/main/scala/io/soheila/um/vos/accounts/UserUpdateVO.scala
|
Scala
|
apache-2.0
| 865
|
/**
* Copyright (c) 2013, The National Archives <digitalpreservation@nationalarchives.gov.uk>
* http://www.nationalarchives.gov.uk
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package uk.gov.nationalarchives.csv.validator.schema.v1_1
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import uk.gov.nationalarchives.csv.validator.metadata.{Cell, Row}
import uk.gov.nationalarchives.csv.validator.schema.{TotalColumns, SchemaSpecBase, Literal, Schema}
@RunWith(classOf[JUnitRunner])
class SchemaSpec extends SchemaSpecBase {
"NoExt Arg provider" should {
"remove extension from string" in {
val result = NoExt(Literal(Some("somestringwithextension.txt"))).referenceValue(1, Row(List(Cell("Germany")), 1), buildSchema1_1(TotalColumns(0))())
result must_=== Some("somestringwithextension")
}
"leave a string without extension unchanged" in {
val result = NoExt(Literal(Some("somestringwithoutextension"))).referenceValue(1, Row(List(Cell("Germany")), 1), buildSchema1_1(TotalColumns(0))())
result must_=== Some("somestringwithoutextension")
}
}
"Concat Arg provider" should {
" append literal together" in {
val result1 = Concat(Literal(Some("aaaaa")), Literal(None)).referenceValue(1, Row(List(Cell("Germany")), 1), buildSchema1_1(TotalColumns(0))())
result1 must_=== Some("aaaaa")
val result2 = Concat(Literal(None), Literal(Some("aaaaa"))).referenceValue(1, Row(List(Cell("Germany")), 1), buildSchema1_1(TotalColumns(0))())
result2 must_=== Some("aaaaa")
val result3 = Concat(Literal(Some("aaaaa")), Literal(Some("bbbbb"))).referenceValue(1, Row(List(Cell("Germany")), 1), buildSchema1_1(TotalColumns(0))())
result3 must_=== Some("aaaaabbbbb")
val result4 = Concat(Literal(None), Literal(None)).referenceValue(1, Row(List(Cell("Germany")), 1), buildSchema1_1(TotalColumns(0))())
result4 must_=== None
}
}
}
|
valydia/csv-validator
|
csv-validator-core/src/test/scala/uk/gov/nationalarchives/csv/validator/schema/v1_1/SchemaSpec.scala
|
Scala
|
mpl-2.0
| 2,156
|
package org.psesd.srx.shared.core.sif
import org.json4s._
import org.psesd.srx.shared.core.exceptions.ArgumentNullException
import org.psesd.srx.shared.core.extensions.TypeExtensions._
import org.psesd.srx.shared.core.sif.SifContentType.SifContentType
import org.psesd.srx.shared.core.sif.SifMessageType.SifMessageType
import scala.collection.concurrent.TrieMap
import scala.xml.Node
/** Represents a SIF response.
*
* @version 1.0
* @since 1.0
* @author Stephen Pugmire (iTrellis, LLC)
* */
class SifResponse(timestamp: SifTimestamp,
val messageId: SifMessageId,
val messageType: SifMessageType,
val sifRequest: SifRequest) extends SifMessage(timestamp) {
if (timestamp == null) {
throw new ArgumentNullException("timestamp parameter")
}
if (messageId == null) {
throw new ArgumentNullException("messageId parameter")
}
if (messageType == null) {
throw new ArgumentNullException("messageType parameter")
}
val responseAction = {
if (sifRequest == null) {
None
} else {
sifRequest.requestAction.orElse(None)
}
}
requestId = {
if (sifRequest == null) {
None
} else {
sifRequest.requestId.orElse(None)
}
}
serviceType = {
if (sifRequest == null) {
None
} else {
sifRequest.serviceType.orElse(None)
}
}
var bodyJson: Option[JValue] = None
var bodyXml: Option[Node] = None
var error: Option[SifError] = None
var statusCode: Int = 0
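// Precedence when rendering the body (see getBody below): a defined error always wins,
// then the explicitly typed bodyJson/bodyXml fields, then the generic body string,
// converting between JSON and XML as needed; an empty string is returned if nothing is available.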
def getBody(contentType: SifContentType): String = {
  contentType match {
    case SifContentType.Json =>
      if (error.isDefined) {
        error.get.toXml.toJsonString
      } else if (bodyJson.isDefined) {
        bodyJson.get.toJsonString
      } else if (bodyXml.isDefined) {
        bodyXml.get.toJsonString
      } else if (body.isDefined) {
        if (body.get.isJson) {
          body.get
        } else if (body.get.isXml) {
          body.get.toXml.toJsonString
        } else {
          ""
        }
      } else {
        ""
      }
    case SifContentType.Xml =>
      if (error.isDefined) {
        error.get.toXml.toXmlString
      } else if (bodyXml.isDefined) {
        bodyXml.get.toXmlString
      } else if (bodyJson.isDefined) {
        bodyJson.get.toXml.toXmlString
      } else if (body.isDefined) {
        if (body.get.isXml) {
          body.get
        } else if (body.get.isJson) {
          body.get.toJson.toXml.toString
        } else {
          ""
        }
      } else {
        ""
      }
  }
}
def getBodyJson: Option[JValue] = {
val jsonString = getBody(SifContentType.Json)
if(jsonString.isNullOrEmpty) {
None
} else {
Some(jsonString.toJson)
}
}
def getBodyXml: Option[Node] = {
val xmlString = getBody(SifContentType.Xml)
if(xmlString.isNullOrEmpty) {
None
} else {
Some(xmlString.toXml)
}
}
def getHeaders: TrieMap[String, String] = {
addHeader(SifHttpHeader.ContentType.toString, contentType.getOrElse("").toString)
addHeader(SifHeader.MessageId.toString, messageId.toString)
addHeader(SifHeader.MessageType.toString, messageType.toString)
addHeader(SifHeader.ResponseAction.toString, responseAction.getOrElse("").toString)
addHeader(SifHeader.RequestId.toString, requestId.getOrElse(""))
addHeader(SifHeader.ServiceType.toString, serviceType.getOrElse("").toString)
addHeader(SifHeader.Timestamp.toString, timestamp.toString)
headers
}
}
|
PSESD/srx-shared-core
|
src/main/scala/org/psesd/srx/shared/core/sif/SifResponse.scala
|
Scala
|
mit
| 3,980
|
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.physical.marklogic.qscript
import slamdata.Predef._
import quasar.fp.liftMT
import quasar.fp.ski.κ
import quasar.physical.marklogic.DocType
import quasar.physical.marklogic.xml.namespaces._
import quasar.physical.marklogic.xquery._
import quasar.physical.marklogic.xquery.expr._
import quasar.physical.marklogic.xquery.syntax._
import eu.timepit.refined.auto._
import scalaz._, Scalaz._
/** Planner for QScript structural operations.
*
* @tparam F the effects employed by the library.
* @tparam FMT type index representing the data format supported by the library.
*/
trait StructuralPlanner[F[_], FMT] { self =>
import FunctionDecl._
import StructuralPlanner.ejs
/** The representation of EJson `Null`. */
def null_ : F[XQuery]
/** Returns the result of appending the value to the array. */
def arrayAppend(array: XQuery, value: XQuery): F[XQuery]
/** Returns the concatenation of the given arrays. */
def arrayConcat(a1: XQuery, a2: XQuery): F[XQuery]
/** Returns the value at the (zero-based) index in the array or the empty seq if none. */
def arrayElementAt(array: XQuery, index: XQuery): F[XQuery]
/** Returns a representation of the item for use as a sort key. */
def asSortKey(item: XQuery): F[XQuery]
/** Returns whether the given `item()` represents an EJson array. */
def isArray(item: XQuery): F[XQuery]
/** Returns the inner array elements or object entries of the given `node()`
* as a sequence or the empty seq if it is neither an array or object.
*/
def leftShift(node: XQuery): F[XQuery]
/** Returns a representation of an EJson array given a sequence of array
* elements obtained via `mkArrayElt`.
*/
def mkArray(elements: XQuery): F[XQuery]
/** Returns a representation of an array element given an, possibly empty,
* `item()`.
*/
def mkArrayElt(item: XQuery): F[XQuery]
/** Returns a representation of an EJson object given a sequence of object
* entries obtained via `mkObjectEntry`.
*/
def mkObject(entries: XQuery): F[XQuery]
/** Returns a representation of an EJson object entry given a key name and,
* possibly empty, value `item()`.
*/
def mkObjectEntry(key: XQuery, value: XQuery): F[XQuery]
/** Returns the given `node()` casted to a more precise XQuery type, if
* possible, or the node itself otherwise.
*/
def nodeCast(node: XQuery): F[XQuery]
/** Returns any metadata associated with the given `node()` or the empty seq
* if none.
*/
def nodeMetadata(node: XQuery): F[XQuery]
/** Returns a string representation of the given `node()`. */
def nodeToString(node: XQuery): F[XQuery]
/** Returns the name of the given `node()`'s type as an `xs:string` or the
* empty seq if unknown.
*/
def nodeType(node: XQuery): F[XQuery]
/** Returns an updated version of the given EJson object where the given key
* is not associated with a value.
*/
def objectDelete(obj: XQuery, key: XQuery): F[XQuery]
/** Returns an updated version of the given EJson object where the given value
* is associated with the given key.
*/
def objectInsert(obj: XQuery, key: XQuery, value: XQuery): F[XQuery]
/** Returns the value associated with the given key in the given EJson object
* or the empty seq if none.
*/
def objectLookup(obj: XQuery, key: XQuery): F[XQuery]
/** Returns the right-biased merge of the given EJson objects. */
def objectMerge(o1: XQuery, o2: XQuery): F[XQuery]
//// Derived expressions. ////
/** Attempts to cast the given `item()` to a more specific type if it is a `node()`. */
def castIfNode(item: XQuery)(implicit F0: Bind[F], F1: PrologW[F]): F[XQuery] =
castIfNodeFn.apply(item)
/** Returns an array consisting of the given value. */
def singletonArray(value: XQuery)(implicit F: Monad[F]): F[XQuery] =
mkArrayElt(value) >>= (mkArray(_))
/** Returns an object with the given value associated with the given key. */
def singletonObject(key: XQuery, value: XQuery)(implicit F: Monad[F]): F[XQuery] =
mkObjectEntry(key, value) >>= (mkObject(_))
/** Returns the string representation of the given item. */
def asString(item: XQuery)(implicit F0: Bind[F], F1: PrologW[F]): F[XQuery] =
toStringFn.apply(item)
/** Returns the name of the type of the given item or the empty seq if unknown. */
def typeOf(item: XQuery)(implicit F0: Bind[F], F1: PrologW[F]): F[XQuery] =
typeOfFn.apply(item)
/** Converts a sequence of items into an array. */
def seqToArray(seq: XQuery)(implicit F0: Bind[F], F1: PrologW[F]): F[XQuery] =
seqToArrayFn.apply(seq)
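// Illustrative (hypothetical values; `.xs` lifts string literals to XQuery as used elsewhere in this file):
//   planner.singletonObject("name".xs, "bob".xs)   // an EJson object like {"name": "bob"} in the target FMT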
/** Transform the effect type used by the planner. */
def transform[G[_]](f: F ~> G): StructuralPlanner[G, FMT] =
new StructuralPlanner[G, FMT] {
def null_ : G[XQuery] = f(self.null_)
def arrayAppend(array: XQuery, value: XQuery): G[XQuery] = f(self.arrayAppend(array, value))
def arrayConcat(a1: XQuery, a2: XQuery): G[XQuery] = f(self.arrayConcat(a1, a2))
def arrayElementAt(array: XQuery, index: XQuery): G[XQuery] = f(self.arrayElementAt(array, index))
def asSortKey(item: XQuery): G[XQuery] = f(self.asSortKey(item))
def isArray(item: XQuery): G[XQuery] = f(self.isArray(item))
def leftShift(node: XQuery): G[XQuery] = f(self.leftShift(node))
def mkArray(elements: XQuery): G[XQuery] = f(self.mkArray(elements))
def mkArrayElt(item: XQuery): G[XQuery] = f(self.mkArrayElt(item))
def mkObject(entries: XQuery): G[XQuery] = f(self.mkObject(entries))
def mkObjectEntry(key: XQuery, value: XQuery): G[XQuery] = f(self.mkObjectEntry(key, value))
def nodeCast(node: XQuery): G[XQuery] = f(self.nodeCast(node))
def nodeMetadata(node: XQuery): G[XQuery] = f(self.nodeMetadata(node))
def nodeToString(node: XQuery): G[XQuery] = f(self.nodeToString(node))
def nodeType(node: XQuery): G[XQuery] = f(self.nodeType(node))
def objectDelete(obj: XQuery, key: XQuery): G[XQuery] = f(self.objectDelete(obj, key))
def objectInsert(obj: XQuery, key: XQuery, value: XQuery): G[XQuery] = f(self.objectInsert(obj, key, value))
def objectLookup(obj: XQuery, key: XQuery): G[XQuery] = f(self.objectLookup(obj, key))
def objectMerge(o1: XQuery, o2: XQuery): G[XQuery] = f(self.objectMerge(o1, o2))
}
////
// ejson:cast-if-node($item as item()?) as item()?
private def castIfNodeFn(implicit F0: Bind[F], F1: PrologW[F]): F[FunctionDecl1] =
ejs.declare[F]("cast-if-node") flatMap (_(
$("item") as ST("item()?")
).as(ST("item()?")) { item: XQuery =>
val n = $("n")
nodeCast(~n) map { casted =>
typeswitch(item)(
n as ST("node()") return_ κ(casted)
) default item
}
})
@SuppressWarnings(Array("org.wartremover.warts.Equals"))
private def toStringFn(implicit F0: Bind[F], F1: PrologW[F]): F[FunctionDecl1] =
ejs.declare[F]("to-string") flatMap (_(
$("item") as ST("item()?")
).as(ST("xs:string?")) { item: XQuery =>
val (n, t) = ($("n"), $("t"))
(nodeToString(~n) |@| typeOf(item) |@| castIfNode(item))((nstr, tpe, castItem) =>
let_(t := tpe) return_ {
if_(fn.empty(item) or ~t eq "na".xs)
.then_(emptySeq)
.else_(if_(~t eq "null".xs)
.then_("null".xs)
.else_(if_(tpe eq "date".xs)
.then_(fn.formatDate(castItem, lib.dateFmt.xs))
.else_(if_(tpe eq "time".xs)
.then_(fn.formatTime(castItem, lib.timeFmt.xs))
.else_(if_(tpe eq "timestamp".xs)
.then_(fn.formatDateTime(castItem, lib.dateTimeFmt.xs))
.else_(typeswitch(item)(
n as ST("node()") return_ κ(nstr)
) default fn.string(item))))))
})
})
// ejson:type-of($item as item()*) as xs:string?
private def typeOfFn(implicit F0: Bind[F], F1: PrologW[F]): F[FunctionDecl1] =
ejs.declare[F]("type-of") flatMap (_(
$("item") as ST.Top
).as(ST("xs:string?")) { item: XQuery =>
val node = $("node")
nodeType(~node) map { nType =>
if_(fn.empty(item))
.then_ { "na".xs }
.else_ {
typeswitch(item)(
node as ST("node()") return_ κ(nType),
ST("xs:boolean") return_ "boolean".xs,
ST("xs:dateTime") return_ "timestamp".xs,
ST("xs:date") return_ "date".xs,
ST("xs:time") return_ "time".xs,
ST("xs:duration") return_ "interval".xs,
ST("xs:integer") return_ "integer".xs,
ST("xs:decimal") return_ "decimal".xs,
ST("xs:double") return_ "decimal".xs,
ST("xs:float") return_ "decimal".xs,
ST("xs:base64Binary") return_ "binary".xs,
ST("xs:hexBinary") return_ "binary".xs,
ST("xs:QName") return_ "string".xs,
ST("xs:string") return_ "string".xs,
ST("xs:untypedAtomic") return_ "string".xs
) default emptySeq
}
}
})
// ejson:seq-to-array($items as item()*) as node()
private def seqToArrayFn(implicit F0: Bind[F], F1: PrologW[F]): F[FunctionDecl1] =
ejs.declare[F]("seq-to-array") flatMap (_(
$("items") as ST("item()*")
).as(ST("node()")) { items: XQuery =>
val x = $("x")
for {
arrElt <- mkArrayElt(~x)
arr <- mkArray(fn.map(func(x.render) { arrElt }, items))
} yield arr
})
}
object StructuralPlanner extends StructuralPlannerInstances {
val ejs = NamespaceDecl(ejsonNs)
def apply[F[_], T](implicit L: StructuralPlanner[F, T]): StructuralPlanner[F, T] = L
def forTrans[F[_]: Monad, FMT, T[_[_], _]: MonadTrans](
implicit SP: StructuralPlanner[F, FMT]
): StructuralPlanner[T[F, ?], FMT] =
SP.transform(liftMT[F, T])
}
sealed abstract class StructuralPlannerInstances extends StructuralPlannerInstances0 {
implicit def jsonStructuralPlanner[F[_]: Monad: PrologW: QNameGenerator]: StructuralPlanner[F, DocType.Json] =
new JsonStructuralPlanner[F]
implicit def xmlStructuralPlanner[F[_]: Monad: MonadPlanErr: PrologW: QNameGenerator]: StructuralPlanner[F, DocType.Xml] =
new XmlStructuralPlanner[F]
}
sealed abstract class StructuralPlannerInstances0 {
implicit def eitherTStructuralPlanner[F[_]: Monad, FMT, E](
implicit SP: StructuralPlanner[F, FMT]
): StructuralPlanner[EitherT[F, E, ?], FMT] =
StructuralPlanner.forTrans[F, FMT, EitherT[?[_], E, ?]]
implicit def writerTStructuralPlanner[F[_]: Monad, FMT, W: Monoid](
implicit SP: StructuralPlanner[F, FMT]
): StructuralPlanner[WriterT[F, W, ?], FMT] =
StructuralPlanner.forTrans[F, FMT, WriterT[?[_], W, ?]]
}
|
jedesah/Quasar
|
marklogic/src/main/scala/quasar/physical/marklogic/qscript/StructuralPlanner.scala
|
Scala
|
apache-2.0
| 11,372
|
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO3
package com.google.protobuf.wrappers
/** Wrapper message for `uint32`.
*
* The JSON representation for `UInt32Value` is JSON number.
*
* @param value
* The uint32 value.
*/
@SerialVersionUID(0L)
final case class UInt32Value(
value: _root_.scala.Int = 0,
unknownFields: _root_.scalapb.UnknownFieldSet = _root_.scalapb.UnknownFieldSet.empty
) extends scalapb.GeneratedMessage with scalapb.lenses.Updatable[UInt32Value] {
@transient
private[this] var __serializedSizeCachedValue: _root_.scala.Int = 0
private[this] def __computeSerializedValue(): _root_.scala.Int = {
var __size = 0
{
val __value = value
if (__value != 0) {
__size += _root_.com.google.protobuf.CodedOutputStream.computeUInt32Size(1, __value)
}
};
__size += unknownFields.serializedSize
__size
}
override def serializedSize: _root_.scala.Int = {
var read = __serializedSizeCachedValue
if (read == 0) {
read = __computeSerializedValue()
__serializedSizeCachedValue = read
}
read
}
def writeTo(`_output__`: _root_.com.google.protobuf.CodedOutputStream): _root_.scala.Unit = {
{
val __v = value
if (__v != 0) {
_output__.writeUInt32(1, __v)
}
};
unknownFields.writeTo(_output__)
}
def withValue(__v: _root_.scala.Int): UInt32Value = copy(value = __v)
def withUnknownFields(__v: _root_.scalapb.UnknownFieldSet) = copy(unknownFields = __v)
def discardUnknownFields = copy(unknownFields = _root_.scalapb.UnknownFieldSet.empty)
def getFieldByNumber(__fieldNumber: _root_.scala.Int): _root_.scala.Any = {
(__fieldNumber: @_root_.scala.unchecked) match {
case 1 => {
val __t = value
if (__t != 0) __t else null
}
}
}
def getField(__field: _root_.scalapb.descriptors.FieldDescriptor): _root_.scalapb.descriptors.PValue = {
_root_.scala.Predef.require(__field.containingMessage eq companion.scalaDescriptor)
(__field.number: @_root_.scala.unchecked) match {
case 1 => _root_.scalapb.descriptors.PInt(value)
}
}
def toProtoString: _root_.scala.Predef.String = _root_.scalapb.TextFormat.printToUnicodeString(this)
def companion = com.google.protobuf.wrappers.UInt32Value
}
object UInt32Value extends scalapb.GeneratedMessageCompanion[com.google.protobuf.wrappers.UInt32Value] {
implicit def messageCompanion: scalapb.GeneratedMessageCompanion[com.google.protobuf.wrappers.UInt32Value] = this
def merge(`_message__`: com.google.protobuf.wrappers.UInt32Value, `_input__`: _root_.com.google.protobuf.CodedInputStream): com.google.protobuf.wrappers.UInt32Value = {
var __value = `_message__`.value
var `_unknownFields__`: _root_.scalapb.UnknownFieldSet.Builder = null
var _done__ = false
while (!_done__) {
val _tag__ = _input__.readTag()
_tag__ match {
case 0 => _done__ = true
case 8 =>
__value = _input__.readUInt32()
case tag =>
if (_unknownFields__ == null) {
_unknownFields__ = new _root_.scalapb.UnknownFieldSet.Builder(_message__.unknownFields)
}
_unknownFields__.parseField(tag, _input__)
}
}
com.google.protobuf.wrappers.UInt32Value(
value = __value,
unknownFields = if (_unknownFields__ == null) _message__.unknownFields else _unknownFields__.result()
)
}
implicit def messageReads: _root_.scalapb.descriptors.Reads[com.google.protobuf.wrappers.UInt32Value] = _root_.scalapb.descriptors.Reads{
case _root_.scalapb.descriptors.PMessage(__fieldsMap) =>
_root_.scala.Predef.require(__fieldsMap.keys.forall(_.containingMessage eq scalaDescriptor), "FieldDescriptor does not match message type.")
com.google.protobuf.wrappers.UInt32Value(
value = __fieldsMap.get(scalaDescriptor.findFieldByNumber(1).get).map(_.as[_root_.scala.Int]).getOrElse(0)
)
case _ => throw new RuntimeException("Expected PMessage")
}
def javaDescriptor: _root_.com.google.protobuf.Descriptors.Descriptor = WrappersProto.javaDescriptor.getMessageTypes().get(5)
def scalaDescriptor: _root_.scalapb.descriptors.Descriptor = WrappersProto.scalaDescriptor.messages(5)
def messageCompanionForFieldNumber(__number: _root_.scala.Int): _root_.scalapb.GeneratedMessageCompanion[_] = throw new MatchError(__number)
lazy val nestedMessagesCompanions: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] = Seq.empty
def enumCompanionForFieldNumber(__fieldNumber: _root_.scala.Int): _root_.scalapb.GeneratedEnumCompanion[_] = throw new MatchError(__fieldNumber)
lazy val defaultInstance = com.google.protobuf.wrappers.UInt32Value(
value = 0
)
implicit class UInt32ValueLens[UpperPB](_l: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.wrappers.UInt32Value]) extends _root_.scalapb.lenses.ObjectLens[UpperPB, com.google.protobuf.wrappers.UInt32Value](_l) {
def value: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Int] = field(_.value)((c_, f_) => c_.copy(value = f_))
}
final val VALUE_FIELD_NUMBER = 1
def of(
value: _root_.scala.Int
): _root_.com.google.protobuf.wrappers.UInt32Value = _root_.com.google.protobuf.wrappers.UInt32Value(
value
)
// @@protoc_insertion_point(GeneratedMessageCompanion[google.protobuf.UInt32Value])
}
|
trueaccord/ScalaPB
|
scalapb-runtime/src/main/scalajs/com/google/protobuf/wrappers/UInt32Value.scala
|
Scala
|
apache-2.0
| 5,561
|
/*
Copyright 2016-17, Hasso-Plattner-Institut fuer Softwaresystemtechnik GmbH
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package de.hpi.ingestion.framework
import java.io.ByteArrayOutputStream
import com.holdenkarau.spark.testing.SharedSparkContext
import org.scalatest.{FlatSpec, Matchers}
class JobRunnerTest extends FlatSpec with Matchers with SharedSparkContext {
"Jobs" should "be executed" in {
val output = new ByteArrayOutputStream()
Console.withOut(output) {
JobRunner.main(Array("de.hpi.ingestion.framework.mock.MockPrintSparkJob"))
}
val expectedOutput = "assertConditions\nexecQ\nload\nrun\nexecQ\nsave\n"
output.toString shouldEqual expectedOutput
}
they should "be passed command line arguments" in {
val output = new ByteArrayOutputStream()
Console.withOut(output) {
JobRunner.main(Array("de.hpi.ingestion.framework.mock.MockPrintSparkJob", "-c", "test.xml"))
}
val expectedOutput = "assertConditions\nexecQ\nload\nrun\ntest.xml\nexecQ\nsave\n"
output.toString shouldEqual expectedOutput
}
"Pipeline" should "be executed" in {
val output = new ByteArrayOutputStream()
Console.withOut(output) {
JobRunner.main(Array("de.hpi.ingestion.framework.mock.MockPipeline", "--config", "test.xml"))
}
val expectedOutput = "assertConditions\nexecQ\nload\nrun\ntest.xml\nexecQ\nsave\nassertConditions\nexecQ\n" +
"load\nrun\ntest.xml\nexecQ\nsave\n"
output.toString shouldEqual expectedOutput
}
"Invalid input" should "be caught" in {
val nonExistingClass = "de.hpi.ingestion.abcde"
val thrown1 = the [IllegalArgumentException] thrownBy JobRunner.main(Array(nonExistingClass))
thrown1.getCause shouldBe a [ClassNotFoundException]
thrown1.getMessage shouldEqual "There is no such pipeline or job."
val nonEmptyConstructor = "de.hpi.ingestion.dataimport.dbpedia.models.Relation"
val thrown2 = the [IllegalArgumentException] thrownBy JobRunner.main(Array(nonEmptyConstructor))
thrown2.getCause shouldBe a [InstantiationException]
thrown2.getMessage shouldEqual "The provided class is not a Spark Job or a Pipeline."
val notImplementingTrait = "de.hpi.ingestion.textmining.tokenizer.AccessibleGermanStemmer"
val thrown3 = the [IllegalArgumentException] thrownBy JobRunner.main(Array(notImplementingTrait))
thrown3.getCause shouldBe a [MatchError]
thrown3.getMessage shouldEqual "The provided class does not implement the trait SparkJob or JobPipeline."
}
}
|
bpn1/ingestion
|
src/test/scala/de/hpi/ingestion/framework/JobRunnerTest.scala
|
Scala
|
apache-2.0
| 3,141
|