code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package name.abhijitsarkar.user.controller
import scala.concurrent.Future
import scala.concurrent.Promise
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Directives.logRequestResult
import akka.http.scaladsl.server.Directives.pathPrefix
import scala.concurrent.Future
import name.abhijitsarkar.user.domain.User
import name.abhijitsarkar.user.repository.UserRepository._
import UserJsonSupport._
import akka.actor._
import scala.concurrent.Promise
import akka.pattern.ask
import name.abhijitsarkar.user.ActorPlumbing
/**
 * Write-side (POST/DELETE) HTTP routes for the user service.
 *
 * Mixes in [[ActorPlumbing]] for the actor `system`, the ask timeout and the
 * execution context used by the Future combinators below.
 */
trait UserWriteResource extends ActorPlumbing {
  // Props for the business-delegate actor; note a fresh actor is spawned per request below.
  val businessDelegateProps: Props

  // TODO: Content negotiation is not implemented.
  // http://stackoverflow.com/questions/32187858/akka-http-accept-and-content-type-handling
  // http://stackoverflow.com/questions/30859264/test-akka-http-server-using-specs2
  val writeRoute = {
    logRequestResult("user-service") {
      pathPrefix("user") {
        (post & entity(as[User])) { user =>
          // POST /user/{userId} -> update the user parsed from the request body.
          // NOTE(review): the path segment `userId` is never used; the id is presumably
          // taken from the entity — confirm this is intended.
          path(Segment) { userId =>
            complete {
              val userUpdateRequest = UserUpdateRequest(user)
              val actor = system.actorOf(businessDelegateProps)
              // NOTE(review): `?` returns a Future[Any]; this cast relies on the ask
              // implementation backing that Future with a Promise. Fragile — consider
              // replying with the payload and using `.mapTo` instead.
              val response = (actor ? userUpdateRequest).asInstanceOf[Promise[Future[UserModificationResponse]]]
              processDeleteOrUpdateResponse(response)
            }
          } ~
          // POST /user (no trailing segment) -> create a new user. The response body is
          // either the URI of the created resource or a failure message.
          extractUri { requestUri =>
            complete {
              val userCreateRequest = UserCreateRequest(user)
              val actor = system.actorOf(businessDelegateProps)
              // Same Promise-cast caveat as above.
              val response = (actor ? userCreateRequest).asInstanceOf[Promise[Future[UserModificationResponse]]]
              response.future.flatMap {
                _.map { response =>
                  val body = response match {
                    // Created: echo the location of the new resource.
                    case UserModificationResponse(statusCode, Some(uid)) => s"$requestUri/${uid}"
                    // No id in the response -> creation failed.
                    case UserModificationResponse(statusCode, _) => s"Failed to create user with id: ${userCreateRequest.user.userId}."
                  }
                  UserModificationResponse(response.statusCode, Some(body))
                }
              }
            }
          }
        } ~
        // DELETE /user/{userId} -> delete by id.
        (delete & path(Segment)) { userId =>
          complete {
            val userDeleteRequest = UserDeleteRequest(userId)
            val actor = system.actorOf(businessDelegateProps)
            val response = (actor ? userDeleteRequest).asInstanceOf[Promise[Future[UserModificationResponse]]]
            processDeleteOrUpdateResponse(response)
          }
        }
      }
    }
  }

  // Flattens the doubly-wrapped response (Promise of Future) into a single Future,
  // passing the business response through unchanged.
  def processDeleteOrUpdateResponse(response: Promise[Future[UserModificationResponse]]) = {
    response.future.flatMap { _.map { identity } }
  }
}
| asarkar/akka | user-service/src/main/scala/name/abhijitsarkar/user/controller/UserWriteResource.scala | Scala | gpl-3.0 | 2,795 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.plans.logical
import org.apache.spark.sql.catalyst.analysis.ViewType
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, FunctionResource}
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.connector.catalog.TableChange.ColumnPosition
import org.apache.spark.sql.connector.expressions.Transform
import org.apache.spark.sql.types.{DataType, StructType}
/**
 * A logical plan node that contains exactly what was parsed from SQL.
 *
 * This is used to hold information parsed from SQL when there are multiple implementations of a
 * query or command. For example, CREATE TABLE may be implemented by different nodes for v1 and v2.
 * Instead of parsing directly to a v1 CreateTable that keeps metadata in CatalogTable, and then
 * converting that v1 metadata to the v2 equivalent, the sql [[CreateTableStatement]] plan is
 * produced by the parser and converted once into both implementations.
 *
 * Parsed logical plans are not resolved because they must be converted to concrete logical plans.
 *
 * Parsed logical plans are located in Catalyst so that as much SQL parsing logic as possible is
 * kept in a [[org.apache.spark.sql.catalyst.parser.AbstractSqlParser]].
 */
abstract class ParsedStatement extends LogicalPlan {
  // Redact properties and options when parsed nodes are used by generic methods like toString.
  // Note: this redacts EVERY Map-typed constructor argument of a statement, not just "options".
  override def productIterator: Iterator[Any] = super.productIterator.map {
    case mapArg: Map[_, _] => conf.redactOptions(mapArg)
    case other => other
  }

  // Parsed statements expose no output attributes and, by default, no children.
  override def output: Seq[Attribute] = Seq.empty

  override def children: Seq[LogicalPlan] = Seq.empty

  // Never resolved: these nodes must first be converted to concrete logical plans.
  final override lazy val resolved = false
}
/**
 * A CREATE TABLE command, as parsed from SQL.
 *
 * This is a metadata-only command and is not used to write data to the created table.
 */
case class CreateTableStatement(
    tableName: Seq[String],
    tableSchema: StructType,
    partitioning: Seq[Transform],
    bucketSpec: Option[BucketSpec],
    properties: Map[String, String],
    provider: String,
    options: Map[String, String],
    location: Option[String],
    comment: Option[String],
    ifNotExists: Boolean) extends ParsedStatement

/**
 * A CREATE TABLE AS SELECT command, as parsed from SQL.
 */
case class CreateTableAsSelectStatement(
    tableName: Seq[String],
    asSelect: LogicalPlan,
    partitioning: Seq[Transform],
    bucketSpec: Option[BucketSpec],
    properties: Map[String, String],
    provider: String,
    options: Map[String, String],
    location: Option[String],
    comment: Option[String],
    ifNotExists: Boolean) extends ParsedStatement {

  // The SELECT query is a real child so it participates in analysis/transformations.
  override def children: Seq[LogicalPlan] = Seq(asSelect)
}

/**
 * A CREATE VIEW statement, as parsed from SQL.
 *
 * Note that `child` (the view query) is not exposed via `children`; the default empty
 * `children` from [[ParsedStatement]] applies.
 */
case class CreateViewStatement(
    viewName: Seq[String],
    userSpecifiedColumns: Seq[(String, Option[String])],
    comment: Option[String],
    properties: Map[String, String],
    originalText: Option[String],
    child: LogicalPlan,
    allowExisting: Boolean,
    replace: Boolean,
    viewType: ViewType) extends ParsedStatement

/**
 * A REPLACE TABLE command, as parsed from SQL.
 *
 * If the table exists prior to running this command, executing this statement
 * will replace the table's metadata and clear the underlying rows from the table.
 */
case class ReplaceTableStatement(
    tableName: Seq[String],
    tableSchema: StructType,
    partitioning: Seq[Transform],
    bucketSpec: Option[BucketSpec],
    properties: Map[String, String],
    provider: String,
    options: Map[String, String],
    location: Option[String],
    comment: Option[String],
    orCreate: Boolean) extends ParsedStatement

/**
 * A REPLACE TABLE AS SELECT command, as parsed from SQL.
 */
case class ReplaceTableAsSelectStatement(
    tableName: Seq[String],
    asSelect: LogicalPlan,
    partitioning: Seq[Transform],
    bucketSpec: Option[BucketSpec],
    properties: Map[String, String],
    provider: String,
    options: Map[String, String],
    location: Option[String],
    comment: Option[String],
    orCreate: Boolean) extends ParsedStatement {

  // The SELECT query is a real child so it participates in analysis/transformations.
  override def children: Seq[LogicalPlan] = Seq(asSelect)
}

/**
 * Column data as parsed by ALTER TABLE ... ADD COLUMNS.
 */
case class QualifiedColType(
    name: Seq[String],
    dataType: DataType,
    nullable: Boolean,
    comment: Option[String],
    position: Option[ColumnPosition])
/**
 * ALTER TABLE ... ADD COLUMNS command, as parsed from SQL.
 */
case class AlterTableAddColumnsStatement(
    tableName: Seq[String],
    columnsToAdd: Seq[QualifiedColType]) extends ParsedStatement

/**
 * ALTER TABLE ... REPLACE COLUMNS command, as parsed from SQL.
 */
case class AlterTableReplaceColumnsStatement(
    tableName: Seq[String],
    columnsToAdd: Seq[QualifiedColType]) extends ParsedStatement

/**
 * ALTER TABLE ... CHANGE COLUMN command, as parsed from SQL.
 *
 * Each optional field is an independent, optional change to apply to the column.
 */
case class AlterTableAlterColumnStatement(
    tableName: Seq[String],
    column: Seq[String],
    dataType: Option[DataType],
    nullable: Option[Boolean],
    comment: Option[String],
    position: Option[ColumnPosition]) extends ParsedStatement

/**
 * ALTER TABLE ... RENAME COLUMN command, as parsed from SQL.
 */
case class AlterTableRenameColumnStatement(
    tableName: Seq[String],
    column: Seq[String],
    newName: String) extends ParsedStatement

/**
 * ALTER TABLE ... DROP COLUMNS command, as parsed from SQL.
 *
 * Each entry in `columnsToDrop` is a (possibly nested) multi-part column name.
 */
case class AlterTableDropColumnsStatement(
    tableName: Seq[String],
    columnsToDrop: Seq[Seq[String]]) extends ParsedStatement

/**
 * ALTER TABLE ... SET TBLPROPERTIES command, as parsed from SQL.
 */
case class AlterTableSetPropertiesStatement(
    tableName: Seq[String],
    properties: Map[String, String]) extends ParsedStatement

/**
 * ALTER TABLE ... UNSET TBLPROPERTIES command, as parsed from SQL.
 */
case class AlterTableUnsetPropertiesStatement(
    tableName: Seq[String],
    propertyKeys: Seq[String],
    ifExists: Boolean) extends ParsedStatement

/**
 * ALTER TABLE ... SET LOCATION command, as parsed from SQL.
 */
case class AlterTableSetLocationStatement(
    tableName: Seq[String],
    partitionSpec: Option[TablePartitionSpec],
    location: String) extends ParsedStatement

/**
 * ALTER TABLE ... RECOVER PARTITIONS command, as parsed from SQL.
 */
case class AlterTableRecoverPartitionsStatement(
    tableName: Seq[String]) extends ParsedStatement

/**
 * ALTER TABLE ... ADD PARTITION command, as parsed from SQL.
 *
 * Each element pairs a partition spec with its optional explicit location.
 */
case class AlterTableAddPartitionStatement(
    tableName: Seq[String],
    partitionSpecsAndLocs: Seq[(TablePartitionSpec, Option[String])],
    ifNotExists: Boolean) extends ParsedStatement

/**
 * ALTER TABLE ... RENAME PARTITION command, as parsed from SQL.
 */
case class AlterTableRenamePartitionStatement(
    tableName: Seq[String],
    from: TablePartitionSpec,
    to: TablePartitionSpec) extends ParsedStatement

/**
 * ALTER TABLE ... DROP PARTITION command, as parsed from SQL.
 */
case class AlterTableDropPartitionStatement(
    tableName: Seq[String],
    specs: Seq[TablePartitionSpec],
    ifExists: Boolean,
    purge: Boolean,
    retainData: Boolean) extends ParsedStatement

/**
 * ALTER TABLE ... SERDEPROPERTIES command, as parsed from SQL.
 */
case class AlterTableSerDePropertiesStatement(
    tableName: Seq[String],
    serdeClassName: Option[String],
    serdeProperties: Option[Map[String, String]],
    partitionSpec: Option[TablePartitionSpec]) extends ParsedStatement
/**
 * ALTER VIEW ... SET TBLPROPERTIES command, as parsed from SQL.
 */
case class AlterViewSetPropertiesStatement(
    viewName: Seq[String],
    properties: Map[String, String]) extends ParsedStatement

/**
 * ALTER VIEW ... UNSET TBLPROPERTIES command, as parsed from SQL.
 */
case class AlterViewUnsetPropertiesStatement(
    viewName: Seq[String],
    propertyKeys: Seq[String],
    ifExists: Boolean) extends ParsedStatement

/**
 * ALTER VIEW ... AS query command, as parsed from SQL.
 *
 * `originalText` preserves the raw SQL of the new view query.
 */
case class AlterViewAsStatement(
    viewName: Seq[String],
    originalText: String,
    query: LogicalPlan) extends ParsedStatement

/**
 * ALTER TABLE ... RENAME TO command, as parsed from SQL.
 *
 * Also covers ALTER VIEW ... RENAME TO when `isView` is true.
 */
case class RenameTableStatement(
    oldName: Seq[String],
    newName: Seq[String],
    isView: Boolean) extends ParsedStatement

/**
 * A DROP TABLE statement, as parsed from SQL.
 */
case class DropTableStatement(
    tableName: Seq[String],
    ifExists: Boolean,
    purge: Boolean) extends ParsedStatement

/**
 * A DROP VIEW statement, as parsed from SQL.
 */
case class DropViewStatement(
    viewName: Seq[String],
    ifExists: Boolean) extends ParsedStatement

/**
 * A DESCRIBE TABLE tbl_name col_name statement, as parsed from SQL.
 */
case class DescribeColumnStatement(
    tableName: Seq[String],
    colNameParts: Seq[String],
    isExtended: Boolean) extends ParsedStatement
/**
 * An INSERT INTO statement, as parsed from SQL.
 *
 * @param table the logical plan representing the table.
 * @param query the logical plan representing data to write to.
 * @param overwrite overwrite existing table or partitions.
 * @param partitionSpec a map from the partition key to the partition value (optional).
 *                      If the value is missing, dynamic partition insert will be performed.
 *                      As an example, `INSERT INTO tbl PARTITION (a=1, b=2) AS` would have
 *                      Map('a' -> Some('1'), 'b' -> Some('2')),
 *                      and `INSERT INTO tbl PARTITION (a=1, b) AS ...`
 *                      would have Map('a' -> Some('1'), 'b' -> None).
 * @param ifPartitionNotExists If true, only write if the partition does not exist.
 *                             Only valid for static partitions.
 */
case class InsertIntoStatement(
    table: LogicalPlan,
    partitionSpec: Map[String, Option[String]],
    query: LogicalPlan,
    overwrite: Boolean,
    ifPartitionNotExists: Boolean) extends ParsedStatement {

  // IF NOT EXISTS is only allowed together with OVERWRITE ...
  require(overwrite || !ifPartitionNotExists,
    "IF NOT EXISTS is only valid in INSERT OVERWRITE")
  // ... and only when every partition column is bound to a static value.
  require(partitionSpec.values.forall(_.nonEmpty) || !ifPartitionNotExists,
    "IF NOT EXISTS is only valid with static partitions")

  // Only the data query is a child; `table` is resolved separately.
  override def children: Seq[LogicalPlan] = query :: Nil
}
/**
 * A SHOW TABLE EXTENDED statement, as parsed from SQL.
 */
case class ShowTableStatement(
    namespace: Option[Seq[String]],
    pattern: String,
    partitionSpec: Option[TablePartitionSpec])
  extends ParsedStatement

/**
 * A CREATE NAMESPACE statement, as parsed from SQL.
 */
case class CreateNamespaceStatement(
    namespace: Seq[String],
    ifNotExists: Boolean,
    properties: Map[String, String]) extends ParsedStatement

/**
 * A USE statement, as parsed from SQL.
 *
 * `isNamespaceSet` distinguishes `USE NAMESPACE ns` from plain `USE ns`.
 */
case class UseStatement(isNamespaceSet: Boolean, nameParts: Seq[String]) extends ParsedStatement

/**
 * An ANALYZE TABLE statement, as parsed from SQL.
 */
case class AnalyzeTableStatement(
    tableName: Seq[String],
    partitionSpec: Map[String, Option[String]],
    noScan: Boolean) extends ParsedStatement

/**
 * An ANALYZE TABLE FOR COLUMNS statement, as parsed from SQL.
 */
case class AnalyzeColumnStatement(
    tableName: Seq[String],
    columnNames: Option[Seq[String]],
    allColumns: Boolean) extends ParsedStatement {

  // XOR: either an explicit column list or FOR ALL COLUMNS, never both / neither.
  require(columnNames.isDefined ^ allColumns, "Parameter `columnNames` or `allColumns` are " +
    "mutually exclusive. Only one of them should be specified.")
}
/**
 * A REPAIR TABLE statement, as parsed from SQL.
 */
case class RepairTableStatement(tableName: Seq[String]) extends ParsedStatement

/**
 * A LOAD DATA INTO TABLE statement, as parsed from SQL.
 */
case class LoadDataStatement(
    tableName: Seq[String],
    path: String,
    isLocal: Boolean,
    isOverwrite: Boolean,
    partition: Option[TablePartitionSpec]) extends ParsedStatement

/**
 * A SHOW CREATE TABLE statement, as parsed from SQL.
 *
 * `asSerde` corresponds to SHOW CREATE TABLE ... AS SERDE.
 */
case class ShowCreateTableStatement(
    tableName: Seq[String],
    asSerde: Boolean = false) extends ParsedStatement

/**
 * A CACHE TABLE statement, as parsed from SQL.
 */
case class CacheTableStatement(
    tableName: Seq[String],
    plan: Option[LogicalPlan],
    isLazy: Boolean,
    options: Map[String, String]) extends ParsedStatement

/**
 * An UNCACHE TABLE statement, as parsed from SQL.
 */
case class UncacheTableStatement(
    tableName: Seq[String],
    ifExists: Boolean) extends ParsedStatement

/**
 * A TRUNCATE TABLE statement, as parsed from SQL.
 */
case class TruncateTableStatement(
    tableName: Seq[String],
    partitionSpec: Option[TablePartitionSpec]) extends ParsedStatement

/**
 * A SHOW PARTITIONS statement, as parsed from SQL.
 */
case class ShowPartitionsStatement(
    tableName: Seq[String],
    partitionSpec: Option[TablePartitionSpec]) extends ParsedStatement

/**
 * A REFRESH TABLE statement, as parsed from SQL.
 */
case class RefreshTableStatement(tableName: Seq[String]) extends ParsedStatement

/**
 * A SHOW COLUMNS statement, as parsed from SQL.
 */
case class ShowColumnsStatement(
    table: Seq[String],
    namespace: Option[Seq[String]]) extends ParsedStatement

/**
 * A SHOW CURRENT NAMESPACE statement, as parsed from SQL.
 */
case class ShowCurrentNamespaceStatement() extends ParsedStatement

/**
 * A DESCRIBE FUNCTION statement, as parsed from SQL.
 */
case class DescribeFunctionStatement(
    functionName: Seq[String],
    isExtended: Boolean) extends ParsedStatement

/**
 * SHOW FUNCTIONS statement, as parsed from SQL.
 */
case class ShowFunctionsStatement(
    userScope: Boolean,
    systemScope: Boolean,
    pattern: Option[String],
    functionName: Option[Seq[String]]) extends ParsedStatement

/**
 * DROP FUNCTION statement, as parsed from SQL.
 */
case class DropFunctionStatement(
    functionName: Seq[String],
    ifExists: Boolean,
    isTemp: Boolean) extends ParsedStatement

/**
 * CREATE FUNCTION statement, as parsed from SQL.
 */
case class CreateFunctionStatement(
    functionName: Seq[String],
    className: String,
    resources: Seq[FunctionResource],
    isTemp: Boolean,
    ignoreIfExists: Boolean,
    replace: Boolean) extends ParsedStatement
| darionyaphet/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/statements.scala | Scala | apache-2.0 | 15,036 |
package dk.tennis.compare.rating.trueskill
import dk.tennis.compare.rating.trueskill.model.Result
import dk.tennis.compare.rating.trueskill.model.TrueSkillRating
/**
 * Incremental TrueSkill rating calculator: match results are fed in one at a time
 * and the current skill estimates can be read back at any point.
 */
trait TrueSkill {

  /**
   * Updates both players' skill estimates with the outcome of a single match.
   *
   * @param result the match result
   * @param perfVariance The player's performance variances. Tuple2[player1 variance, player2 variance]
   */
  def addResult(result: Result, perfVariance: Tuple2[Double, Double])

  /**
   * Returns the current skill estimates.
   *
   * @return Map[playerName, playerSkill]
   */
  def getRatings(): Map[String, TrueSkillRating]
} | danielkorzekwa/tennis-player-compare | trueskill/src/main/scala/dk/tennis/compare/rating/trueskill/TrueSkill.scala | Scala | bsd-2-clause | 511 |
/**
* Copyright (C) 2010-2011 LShift Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.lshift.diffa.agent.itest.config
import net.lshift.diffa.agent.itest.support.TestConstants._
import net.lshift.diffa.agent.client.SystemConfigRestClient
import org.junit.Assert._
import com.eaio.uuid.UUID
import net.lshift.diffa.kernel.frontend.DomainDef
import net.lshift.diffa.client.NotFoundException
import net.lshift.diffa.schema.servicelimits.ChangeEventRate
import org.junit.{Ignore, Test}
import org.apache.commons.lang3.RandomStringUtils
// Integration tests for the agent's system-level configuration REST API.
// Marked @Ignore — these require a running agent at `agentURL`.
@Ignore
class SystemConfigTest {

  val client = new SystemConfigRestClient(agentURL)

  @Test
  def shouldBeAbleToCreateAndRemoveSubSpaces {
    // Child space name is nested under the parent via a path separator.
    val parentSpace = RandomStringUtils.randomAlphanumeric(10)
    val childSpace = parentSpace + "/" + RandomStringUtils.randomAlphanumeric(10)
    client.declareDomain(DomainDef(name = parentSpace))
    client.declareDomain(DomainDef(name = childSpace))
    // Removing the parent should cascade to the child ...
    client.removeDomain(parentSpace)
    try {
      // ... so deleting the child again must 404.
      client.removeDomain(childSpace)
      fail("Should have recursively deleted the child space")
    }
    catch {
      case x:NotFoundException => // expected
    }
  }

  @Test(expected = classOf[NotFoundException])
  def nonExistentDomainShouldRaiseError {
    client.removeDomain(RandomStringUtils.randomAlphanumeric(10))
  }

  // The expected NotFoundException is raised by the final get, after the option
  // has been set, verified and deleted.
  @Test(expected = classOf[NotFoundException])
  def shouldSetSystemConfigOption {
    client.setConfigOption("foo", "bar")
    assertEquals("bar", client.getConfigOption("foo"))
    client.deleteConfigOption("foo")
    // This should provoke a 404
    client.getConfigOption("foo")
  }

  @Test
  def shouldSetMultipleSystemConfigOptions {
    client.setConfigOptions(Map("foo" -> "bar", "foz" -> "boz"))
    assertEquals("bar", client.getConfigOption("foo"))
    assertEquals("boz", client.getConfigOption("foz"))
  }

  @Test(expected = classOf[NotFoundException])
  def unknownLimitNameShouldRaiseErrorWhenSettingHardLimit {
    // A random UUID is guaranteed not to name a known service limit.
    client.setHardSystemLimit(new UUID().toString, 111)
  }

  @Test(expected = classOf[NotFoundException])
  def unknownLimitNameShouldRaiseErrorWhenSettingDefaultLimit {
    client.setDefaultSystemLimit(new UUID().toString, 111)
  }

  @Test(expected = classOf[NotFoundException])
  def unknownLimitNameShouldRaiseErrorWhenGettingEffectiveLimit {
    client.getEffectiveSystemLimit(new UUID().toString)
  }

  @Test
  def shouldSetSystemHardLimitFollowedBySoftLimit {
    client.setHardSystemLimit(ChangeEventRate.key, 19)
    client.setDefaultSystemLimit(ChangeEventRate.key, 19) // Assert the default value, since this might have been set by a previous test run
    val oldlimit = client.getEffectiveSystemLimit(ChangeEventRate.key)
    assertEquals(19, oldlimit)
    // Lowering the default below the hard limit should lower the effective limit.
    client.setDefaultSystemLimit(ChangeEventRate.key, 18)
    val newlimit = client.getEffectiveSystemLimit(ChangeEventRate.key)
    assertEquals(18, newlimit)
  }
} | 0x6e6562/diffa | agent/src/test/scala/net/lshift/diffa/agent/itest/config/SystemConfigTest.scala | Scala | apache-2.0 | 3,421 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.process.analytic
import java.awt.image.DataBuffer
import javax.media.jai.RasterFactory
import org.geotools.coverage.CoverageFactoryFinder
import org.geotools.coverage.grid.GridCoverage2D
import org.geotools.data.Query
import org.geotools.data.simple.SimpleFeatureCollection
import org.geotools.util.factory.GeoTools
import org.geotools.geometry.jts.ReferencedEnvelope
import org.geotools.process.ProcessException
import org.geotools.process.factory.{DescribeParameter, DescribeProcess, DescribeResult}
import org.geotools.process.vector.{BBOXExpandingFilterVisitor, HeatmapSurface}
import org.locationtech.geomesa.index.conf.QueryHints
import org.locationtech.geomesa.index.iterators.DensityScan
import org.locationtech.geomesa.process.GeoMesaProcess
import org.locationtech.geomesa.utils.io.WithClose
import org.opengis.coverage.grid.GridGeometry
import org.opengis.filter.Filter
import org.opengis.util.ProgressListener
/**
 * Stripped down version of org.geotools.process.vector.HeatmapProcess.
 *
 * Renders a kernel-density ("heatmap") raster from features whose density values were
 * pre-aggregated by GeoMesa (see [[DensityScan]] hints set in `invertQuery`).
 */
@DescribeProcess(
  title = "Density Map",
  description = "Computes a density map over a set of features stored in Geomesa"
)
class DensityProcess extends GeoMesaProcess {

  import DensityProcess.DefaultRadiusPixels

  /**
   * Builds the output raster. The surface is computed on an envelope buffered by the
   * kernel radius (in pixels) and the extra border is discarded at the end, which avoids
   * kernel edge artifacts.
   *
   * Note: `monitor` is accepted for API compatibility but is not consulted here.
   */
  @throws(classOf[ProcessException])
  @DescribeResult(name = "result", description = "Output raster")
  def execute(@DescribeParameter(name = "data", description = "Input features")
              obsFeatures: SimpleFeatureCollection,
              @DescribeParameter(name = "radiusPixels", description = "Radius of the density kernel in pixels")
              argRadiusPixels: Integer,
              @DescribeParameter(name = "weightAttr", description = "Name of the attribute to use for data point weight", min = 0, max = 1)
              argWeightAttr: String,
              @DescribeParameter(name = "outputBBOX", description = "Bounding box of the output")
              argOutputEnv: ReferencedEnvelope,
              @DescribeParameter(name = "outputWidth", description = "Width of output raster in pixels")
              argOutputWidth: Integer,
              @DescribeParameter(name = "outputHeight", description = "Height of output raster in pixels")
              argOutputHeight: Integer,
              monitor: ProgressListener): GridCoverage2D = {

    val pixels = Option(argRadiusPixels).map(_.intValue).getOrElse(DefaultRadiusPixels)

    // buffer our calculations based on the pixel radius to avoid edge artifacts:
    // widen the grid by `pixels` on every side, and the envelope by the equivalent
    // georeferenced distance.
    val outputWidth = argOutputWidth + 2 * pixels
    val outputHeight = argOutputHeight + 2 * pixels
    val bufferWidth = pixels * argOutputEnv.getWidth / argOutputWidth
    val bufferHeight = pixels * argOutputEnv.getHeight / argOutputHeight
    val envelope = new ReferencedEnvelope(argOutputEnv)
    envelope.expandBy(bufferWidth, bufferHeight)

    // Each feature encodes pre-aggregated (x, y, weight) density points; feed them
    // all into the heatmap kernel smoother.
    val decode = DensityScan.decodeResult(envelope, outputWidth, outputHeight)
    val heatMap = new HeatmapSurface(pixels, envelope, outputWidth, outputHeight)
    try {
      WithClose(obsFeatures.features()) { features =>
        while (features.hasNext) {
          val pts = decode(features.next())
          while (pts.hasNext) {
            val (x, y, weight) = pts.next()
            heatMap.addPoint(x, y, weight)
          }
        }
      }
    } catch {
      case e: Exception => throw new ProcessException("Error processing heatmap", e)
    }

    // Convert from map orientation (Y up) to image orientation (Y down).
    val heatMapGrid = DensityProcess.flipXY(heatMap.computeSurface)

    // create the raster from our unbuffered envelope and discard the buffered pixels in
    // our final image: copy only the interior [pixels, length - pixels) window,
    // shifted back to origin (0, 0). `i` is reset to `pixels` after each row.
    val raster = RasterFactory.createBandedRaster(DataBuffer.TYPE_FLOAT, argOutputWidth, argOutputHeight, 1, null)
    var i, j = pixels
    while (j < heatMapGrid.length - pixels) {
      val row = heatMapGrid(j)
      while (i < row.length - pixels) {
        raster.setSample(i - pixels, j - pixels, 0, row(i))
        i += 1
      }
      j += 1
      i = pixels
    }

    val gcf = CoverageFactoryFinder.getGridCoverageFactory(GeoTools.getDefaultHints)
    gcf.create("Process Results", raster, argOutputEnv)
  }

  /**
   * Given a target query and a target grid geometry returns the query to be used to read the
   * input data of the process involved in rendering. In this process this method is used to:
   * <ul>
   * <li>determine the extent & CRS of the output grid
   * <li>expand the query envelope to ensure stable surface generation
   * <li>modify the query hints to ensure point features are returned
   * </ul>
   * Note that in order to pass validation, all parameters named here must also appear in the
   * parameter list of the <tt>execute</tt> method, even if they are not used there.
   *
   * @param argRadiusPixels the kernel radius in pixels (defaults to [[DensityProcess.DefaultRadiusPixels]])
   * @param targetQuery the query used against the data source
   * @param targetGridGeometry the grid geometry of the destination image
   * @return The transformed query
   */
  @throws(classOf[ProcessException])
  def invertQuery(@DescribeParameter(name = "radiusPixels", description = "Radius to use for the kernel", min = 0, max = 1)
                  argRadiusPixels: Integer,
                  @DescribeParameter(name = "weightAttr", description = "Name of the attribute to use for data point weight", min = 0, max = 1)
                  argWeightAttr: String,
                  @DescribeParameter(name = "outputBBOX", description = "Georeferenced bounding box of the output")
                  argOutputEnv: ReferencedEnvelope,
                  @DescribeParameter(name = "outputWidth", description = "Width of the output raster")
                  argOutputWidth: Integer,
                  @DescribeParameter(name = "outputHeight", description = "Height of the output raster")
                  argOutputHeight: Integer,
                  targetQuery: Query,
                  targetGridGeometry: GridGeometry): Query = {

    if (argOutputWidth == null || argOutputHeight == null) {
      throw new IllegalArgumentException("outputWidth and/or outputHeight not specified")
    } else if (argOutputWidth < 0 || argOutputHeight < 0) {
      throw new IllegalArgumentException("outputWidth and outputHeight must both be positive")
    }

    val pixels = Option(argRadiusPixels).map(_.intValue).getOrElse(DefaultRadiusPixels)

    // buffer our calculations based on the pixel radius to avoid edge artifacts;
    // this mirrors the buffering applied in `execute` so the two stay consistent.
    val outputWidth = argOutputWidth + 2 * pixels
    val outputHeight = argOutputHeight + 2 * pixels
    val bufferWidth = pixels * argOutputEnv.getWidth / argOutputWidth
    val bufferHeight = pixels * argOutputEnv.getHeight / argOutputHeight
    val envelope = new ReferencedEnvelope(argOutputEnv)
    envelope.expandBy(bufferWidth, bufferHeight)

    // Expand the filter's BBOX by the larger buffer dimension so boundary features
    // still contribute to the smoothed surface.
    val filter = {
      val buf = math.max(bufferWidth, bufferHeight)
      targetQuery.getFilter.accept(new BBOXExpandingFilterVisitor(buf, buf, buf, buf), null).asInstanceOf[Filter]
    }

    // Attach density hints so GeoMesa computes the density server-side.
    val invertedQuery = new Query(targetQuery)
    invertedQuery.setFilter(filter)
    invertedQuery.setProperties(null)
    invertedQuery.getHints.put(QueryHints.DENSITY_BBOX, envelope)
    invertedQuery.getHints.put(QueryHints.DENSITY_WIDTH, outputWidth)
    invertedQuery.getHints.put(QueryHints.DENSITY_HEIGHT, outputHeight)
    if (argWeightAttr != null) {
      invertedQuery.getHints.put(QueryHints.DENSITY_WEIGHT, argWeightAttr)
    }
    invertedQuery
  }
}
object DensityProcess {

  /** Kernel radius (in pixels) used when the caller does not supply one. */
  val DefaultRadiusPixels: Int = 10

  /**
   * Flips an XY matrix along the X=Y axis and inverts the Y axis, converting from
   * "map orientation" (Y=0 at the bottom) into the "image orientation" used by
   * GridCoverageFactory (a YX grid with Y=0 at the top).
   *
   * @param grid the grid to flip, indexed as grid(x)(y)
   * @return the flipped grid, indexed as result(row)(col)
   */
  def flipXY(grid: Array[Array[Float]]): Array[Array[Float]] = {
    val width = grid.length       // original X extent -> output column count
    val height = grid(0).length   // original Y extent -> output row count
    // Destination cell (row, col) pulls from source cell (col, height - 1 - row):
    // swapping the indices transposes the matrix, and the (height - 1 - row)
    // term inverts the Y axis.
    Array.tabulate(height, width) { (row, col) =>
      grid(col)(height - 1 - row)
    }
  }
}
| elahrvivaz/geomesa | geomesa-process/geomesa-process-vector/src/main/scala/org/locationtech/geomesa/process/analytic/DensityProcess.scala | Scala | apache-2.0 | 8,719 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.releaser.domain
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.nio.file.Path
import java.util.jar.Manifest
import java.util.zip.ZipFile
import org.joda.time.DateTime
import org.joda.time.format.DateTimeFormat
import scala.collection.JavaConversions._
import scala.util.{Failure, Success, Try}
/** Build metadata extracted from a release artefact's jar manifest: git revision, author and commit time. */
case class ArtefactMetaData(sha:CommitSha, commitAuthor:String, commitDate:DateTime)
object ArtefactMetaData {

  /** Pattern of the `Git-Commit-Date` manifest attribute written at build time. */
  val gitCommitDateFormat = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ")

  /**
   * Reads the build metadata (commit sha, author and commit date) from the
   * META-INF/MANIFEST.MF of the jar at `p`.
   *
   * Fails with the underlying exception when the file cannot be opened as a zip,
   * and with a descriptive error when the archive has no manifest entry.
   *
   * @param p path to the artefact jar
   * @return the extracted metadata, or a failure
   */
  def fromFile(p: Path): Try[ArtefactMetaData] = {
    Try { new ZipFile(p.toFile) }.flatMap { jarFile =>
      try {
        jarFile.entries().filter(_.getName == "META-INF/MANIFEST.MF").toList.headOption.map { ze =>
          val man = new Manifest(jarFile.getInputStream(ze))
          ArtefactMetaData(
            man.getMainAttributes.getValue("Git-Head-Rev"),
            man.getMainAttributes.getValue("Git-Commit-Author"),
            gitCommitDateFormat.parseDateTime(man.getMainAttributes.getValue("Git-Commit-Date"))
          )
        }.toTry(new Exception(s"Failed to retrieve manifest from $p"))
      } finally {
        // Fix: the ZipFile was previously never closed, leaking a file handle per call.
        jarFile.close()
      }
    }
  }

  /** Adds Option -> Try conversion, using the supplied exception as the failure when empty. */
  implicit class OptionPimp[A](opt: Option[A]) {
    def toTry(e: Exception): Try[A] = opt match {
      case Some(x) => Success(x)
      case None => Failure(e)
    }
  }
}
| xnejp03/releaser | src/main/scala/uk/gov/hmrc/releaser/domain/ArtefactMetaData.scala | Scala | apache-2.0 | 2,465 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail
import cats.Eq
import cats.data.EitherT
import cats.effect.IO
import cats.laws.discipline.{
CoflatMapTests,
DeferTests,
FunctorFilterTests,
MonadErrorTests,
MonoidKTests,
SemigroupalTests
}
import cats.laws.discipline.arbitrary.catsLawsArbitraryForPartialFunction
import monix.execution.schedulers.TestScheduler
/**
 * Checks cats-laws discipline suites (Defer, MonadError, MonoidK, CoflatMap,
 * FunctorFilter) for `Iterant[IO, *]`, driven by a Monix TestScheduler.
 */
object TypeClassLawsForIterantIOSuite extends BaseLawsSuite {
  // Fix the effect type so each law suite below is checked against Iterant[IO, *].
  type F[α] = Iterant[IO, α]
  implicit lazy val ec: TestScheduler = TestScheduler()
  // Explicit instance due to weird implicit resolution problem
  implicit lazy val iso: SemigroupalTests.Isomorphisms[F] =
    SemigroupalTests.Isomorphisms.invariant[F]
  // Explicit instance, since Scala can't figure it out below :-(
  lazy val eqEitherT: Eq[EitherT[F, Throwable, Int]] =
    implicitly[Eq[EitherT[F, Throwable, Int]]]
  checkAllAsync("Defer[Iterant[IO]]") { implicit ec =>
    DeferTests[F].defer[Int]
  }
  checkAllAsync("MonadError[Iterant[IO]]") { _ =>
    // Pull the pre-computed Eq into implicit scope for the MonadError laws.
    implicit val eqE = eqEitherT
    MonadErrorTests[F, Throwable].monadError[Int, Int, Int]
  }
  checkAllAsync("MonoidK[Iterant[IO]]") { implicit ec =>
    MonoidKTests[F].monoidK[Int]
  }
  checkAllAsync("CoflatMap[Iterant[IO]]") { implicit ec =>
    CoflatMapTests[F].coflatMap[Int, Int, Int]
  }
  checkAllAsync("FunctorFilter[Iterant[IO]]") { implicit ec =>
    FunctorFilterTests[F].functorFilter[Int, Int, Int]
  }
}
| alexandru/monifu | monix-tail/shared/src/test/scala/monix/tail/TypeClassLawsForIterantIOSuite.scala | Scala | apache-2.0 | 2,079 |
package chandu0101.scalajs.react.components.fascades
import japgolly.scalajs.react.TopNode
import org.scalajs.dom.Event
import scala.scalajs.js
import scala.scalajs.js.Dynamic.{global => g, literal => json, newInstance => jsnew}
import scala.scalajs.js.JSConverters.genTravConvertible2JSRichGenTrav
import scala.scalajs.js.annotation.JSName
/**
* Created by chandrasekharkode .
*/
// Scala.js facades for the subset of the Google Maps JavaScript API used by
// this component. Each @JSName binds the Scala class to the corresponding
// global JS constructor; method bodies are js.native stubs.
@JSName("google.maps.LatLng")
class GLatLng(lat: Double, lng: Double) extends js.Object
@JSName("google.maps.Map")
class GMap(node: TopNode, options: js.Dynamic) extends js.Object
@JSName("google.maps.Point")
class GPoint(x : Int, y: Int) extends js.Object
@JSName("google.maps.Size")
class GSize(width : Int, height: Int) extends js.Object
// Map marker; setMap attaches the marker to (or detaches it from) a map.
@JSName("google.maps.Marker")
class GMarker(ops: js.Dynamic) extends js.Object {
  def setMap(map : GMap) : Unit = js.native
}
// Popup bubble anchored to a marker.
@JSName("google.maps.InfoWindow")
class GInfoWindow extends js.Object {
  def setContent(content : String) : Unit = js.native
  def open(map : GMap, marker:GMarker) : Unit = js.native
}
// Event namespace; overloads cover zero-arg and Event-carrying callbacks.
@JSName("google.maps.event")
class GEvent extends js.Object {
  def addListener(marker : GMarker , tpe : String , callback : js.Function0[Unit]) :Unit = js.native
  def addListener(marker : GMarker , tpe : String , callback : js.Function1[Event,Unit]) : Unit = js.native
}
@JSName("google.maps.event.addListener")
class GAddListener(marker : GMarker , tpe : String , callback : js.Function) extends js.Object
@JSName("google.maps.event.clearInstanceListeners")
class GClearInstanceListeners(marker : GMarker) extends js.Object
@JSName("google.maps.event.clearListeners")
class GClearListeners(marker : GMarker , `type` : String) extends js.Object
// Immutable Scala-side value types, each convertible to its JS facade
// counterpart via a toG* method.
case class Point(x: Int, y: Int) {
  def toGPoint = new GPoint(x,y)
}
case class Size(width: Int, height: Int) {
  def toGSize = new GSize(width,height)
}
/*
 Custom marker image.
 url    = image location
 size   = displayed size, e.g. a marker 20 pixels wide by 32 pixels tall
 origin = top-left of the sprite within the image, e.g. (0, 0)
 anchor = point of the image anchored to the map position, e.g. the base of
          a flagpole at (0, 32)
*/
case class Icon(url : String,size :Size,origin : Point,anchor : Point) {
  def toGIcon = json( url = url , size = size.toGSize, origin = origin.toGPoint , anchor = anchor.toGPoint)
}
/*
 Shapes define the clickable region of the icon. The type corresponds to an
 HTML <area> element: 'poly' traces a polygon as a series of X,Y points; the
 final coordinate closes the polygon by connecting back to the first.
*/
case class Shape(coords : List[Int] , tpe : String ) {
  def toGShape = json("coords" -> coords.toJSArray,"type" -> tpe)
}
case class LatLng(lat: Double, lng: Double) {
  def toGlatlng = new GLatLng(lat, lng)
}
// Marker description. icon/shape default to null (not Option) so that the
// generated JS literal carries plain null, as the Maps API expects.
case class Marker(position: LatLng, title: String = "" , icon : Icon = null ,shape : Shape = null ,zIndex : Int = 0 ,draggable : Boolean = false ,content : String = "") {
  def toGMarker(map: GMap) = json(map = map, position = position.toGlatlng,
    title = title,
    icon = if( icon != null) icon.toGIcon else null,
    shape = if(shape != null) shape.toGShape else null,
    zIndex = zIndex,
    draggable = draggable)
}
case class MapOptions(center : LatLng,zoom : Int = 4) {
def toGMapOptions = json( center = center.toGlatlng , zoom = zoom)
} | coreyauger/scalajs-react-components | core/src/main/scala/chandu0101/scalajs/react/components/fascades/GoogleMapFascade.scala | Scala | apache-2.0 | 3,289 |
package com.nekopiano.scala.processing.sandbox.poc.glow
import com.nekopiano.scala.processing.{ScalaPApp, ScalaPAppCompanion, ScalaPApplet, ThreeDimensionalPApp}
import processing.core.{PGraphics, PImage, PVector}
import processing.opengl.{PGL, PGraphicsOpenGL}
//import javax.media.opengl.*
/**
* Glow w/ OpenGL
*
* Created on 28/07/2016.
*/
class GlowOGLApp extends ThreeDimensionalPApp {
  // References:
  // http://takuma-art.blogspot.jp/2009/08/processingglow.html
  // http://log.nissuk.info/2012/02/processing-20a4-glow-openglapplet.html

  // OpenGL-backed graphics context, captured in setup() so drawImage() can
  // adjust GL blending state.
  var pgl: PGraphicsOpenGL = null

  override def settings() {
    // P2D and P3D renderers are based on OpenGL.
    size(600, 400, P2D)
    //size(600, 400, P3D)
    // Optional rendering hints (left disabled):
    // - ENABLE_OPENGL_4X_SMOOTH: enables anti-aliasing
    // - DISABLE_OPENGL_ERROR_REPORT: disables error reporting
    // see: http://www.technotype.net/processing/reference/hint_.html
    // hint(ENABLE_OPENGL_4X_SMOOTH)
    // hint(DISABLE_OPENGL_ERROR_REPORT);
    smooth()
  }

  override def setup() {
    background(10)
    // Grab the PGraphicsOpenGL behind the sketch surface.
    pgl = super.getGraphics().asInstanceOf[PGraphicsOpenGL]
    img = setupImage(250, 250)
  }

  // Off-screen sprite holding the pre-rendered, blurred glow disc.
  var img: PGraphics = null

  /**
   * Renders the glow sprite once: concentric white discs whose opacity grows
   * towards the centre, softened with a blur filter.
   */
  def setupImage(w: Int, h: Int) = {
    img = createGraphics(w, h, P2D)
    //img = createGraphics(w, h, P3D)
    val r = img.width / 2f
    img.beginDraw()
    img.noStroke()
    img.smooth()
    img.colorMode(HSB, 360, 100, 100, 100)
    val steps = 2
    // BUG FIX: the original `(steps to 1)` is an empty range, so no discs
    // were ever drawn. Count down explicitly from the outermost (faintest)
    // disc to the innermost (brightest) one.
    (steps to 1 by -1).foreach(i => {
      img.fill(0, 0, 100, 100 / i)
      img.ellipse(img.width / 2, img.height / 2, r * i / steps, r * i / steps)
    })
    img.filter(BLUR, r / 10)
    img.endDraw()
    img
  }

  var prevMouseX: Int = 0
  var prevMouseY: Int = 0
  // aa drives the trail fade, ss the sprite scale boost; both decay each frame.
  var aa: Float = 0
  var ss: Float = 0
  var hueValue = 0

  override def draw() {
    fill(250)
    box(100)
    rect(10, 20, 30, 40)
    // Mouse speed feeds both the fade strength and the sprite scale.
    val distance = sqrt(pow(mouseX - prevMouseX, 2) + pow(mouseY - prevMouseY, 2))
    aa = max(50, min(aa + distance / 10, 100))
    ss = max(0, min(ss + distance / 10, 50))
    val a = 100 - aa
    val s = (100 - ss) / 100
    // Translucent black rectangle leaves a motion trail of earlier frames.
    fill(0, 0, 0, a)
    rect(0, 0, width, height)
    // Layer the sprite at several scales for a stronger glow.
    drawImage(mouseX, mouseY, 2 * s)
    drawImage(mouseX, mouseY, 1.5f * s)
    drawImage(mouseX, mouseY, 1 * s)
    drawImage(mouseX, mouseY, 0.5f * s)
    hueValue = (hueValue + 1) % 360
    prevMouseX = mouseX
    prevMouseY = mouseY
    aa = if (0 < aa) aa - 1 else 0
    ss = if (0 < ss) ss - 1 else 0
  }

  /**
   * Draws the glow sprite at (x, y) scaled by s, using additive alpha
   * blending (GL_SRC_ALPHA, GL_ONE) so overlapping sprites brighten.
   */
  def drawImage(x: Int, y: Int, s: Float) {
    val gl = pgl.beginPGL()
    gl.enable(PGL.BLEND)
    gl.blendFunc(PGL.SRC_ALPHA, PGL.ONE)
    colorMode(HSB, 360, 100, 100, 100)
    imageMode(CENTER)
    tint(hueValue, 50, 100, 100)
    image(img, x, y, img.width * s, img.height * s)
    pgl.endPGL()
  }
}
/** Companion providing the sketch launcher plumbing from ScalaPAppCompanion. */
object GlowOGLApp extends ScalaPAppCompanion {
}
//
//case class Star(starImg:PImage, pos:Point, vel:PVector, w:Float, h:Float)(implicit val sp5:ThreeDimensionalPApp) {
// import sp5._
//
// def display() {
// pushMatrix()
// translate(0, 0, pos.z)
// image(starImg, pos.x-w/2, pos.y-h/2, w, h)
// popMatrix()
// }
//
//
// def move(){
// pos.x+=random(-5,5);
// pos.y+=random(-5,5);
// pos.z+=random(-10,10);
// }
//
// def bounce(){
// if(pos.x+w/2>width || pos.x-w/2<0){
// vel.x*= -1;
// if(pos.x+w/2>width) pos.x = width-w/2
// else pos.x = w/2
// }
// if(pos.y+h/2>height || pos.y-h/2<0){
// vel.y*= -1;
// if(pos.y+h/2>height) pos.y = height-h/2
// else pos.y = h/2
// }
// }
//
//}
| lamusique/ScalaProcessing | samples/src/test/scala/com/nekopiano/scala/processing/sandbox/poc/glow/GlowOGL.scala | Scala | apache-2.0 | 3,764 |
package com.folio_sec.example.domain.bitemporal.scala_api
/** Default singleton instance of the service. */
object BitemporalOrderStatusService extends BitemporalOrderStatusService
/** Scala-facing service for BitemporalOrderStatus; behaviour inherited from the generated abstract base. */
trait BitemporalOrderStatusService extends BitemporalOrderStatusServiceAbstract {}
| folio-sec/reladomo-scala | sample/src/main/scala/com/folio_sec/example/domain/bitemporal/scala_api/BitemporalOrderStatusService.scala | Scala | apache-2.0 | 216 |
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.core
import akka.util.ByteString
import org.ensime.api._
/**
* Provides a generic message encoding/dblocking I/O API for reading and writing to the wire.
*/
trait Protocol {

  /**
   * Attempt to read a single message from the given ByteString
   * @param bytes The input bytes
   * @return a tuple containing an optional read message (None when `bytes`
   *         does not yet hold a complete message) and the remaining bytes
   *         after the message is removed.
   */
  def decode(bytes: ByteString): (Option[RpcRequestEnvelope], ByteString)

  /** Serialises a response envelope into its wire representation. */
  def encode(msg: RpcResponseEnvelope): ByteString
}
| ensime/ensime-server | core/src/main/scala/org/ensime/core/Protocol.scala | Scala | gpl-3.0 | 670 |
package org.scalacoin.marshallers.script
import org.scalacoin.script._
import org.scalacoin.script.constant._
import org.scalacoin.script.crypto.{OP_CHECKMULTISIGVERIFY, OP_CHECKMULTISIG}
import org.scalacoin.util.{BitcoinSLogger, Factory, BitcoinSUtil}
import org.slf4j.LoggerFactory
import scala.annotation.tailrec
import scala.util.{Failure, Success, Try}
/**
* Created by chris on 1/7/16.
*/
trait ScriptParser extends Factory[List[ScriptToken]] with BitcoinSLogger {
/**
* Parses a list of bytes into a list of script tokens
* @param bytes
* @return
*/
def fromBytes(bytes : Seq[Byte]) : List[ScriptToken] = {
val scriptTokens : List[ScriptToken] = parse(bytes)
scriptTokens
}
/**
* Parses an asm output script of a transaction
* example: "OP_DUP OP_HASH160 e2e7c1ab3f807151e832dd1accb3d4f5d7d19b4b OP_EQUALVERIFY OP_CHECKSIG"
* example: ["0", "IF 0x50 ENDIF 1", "P2SH,STRICTENC", "0x50 is reserved (ok if not executed)"] (from script_valid.json)
* @param str
* @return
*/
def fromString(str : String) : List[ScriptToken] = {
if (str.size > 1 && str.substring(0,2) == "0x" && str.split(" ").size == 1) {
//parse this as a byte array that is led with a 0x for example
//0x4e03000000ffff
val hex = str.substring(2,str.size)
fromBytes(BitcoinSUtil.decodeHex(hex))
} else {
val scriptTokens : List[ScriptToken] = parse(str)
scriptTokens
}
}
/**
* Parses a string to a sequence of script tokens
* example: "OP_DUP OP_HASH160 e2e7c1ab3f807151e832dd1accb3d4f5d7d19b4b OP_EQUALVERIFY OP_CHECKSIG"
* example: ["0", "IF 0x50 ENDIF 1", "P2SH,STRICTENC", "0x50 is reserved (ok if not executed)"] (from script_valid.json)
* @param str
* @return
*/
private def parse(str : String) : List[ScriptToken] = {
logger.debug("Parsing string: " + str + " into a list of script tokens")
@tailrec
def loop(operations : List[String], accum : List[Byte]) : List[Byte] = {
/* logger.debug("Attempting to parse: " + operations.headOption)
logger.debug("Accum: " + accum)*/
operations match {
//for parsing strings like 'Az', need to remove single quotes
//example: https://github.com/bitcoin/bitcoin/blob/master/src/test/data/script_valid.json#L24
case h :: t if (h.size > 0 && h.head == ''' && h.last == ''') =>
logger.debug("Found a string constant")
val strippedQuotes = h.replace("'","")
if (strippedQuotes.size == 0) {
loop(t, OP_0.bytes ++ accum)
} else {
val bytes : Seq[Byte] = BitcoinSUtil.decodeHex(BitcoinSUtil.flipEndianess(strippedQuotes.getBytes.toList))
val bytesToPushOntoStack : List[ScriptToken] = (bytes.size > 75) match {
case true =>
val scriptNumber = ScriptNumberFactory.fromHex(BitcoinSUtil.flipEndianess(BitcoinSUtil.longToHex(bytes.size)))
bytes.size match {
case size if size < Byte.MaxValue =>
List(scriptNumber,OP_PUSHDATA1)
case size if size < Short.MaxValue =>
List(scriptNumber,OP_PUSHDATA2)
case size if size < Int.MaxValue =>
List(scriptNumber,OP_PUSHDATA4)
}
case false => List(BytesToPushOntoStackFactory.fromNumber(bytes.size).get)
}
loop(t, bytes.toList ++ bytesToPushOntoStack.flatMap(_.bytes) ++ accum)
}
//if we see a byte constant in the form of "0x09adb"
case h :: t if (h.size > 1 && h.substring(0,2) == "0x") =>
loop(t,BitcoinSUtil.decodeHex(h.substring(2,h.size).toLowerCase).reverse ++ accum)
//skip the empty string
case h :: t if (h == "") => loop(t,accum)
case h :: t if (h == "0") => loop(t, OP_0.bytes ++ accum)
case h :: t if (ScriptOperationFactory.fromString(h).isDefined) =>
logger.debug("Founding a script operation in string form i.e. NOP or ADD")
val op = ScriptOperationFactory.fromString(h).get
loop(t,op.bytes ++ accum)
case h :: t if (tryParsingLong(h)) =>
logger.debug("Found a decimal number")
val hexLong = BitcoinSUtil.flipEndianess(BitcoinSUtil.longToHex(h.toLong))
val bytesToPushOntoStack = BytesToPushOntoStackFactory.fromNumber(hexLong.size / 2).get
//convert the string to int, then convert to hex
loop(t, BitcoinSUtil.decodeHex(hexLong) ++ bytesToPushOntoStack.bytes ++ accum)
//means that it must be a BytesToPushOntoStack followed by a script constant
case h :: t =>
logger.debug("Generic h :: t")
//find the size of the string in bytes
val bytesToPushOntoStack = BytesToPushOntoStackImpl(h.size / 2)
loop(t, BitcoinSUtil.decodeHex(BitcoinSUtil.flipEndianess(h)) ++ bytesToPushOntoStack.bytes ++ accum)
case Nil => accum
}
}
if (tryParsingLong(str) && str.size > 1 && str.substring(0,2) != "0x") {
//for the case when there is just a single decimal constant
//i.e. "8388607"
List(ScriptNumberFactory.fromNumber(parseLong(str)))
}
else if (BitcoinSUtil.isHex(str)) {
//if the given string is hex, it is pretty straight forward to parse it
//convert the hex string to a byte array and parse it
val bytes = BitcoinSUtil.decodeHex(str)
parse(bytes)
} else {
//this handles weird cases for parsing with various formats in bitcoin core.
//take a look at https://github.com/bitcoin/bitcoin/blob/605c17844ea32b6d237db6d83871164dc7d59dab/src/core_read.cpp#L53-L88
//for the offical parsing algorithm, for examples of weird formats look inside of
//https://github.com/bitcoin/bitcoin/blob/master/src/test/data/script_valid.json
val parsedBytesFromString = loop(str.split(" ").toList, List()).reverse
logger.info("Parsed bytes from the given string: " + BitcoinSUtil.encodeHex(parsedBytesFromString))
parse(parsedBytesFromString)
}
}
/**
* Parses a byte array into a the asm operations for a script
* will throw an exception if it fails to parse a op code
* @param bytes
* @return
*/
private def parse(bytes : List[Byte]) : List[ScriptToken] = {
logger.debug("Parsing byte list: " + bytes + " into a list of script tokens")
@tailrec
def loop(bytes : List[Byte], accum : List[ScriptToken]) : List[ScriptToken] = {
logger.debug("Byte to be parsed: " + bytes.headOption)
bytes match {
case h :: t =>
val op = ScriptOperationFactory.fromByte(h).get
val parsingHelper : ParsingHelper[Byte] = parseOperationByte(op,accum,t)
loop(parsingHelper.tail,parsingHelper.accum)
case Nil => accum
}
}
loop(bytes, List()).reverse
}
private def parse(bytes : Seq[Byte]) : List[ScriptToken] = parse(bytes.toList)
/**
* Parses a redeem script from the given script token
* @param scriptToken
* @return
*/
def parseRedeemScript(scriptToken : ScriptToken) : Try[List[ScriptToken]] = {
val redeemScript : Try[List[ScriptToken]] = Try(parse(scriptToken.bytes))
redeemScript
}
/**
* Slices the amount of bytes specified in the bytesToPushOntoStack parameter and then creates a script constant
* from those bytes. Returns the script constant and the byte array without the script constant
* @param bytesToPushOntoStack
* @param data
* @tparam T
* @return
*/
private def sliceConstant[T](bytesToPushOntoStack: BytesToPushOntoStack, data : List[T]) : (List[T], List[T]) = {
val finalIndex = bytesToPushOntoStack.opCode
val dataConstant = data.slice(0,finalIndex)
(dataConstant,data.slice(finalIndex,data.size))
}
/**
* Parses the bytes in string format, an example input would look like this
* "0x09 0x00000000 0x00000000 0x10"
* see https://github.com/bitcoin/bitcoin/blob/master/src/test/data/script_valid.json#L21-L25
* for examples of this
* @param s
* @return
*/
def parseBytesFromString(s: String) : List[ScriptConstant] = {
logger.debug("Parsing bytes from string " + s)
val scriptConstants : List[ScriptConstant] = (raw"\b0x([0-9a-f]+)\b".r
.findAllMatchIn(s.toLowerCase)
.map(g =>
// 1 hex = 4 bits therefore 16 hex characters * 4 bits = 64
// if it is not smaller than 16 hex characters it cannot
//fit inside of a scala long
//therefore store it as a script constant
if (g.group(1).size <= 16) {
ScriptNumberFactory.fromHex(g.group(1))
} else {
ScriptConstantFactory.fromHex(g.group(1))
}).toList)
scriptConstants
}
sealed case class ParsingHelper[T](tail : List[T], accum : List[ScriptToken])
/**
* Parses an operation if the tail is a List[Byte]
* If the operation is a bytesToPushOntoStack, it pushes the number of bytes onto the stack
* specified by the bytesToPushOntoStack
* i.e. If the operation was BytesToPushOntoStackImpl(5), it would slice 5 bytes off of the tail and
* places them into a ScriptConstant and add them to the accumulator.
* @param op
* @param accum
* @param tail
* @return
*/
private def parseOperationByte(op : ScriptOperation, accum : List[ScriptToken], tail : List[Byte]) : ParsingHelper[Byte] = {
op match {
case bytesToPushOntoStack : BytesToPushOntoStack =>
logger.debug("Parsing operation byte: " +bytesToPushOntoStack )
//means that we need to push x amount of bytes on to the stack
val (constant,newTail) = sliceConstant(bytesToPushOntoStack,tail)
val scriptConstant = new ScriptConstantImpl(constant)
ParsingHelper(newTail,scriptConstant :: bytesToPushOntoStack :: accum)
case OP_PUSHDATA1 => parseOpPushData(op,accum,tail)
case OP_PUSHDATA2 => parseOpPushData(op,accum,tail)
case OP_PUSHDATA4 => parseOpPushData(op,accum,tail)
case _ =>
//means that we need to push the operation onto the stack
ParsingHelper(tail,op :: accum)
}
}
/**
* Parses OP_PUSHDATA operations correctly. Slices the appropriate amount of bytes off of the tail and pushes
* them onto the accumulator.
* @param op
* @param accum
* @param tail
* @return
*/
private def parseOpPushData(op : ScriptOperation, accum : List[ScriptToken], tail : List[Byte]) : ParsingHelper[Byte] = {
op match {
case OP_PUSHDATA1 =>
//next byte is size of the script constant
val bytesToPushOntoStack = ScriptNumberFactory.fromNumber(Integer.parseInt(BitcoinSUtil.encodeHex(tail.head),16))
val scriptConstantBytes = tail.slice(1,(bytesToPushOntoStack.num+1).toInt)
val scriptConstant = ScriptConstantFactory.fromBytes(scriptConstantBytes)
val restOfBytes = tail.slice((bytesToPushOntoStack.num+1).toInt,tail.size)
buildParsingHelper(op,bytesToPushOntoStack,scriptConstant,restOfBytes,accum)
case OP_PUSHDATA2 =>
//next 2 bytes is the size of the script constant
val scriptConstantHex = BitcoinSUtil.flipEndianess(tail.slice(0,2))
val bytesToPushOntoStack = ScriptNumberFactory.fromNumber(Integer.parseInt(scriptConstantHex,16))
val scriptConstantBytes = tail.slice(2,(bytesToPushOntoStack.num + 2).toInt)
val scriptConstant = ScriptConstantFactory.fromBytes(scriptConstantBytes)
val restOfBytes = tail.slice((bytesToPushOntoStack.num + 2).toInt,tail.size)
buildParsingHelper(op,bytesToPushOntoStack,scriptConstant,restOfBytes,accum)
case OP_PUSHDATA4 =>
//next 4 bytes is the size of the script constant
val scriptConstantHex = BitcoinSUtil.flipEndianess(tail.slice(0,4))
val bytesToPushOntoStack = ScriptNumberFactory.fromNumber(Integer.parseInt(scriptConstantHex, 16))
val scriptConstantBytes = tail.slice(4,bytesToPushOntoStack.num.toInt + 4)
val scriptConstant = ScriptConstantFactory.fromBytes(scriptConstantBytes)
val restOfBytes = tail.slice(bytesToPushOntoStack.num.toInt + 4,tail.size)
buildParsingHelper(op,bytesToPushOntoStack,scriptConstant,restOfBytes,accum)
case _ : ScriptToken => throw new RuntimeException("parseOpPushData can only parse OP_PUSHDATA operations")
}
}
/**
* Helper function to build the parsing helper for parsing an OP_PUSHDATA operation
* @param op the OP_PUSHDATA operation being added to the accum
* @param bytesToPushOntoStack the number of bytes that are pushed onto the stack by the OP_PUSHDATA operation
* @param scriptConstant the constant that is being pushed onto the stack by the OP_PUSHDATA operation
* @param restOfBytes the remaining bytes that need to be parsed
* @param accum the accumulator filled with script tokens that have already been parsed
* @return
*/
private def buildParsingHelper( op : ScriptOperation, bytesToPushOntoStack : ScriptNumber,
scriptConstant : ScriptConstant, restOfBytes : List[Byte], accum : List[ScriptToken]) : ParsingHelper[Byte] = {
if (bytesToPushOntoStack.num == 0) {
//if we need to push 0 bytes onto the stack we do not add the script constant
ParsingHelper[Byte](restOfBytes,
bytesToPushOntoStack :: op :: accum)
} else ParsingHelper[Byte](restOfBytes,
scriptConstant :: bytesToPushOntoStack :: op :: accum)
}
/**
* Parses an operation if the tail is a List[String]
* If the operation is a bytesToPushOntoStack, it pushes the number of bytes onto the stack
* specified by the bytesToPushOntoStack
* i.e. If the operation was BytesToPushOntoStackImpl(5), it would slice 5 bytes off of the tail and
* places them into a ScriptConstant and add them to the accumulator.
* @param op
* @param accum
* @param tail
* @return
*/
private def parseOperationString(op : ScriptOperation, accum : List[ScriptToken], tail : List[String]) : ParsingHelper[String] = {
op match {
case bytesToPushOntoStack : BytesToPushOntoStack =>
//means that we need to push x amount of bytes on to the stack
val (constant,newTail) = sliceConstant[String](bytesToPushOntoStack,tail)
val scriptConstant = ScriptConstantImpl(constant.mkString)
ParsingHelper(newTail,scriptConstant :: bytesToPushOntoStack :: accum)
case _ =>
//means that we need to push the operation onto the stack
ParsingHelper(tail,op :: accum)
}
}
  /**
   * Checks if a string can be parsed as a long (decimal, or hex with a "0x" prefix)
   * @param str
   * @return
   */
  private def tryParsingLong(str : String) = try {
    parseLong(str)
    true
  } catch {
    // NOTE(review): this broad catch also swallows fatal errors; kept as-is
    // to preserve behaviour, but NonFatal would be safer.
    case _ : Throwable => false
  }

  // Parses a decimal long, or a hex long when prefixed with "0x".
  // NOTE(review): `substring(0,2)` throws for inputs shorter than two chars,
  // so e.g. "5" makes tryParsingLong return false and the caller treats the
  // token as a hex constant push instead of a decimal number — confirm this
  // is intended before changing it.
  private def parseLong(str : String) = {
    if (str.substring(0,2) == "0x") {
      val strRemoveHex = str.substring(2,str.size)
      BitcoinSUtil.hexToLong(strRemoveHex)
    } else str.toLong
  }
}
object ScriptParser extends ScriptParser
| TomMcCabe/scalacoin | src/main/scala/org/scalacoin/marshallers/script/ScriptParser.scala | Scala | mit | 15,028 |
package core
import io.apibuilder.spec.v0.models.Enum
import java.net.{MalformedURLException, URL}
import scala.util.{Failure, Success, Try}
object Util {

  /**
   * Selects the named (colon-prefixed) parameters out of a path template.
   * E.g. /:org/:service/foo would return Seq("org", "service"). Anything
   * from the first "." onward is stripped, so ":id.json" yields "id".
   */
  def namedParametersInPath(path: String): Seq[String] = {
    path.split("/").flatMap { name =>
      if (name.startsWith(":")) {
        val idx = if (name.indexOf(".") >= 0) {
          name.indexOf(".")
        } else {
          name.length
        }
        Some(name.slice(1, idx))
      } else {
        None
      }
    }
  }

  /** True when `value` matches one of the enum's declared value names. */
  def isValidEnumValue(enum: Enum, value: String): Boolean = {
    enum.values.map(_.name).contains(value)
  }

  /** Lenient check that the value looks like an http(s) or file URI. */
  def isValidUri(value: String): Boolean = {
    val formatted = value.trim.toLowerCase
    formatted.startsWith("http://") || formatted.startsWith("https://") || formatted.startsWith("file://")
  }

  /**
   * Validates a URI, returning human-readable errors (empty when valid).
   */
  def validateUri(value: String): Seq[String] = {
    val formatted = value.trim.toLowerCase
    if (!formatted.startsWith("http://") && !formatted.startsWith("https://") && !formatted.startsWith("file://")) {
      Seq(s"URI[$value] must start with http://, https://, or file://")
    } else if (formatted.endsWith("/")) {
      Seq(s"URI[$value] cannot end with a '/'")
    } else {
      Try(new URL(value)) match {
        case Success(_) => Nil
        case Failure(e: MalformedURLException) => Seq(s"URL is not valid: ${e.getMessage}")
        // BUG FIX: the original matched only MalformedURLException inside the
        // Failure, so any other exception type escaped as a MatchError.
        case Failure(e) => Seq(s"URL is not valid: ${e.getMessage}")
      }
    }
  }

}
| gheine/apidoc | core/src/main/scala/core/Util.scala | Scala | mit | 1,509 |
package com.twitter.io
import com.twitter.util._
import java.util.concurrent.atomic.AtomicBoolean
/**
* An ActivitySource for ClassLoader resources.
*/
class ClassLoaderActivitySource private[io] (classLoader: ClassLoader, pool: FuturePool)
    extends ActivitySource[Buf] {

  private[io] def this(classLoader: ClassLoader) = this(classLoader, FuturePool.unboundedPool)

  /**
   * Returns an Activity for the named classpath resource. The resource is
   * read lazily (on first observation) and at most once; the resulting state
   * is an Ok(Buf) with the full contents, or Failed with
   * ActivitySource.NotFound when the resource does not exist.
   */
  def get(name: String): Activity[Buf] = {
    // This Var is updated at most once since ClassLoader
    // resources don't change (do they?).
    val runOnce = new AtomicBoolean(false)
    val p = new Promise[Activity.State[Buf]]

    // Defer loading until the first observation
    val v = Var.async[Activity.State[Buf]](Activity.Pending) { value =>
      // compareAndSet guards against concurrent first observers: only one
      // wins the right to schedule the read on the pool.
      if (runOnce.compareAndSet(false, true)) {
        pool {
          classLoader.getResourceAsStream(name) match {
            case null => p.setValue(Activity.Failed(ActivitySource.NotFound))
            case stream =>
              val reader = InputStreamReader(stream, pool)
              BufReader.readAll(reader) respond {
                case Return(buf) =>
                  p.setValue(Activity.Ok(buf))
                case Throw(cause) =>
                  p.setValue(Activity.Failed(cause))
              } ensure {
                // InputStreamReader ignores the deadline in close
                reader.close(Time.Undefined)
              }
          }
        }
      }
      // Propagate the single resolved state into the Var.
      p.onSuccess(value() = _)
      Closable.nop
    }

    Activity(v)
  }
}
| twitter/util | util-core/src/main/scala/com/twitter/io/ClassLoaderActivitySource.scala | Scala | apache-2.0 | 1,497 |
package com.twitter.finagle.serverset2
import com.twitter.conversions.DurationOps._
import com.twitter.finagle.serverset2.client._
import com.twitter.finagle.service.Backoff
import com.twitter.finagle.stats.NullStatsReceiver
import com.twitter.finagle.zookeeper.ZkInstance
import com.twitter.io.Buf
import com.twitter.util._
import org.scalatest.concurrent.Eventually._
import org.scalatest.time._
import org.scalatest.{BeforeAndAfter, FunSuite}
class ZkSessionEndToEndTest extends FunSuite with BeforeAndAfter {
  // Session timeout used both for ZK sessions and as the retry backoff step.
  val zkTimeout = 100.milliseconds
  val retryStream = new RetryStream(Backoff.const(zkTimeout))

  // Fresh embedded ZooKeeper per test, managed by before/after below.
  @volatile var inst: ZkInstance = _

  // Bridge Twitter Durations to ScalaTest Spans for `eventually` timing.
  def toSpan(d: Duration): Span = Span(d.inNanoseconds, Nanoseconds)

  implicit val patienceConfig =
    PatienceConfig(timeout = toSpan(1.second), interval = toSpan(zkTimeout))

  /* This can be useful if you want to retain ZK logging output for debugging.
  val app = new org.apache.log4j.ConsoleAppender
  app.setTarget(org.apache.log4j.ConsoleAppender.SYSTEM_ERR)
  app.setLayout(new org.apache.log4j.SimpleLayout)
  app.activateOptions()
  org.apache.log4j.Logger.getRootLogger().addAppender(app)
   */

  before {
    inst = new ZkInstance
    inst.start()
  }

  after {
    inst.stop()
  }
// COORD-339
if (!sys.props.contains("SKIP_FLAKY")) test("Session expiration 2") {
implicit val timer = new MockTimer
val connected: (WatchState => Boolean) = {
case WatchState.SessionState(SessionState.SyncConnected) => true
case _ => false
}
val notConnected: (WatchState => Boolean) = w => !connected(w)
val session1 = ZkSession.retrying(
retryStream,
() => ZkSession(retryStream, inst.zookeeperConnectString, statsReceiver = NullStatsReceiver)
)
@volatile var states = Seq.empty[SessionState]
val state = session1 flatMap { session1 =>
session1.state
}
state.changes.register(Witness({ ws: WatchState =>
ws match {
case WatchState.SessionState(s) => states = s +: states
case _ =>
}
}))
Await.result(state.changes.filter(connected).toFuture())
val cond = state.changes.filter(notConnected).toFuture()
val session2 = {
val z = Var.sample(session1)
val p = new Array[Byte](z.sessionPasswd.length)
z.sessionPasswd.write(p, 0)
ClientBuilder()
.hosts(inst.zookeeperConnectString)
.sessionTimeout(zkTimeout)
.sessionId(z.sessionId)
.password(Buf.ByteArray.Owned(p))
.reader()
}
Await.result(session2.state.changes.filter(connected).toFuture())
session2.value.close()
Await.result(cond)
Await.result(state.changes.filter(connected).toFuture())
assert(
states == Seq(
SessionState.SyncConnected,
SessionState.Expired,
SessionState.Disconnected,
SessionState.SyncConnected
)
)
}
// COORD-339
if (!sys.props.contains("SKIP_FLAKY")) test("ZkSession.retrying") {
implicit val timer = new MockTimer
val watch = Stopwatch.start()
val varZkSession = ZkSession.retrying(
retryStream,
() => ZkSession(retryStream, inst.zookeeperConnectString, statsReceiver = NullStatsReceiver)
)
val varZkState = varZkSession flatMap { _.state }
@volatile var zkStates = Seq[(SessionState, Duration)]()
varZkState.changes.register(Witness({ ws: WatchState =>
ws match {
case WatchState.SessionState(state) =>
zkStates = (state, watch()) +: zkStates
case _ =>
}
}))
@volatile var sessions = Seq[ZkSession]()
varZkSession.changes.register(Witness({ s: ZkSession =>
sessions = s +: sessions
}))
// Wait for the initial connect.
eventually {
assert(
Var.sample(varZkState) ==
WatchState.SessionState(SessionState.SyncConnected)
)
assert(sessions.size == 1)
}
val session1 = Var.sample(varZkSession)
// Hijack the session by reusing its id and password.
val session2 = {
val p = new Array[Byte](session1.sessionPasswd.length)
session1.sessionPasswd.write(p, 0)
ClientBuilder()
.hosts(inst.zookeeperConnectString)
.sessionTimeout(zkTimeout)
.sessionId(session1.sessionId)
.password(Buf.ByteArray.Owned(p))
.reader()
}
val connected = new Promise[Unit]
val closed = new Promise[Unit]
session2.state.changes.register(Witness({ ws: WatchState =>
ws match {
case WatchState.SessionState(SessionState.SyncConnected) =>
connected.setDone(); ()
case WatchState.SessionState(SessionState.Disconnected) =>
closed.setDone(); ()
case _ => ()
}
}))
Await.ready(connected)
Await.ready(session2.value.close())
// This will expire the session.
val session1Expired =
session1.state.changes.filter(_ == WatchState.SessionState(SessionState.Expired)).toFuture()
val zkConnected =
varZkState.changes.filter(_ == WatchState.SessionState(SessionState.SyncConnected)).toFuture()
Await.ready(session1.getData("/sadfads"))
Await.ready(session1Expired)
Await.ready(zkConnected)
eventually {
assert(
(zkStates map { case (s, _) => s }).reverse ==
Seq(
SessionState.SyncConnected,
SessionState.Disconnected,
SessionState.Expired,
SessionState.SyncConnected
)
)
}
assert(sessions.size == 2)
}
}
| luciferous/finagle | finagle-serversets/src/test/scala/com/twitter/finagle/serverset2/ZkSessionEndToEndTest.scala | Scala | apache-2.0 | 5,501 |
/*
* Copyright (c) 2013 Habla Computing
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.hablapps.bigbrothapp.house.eviction.nomination
import language.reflectiveCalls
import org.hablapps.{ updatable, react, speech, bigbrothapp }
import updatable._
import react._
import bigbrothapp._
object Nominator {
trait State { self: speech.Program with BigBrothappProgram =>
trait Nominator extends Agent {
type This = Nominator
type Context = Nomination
type PlayerCol[x] = Option[x]
type Player = Housemate
def housemate = player.get
def nomination = context.get
}
implicit val Nominator = builder[Nominator]
trait Nominate extends Join {
type This = Nominate
type Context = Nomination
type Performer = Housemate
type New = Nominator
val reason: String
def nomination = context.head
def eviction = nomination.context.head
def housemate = performer.get
override def empowered(implicit state: State) =
(eviction.substatus == Option(Nominating)) &&
(! reason.isEmpty) &&
(housemate.nominating.size == 0) &&
(nomination.nominee.housemate != housemate)
}
implicit val Nominate = builder[Nominate]
}
  /**
   * Reactive rules: once every housemate has posted exactly one nomination,
   * the eviction advances to the votation phase.
   */
  trait Rules { self: speech.Program with BigBrothappProgram =>

    when {
      case New(nominator: $[Nominator], nVal: Nominator) => implicit state => {
        val mates = nominator.housemate.house.housemates
        // Total nominations posted so far across all housemates.
        val cnt = mates.foldLeft(0) { (c, m) =>
          c + m.nominating.size
        }
        if (mates.size == cnt) {
          Sequence(
            GetReadyForVotation(nominator.nomination.eviction)//,
            //Let(nVal.nomination.eviction.substatus += Polling)
          )
        }
        else
          ActionId() // not everyone has posted a nomination yet
      }
    }
  }
trait Actions { self: speech.Program with BigBrothappProgram =>
case class FreeNominees(eviction: $[Eviction]) extends DefinedAction(
implicit state => {
val bound = (eviction.nominees map { n =>
n.nomination.nominators.size
}).sorted.reverse(1)
For(eviction.nominees.filter { n =>
n.nomination.nominators.size < bound
}) {
case n => Abandon(n)
}
})
case class FreeNominators(eviction: $[Eviction]) extends DefinedAction(
For(eviction.nominators) {
case n => implicit state => Abandon(n)
})
case class FreeNominations(eviction: $[Eviction]) extends DefinedAction(
For(eviction.nominations filter { _.member.isEmpty }) {
case n => Finish(n)
})
case class GetReadyForVotation(eviction: $[Eviction]) extends DefinedAction(
Sequence(
FreeNominees(eviction),
FreeNominators(eviction),
FreeNominations(eviction)))
}
}
| hablapps/app-bigbrothapp | src/main/scala/org/hablapps/bigbrothapp/house/eviction/nomination/Nominator.scala | Scala | apache-2.0 | 3,362 |
/**
* @author e.e d3si9n
*/
import scalaxb._
import org.w3.xmldsig._
import org.w3.xmlenc._
import org.xml.saml2.assertion._
import org.xml.saml2.metadata._
import java.net.URI
/** Exercises scalaxb parsing of a SAML 2.0 Attribute element. */
object SamlUsage {
  def main(args: Array[String]) = allTests

  /** Runs every test case; returns true when all of them pass. */
  def allTests = {
    testAttribute
    true
  }

  // Shape of the scalaxb-generated target type, kept here for reference:
  // case class AttributeType(AttributeValue: Seq[scalaxb.DataRecord[Option[Any]]] = Nil,
  //   attributes: Map[String, scalaxb.DataRecord[Any]] = Map()) extends AttributeTypable {
  //   lazy val Name = attributes("@Name").as[String]
  //   lazy val NameFormat = attributes.get("@NameFormat") map { _.as[java.net.URI] }
  //   lazy val FriendlyName = attributes.get("@FriendlyName") map { _.as[String] }
  // }

  /** Parses a sample Attribute element and checks that its two values and
    * its Name/NameFormat/FriendlyName attributes come through intact. */
  def testAttribute {
    val subject = <saml:Attribute
      xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
      xmlns:xs="http://www.w3.org/2001/XMLSchema"
      xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
      xmlns:x500="urn:oasis:names:tc:SAML:2.0:profiles:attribute:X500"
      x500:Encoding="LDAP"
      NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri"
      Name="urn:oid:1.3.6.1.4.1.5923.1.1.1.1"
      FriendlyName="eduPersonAffiliation">
      <AttributeValue xsi:type="xs:string">member</AttributeValue>
      <AttributeValue xsi:type="xs:string">staff</AttributeValue>
    </saml:Attribute>
    val parsed = fromXML[AttributeType](subject)
    parsed match {
      case attr @ AttributeType(Seq(DataRecord(_, _, _), DataRecord(_, _, _)), _)
        if (attr.Name == "urn:oid:1.3.6.1.4.1.5923.1.1.1.1") &&
          (attr.NameFormat == Some(new URI("urn:oasis:names:tc:SAML:2.0:attrname-format:uri"))) &&
          (attr.FriendlyName == Some("eduPersonAffiliation")) =>
      case _ => sys.error("match failed: " + parsed.toString)
    }
    println(parsed.toString)
  }
}
| justjoheinz/scalaxb | integration/src/test/resources/SamlUsage.scala | Scala | mit | 1,824 |
package net.liftmodules.ng
package test.snippet
import Angular._
import net.liftweb.actor.LAFuture
import net.liftmodules.ng.test.model.StringInt
import net.liftweb.common.{Box, Empty, Failure, Full}
import net.liftweb.json.JsonAST.{JNull, JValue}
import net.liftweb.util.Schedule
import net.liftweb.util.Helpers._
import net.liftweb.http.S
import net.liftweb.json.DefaultFormats
import AngularExecutionContext._
import scala.concurrent.{Future, Promise => ScalaPromise}
import scala.util.Try
// Test fixture: futures in several completion states (resolved, failed,
// pending-then-satisfied), a list of futures, and a future whose payload
// itself embeds more futures. `np` exercises a plain JSON null field.
case class EmbeddedFutures(
resolved: LAFuture[Box[String]],
failed: LAFuture[Box[String]],
string: LAFuture[Box[String]],
obj: LAFuture[Box[StringInt]],
arr: List[LAFuture[Box[String]]],
fobj: LAFuture[Box[EmbeddedObj]],
np: JValue = JNull
)
// Futures nested inside another future's payload (see EmbeddedFutures.fobj).
case class EmbeddedObj(
resolved: LAFuture[Box[String]],
failed: LAFuture[Box[String]],
string: LAFuture[Box[String]],
obj: LAFuture[Box[StringInt]]
)
// The same fixture expressed with scala.concurrent.Future instead of LAFuture.
case class EmbeddedScalaFutures(
resolved: Future[String],
failed: Future[String],
string: Future[String],
obj: Future[StringInt]
)
/** Snippets backing the EmbeddedFutures test page: builds future-laden models
  * and exposes them to the Angular client via lift-ng service factories. */
object EmbeddedFuturesSnips {
implicit val formats = DefaultFormats
// Registers the "EmbeddedFutures" Angular module with two services:
// "fetch" (LAFuture-based, also pokes the comet actor) and "sfetch"
// (scala.concurrent.Future-based).
def services = renderIfNotAlreadyDefined(
angular.module("EmbeddedFutures")
.factory("embeddedFutureServices", jsObjFactory()
.defFutureAny("fetch", {
S.session.map(_.sendCometActorMessage("EmbeddedFutureActor", Empty, "go"))
buildFuture
})
.defAny("sfetch", { Full(buildScalaModel) })
)
)
// A model whose futures are all already satisfied with Empty.
def buildEmptyModel = {
EmbeddedFutures(emptyFuture, emptyFuture, emptyFuture, emptyFuture, List(emptyFuture, emptyFuture), emptyFuture)
}
// A fresh LAFuture pre-satisfied with Empty.
def emptyFuture[T]: LAFuture[Box[T]] = {
val future = new LAFuture[Box[T]]
future.satisfy(Empty)
future
}
// Builds the LAFuture model; `resolved` is satisfied immediately, everything
// else is satisfied asynchronously after a random delay via satisfy/sched.
def buildModel = {
val resolved = new LAFuture[Box[String]]
resolved.satisfy(Full("resolved"))
val failed = new LAFuture[Box[String]]
satisfy(failed, Failure("failed"))
val string = new LAFuture[Box[String]]
satisfy(string, Full("future"))
val obj = new LAFuture[Box[StringInt]]
satisfy(obj, Full(StringInt("string", 42)))
val arr = List(new LAFuture[Box[String]], new LAFuture[Box[String]])
satisfy(arr(0), Full("Roll"))
satisfy(arr(1), Full("Tide!"))
val fobj = new LAFuture[Box[EmbeddedObj]]
val fobjResolved = new LAFuture[Box[String]]
val fobjFailed = new LAFuture[Box[String]]
val fobjString = new LAFuture[Box[String]]
val fobjObj = new LAFuture[Box[StringInt]]
satisfy(fobj, Full(EmbeddedObj(fobjResolved, fobjFailed, fobjString, fobjObj)))
// The nested future's inner fields: one resolved immediately, the rest delayed.
fobjResolved.satisfy(Full("sub resolved"))
satisfy(fobjFailed, Failure("sub fail"))
satisfy(fobjString, Full("sub string"))
satisfy(fobjObj, Full(StringInt("sub obj string", 44)))
EmbeddedFutures(resolved, failed, string, obj, arr, fobj)
}
// Wraps model construction itself in a scala Future.
def buildFuture = {
Future.apply(buildModel)
}
// Runs `f` after a random delay of 0..3000 ms (deliberately nondeterministic
// so the client observes futures completing at different times).
def sched(f: => Unit) = {
def delay = TimeSpan((Math.random() * 3000).toInt)
Schedule(() => { f }, delay)
}
// Satisfies an LAFuture asynchronously after a random delay.
def satisfy[T](future:LAFuture[Box[T]], value:Box[T]) {
sched( future.satisfy(value) )
}
// Completes a scala Promise asynchronously after a random delay.
def satisfy[T](p:ScalaPromise[T], value:T) {
sched( p.complete(Try(value)) )
}
// Builds the scala.concurrent.Future model mirroring buildModel.
def buildScalaModel = {
val resolved = Future("resolved")
val failed = ScalaPromise[String]()
sched( failed.failure(new Exception("failed")) )
val string = ScalaPromise[String]()
satisfy(string, "future")
val obj = ScalaPromise[StringInt]()
satisfy(obj, StringInt("string", 42))
EmbeddedScalaFutures(resolved, failed.future, string.future, obj.future)
}
}
| joescii/lift-ng | test-project/src/main/scala/net/liftmodules/ng/test/snippet/EmbeddedFuturesSnips.scala | Scala | apache-2.0 | 3,654 |
import java.io.{File}
import scalaxb.compiler.Config
import scalaxb.compiler.ConfigEntry._
/** Round-trip test for code generated from big.xsd: compiles the generated
  * sources and evaluates fromXML/toXML snippets against large elements,
  * asserting the serialized output matches the input byte for byte. */
class BigTest2 extends TestBase {
val inFile = new File("integration/src/test/resources/big.xsd")
// override val module = new scalaxb.compiler.xsd.Driver with Verbose
// Generated sources for big.xsd with a wrapped complex type ("barOne") and a
// contents-size limit of 10, exercising the splitting of long sequences.
lazy val generated = module.process(inFile,
Config.default.update(PackageNames(Map(None -> Some("big")))).
update(Outdir(tmp)).
update(ClassPrefix("X")).
update(ParamPrefix("m_")).
update(WrappedComplexTypes(List("barOne"))).
update(ContentsSizeLimit(10)))
"big.scala file must compile so that Foo can be used" in {
(List("val subject = <foo xmlns:o=\\"http://www.example.com/other\\">" +
"<string1></string1><string2></string2><string3></string3><string4></string4><string5></string5>" +
"<string6></string6><string7></string7><string8></string8><string9></string9><string10></string10>" +
"<string11></string11><string12></string12><string13></string13><string14></string14><string15></string15>" +
"<string16></string16><string17></string17><string18></string18><string19></string19><string20></string20>" +
"<string21></string21><string22></string22><string23></string23><string24></string24><string25></string25>" +
"<string26></string26><string27></string27><string28></string28><string29></string29><o:string30></o:string30>" +
"""</foo>""",
"""scalaxb.toXML[big.XFoo](scalaxb.fromXML[big.XFoo](subject), None, Some("foo"), subject.scope).toString"""),
generated) must evaluateTo("<foo xmlns:o=\\"http://www.example.com/other\\">" +
"<string1></string1><string2></string2><string3></string3><string4></string4><string5></string5>" +
"<string6></string6><string7></string7><string8></string8><string9></string9><string10></string10>" +
"<string11></string11><string12></string12><string13></string13><string14></string14><string15></string15>" +
"<string16></string16><string17></string17><string18></string18><string19></string19><string20></string20>" +
"<string21></string21><string22></string22><string23></string23><string24></string24><string25></string25>" +
"<string26></string26><string27></string27><string28></string28><string29></string29><o:string30></o:string30>" +
"</foo>", outdir = "./tmp")
}
"big.scala file must compile so that XBaz can be used" in {
(List("import scalaxb._",
"import big._",
"toXML[big.XBaz](fromXML[big.XBaz](<foo>" +
"<string1>123</string1><string2></string2><string3></string3><string4></string4><string5></string5>" +
"<string6></string6><string7></string7><string8></string8><string9></string9><string10></string10>" +
"<string11></string11><string12></string12><string13></string13><string14></string14><string15></string15>" +
"<string16></string16><string17></string17><string18></string18><string19></string19><string20></string20>" +
"<string21></string21><string22></string22><string23></string23><string24></string24><string25></string25>" +
"<string26></string26><string27></string27><string28></string28><string29></string29><string30></string30>" +
"""</foo>), None, Some("foo"), scala.xml.TopScope).toString"""),
generated) must evaluateTo("<foo>" +
"<string1>123</string1><string2></string2><string3></string3><string4></string4><string5></string5>" +
"<string6></string6><string7></string7><string8></string8><string9></string9><string10></string10>" +
"<string11></string11><string12></string12><string13></string13><string14></string14><string15></string15>" +
"<string16></string16><string17></string17><string18></string18><string19></string19><string20></string20>" +
"<string21></string21><string22></string22><string23></string23><string24></string24><string25></string25>" +
"<string26></string26><string27></string27><string28></string28><string29></string29><string30></string30>" +
"</foo>", outdir = "./tmp")
}
}
| eed3si9n/scalaxb | integration/src/test/scala/BigTest2.scala | Scala | mit | 3,953 |
package com.sageserpent.plutonium
/** Test history implementation: reading the property is deliberately
  * unimplemented, while writing it records the assigned datum. */
abstract class ImplementingHistory extends AbstractedHistory {
  // Getter intentionally left unimplemented for this test double.
  override def property: Int = ???

  // Setter delegates straight to the recording mechanism.
  override def property_=(data: Int): Unit = recordDatum(data)
}
| sageserpent-open/open-plutonium | src/test/scala/com/sageserpent/plutonium/ImplementingHistory.scala | Scala | mit | 218 |
package model
import scala.slick.driver.H2Driver.simple._
/** Table tracking the last issue id issued per repository. */
object IssueId extends Table[(String, String, Int)]("ISSUE_ID") with IssueTemplate {
def * = userName ~ repositoryName ~ issueId
// Primary key is (owner, repository); delegates to the template's filter.
def byPrimaryKey(owner: String, repository: String) = byRepository(owner, repository)
}
/** Read-only view pairing each issue with its comment count. */
object IssueOutline extends Table[(String, String, Int, Int)]("ISSUE_OUTLINE_VIEW") with IssueTemplate {
def commentCount = column[Int]("COMMENT_COUNT")
def * = userName ~ repositoryName ~ issueId ~ commentCount
}
/** The ISSUE table; maps rows to [[Issue]] via the `*` projection below.
  * Column order in `*` must stay in sync with the Issue constructor. */
object Issues extends Table[Issue]("ISSUE") with IssueTemplate with MilestoneTemplate {
def openedUserName = column[String]("OPENED_USER_NAME")
def assignedUserName = column[String]("ASSIGNED_USER_NAME")
def title = column[String]("TITLE")
def content = column[String]("CONTENT")
def closed = column[Boolean]("CLOSED")
def registeredDate = column[java.util.Date]("REGISTERED_DATE")
def updatedDate = column[java.util.Date]("UPDATED_DATE")
def pullRequest = column[Boolean]("PULL_REQUEST")
def * = userName ~ repositoryName ~ issueId ~ openedUserName ~ milestoneId.? ~ assignedUserName.? ~ title ~ content.? ~ closed ~ registeredDate ~ updatedDate ~ pullRequest <> (Issue, Issue.unapply _)
def byPrimaryKey(owner: String, repository: String, issueId: Int) = byIssue(owner, repository, issueId)
}
/**
 * A row of the ISSUE table: an issue (or pull request) registered against a
 * repository, identified by (userName, repositoryName, issueId).
 *
 * Field order must match the column order of the `*` projection in [[Issues]].
 * (Fix: stray dataset/metadata text fused onto the closing line has been
 * removed; it was not valid Scala.)
 *
 * @param milestoneId optional milestone the issue is assigned to
 * @param assignedUserName optional assignee
 * @param content optional issue body (None when the body is empty)
 * @param isPullRequest true when this issue backs a pull request
 */
case class Issue(
userName: String,
repositoryName: String,
issueId: Int,
openedUserName: String,
milestoneId: Option[Int],
assignedUserName: Option[String],
title: String,
content: Option[String],
closed: Boolean,
registeredDate: java.util.Date,
updatedDate: java.util.Date,
isPullRequest: Boolean)
/*
* This file is part of Kiama.
*
* Copyright (C) 2012-2015 Anthony M Sloane, Macquarie University.
*
* Kiama is free software: you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the
* Free Software Foundation, either version 3 of the License, or (at your
* option) any later version.
*
* Kiama is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
* more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Kiama. (See files COPYING and COPYING.LESSER.) If not, see
* <http://www.gnu.org/licenses/>.
*/
package org.kiama
package example.minijava
/**
* Code generator that prints JVM target trees to Jasmine assembler files.
*/
/**
 * Code generator that prints JVM target trees to Jasmine assembler files.
 */
object CodeGenerator extends org.kiama.output.PrettyPrinter {
import JVMTree._
import org.kiama.util.{Emitter, FileEmitter}
/**
 * Generate the Jasmine code for a single classfile. When `isTest` is true
 * output goes to the supplied `emitter`; otherwise a `<classname>.j` file
 * is created and closed after writing.
 */
def generate (isTest : Boolean, classfile : ClassFile, emitter : Emitter) {
// If it's a test use the provided emitter for output, otherwise make
// a file emitter that is based on the the class name and use that.
val codeEmitter =
if (isTest)
emitter
else
new FileEmitter (s"${classfile.name}.j")
// Pretty-print and emit
codeEmitter.emit (pretty (classFileToDoc (classfile)).layout)
// Close up the file if we are using one.
if (!isTest)
codeEmitter.close ()
}
/**
 * Generate a file document for a classfile: header directives, field
 * declarations, a default no-arg constructor, then the methods.
 */
def classFileToDoc (classfile : ClassFile) : Doc = {
val header =
".source" <+> classfile.filename <@>
".class public" <+> classfile.name <@>
".super" <+> classfile.superclassname
// Standard default constructor that just calls the superclass constructor.
val defaultConstructor =
line <>
".method <init>()V" <@>
".limit stack 1" <@>
".limit locals 1" <@>
" aload_0" <@>
" invokespecial" <+> classfile.superclassname <> "/<init>()V" <@>
" return" <@>
".end method" <>
line
link (classfile.source,
header <>
hcat (classfile.fields.map (fieldToDoc)) <@>
defaultConstructor <>
hcat (classfile.methods.map (methodToDoc))) <>
line
}
/*
 * Generate a declaration for a field.
 */
def fieldToDoc (field : JVMField) : Doc =
line <> link (field, ".field public" <+> field.name <+> value (field.tipe))
/*
 * Generate a declaration of a method including its code. The .limit
 * directives are computed from the instruction sequence (see below).
 */
def methodToDoc (method : JVMMethod) : Doc = {
// Calculate the maximum location number used in the method by going
// through the operations checking all loads and stores. The fold
// propagates the maximum so far.
val maxloc =
method.instrs.foldLeft (method.spec.argTypes.length) {
case (maxsofar, JVMInstr (op, _)) =>
op match {
case Iload (loc) => maxsofar.max (loc)
case Istore (loc) => maxsofar.max (loc)
case Aload (loc) => maxsofar.max (loc)
case Astore (loc) => maxsofar.max (loc)
case _ => maxsofar
}
}
/*
 * Calculate the maximum stack depth by going through the instructions
 * simulating the effect that each operation has on the stack size.
 * The fold propagates the maximum so far and the current stack depth.
 */
val (maxstack, _) =
method.instrs.foldLeft ((0, 0)) {
case ((maxsofar, depth), JVMInstr (op, _)) =>
val newdepth = depth + op.stackChange
(maxsofar.max (newdepth), newdepth)
}
line <>
link (method.source,
".method public" <+>
(if (method.isStatic) "static " else empty) <>
value (method.spec) <@>
".limit stack" <+> value (maxstack) <@>
".limit locals" <+> value (maxloc + 1) <>
hcat (method.instrs.map (instrToDoc)) <@>
".end method") <>
line
}
/*
 * Generate an instruction. Instructions that are not label declarations
 * are output using the name of their class converted to lower case. Each
 * argument is output in the order that it appears in the instruction
 * instance. Thus, this code does not have to be extended when new
 * instruction types are added.
 */
def instrToDoc (instr : JVMInstr) : Doc =
instr.op match {
case Label (label) =>
line <> link (instr.source, label <> colon)
case op =>
nest (
line <>
link (instr.source,
op.productPrefix.toLowerCase <>
hcat (op.productIterator.toVector.map {
case arg => space <> value (arg)
}))
)
}
}
| solomono/kiama | library/src/org/kiama/example/minijava/CodeGenerator.scala | Scala | gpl-3.0 | 5,424 |
package coursier.util
import coursier.{moduleNameString, moduleString, organizationString}
import utest._
/** Tests for glob-style organization/name matching in ModuleMatcher. */
object ModuleMatcherTests extends TestSuite {
  val tests = Tests {
    test {
      // Wildcard in the module name only.
      val matcher = ModuleMatcher(org"io.circe", name"circe-*")
      val expectedHits = Seq(
        mod"io.circe:circe-core_2.12",
        mod"io.circe:circe-generic_2.12",
        mod"io.circe:circe-foo_2.12"
      )
      val expectedMisses = Seq(
        mod"io.circe:circo-core_2.12",
        mod"io.circe:foo-circe-core_2.12",
        mod"io.circo:circe-core_2.12",
        mod"ioocirce:circe-foo_2.12"
      )
      expectedHits.foreach(m => assert(matcher.matches(m)))
      expectedMisses.foreach(m => assert(!matcher.matches(m)))
    }
    test {
      // Wildcard in the organization only.
      val matcher = ModuleMatcher(org"org.*", name"scala-library")
      val expectedHits = Seq(
        mod"org.scala-lang:scala-library",
        mod"org.typelevel:scala-library"
      )
      val expectedMisses = Seq(
        mod"org.scala-lang:scala-compiler",
        mod"org.typelevel:scala-reflect"
      )
      expectedHits.foreach(m => assert(matcher.matches(m)))
      expectedMisses.foreach(m => assert(!matcher.matches(m)))
    }
    test {
      // Wildcard in the middle of the module name.
      val matcher = ModuleMatcher(org"io.foo", name"foo-*_2.12")
      val expectedHits = Seq(
        mod"io.foo:foo-core_2.12",
        mod"io.foo:foo-data_2.12"
      )
      val expectedMisses = Seq(
        mod"io.foo:foo-core_2.11",
        mod"io.foo:foo-core",
        mod"io.fooo:foo-core_2.12",
        mod"io.foo:foo-data_2o12"
      )
      expectedHits.foreach(m => assert(matcher.matches(m)))
      expectedMisses.foreach(m => assert(!matcher.matches(m)))
    }
    test("all") {
      // A bare wildcard on both sides matches everything, even empty parts.
      val matcher = ModuleMatcher(org"*", name"*")
      val expectedHits = Seq(
        mod"io.foo:foo-core_2.12",
        mod"io.foo:foo-data_2.12",
        mod"io.foo:foo-core_2.11",
        mod"io.foo:foo-core",
        mod"io.fooo:foo-core_2.12",
        mod":",
        mod"io.fooo:",
        mod":foo-core_2.12"
      )
      expectedHits.foreach(m => assert(matcher.matches(m)))
    }
  }
}
| alexarchambault/coursier | modules/coursier/shared/src/test/scala/coursier/util/ModuleMatcherTests.scala | Scala | apache-2.0 | 2,133 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2002-2010, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
// generated by genprod on Thu Apr 29 17:52:16 CEST 2010 (with extra methods)
package scala
/** <p>
* Function with 12 parameters.
* </p>
*
*/
// NOTE: generated by genprod (see header above) — do not hand-edit the logic.
trait Function12[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12, +R] extends AnyRef { self =>
/** Applies this function to the given 12 arguments. */
def apply(v1:T1,v2:T2,v3:T3,v4:T4,v5:T5,v6:T6,v7:T7,v8:T8,v9:T9,v10:T10,v11:T11,v12:T12): R
override def toString() = "<function12>"
/** f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12) == (f.curried)(x1)(x2)(x3)(x4)(x5)(x6)(x7)(x8)(x9)(x10)(x11)(x12)
*/
def curried: T1 => T2 => T3 => T4 => T5 => T6 => T7 => T8 => T9 => T10 => T11 => T12 => R = {
// Peel off the first argument, then recursively curry the remaining 11-ary function.
(x1: T1) => ((x2: T2, x3: T3, x4: T4, x5: T5, x6: T6, x7: T7, x8: T8, x9: T9, x10: T10, x11: T11, x12: T12) => self.apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12)).curried
}
@deprecated("Use 'curried' instead")
def curry = curried
/* f(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12) == (f.tupled)(Tuple12(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12))
*/
def tupled: Tuple12[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12] => R = {
case Tuple12(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12) => apply(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12)
}
}
| cran/rkafkajars | java/scala/Function12.scala | Scala | apache-2.0 | 1,770 |
package com.datawizards.dmg.examples
import com.datawizards.dmg.{DataModelGenerator, dialects}
import com.datawizards.dmg.examples.TestModel.{ClassWithAllSimpleTypes, Person}
/** Prints H2-dialect DDL generated from two sample model classes. */
object H2Example extends App {
  Seq(
    DataModelGenerator.generate[Person](dialects.H2Dialect),
    DataModelGenerator.generate[ClassWithAllSimpleTypes](dialects.H2Dialect)
  ).foreach(println)
}
| mateuszboryn/data-model-generator | src/main/scala/com/datawizards/dmg/examples/H2Example.scala | Scala | apache-2.0 | 361 |
/*
* Copyright (c) <2015-2016>, see CONTRIBUTORS
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package ch.usi.inf.l3.sana.robustj.typechecker
import ch.usi.inf.l3.sana
import sana.robustj
import sana.arrayj
import sana.brokenj
import sana.primj
import sana.tiny
import sana.calcj
import tiny.dsl._
import robustj.ast._
import brokenj.typechecker.LabelNameCheckerComponent
/*
Try: DONE
Catch: DONE
*/
// Label-name checking for `try` statements: the try body, each catch clause,
// and the finally clause (if present) are all checked against the labels
// currently in scope.
@component(tree, labelNames)
trait TryLabelNameCheckerComponent extends LabelNameCheckerComponent {
(tri: TryApi) => {
check((tri.tryClause, labelNames))
tri.catches.foreach(c => check((c, labelNames)))
tri.finallyClause.foreach(f => check((f, labelNames)))
}
}
// Label-name checking for `catch` clauses: only the clause body is checked.
@component(tree, labelNames)
trait CatchLabelNameCheckerComponent extends LabelNameCheckerComponent {
(ctch: CatchApi) => {
check((ctch.catchClause, labelNames))
}
}
| amanjpro/languages-a-la-carte | robustj/src/main/scala/typechecker/labelcheckers.scala | Scala | bsd-3-clause | 2,337 |
object Runner {
def run(name: String, botFactory: BotFactory): Unit = {
val id = Env.readId()
val grid = Env.readInit()
val bot = botFactory.make(id)
Env.writeInit(name)
while (true) {
val occupants = Env.readFrame(grid.getWidth, grid.getHeight)
grid.update(occupants)
val moves = bot.getMoves(grid)
Env.writeFrame(moves)
}
}
}
| yangle/HaliteIO | airesources/Scala/Runner.scala | Scala | mit | 382 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.internal
import java.util.Locale
import java.util.concurrent.TimeUnit
import org.apache.spark.util.Utils
/**
* Static SQL configuration is a cross-session, immutable Spark configuration. External users can
* see the static sql configs via `SparkSession.conf`, but can NOT set/unset them.
*/
object StaticSQLConf {
import SQLConf.buildStaticConf
// All entries below are built with buildStaticConf, so they are immutable
// for the lifetime of the application and shared across sessions.
val WAREHOUSE_PATH = buildStaticConf("spark.sql.warehouse.dir")
.doc("The default location for managed databases and tables.")
.version("2.0.0")
.stringConf
.createWithDefault(Utils.resolveURI("spark-warehouse").toString)
val CATALOG_IMPLEMENTATION = buildStaticConf("spark.sql.catalogImplementation")
.internal()
.version("2.0.0")
.stringConf
.checkValues(Set("hive", "in-memory"))
.createWithDefault("in-memory")
val GLOBAL_TEMP_DATABASE = buildStaticConf("spark.sql.globalTempDatabase")
.internal()
.version("2.1.0")
.stringConf
// System preserved database should not exists in metastore. However it's hard to guarantee it
// for every session, because case-sensitivity differs. Here we always lowercase it to make our
// life easier.
.transform(_.toLowerCase(Locale.ROOT))
.createWithDefault("global_temp")
// This is used to control when we will split a schema's JSON string to multiple pieces
// in order to fit the JSON string in metastore's table property (by default, the value has
// a length restriction of 4000 characters, so do not use a value larger than 4000 as the default
// value of this property). We will split the JSON string of a schema to its length exceeds the
// threshold. Note that, this conf is only read in HiveExternalCatalog which is cross-session,
// that's why this conf has to be a static SQL conf.
val SCHEMA_STRING_LENGTH_THRESHOLD =
buildStaticConf("spark.sql.sources.schemaStringLengthThreshold")
.internal()
.doc("The maximum length allowed in a single cell when " +
"storing additional schema information in Hive's metastore.")
.version("1.3.1")
.intConf
.createWithDefault(4000)
val FILESOURCE_TABLE_RELATION_CACHE_SIZE =
buildStaticConf("spark.sql.filesourceTableRelationCacheSize")
.internal()
.doc("The maximum size of the cache that maps qualified table names to table relation plans.")
.version("2.2.0")
.intConf
.checkValue(cacheSize => cacheSize >= 0, "The maximum size of the cache must not be negative")
.createWithDefault(1000)
val CODEGEN_CACHE_MAX_ENTRIES = buildStaticConf("spark.sql.codegen.cache.maxEntries")
.internal()
.doc("When nonzero, enable caching of generated classes for operators and expressions. " +
"All jobs share the cache that can use up to the specified number for generated classes.")
.version("2.4.0")
.intConf
.checkValue(maxEntries => maxEntries >= 0, "The maximum must not be negative")
.createWithDefault(100)
val CODEGEN_COMMENTS = buildStaticConf("spark.sql.codegen.comments")
.internal()
.doc("When true, put comment in the generated code. Since computing huge comments " +
"can be extremely expensive in certain cases, such as deeply-nested expressions which " +
"operate over inputs with wide schemas, default is false.")
.version("2.0.0")
.booleanConf
.createWithDefault(false)
// When enabling the debug, Spark SQL internal table properties are not filtered out; however,
// some related DDL commands (e.g., ANALYZE TABLE and CREATE TABLE LIKE) might not work properly.
val DEBUG_MODE = buildStaticConf("spark.sql.debug")
.internal()
.doc("Only used for internal debugging. Not all functions are supported when it is enabled.")
.version("2.1.0")
.booleanConf
.createWithDefault(false)
val HIVE_THRIFT_SERVER_SINGLESESSION =
buildStaticConf("spark.sql.hive.thriftServer.singleSession")
.doc("When set to true, Hive Thrift server is running in a single session mode. " +
"All the JDBC/ODBC connections share the temporary views, function registries, " +
"SQL configuration and the current database.")
.version("1.6.0")
.booleanConf
.createWithDefault(false)
val SPARK_SESSION_EXTENSIONS = buildStaticConf("spark.sql.extensions")
.doc("A comma-separated list of classes that implement " +
"Function1[SparkSessionExtensions, Unit] used to configure Spark Session extensions. The " +
"classes must have a no-args constructor. If multiple extensions are specified, they are " +
"applied in the specified order. For the case of rules and planner strategies, they are " +
"applied in the specified order. For the case of parsers, the last parser is used and each " +
"parser can delegate to its predecessor. For the case of function name conflicts, the last " +
"registered function name is used.")
.version("2.2.0")
.stringConf
.toSequence
.createOptional
val SPARK_CACHE_SERIALIZER = buildStaticConf("spark.sql.cache.serializer")
.doc("The name of a class that implements " +
"org.apache.spark.sql.columnar.CachedBatchSerializer. It will be used to " +
"translate SQL data into a format that can more efficiently be cached. The underlying " +
"API is subject to change so use with caution. Multiple classes cannot be specified. " +
"The class must have a no-arg constructor.")
.version("3.1.0")
.stringConf
.createWithDefault("org.apache.spark.sql.execution.columnar.DefaultCachedBatchSerializer")
val QUERY_EXECUTION_LISTENERS = buildStaticConf("spark.sql.queryExecutionListeners")
.doc("List of class names implementing QueryExecutionListener that will be automatically " +
"added to newly created sessions. The classes should have either a no-arg constructor, " +
"or a constructor that expects a SparkConf argument.")
.version("2.3.0")
.stringConf
.toSequence
.createOptional
val STREAMING_QUERY_LISTENERS = buildStaticConf("spark.sql.streaming.streamingQueryListeners")
.doc("List of class names implementing StreamingQueryListener that will be automatically " +
"added to newly created sessions. The classes should have either a no-arg constructor, " +
"or a constructor that expects a SparkConf argument.")
.version("2.4.0")
.stringConf
.toSequence
.createOptional
val UI_RETAINED_EXECUTIONS =
buildStaticConf("spark.sql.ui.retainedExecutions")
.doc("Number of executions to retain in the Spark UI.")
.version("1.5.0")
.intConf
.createWithDefault(1000)
val BROADCAST_EXCHANGE_MAX_THREAD_THRESHOLD =
buildStaticConf("spark.sql.broadcastExchange.maxThreadThreshold")
.internal()
.doc("The maximum degree of parallelism to fetch and broadcast the table. " +
"If we encounter memory issue like frequently full GC or OOM when broadcast table " +
"we can decrease this number in order to reduce memory usage. " +
"Notice the number should be carefully chosen since decreasing parallelism might " +
"cause longer waiting for other broadcasting. Also, increasing parallelism may " +
"cause memory problem.")
.version("3.0.0")
.intConf
.checkValue(thres => thres > 0 && thres <= 128, "The threshold must be in (0,128].")
.createWithDefault(128)
val SUBQUERY_MAX_THREAD_THRESHOLD =
buildStaticConf("spark.sql.subquery.maxThreadThreshold")
.internal()
.doc("The maximum degree of parallelism to execute the subquery.")
.version("2.4.6")
.intConf
.checkValue(thres => thres > 0 && thres <= 128, "The threshold must be in (0,128].")
.createWithDefault(16)
val SQL_EVENT_TRUNCATE_LENGTH = buildStaticConf("spark.sql.event.truncate.length")
.doc("Threshold of SQL length beyond which it will be truncated before adding to " +
"event. Defaults to no truncation. If set to 0, callsite will be logged instead.")
.version("3.0.0")
.intConf
.checkValue(_ >= 0, "Must be set greater or equal to zero")
.createWithDefault(Int.MaxValue)
val SQL_LEGACY_SESSION_INIT_WITH_DEFAULTS =
buildStaticConf("spark.sql.legacy.sessionInitWithConfigDefaults")
.doc("Flag to revert to legacy behavior where a cloned SparkSession receives SparkConf " +
"defaults, dropping any overrides in its parent SparkSession.")
.version("3.0.0")
.booleanConf
.createWithDefault(false)
val DEFAULT_URL_STREAM_HANDLER_FACTORY_ENABLED =
buildStaticConf("spark.sql.defaultUrlStreamHandlerFactory.enabled")
.internal()
.doc(
"When true, register Hadoop's FsUrlStreamHandlerFactory to support " +
"ADD JAR against HDFS locations. " +
"It should be disabled when a different stream protocol handler should be registered " +
"to support a particular protocol type, or if Hadoop's FsUrlStreamHandlerFactory " +
"conflicts with other protocol types such as `http` or `https`. See also SPARK-25694 " +
"and HADOOP-14598.")
.version("3.0.0")
.booleanConf
.createWithDefault(true)
val STREAMING_UI_ENABLED =
buildStaticConf("spark.sql.streaming.ui.enabled")
.doc("Whether to run the Structured Streaming Web UI for the Spark application when the " +
"Spark Web UI is enabled.")
.version("3.0.0")
.booleanConf
.createWithDefault(true)
val STREAMING_UI_RETAINED_PROGRESS_UPDATES =
buildStaticConf("spark.sql.streaming.ui.retainedProgressUpdates")
.doc("The number of progress updates to retain for a streaming query for Structured " +
"Streaming UI.")
.version("3.0.0")
.intConf
.createWithDefault(100)
val STREAMING_UI_RETAINED_QUERIES =
buildStaticConf("spark.sql.streaming.ui.retainedQueries")
.doc("The number of inactive queries to retain for Structured Streaming UI.")
.version("3.0.0")
.intConf
.createWithDefault(100)
val METADATA_CACHE_TTL_SECONDS = buildStaticConf("spark.sql.metadataCacheTTLSeconds")
.doc("Time-to-live (TTL) value for the metadata caches: partition file metadata cache and " +
"session catalog cache. This configuration only has an effect when this value having " +
"a positive value (> 0). It also requires setting " +
s"'${StaticSQLConf.CATALOG_IMPLEMENTATION.key}' to `hive`, setting " +
s"'${SQLConf.HIVE_FILESOURCE_PARTITION_FILE_CACHE_SIZE.key}' > 0 and setting " +
s"'${SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key}' to `true` " +
"to be applied to the partition file metadata cache.")
.version("3.1.0")
.timeConf(TimeUnit.SECONDS)
.createWithDefault(-1)
}
| rednaxelafx/apache-spark | sql/catalyst/src/main/scala/org/apache/spark/sql/internal/StaticSQLConf.scala | Scala | apache-2.0 | 11,583 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.jdbc
import java.sql.{Connection, SQLException, Statement}
import java.util.Properties
import com.stratio.crossdata.util.using
import org.apache.spark.Partition
import org.apache.spark.sql.types._
import scala.collection.mutable.ArrayBuffer
/**
 * JDBC helper utilities for PostgreSQL: connection handling, schema resolution and
 * Spark SQL to PostgreSQL type mapping.
 */
object PostgresqlUtils {

  val URL: String = "url"
  val DRIVER: String = "driver"
  val driverClassName: String = "org.postgresql.Driver"

  /**
   * Opens a connection and a statement against the database described by `parameters`,
   * runs `f` with them, and always releases both resources.
   *
   * Resources are released innermost-first (statement, then connection), each in its own
   * `finally`, so a failure while closing the statement cannot leak the connection. This
   * fixes the previous implementation, which closed the connection *before* the statement
   * (skipping `statement.close()` entirely if `connection.close()` threw) and leaked the
   * connection when `createStatement()` itself failed.
   */
  def withClientDo[T](parameters: Map[String, String])(f: (Connection, Statement) => T): T = {
    val connection = buildConnection(parameters)
    try {
      val statement = connection.createStatement()
      try {
        f(connection, statement)
      } finally {
        statement.close()
      }
    } finally {
      connection.close()
    }
  }

  /** Resolves the Spark SQL schema of `table` via the JDBC metadata API. */
  def resolveSchema(url: String, table: String, properties: Properties): StructType =
    JDBCRDD.resolveTable(url, table, properties)

  /** Builds a JDBC connection from the given options; requires the `url` option. */
  private def buildConnection(parameters: Map[String, String]): Connection = {
    val url: String = parameters.getOrElse(URL, sys.error(s"Option $URL not specified"))
    val properties = mapToPropertiesWithDriver(parameters)
    JdbcUtils.createConnectionFactory(url, properties)()
  }

  /** Returns the value of `propertyName`, failing via `sys.error` when it is absent. */
  def getRequiredProperty(propertyName: String, parameters: Map[String, String]): String =
    parameters.getOrElse(propertyName, sys.error(s"Option $propertyName not specified"))

  /** Copies all options into a `Properties` object, forcing the PostgreSQL driver class. */
  def mapToPropertiesWithDriver(parameters: Map[String, String]): Properties = {
    val properties = new Properties()
    parameters.foreach(kv => properties.setProperty(kv._1, kv._2))
    properties.setProperty(DRIVER, driverClassName)
    properties
  }

  /**
   * Renders a StructType as a comma-separated PostgreSQL column definition list.
   * Note: nullable columns keep a trailing space after the type name (the empty
   * `nullable` token); preserved for backward compatibility with existing DDL output.
   */
  def structTypeToStringSchema(schema: StructType): String = {
    val sb = new StringBuilder()
    schema.fields.foreach { field =>
      val name = field.name
      val postgresqlType = getPostgresqlType(field.dataType)
      val nullable = if (field.nullable) "" else "NOT NULL"
      sb.append(s", $name $postgresqlType $nullable")
    }
    // Drop the leading ", " separator; empty schema renders as "".
    if (sb.length < 2) "" else sb.substring(2)
  }

  /** Maps a Spark SQL DataType to its PostgreSQL column type name; arrays recurse. */
  private def getPostgresqlType(dataType: DataType): String = dataType match {
    case StringType => "TEXT"
    case BinaryType => "BYTEA"
    case BooleanType => "BOOLEAN"
    case FloatType => "FLOAT4"
    case DoubleType => "FLOAT8"
    case ArrayType(elementType, _) => s"${getPostgresqlType(elementType)}[]"
    case IntegerType => "INTEGER"
    case LongType => "BIGINT"
    case ShortType => "SMALLINT"
    case TimestampType => "TIMESTAMP"
    case DateType => "DATE"
    case decimal: DecimalType => s"DECIMAL(${decimal.precision},${decimal.scale})"
    case _ => throw new IllegalArgumentException(s"Unsupported type in postgresql: $dataType")
  }
}
/*
* Copyright 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.expecty
import org.junit.Assert._
import org.junit.Test
import junit.framework.ComparisonFailure
/**
 * Tests Expecty's failure rendering: each case triggers a failing expectation and
 * compares the rendered diagram (sub-expression values printed beneath the source
 * line) against the expected text. Expected renderings are compared after
 * whitespace normalization by `outputs`, so only the per-line content matters.
 */
class ExpectyRenderingSpec {
  // Expecty instance that also prints the instrumented ASTs (printAsts = true).
  val expect = new Expecty(printAsts = true)

  @Test
  def literals() {
    outputs("""
"abc".length() == 2
| |
3 false
""") {
      expect {
        "abc".length() == 2
      }
    }
  }

  @Test
  def object_apply() {
    outputs("""
List() == List(1, 2)
| |
| List(1, 2)
false
""") {
      expect {
        List() == List(1, 2)
      }
    }
  }

  @Test
  def object_apply_2() {
    outputs("""
List(1, 2) == List()
| |
List(1, 2) false
""") {
      expect {
        List(1, 2) == List()
      }
    }
  }

  @Test
  def infix_operators() {
    val str = "abc"

    outputs("""
str + "def" == "other"
| | |
abc abcdef false
""") {
      expect {
        str + "def" == "other"
      }
    }
  }

  @Test
  def null_value() {
    val x = null

    outputs("""
x == "null"
| |
| false
null
""") {
      expect {
        x == "null"
      }
    }
  }

  @Test
  def value_with_type_hint() {
    // showTypes = true appends the runtime class of each rendered value.
    val expect = new Expecty(showTypes = true)
    val x = "123"

    outputs("""
x == 123
| |
| false (java.lang.Boolean)
123 (java.lang.String)
""") {
      expect {
        x == 123
      }
    }
  }

  @Test
  def arithmetic_expressions() {
    val one = 1

    outputs("""
one + 2 == 4
| | |
1 3 false
""") {
      expect {
        one + 2 == 4
      }
    }
  }

  @Test
  def property_read() {
    val person = Person()

    outputs("""
person.age == 43
| | |
| 42 false
Person(Fred,42)
""") {
      expect {
        person.age == 43
      }
    }
  }

  @Test
  def method_call_zero_args() {
    val person = Person()

    outputs("""
person.doIt() == "pending"
| | |
| done false
Person(Fred,42)
""") {
      expect {
        person.doIt() == "pending"
      }
    }
  }

  @Test
  def method_call_one_arg() {
    val person = Person()
    val word = "hey"

    outputs("""
person.sayTwice(word) == "hoho"
| | | |
| heyhey hey false
Person(Fred,42)
""") {
      expect {
        person.sayTwice(word) == "hoho"
      }
    }
  }

  @Test
  def method_call_multiple_args() {
    val person = Person()
    val word1 = "hey"
    val word2 = "ho"

    outputs("""
person.sayTwo(word1, word2) == "hoho"
| | | | |
| heyho hey ho false
Person(Fred,42)
""") {
      expect {
        person.sayTwo(word1, word2) == "hoho"
      }
    }
  }

  @Test
  def method_call_var_args() {
    val person = Person()
    val word1 = "foo"
    val word2 = "bar"
    val word3 = "baz"

    outputs("""
person.sayAll(word1, word2, word3) == "hoho"
| | | | | |
| | foo bar baz false
| foobarbaz
Person(Fred,42)
""") {
      expect {
        person.sayAll(word1, word2, word3) == "hoho"
      }
    }
  }

  @Test
  def nested_property_reads_and_method_calls() {
    val person = Person()

    outputs("""
person.sayTwo(person.sayTwice(person.name), "bar") == "hoho"
| | | | | | |
| | | FredFred | Fred false
| | Person(Fred,42) Person(Fred,42)
| FredFredbar
Person(Fred,42)
""") {
      expect {
        person.sayTwo(person.sayTwice(person.name), "bar") == "hoho"
      }
    }
  }

  @Test
  def constructor_call() {
    val brand = "BMW"
    val model = "M5"

    outputs("""
new Car(brand, model).brand == "Audi"
| | | | |
BMW M5 BMW M5 BMW false
""") {
      expect {
        new Car(brand, model).brand == "Audi"
      }
    }
  }

  @Test
  def higher_order_methods() {
    // The CanBuildFrom instance's toString contains an identity hash; `outputs`
    // masks "@<hex>" suffixes so this stays stable across runs.
    outputs("""
a.map(_ * 2) == b
| | | | | |
| | | | | List(2, 4, 7)
| | | | false
| | | <function1>
| | scala.collection.generic.GenTraversableFactory$ReusableCBF@...
| List(2, 4, 6)
List(1, 2, 3)
""") {
      val a = List(1, 2, 3)
      val b = List(2, 4, 7)
      expect {
        a.map(_ * 2) == b
      }
    }
  }

  @Test
  def tuple() {
    outputs("""
(1, 2)._1 == 3
| | |
(1,2) 1 false
""") {
      expect {
        (1, 2)._1 == 3
      }
    }
  }

  @Test
  def case_class() {
    outputs("""
Some(1).map(_ + 1) == Some(3)
| | | | |
Some(1) | | | Some(3)
| | false
| <function1>
Some(2)
""") {
      expect {
        Some(1).map(_ + 1) == Some(3)
      }
    }
  }

  @Test
  def class_with_package() {
    outputs("""
collection.mutable.Map(1->"a").get(1) == "b"
| || | |
| |(1,a) | false
| | Some(a)
| scala.Predef$ArrowAssoc@...
Map(1 -> a)
""") {
      expect {
        collection.mutable.Map(1->"a").get(1) == "b"
      }
    }
  }

  @Test
  def java_static_method() {
    outputs("""
java.util.Collections.emptyList() == null
| |
[] false
""") {
      expect {
        java.util.Collections.emptyList() == null
      }
    }
  }

  @Test
  def implicit_conversion() {
    outputs("""
"fred".slice(1, 2) == "frog"
| | |
fred r false
""") {
      expect {
        "fred".slice(1, 2) == "frog"
      }
    }
  }

  @Test
  def option_type() {
    outputs(
      """
Some(23) == Some(22)
| | |
Some(23) | Some(22)
false
""") {
      expect {
        Some(23) == Some(22)
      }
    }
  }

  // doesn't compile, fix pending
  // @Test
  // def varargs_conversion() {
  // outputs(
  // """
  //fun1(List(1) :_*) == List(1)
  //| |
  //List(1) true
  // """)
  // {
  // def fun1(p: Int*) = p
  //
  // expect {
  // fun1(List(1) :_*) == List(1)
  // }
  // }
  // }

  /**
   * Runs `expectation`, requires it to fail with an AssertionError, and compares
   * Expecty's rendered message against `rendering` after normalization.
   * Normalization trims and joins lines; "@<hex>" identity-hash suffixes in the
   * actual output are masked as "@..." so object hashes don't break comparison.
   */
  def outputs(rendering: String)(expectation: => Boolean) {
    def normalize(s: String) = s.trim().lines.mkString

    try {
      expectation
      // Reaching this point means the expectation unexpectedly passed.
      fail("Expectation should have failed but didn't")
    }
    catch {
      case e: AssertionError => {
        val expected = normalize(rendering)
        val actual = normalize(e.getMessage).replaceAll("@[0-9a-f]*", "@\\\\.\\\\.\\\\.")
        if (actual != expected) {
          throw new ComparisonFailure("Expectation output doesn't match", expected, actual)
        }
      }
    }
  }

  // Fixture type exercising properties, zero/one/multi-arg and varargs methods.
  case class Person(name: String = "Fred", age: Int = 42) {
    def doIt() = "done"
    def sayTwice(word: String) = word * 2
    def sayTwo(word1: String, word2: String) = word1 + word2
    def sayAll(words: String*) = words.mkString("")
  }

  // Fixture type exercising constructor-call rendering.
  class Car(val brand: String, val model: String) {
    override def toString = brand + " " + model
  }
}
| pniederw/expecty | src/test/scala/org/expecty/ExpectyRenderingSpec.scala | Scala | apache-2.0 | 7,444 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.expressions.validation
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.{SqlParserException, ValidationException}
import org.apache.flink.table.expressions.TimePointUnit
import org.apache.flink.table.planner.codegen.CodeGenException
import org.apache.flink.table.planner.expressions.utils.ScalarTypesTestBase
import org.apache.calcite.avatica.util.TimeUnit
import org.junit.{Ignore, Test}
/**
 * Validation tests for built-in scalar functions: each case feeds an invalid SQL or
 * Table API expression to the test harness and expects a specific exception type
 * (ValidationException, SqlParserException or CodeGenException) instead of a result.
 */
class ScalarFunctionsValidationTest extends ScalarTypesTestBase {

  // ----------------------------------------------------------------------------------------------
  // Math functions
  // ----------------------------------------------------------------------------------------------

  @Ignore
  @Test
  def testInvalidLog1(): Unit = {
    thrown.expect(classOf[ValidationException])
    // invalid arithmetic argument
    testSqlApi(
      "LOG(1, 100)",
      "FAIL"
    )
  }

  @Ignore
  @Test
  def testInvalidLog2(): Unit = {
    thrown.expect(classOf[ValidationException])
    // invalid arithmetic argument
    testSqlApi(
      "LOG(-1)",
      "FAIL"
    )
  }

  @Test(expected = classOf[ValidationException])
  def testInvalidBin1(): Unit = {
    testSqlApi("BIN(f12)", "101010") // float type
  }

  @Test(expected = classOf[ValidationException])
  def testInvalidBin2(): Unit = {
    testSqlApi("BIN(f15)", "101010") // BigDecimal type
  }

  @Test(expected = classOf[ValidationException])
  def testInvalidBin3(): Unit = {
    testSqlApi("BIN(f16)", "101010") // Date type
  }

  // NOTE(review): only the first failing call in this test is actually exercised —
  // once it throws, the remaining testSqlApi calls are unreachable.
  @Test(expected = classOf[ValidationException])
  def testInvalidTruncate1(): Unit = {
    // All arguments are string type
    testSqlApi(
      "TRUNCATE('abc', 'def')",
      "FAIL")

    // The second argument is of type String
    testSqlApi(
      "TRUNCATE(f12, f0)",
      "FAIL")

    // The second argument is of type Float
    testSqlApi(
      "TRUNCATE(f12,f12)",
      "FAIL")

    // The second argument is of type Double
    testSqlApi(
      "TRUNCATE(f12, cast(f28 as DOUBLE))",
      "FAIL")

    // The second argument is of type BigDecimal
    testSqlApi(
      "TRUNCATE(f12,f15)",
      "FAIL")
  }

  @Test
  def testInvalidTruncate2(): Unit = {
    thrown.expect(classOf[CodeGenException])

    // The one argument is of type String
    testSqlApi(
      "TRUNCATE('abc')",
      "FAIL")
  }

  // ----------------------------------------------------------------------------------------------
  // String functions
  // ----------------------------------------------------------------------------------------------

  @Test(expected = classOf[ValidationException])
  def testInvalidSubstring1(): Unit = {
    // Must fail. Parameter of substring must be an Integer not a Double.
    testTableApi("test".substring(2.0.toExpr), "FAIL", "FAIL")
  }

  @Test(expected = classOf[ValidationException])
  def testInvalidSubstring2(): Unit = {
    // Must fail. Parameter of substring must be an Integer not a String.
    testTableApi("test".substring("test".toExpr), "FAIL", "FAIL")
  }

  // ----------------------------------------------------------------------------------------------
  // Temporal functions
  // ----------------------------------------------------------------------------------------------

  @Test(expected = classOf[SqlParserException])
  def testTimestampAddWithWrongTimestampInterval(): Unit = {
    testSqlApi("TIMESTAMPADD(XXX, 1, timestamp '2016-02-24'))", "2016-06-16")
  }

  @Test(expected = classOf[SqlParserException])
  def testTimestampAddWithWrongTimestampFormat(): Unit = {
    testSqlApi("TIMESTAMPADD(YEAR, 1, timestamp '2016-02-24'))", "2016-06-16")
  }

  @Test(expected = classOf[ValidationException])
  def testTimestampAddWithWrongQuantity(): Unit = {
    testSqlApi("TIMESTAMPADD(YEAR, 1.0, timestamp '2016-02-24 12:42:25')", "2016-06-16")
  }

  // ----------------------------------------------------------------------------------------------
  // Sub-query functions
  // ----------------------------------------------------------------------------------------------

  // NOTE(review): the second testTableApi call is unreachable if the first one throws
  // the expected ValidationException.
  @Test(expected = classOf[ValidationException])
  def testInValidationExceptionMoreThanOneTypes(): Unit = {
    testTableApi(
      'f2.in('f3, 'f8),
      "f2.in(f3, f8)",
      "true"
    )
    testTableApi(
      'f2.in('f3, 'f4, 4),
      "f2.in(f3, f4, 4)",
      "false" // OK if all numeric
    )
  }

  @Test(expected = classOf[ValidationException])
  def scalaInValidationExceptionDifferentOperandsTest(): Unit = {
    testTableApi(
      'f1.in("Hi", "Hello world", "Comment#1"),
      "true",
      "true"
    )
  }

  @Test(expected = classOf[ValidationException])
  def javaInValidationExceptionDifferentOperandsTest(): Unit = {
    testTableApi(
      true,
      "f1.in('Hi','Hello world','Comment#1')",
      "true"
    )
  }

  @Test(expected = classOf[ValidationException])
  def testTimestampDiffWithWrongTime(): Unit = {
    testTableApi(
      timestampDiff(TimePointUnit.DAY, "2016-02-24", "2016-02-27"), "FAIL", "FAIL")
  }

  @Test(expected = classOf[ValidationException])
  def testTimestampDiffWithWrongTimeAndUnit(): Unit = {
    testTableApi(
      timestampDiff(TimePointUnit.MINUTE, "2016-02-24", "2016-02-27"), "FAIL", "FAIL")
  }

  @Test
  def testDOWWithTimeWhichIsUnsupported(): Unit = {
    thrown.expect(classOf[ValidationException])
    testSqlApi("EXTRACT(DOW FROM TIME '12:42:25')", "0")
  }

  @Test
  def testDOYWithTimeWhichIsUnsupported(): Unit = {
    thrown.expect(classOf[ValidationException])
    testSqlApi("EXTRACT(DOY FROM TIME '12:42:25')", "0")
  }

  // Shared helper: extracting any date-scoped unit from a plain TIME must be rejected.
  private def testExtractFromTimeZeroResult(unit: TimeUnit): Unit = {
    thrown.expect(classOf[ValidationException])
    testSqlApi("EXTRACT(" + unit + " FROM TIME '00:00:00')", "0")
  }

  // NOTE(review): the following tests call thrown.expect and then the helper, which
  // calls thrown.expect again with the same class — redundant but harmless.
  @Test
  def testMillenniumWithTime(): Unit = {
    thrown.expect(classOf[ValidationException])
    testExtractFromTimeZeroResult(TimeUnit.MILLENNIUM)
  }

  @Test
  def testCenturyWithTime(): Unit = {
    thrown.expect(classOf[ValidationException])
    testExtractFromTimeZeroResult(TimeUnit.CENTURY)
  }

  @Test
  def testYearWithTime(): Unit = {
    thrown.expect(classOf[ValidationException])
    testExtractFromTimeZeroResult(TimeUnit.YEAR)
  }

  @Test
  def testMonthWithTime(): Unit = {
    thrown.expect(classOf[ValidationException])
    testExtractFromTimeZeroResult(TimeUnit.MONTH)
  }

  @Test
  def testDayWithTime(): Unit = {
    thrown.expect(classOf[ValidationException])
    testExtractFromTimeZeroResult(TimeUnit.DAY)
  }

  // ----------------------------------------------------------------------------------------------
  // Builtin functions
  // ----------------------------------------------------------------------------------------------

  @Test
  def testInvalidStringToMap(): Unit = {
    // test non-exist key access
    thrown.expect(classOf[ValidationException])
    thrown.expectMessage("Invalid number of arguments to function 'STR_TO_MAP'")
    testSqlApi(
      "STR_TO_MAP('k1:v1;k2:v2', ';')",
      "EXCEPTION"
    )
  }

  @Test
  def testInvalidIf(): Unit = {
    // test IF(BOOL, STRING, BOOLEAN)
    thrown.expect(classOf[ValidationException])
    thrown.expectMessage("Cannot apply 'IF' to arguments")
    testSqlApi(
      "IF(f7 > 5, f0, f1)",
      "FAIL")
  }
}
| fhueske/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/expressions/validation/ScalarFunctionsValidationTest.scala | Scala | apache-2.0 | 8,099 |
package com.lambtors.poker_api.module.poker.behaviour.table
import java.util.UUID
import cats.implicits._
import com.lambtors.poker_api.module.poker.application.table.find.{FindTableCardsQueryHandler, TableCardsFinder}
import com.lambtors.poker_api.module.poker.behaviour.PokerBehaviourSpec
import com.lambtors.poker_api.module.poker.domain.error.{InvalidGameId, PokerGameNotFound}
import com.lambtors.poker_api.module.poker.infrastructure.stub.{
FindTableCardsQueryStub,
GameIdStub,
PokerGameStub,
TableCardsResponseStub
}
/**
 * Behavioural spec for the find-table-cards query: the handler must return the
 * table cards of an existing game, surface PokerGameNotFound for an unknown game,
 * and reject a malformed game id with a validation error before touching state.
 */
final class FindTableCardsSpec extends PokerBehaviourSpec {
  // Handler under test, wired to the spec-provided in-memory repository.
  private val queryHandler = new FindTableCardsQueryHandler(new TableCardsFinder(pokerGameRepository))

  "Find table cards query" should {
    "return cards of the table" in {
      val query = FindTableCardsQueryStub.random()
      val gameId = GameIdStub.create(UUID.fromString(query.gameId))
      val pokerGame = PokerGameStub.create(gameId)

      // Seed the state with the game so the finder can resolve it.
      val initialState = PokerState.empty.withGame(pokerGame)

      val expectedResponse = TableCardsResponseStub.create(pokerGame.tableCards)

      val validatedStateT = queryHandler.handle(query)
      validatedStateT should beValid
      validatedStateT.map(_.runA(initialState) should beRightContaining(expectedResponse))
    }

    "fail in case a game does not exists with the given id" in {
      val query = FindTableCardsQueryStub.random()
      val gameId = GameIdStub.create(UUID.fromString(query.gameId))

      val validatedStateT = queryHandler.handle(query)
      validatedStateT should beValid
      // Empty state: no game stored, so running the program yields the not-found error.
      validatedStateT.map(_.runA(PokerState.empty) should beLeftContaining[Throwable](PokerGameNotFound(gameId)))
    }

    "return a validation error on invalid game id" in {
      val query = FindTableCardsQueryStub.create(gameId = GameIdStub.invalid())

      queryHandler.handle(query) should haveValidationErrors(InvalidGameId(query.gameId))
    }
  }
}
| lambtors/poker-api | src/test/scala/com/lambtors/poker_api/module/poker/behaviour/table/FindTableCardsSpec.scala | Scala | mit | 1,920 |
package org.apress.prospark
import com.esotericsoftware.kryo.{KryoSerializable,Kryo}
import com.esotericsoftware.kryo.io.{Output, Input}
/**
 * One year of proton flux measurements, bucketed by energy range (the `binXtoY`
 * fields). Implements Kryo's explicit serialization protocol; the field order in
 * `read` must mirror `write` exactly, which is why both enumerate the bins in
 * primary-constructor order.
 */
class ProtonFlux(
    var year: Int,
    var bin0_57to1_78: Double,
    var bin3_40to17_6: Double,
    var bin22_0to31_0: Double,
    var bin1_894to2_605: Double,
    var bin4_200to6_240: Double,
    var bin3_256to8_132: Double,
    var bin3_276to8_097: Double,
    var bin6_343to42_03: Double,
    var bin17_88to26_81: Double,
    var bin30_29to69_47: Double,
    var bin132_8to242_0: Double
) extends KryoSerializable {

  /**
   * Convenience constructor parsing raw string columns. Throws
   * NumberFormatException on malformed input, same as the direct conversions.
   */
  def this(year: String, bin0_57to1_78: String, bin3_40to17_6: String,
      bin22_0to31_0: String, bin1_894to2_605: String, bin4_200to6_240: String,
      bin3_256to8_132: String, bin3_276to8_097: String, bin6_343to42_03: String,
      bin17_88to26_81: String, bin30_29to69_47: String, bin132_8to242_0: String) {
    this(year.toInt, bin0_57to1_78.toDouble, bin3_40to17_6.toDouble,
      bin22_0to31_0.toDouble, bin1_894to2_605.toDouble, bin4_200to6_240.toDouble,
      bin3_256to8_132.toDouble, bin3_276to8_097.toDouble, bin6_343to42_03.toDouble,
      bin17_88to26_81.toDouble, bin30_29to69_47.toDouble, bin132_8to242_0.toDouble)
  }

  // Snapshot of every energy bin, in primary-constructor order.
  private def allBins = Seq(bin0_57to1_78, bin3_40to17_6, bin22_0to31_0,
    bin1_894to2_605, bin4_200to6_240, bin3_256to8_132, bin3_276to8_097,
    bin6_343to42_03, bin17_88to26_81, bin30_29to69_47, bin132_8to242_0)

  /** True when any energy bin exceeds 1.0 — the storm detection threshold. */
  def isSolarStorm = allBins.exists(_ > 1.0)

  /** Kryo serialization: the year, then each bin in constructor order. */
  override def write(kryo: Kryo, output: Output): Unit = {
    output.writeInt(year)
    allBins.foreach(v => output.writeDouble(v))
  }

  /** Kryo deserialization: reads fields back in exactly the order `write` emits them. */
  override def read(kryo: Kryo, input: Input): Unit = {
    year = input.readInt()
    bin0_57to1_78 = input.readDouble()
    bin3_40to17_6 = input.readDouble()
    bin22_0to31_0 = input.readDouble()
    bin1_894to2_605 = input.readDouble()
    bin4_200to6_240 = input.readDouble()
    bin3_256to8_132 = input.readDouble()
    bin3_276to8_097 = input.readDouble()
    bin6_343to42_03 = input.readDouble()
    bin17_88to26_81 = input.readDouble()
    bin30_29to69_47 = input.readDouble()
    bin132_8to242_0 = input.readDouble()
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.history
import java.io.{FileNotFoundException, IOException, OutputStream}
import java.util.UUID
import java.util.concurrent.{ConcurrentHashMap, Executors, ExecutorService, Future, TimeUnit}
import java.util.zip.{ZipEntry, ZipOutputStream}
import scala.collection.mutable
import scala.xml.Node
import com.google.common.io.ByteStreams
import com.google.common.util.concurrent.{MoreExecutors, ThreadFactoryBuilder}
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.hadoop.fs.permission.FsAction
import org.apache.hadoop.hdfs.DistributedFileSystem
import org.apache.hadoop.hdfs.protocol.HdfsConstants
import org.apache.hadoop.security.AccessControlException
import org.apache.spark.{SecurityManager, SparkConf, SparkException}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.ReplayListenerBus._
import org.apache.spark.ui.SparkUI
import org.apache.spark.util.{Clock, SystemClock, ThreadUtils, Utils}
/**
* A class that provides application history from event logs stored in the file system.
* This provider checks for new finished applications in the background periodically and
* renders the history application UI by parsing the associated event logs.
*
* == How new and updated attempts are detected ==
*
* - New attempts are detected in [[checkForLogs]]: the log dir is scanned, and any
* entries in the log dir whose modification time is greater than the last scan time
* are considered new or updated. These are replayed to create a new [[FsApplicationAttemptInfo]]
* entry and update or create a matching [[FsApplicationHistoryInfo]] element in the list
* of applications.
* - Updated attempts are also found in [[checkForLogs]] -- if the attempt's log file has grown, the
* [[FsApplicationAttemptInfo]] is replaced by another one with a larger log size.
* - When [[updateProbe()]] is invoked to check if a loaded [[SparkUI]]
* instance is out of date, the log size of the cached instance is checked against the app last
* loaded by [[checkForLogs]].
*
* The use of log size, rather than simply relying on modification times, is needed to
* address the following issues
* - some filesystems do not appear to update the `modtime` value whenever data is flushed to
* an open file output stream. Changes to the history may not be picked up.
* - the granularity of the `modtime` field may be 2+ seconds. Rapid changes to the FS can be
* missed.
*
* Tracking filesize works given the following invariant: the logs get bigger
* as new events are added. If a format was used in which this did not hold, the mechanism would
* break. Simple streaming of JSON-formatted events, as is implemented today, implicitly
* maintains this invariant.
*/
private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock)
extends ApplicationHistoryProvider with Logging {
/** Convenience constructor that uses the wall-clock system clock. */
def this(conf: SparkConf) = {
  this(conf, new SystemClock())
}
import FsHistoryProvider._

// Interval between safemode checks.
private val SAFEMODE_CHECK_INTERVAL_S = conf.getTimeAsSeconds(
  "spark.history.fs.safemodeCheck.interval", "5s")

// Interval between each check for event log updates
private val UPDATE_INTERVAL_S = conf.getTimeAsSeconds("spark.history.fs.update.interval", "10s")

// Interval between each cleaner checks for event logs to delete
private val CLEAN_INTERVAL_S = conf.getTimeAsSeconds("spark.history.fs.cleaner.interval", "1d")

// Number of threads used to replay event logs.
private val NUM_PROCESSING_THREADS = conf.getInt(SPARK_HISTORY_FS_NUM_REPLAY_THREADS,
  Math.ceil(Runtime.getRuntime.availableProcessors() / 4f).toInt)

// Root directory that is scanned for event log files.
private val logDir = conf.getOption("spark.history.fs.logDirectory")
  .getOrElse(DEFAULT_LOG_DIR)

// History UI access-control settings (user and group admin ACLs).
private val HISTORY_UI_ACLS_ENABLE = conf.getBoolean("spark.history.ui.acls.enable", false)
private val HISTORY_UI_ADMIN_ACLS = conf.get("spark.history.ui.admin.acls", "")
private val HISTORY_UI_ADMIN_ACLS_GROUPS = conf.get("spark.history.ui.admin.acls.groups", "")
logInfo(s"History server ui acls " + (if (HISTORY_UI_ACLS_ENABLE) "enabled" else "disabled") +
  "; users with admin permissions: " + HISTORY_UI_ADMIN_ACLS.toString +
  "; groups with admin permissions" + HISTORY_UI_ADMIN_ACLS_GROUPS.toString)

// Hadoop configuration and the filesystem hosting the log directory.
private val hadoopConf = SparkHadoopUtil.get.newConfiguration(conf)
private val fs = new Path(logDir).getFileSystem(hadoopConf)

// Used by check event thread and clean log thread.
// Scheduled thread pool size must be one, otherwise it will have concurrent issues about fs
// and applications between check task and clean task.
private val pool = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
  .setNameFormat("spark-history-task-%d").setDaemon(true).build())

// The modification time of the newest log detected during the last scan. Currently only
// used for logging msgs (logs are re-scanned based on file size, rather than modtime)
private val lastScanTime = new java.util.concurrent.atomic.AtomicLong(-1)

// Mapping of application IDs to their metadata, in descending end time order. Apps are inserted
// into the map in order, so the LinkedHashMap maintains the correct ordering.
@volatile private var applications: mutable.LinkedHashMap[String, FsApplicationHistoryInfo]
  = new mutable.LinkedHashMap()

// Lookup from an event-log path to the attempt most recently parsed from it.
val fileToAppInfo = new ConcurrentHashMap[Path, FsApplicationAttemptInfo]()

// List of application logs to be deleted by event log cleaner.
private var attemptsToClean = new mutable.ListBuffer[FsApplicationAttemptInfo]

// Count of replay tasks submitted but not yet completed; exposed via
// getEventLogsUnderProcess().
private val pendingReplayTasksCount = new java.util.concurrent.atomic.AtomicInteger(0)
/**
 * Return a runnable that performs the given operation on the event logs.
 * This operation is expected to be executed periodically.
 *
 * The call is wrapped in Utils.tryOrExit so failures in a scheduled background
 * task are not silently swallowed by the executor (NOTE(review): exact
 * tryOrExit semantics are defined in Utils — confirm there).
 */
private def getRunner(operateFun: () => Unit): Runnable = {
  new Runnable() {
    override def run(): Unit = Utils.tryOrExit {
      operateFun()
    }
  }
}
/**
 * Fixed size thread pool to fetch and parse log files.
 * Under "spark.testing", replay runs synchronously on the calling thread instead,
 * which keeps tests deterministic.
 */
private val replayExecutor: ExecutorService = {
  if (!conf.contains("spark.testing")) {
    ThreadUtils.newDaemonFixedThreadPool(NUM_PROCESSING_THREADS, "log-replay-executor")
  } else {
    MoreExecutors.sameThreadExecutor()
  }
}

// Result of kicking off the initial scan: null when polling started immediately,
// otherwise the background thread waiting for HDFS to leave safe mode (kept as a
// field so stop() can interrupt and join it).
val initThread = initialize()
// Starts polling right away when the filesystem is usable; otherwise spawns a
// background thread that waits for HDFS to leave safe mode. Returns that thread,
// or null when polling started immediately.
private[history] def initialize(): Thread = {
  if (!isFsInSafeMode()) {
    startPolling()
    null
  } else {
    startSafeModeCheckThread(None)
  }
}
/**
 * Spawns a daemon thread that re-checks the HDFS safe-mode flag every
 * SAFEMODE_CHECK_INTERVAL_S seconds and starts event-log polling once the
 * filesystem leaves safe mode. `errorHandler` replaces the default uncaught
 * exception handler, which logs the error and exits the process.
 */
private[history] def startSafeModeCheckThread(
    errorHandler: Option[Thread.UncaughtExceptionHandler]): Thread = {
  // Cannot probe anything while the FS is in safe mode, so spawn a new thread that will wait
  // for the FS to leave safe mode before enabling polling. This allows the main history server
  // UI to be shown (so that the user can see the HDFS status).
  val initThread = new Thread(new Runnable() {
    override def run(): Unit = {
      try {
        while (isFsInSafeMode()) {
          logInfo("HDFS is still in safe mode. Waiting...")
          val deadline = clock.getTimeMillis() +
            TimeUnit.SECONDS.toMillis(SAFEMODE_CHECK_INTERVAL_S)
          clock.waitTillTime(deadline)
        }
        startPolling()
      } catch {
        // Interrupted (e.g. by stop()): exit quietly without starting polling.
        case _: InterruptedException =>
      }
    }
  })
  initThread.setDaemon(true)
  initThread.setName(s"${getClass().getSimpleName()}-init")
  initThread.setUncaughtExceptionHandler(errorHandler.getOrElse(
    new Thread.UncaughtExceptionHandler() {
      override def uncaughtException(t: Thread, e: Throwable): Unit = {
        logError("Error initializing FsHistoryProvider.", e)
        System.exit(1)
      }
    }))
  initThread.start()
  initThread
}
// Validates that the log directory exists and is a directory, then schedules the
// periodic log-scan task (and, when enabled, the cleaner task) on the single-thread
// pool. Background tasks are disabled entirely under "spark.testing".
private def startPolling(): Unit = {
  // Validate the log directory.
  val path = new Path(logDir)
  try {
    if (!fs.getFileStatus(path).isDirectory) {
      throw new IllegalArgumentException(
        "Logging directory specified is not a directory: %s".format(logDir))
    }
  } catch {
    case f: FileNotFoundException =>
      var msg = s"Log directory specified does not exist: $logDir"
      if (logDir == DEFAULT_LOG_DIR) {
        msg += " Did you configure the correct one through spark.history.fs.logDirectory?"
      }
      throw new FileNotFoundException(msg).initCause(f)
  }

  // Disable the background thread during tests.
  if (!conf.contains("spark.testing")) {
    // A task that periodically checks for event log updates on disk.
    logDebug(s"Scheduling update thread every $UPDATE_INTERVAL_S seconds")
    pool.scheduleWithFixedDelay(getRunner(checkForLogs), 0, UPDATE_INTERVAL_S, TimeUnit.SECONDS)

    if (conf.getBoolean("spark.history.fs.cleaner.enabled", false)) {
      // A task that periodically cleans event logs on disk.
      pool.scheduleWithFixedDelay(getRunner(cleanLogs), 0, CLEAN_INTERVAL_S, TimeUnit.SECONDS)
    }
  } else {
    logDebug("Background update thread disabled for testing")
  }
}
// All known applications, in the order maintained by `applications`
// (descending end time — see the field's comment).
override def getListing(): Iterator[FsApplicationHistoryInfo] = applications.values.iterator

// Looks up a single application's history metadata by application id.
override def getApplicationInfo(appId: String): Option[FsApplicationHistoryInfo] = {
  applications.get(appId)
}

// Number of event logs submitted for replay but not yet finished.
override def getEventLogsUnderProcess(): Int = pendingReplayTasksCount.get()

// Timestamp recorded by the most recent completed scan of the log directory.
override def getLastUpdatedTime(): Long = lastScanTime.get()
/**
 * Rebuilds the SparkUI for one application attempt by replaying its event log.
 * Returns None when the application/attempt is unknown, when the log file has
 * disappeared (FileNotFoundException), or when the replayed log yields no
 * application id.
 */
override def getAppUI(appId: String, attemptId: Option[String]): Option[LoadedAppUI] = {
  try {
    applications.get(appId).flatMap { appInfo =>
      appInfo.attempts.find(_.attemptId == attemptId).flatMap { attempt =>
        val replayBus = new ReplayListenerBus()
        val ui = {
          // Clone the conf so per-app security settings don't leak into this provider.
          val conf = this.conf.clone()
          val appSecManager = new SecurityManager(conf)
          SparkUI.createHistoryUI(conf, replayBus, appSecManager, appInfo.name,
            HistoryServer.getAttemptURI(appId, attempt.attemptId),
            attempt.startTime)
          // Do not call ui.bind() to avoid creating a new server for each application
        }

        val fileStatus = fs.getFileStatus(new Path(logDir, attempt.logPath))

        val appListener = replay(fileStatus, isApplicationCompleted(fileStatus), replayBus)

        if (appListener.appId.isDefined) {
          ui.appSparkVersion = appListener.appSparkVersion.getOrElse("")
          ui.getSecurityManager.setAcls(HISTORY_UI_ACLS_ENABLE)
          // make sure to set admin acls before view acls so they are properly picked up
          val adminAcls = HISTORY_UI_ADMIN_ACLS + "," + appListener.adminAcls.getOrElse("")
          ui.getSecurityManager.setAdminAcls(adminAcls)
          ui.getSecurityManager.setViewAcls(attempt.sparkUser, appListener.viewAcls.getOrElse(""))
          val adminAclsGroups = HISTORY_UI_ADMIN_ACLS_GROUPS + "," +
            appListener.adminAclsGroups.getOrElse("")
          ui.getSecurityManager.setAdminAclsGroups(adminAclsGroups)
          ui.getSecurityManager.setViewAclsGroups(appListener.viewAclsGroups.getOrElse(""))
          Some(LoadedAppUI(ui, updateProbe(appId, attemptId, attempt.fileSize)))
        } else {
          // Replay produced no application id: treat the attempt as unavailable.
          None
        }
      }
    }
  } catch {
    // Event log removed between listing and replay.
    case e: FileNotFoundException => None
  }
}
// HTML hint rendered by the HistoryServer when the application listing is empty.
override def getEmptyListingHtml(): Seq[Node] = {
<p>
Did you specify the correct logging directory? Please verify your setting of
<span style="font-style:italic">spark.history.fs.logDirectory</span>
listed above and whether you have the permissions to access it.
<br/>
It is also possible that your application did not run to
completion or did not stop the SparkContext.
</p>
}
/** Diagnostic key/value pairs shown on the history server UI. */
override def getConfig(): Map[String, String] = {
  val base = Map("Event log directory" -> logDir.toString)
  if (isFsInSafeMode()) {
    // Surface HDFS safe mode, since no logs can be served while it is active.
    base + ("HDFS State" -> "In safe mode, application logs not available.")
  } else {
    base
  }
}
/** Stops the provider, interrupting the initialization thread if it is still running. */
override def stop(): Unit = {
  Option(initThread).filter(_.isAlive()).foreach { thread =>
    thread.interrupt()
    thread.join()
  }
}
/**
* Builds the application list based on the current contents of the log directory.
* Tries to reuse as much of the data already in memory as possible, by not reading
* applications that haven't been updated since last time the logs were checked.
*/
private[history] def checkForLogs(): Unit = {
try {
// Grab the new scan time *before* scanning, so files racing with the scan are
// re-examined next round rather than missed.
val newLastScanTime = getNewLastScanTime()
logDebug(s"Scanning $logDir with lastScanTime==$lastScanTime")
val statusList = Option(fs.listStatus(new Path(logDir))).map(_.toSeq)
.getOrElse(Seq[FileStatus]())
// scan for modified applications, replay and merge them
val logInfos: Seq[FileStatus] = statusList
.filter { entry =>
val fileInfo = fileToAppInfo.get(entry.getPath())
// A file counts as updated when it grew past the size seen at its last replay.
val prevFileSize = if (fileInfo != null) fileInfo.fileSize else 0L
!entry.isDirectory() &&
// FsHistoryProvider generates a hidden file which can't be read. Accidentally
// reading a garbage file is safe, but we would log an error which can be scary to
// the end-user.
!entry.getPath().getName().startsWith(".") &&
prevFileSize < entry.getLen() &&
SparkHadoopUtil.get.checkAccessPermission(entry, FsAction.READ)
}
.flatMap { entry => Some(entry) }
.sortWith { case (entry1, entry2) =>
entry1.getModificationTime() >= entry2.getModificationTime()
}
if (logInfos.nonEmpty) {
logDebug(s"New/updated attempts found: ${logInfos.size} ${logInfos.map(_.getPath)}")
}
var tasks = mutable.ListBuffer[Future[_]]()
try {
// Replay each updated log on the dedicated executor; each task merges its result.
for (file <- logInfos) {
tasks += replayExecutor.submit(new Runnable {
override def run(): Unit = mergeApplicationListing(file)
})
}
} catch {
// let the iteration over logInfos break, since an exception on
// replayExecutor.submit (..) indicates the ExecutorService is unable
// to take any more submissions at this time
case e: Exception =>
logError(s"Exception while submitting event log for replay", e)
}
pendingReplayTasksCount.addAndGet(tasks.size)
tasks.foreach { task =>
try {
// Wait for all tasks to finish. This makes sure that checkForLogs
// is not scheduled again while some tasks are already running in
// the replayExecutor.
task.get()
} catch {
case e: InterruptedException =>
throw e
case e: Exception =>
logError("Exception while merging application listings", e)
} finally {
pendingReplayTasksCount.decrementAndGet()
}
}
// Record the scan time only after all merges completed.
lastScanTime.set(newLastScanTime)
} catch {
case e: Exception => logError("Exception in checking for event log updates", e)
}
}
// Computes the next scan timestamp using the *filesystem's* clock: creates a hidden
// temp file in the log directory and reads back its modification time (the local JVM
// clock may drift from the filesystem's). Falls back to the previous scan time on
// failure; always deletes the temp file.
private def getNewLastScanTime(): Long = {
val fileName = "." + UUID.randomUUID().toString
val path = new Path(logDir, fileName)
val fos = fs.create(path)
try {
fos.close()
fs.getFileStatus(path).getModificationTime
} catch {
case e: Exception =>
logError("Exception encountered when attempting to update last scan time", e)
lastScanTime.get()
} finally {
// Best-effort cleanup of the probe file; a leftover is harmless (hidden files
// are skipped by checkForLogs) but worth a warning.
if (!fs.delete(path, true)) {
logWarning(s"Error deleting ${path}")
}
}
}
// Streams the event logs of `appId` (optionally restricted to one attempt) into
// `zipStream`, one ZIP entry per log file. Closes `zipStream` when the app exists;
// throws SparkException when it does not.
override def writeEventLogs(
appId: String,
attemptId: Option[String],
zipStream: ZipOutputStream): Unit = {
/**
* This method compresses the files passed in, and writes the compressed data out into the
* [[OutputStream]] passed in. Each file is written as a new [[ZipEntry]] with its name being
* the name of the file being compressed.
*/
def zipFileToStream(file: Path, entryName: String, outputStream: ZipOutputStream): Unit = {
val fs = file.getFileSystem(hadoopConf)
val inputStream = fs.open(file, 1 * 1024 * 1024) // 1MB Buffer
try {
outputStream.putNextEntry(new ZipEntry(entryName))
ByteStreams.copy(inputStream, outputStream)
outputStream.closeEntry()
} finally {
// Only the input side is closed here; the shared zip stream stays open for the
// next attempt and is closed by the caller's finally below.
inputStream.close()
}
}
applications.get(appId) match {
case Some(appInfo) =>
try {
// If no attempt is specified, or there is no attemptId for attempts, return all attempts
appInfo.attempts.filter { attempt =>
attempt.attemptId.isEmpty || attemptId.isEmpty || attempt.attemptId.get == attemptId.get
}.foreach { attempt =>
val logPath = new Path(logDir, attempt.logPath)
zipFileToStream(logPath, attempt.logPath, zipStream)
}
} finally {
zipStream.close()
}
case None => throw new SparkException(s"Logs for $appId not found.")
}
}
/**
* Replay the log files in the list and merge the list of old applications with new ones
*/
protected def mergeApplicationListing(fileStatus: FileStatus): Unit = {
val newAttempts = try {
// Replay only the three event types needed for listing metadata; this keeps the
// scan far cheaper than a full replay.
val eventsFilter: ReplayEventsFilter = { eventString =>
eventString.startsWith(APPL_START_EVENT_PREFIX) ||
eventString.startsWith(APPL_END_EVENT_PREFIX) ||
eventString.startsWith(LOG_START_EVENT_PREFIX)
}
val logPath = fileStatus.getPath()
val appCompleted = isApplicationCompleted(fileStatus)
// Use loading time as lastUpdated since some filesystems don't update modifiedTime
// each time file is updated. However use modifiedTime for completed jobs so lastUpdated
// won't change whenever HistoryServer restarts and reloads the file.
val lastUpdated = if (appCompleted) fileStatus.getModificationTime else clock.getTimeMillis()
val appListener = replay(fileStatus, appCompleted, new ReplayListenerBus(), eventsFilter)
// Without an app ID, new logs will render incorrectly in the listing page, so do not list or
// try to show their UI.
if (appListener.appId.isDefined) {
val attemptInfo = new FsApplicationAttemptInfo(
logPath.getName(),
appListener.appName.getOrElse(NOT_STARTED),
appListener.appId.getOrElse(logPath.getName()),
appListener.appAttemptId,
appListener.startTime.getOrElse(-1L),
appListener.endTime.getOrElse(-1L),
lastUpdated,
appListener.sparkUser.getOrElse(NOT_STARTED),
appCompleted,
fileStatus.getLen(),
appListener.appSparkVersion.getOrElse("")
)
// Remember the replayed size so checkForLogs can skip files that haven't grown.
fileToAppInfo.put(logPath, attemptInfo)
logDebug(s"Application log ${attemptInfo.logPath} loaded successfully: $attemptInfo")
Some(attemptInfo)
} else {
logWarning(s"Failed to load application log ${fileStatus.getPath}. " +
"The application may have not started.")
None
}
} catch {
case e: Exception =>
logError(
s"Exception encountered when attempting to load application log ${fileStatus.getPath}",
e)
None
}
if (newAttempts.isEmpty) {
return
}
// Build a map containing all apps that contain new attempts. The app information in this map
// contains both the new app attempt, and those that were already loaded in the existing apps
// map. If an attempt has been updated, it replaces the old attempt in the list.
val newAppMap = new mutable.HashMap[String, FsApplicationHistoryInfo]()
// Everything below mutates `applications`, which is also read by the getters, so it
// runs under the applications lock.
applications.synchronized {
newAttempts.foreach { attempt =>
val appInfo = newAppMap.get(attempt.appId)
.orElse(applications.get(attempt.appId))
.map { app =>
val attempts =
app.attempts.filter(_.attemptId != attempt.attemptId) ++ List(attempt)
new FsApplicationHistoryInfo(attempt.appId, attempt.name,
attempts.sortWith(compareAttemptInfo))
}
.getOrElse(new FsApplicationHistoryInfo(attempt.appId, attempt.name, List(attempt)))
newAppMap(attempt.appId) = appInfo
}
// Merge the new app list with the existing one, maintaining the expected ordering (descending
// end time). Maintaining the order is important to avoid having to sort the list every time
// there is a request for the log list.
val newApps = newAppMap.values.toSeq.sortWith(compareAppInfo)
val mergedApps = new mutable.LinkedHashMap[String, FsApplicationHistoryInfo]()
def addIfAbsent(info: FsApplicationHistoryInfo): Unit = {
if (!mergedApps.contains(info.id)) {
mergedApps += (info.id -> info)
}
}
// Classic two-pointer merge of the sorted new and old listings.
val newIterator = newApps.iterator.buffered
val oldIterator = applications.values.iterator.buffered
while (newIterator.hasNext && oldIterator.hasNext) {
if (newAppMap.contains(oldIterator.head.id)) {
// An updated version of this old entry exists in newApps; drop the stale one.
oldIterator.next()
} else if (compareAppInfo(newIterator.head, oldIterator.head)) {
addIfAbsent(newIterator.next())
} else {
addIfAbsent(oldIterator.next())
}
}
newIterator.foreach(addIfAbsent)
oldIterator.foreach(addIfAbsent)
applications = mergedApps
}
}
/**
* Delete event logs from the log directory according to the clean policy defined by the user.
*/
private[history] def cleanLogs(): Unit = {
try {
val maxAge = conf.getTimeAsSeconds("spark.history.fs.cleaner.maxAge", "7d") * 1000
val now = clock.getTimeMillis()
val appsToRetain = new mutable.LinkedHashMap[String, FsApplicationHistoryInfo]()
// NOTE(review): despite the comment below, this predicate does not consult
// `completed` -- an idle in-progress attempt older than maxAge would also be
// selected for cleaning. Confirm whether that is intended.
def shouldClean(attempt: FsApplicationAttemptInfo): Boolean = {
now - attempt.lastUpdated > maxAge
}
// Scan all logs from the log directory.
// Only completed applications older than the specified max age will be deleted.
applications.values.foreach { app =>
val (toClean, toRetain) = app.attempts.partition(shouldClean)
attemptsToClean ++= toClean
if (toClean.isEmpty) {
appsToRetain += (app.id -> app)
} else if (toRetain.nonEmpty) {
// Keep the app but drop its expired attempts.
appsToRetain += (app.id ->
new FsApplicationHistoryInfo(app.id, app.name, toRetain.toList))
}
}
applications = appsToRetain
// Attempts whose deletion failed with an IOException are kept for retry on the
// next run; permission failures are dropped for good.
val leftToClean = new mutable.ListBuffer[FsApplicationAttemptInfo]
attemptsToClean.foreach { attempt =>
try {
fs.delete(new Path(logDir, attempt.logPath), true)
} catch {
case e: AccessControlException =>
logInfo(s"No permission to delete ${attempt.logPath}, ignoring.")
case t: IOException =>
logError(s"IOException in cleaning ${attempt.logPath}", t)
leftToClean += attempt
}
}
attemptsToClean = leftToClean
} catch {
case t: Exception => logError("Exception in cleaning logs", t)
}
}
/**
* Comparison function that defines the sort order for the application listing.
*
* @return Whether `i1` should precede `i2`.
*/
private def compareAppInfo(
    i1: FsApplicationHistoryInfo,
    i2: FsApplicationHistoryInfo): Boolean = {
  // Attempts are kept most-recent-first, so head is the latest attempt.
  val latest1 = i1.attempts.head
  val latest2 = i2.attempts.head
  // Descending end time; ties broken by descending start time.
  if (latest1.endTime == latest2.endTime) latest1.startTime >= latest2.startTime
  else latest1.endTime > latest2.endTime
}
/**
* Comparison function that defines the sort order for application attempts within the same
* application. Order is: attempts are sorted by descending start time.
* Most recent attempt state matches with current state of the app.
*
* Normally applications should have a single running attempt; but failure to call sc.stop()
* may cause multiple running attempts to show up.
*
* @return Whether `a1` should precede `a2`.
*/
private def compareAttemptInfo(
a1: FsApplicationAttemptInfo,
a2: FsApplicationAttemptInfo): Boolean = {
// Descending start time: newer attempts sort first.
a1.startTime >= a2.startTime
}
/**
* Replays the events in the specified log file on the supplied `ReplayListenerBus`. Returns
* an `ApplicationEventListener` instance with event data captured from the replay.
* `ReplayEventsFilter` determines what events are replayed and can therefore limit the
* data captured in the returned `ApplicationEventListener` instance.
*/
private def replay(
eventLog: FileStatus,
appCompleted: Boolean,
bus: ReplayListenerBus,
eventsFilter: ReplayEventsFilter = SELECT_ALL_FILTER): ApplicationEventListener = {
val logPath = eventLog.getPath()
logInfo(s"Replaying log path: $logPath")
// Note that the eventLog may have *increased* in size since when we grabbed the filestatus,
// and when we read the file here. That is OK -- it may result in an unnecessary refresh
// when there is no update, but will not result in missing an update. We *must* prevent
// an error the other way -- if we report a size bigger (ie later) than the file that is
// actually read, we may never refresh the app. FileStatus is guaranteed to be static
// after it's created, so we get a file size that is no bigger than what is actually read.
val logInput = EventLoggingListener.openEventLog(logPath, fs)
try {
val appListener = new ApplicationEventListener
bus.addListener(appListener)
// Third argument marks the log as possibly incomplete (!appCompleted) --
// presumably so the bus tolerates a truncated tail; confirm in ReplayListenerBus.
bus.replay(logInput, logPath.toString, !appCompleted, eventsFilter)
appListener
} finally {
logInput.close()
}
}
/**
* Return true when the application has completed.
*/
private def isApplicationCompleted(entry: FileStatus): Boolean = {
// Logs still being written carry the EventLoggingListener.IN_PROGRESS suffix;
// its absence means the application finished.
!entry.getPath().getName().endsWith(EventLoggingListener.IN_PROGRESS)
}
/**
* Checks whether HDFS is in safe mode.
*
* Note that DistributedFileSystem is a `@LimitedPrivate` class, which for all practical reasons
* makes it more public than not.
*/
private[history] def isFsInSafeMode(): Boolean = fs match {
case dfs: DistributedFileSystem =>
isFsInSafeMode(dfs)
case _ =>
// Safe mode is an HDFS concept; other filesystems are never in safe mode.
false
}
private[history] def isFsInSafeMode(dfs: DistributedFileSystem): Boolean = {
/* true to check only for Active NNs status */
// SAFEMODE_GET only queries the current state; it does not change it.
dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET, true)
}
/**
* String description for diagnostics
* @return a summary of the component state
*/
override def toString: String = {
val header = s"""
| FsHistoryProvider: logdir=$logDir,
| last scan time=$lastScanTime
| Cached application count =${applications.size}}
""".stripMargin
// NOTE(review): the stray "}" after the application count looks like a typo in the
// diagnostic header -- confirm the intended format.
val sb = new StringBuilder(header)
applications.foreach(entry => sb.append(entry._2).append("\\n"))
sb.toString
}
/**
* Look up an application attempt
* @param appId application ID
* @param attemptId Attempt ID, if set
* @return the matching attempt, if found
*/
def lookup(appId: String, attemptId: Option[String]): Option[FsApplicationAttemptInfo] = {
  // Resolve the application first, then the matching attempt within it.
  for {
    appInfo <- applications.get(appId)
    attempt <- appInfo.attempts.find(_.attemptId == attemptId)
  } yield attempt
}
/**
* Return true iff a newer version of the UI is available. The check is based on whether the
* fileSize for the currently loaded UI is smaller than the file size the last time
* the logs were loaded.
*
* This is a very cheap operation -- the work of loading the new attempt was already done
* by [[checkForLogs]].
* @param appId application to probe
* @param attemptId attempt to probe
* @param prevFileSize the file size of the logs for the currently displayed UI
*/
// Curried with an empty second parameter list: partially applying the first list
// yields the () => Boolean probe handed to LoadedAppUI by getAppUI.
private def updateProbe(
appId: String,
attemptId: Option[String],
prevFileSize: Long)(): Boolean = {
lookup(appId, attemptId) match {
case None =>
// The attempt disappeared (e.g. cleaned); report "no update" but leave a trace.
logDebug(s"Application Attempt $appId/$attemptId not found")
false
case Some(latest) =>
// A larger on-disk size than what the loaded UI saw means new events exist.
prevFileSize < latest.fileSize
}
}
}
private[history] object FsHistoryProvider {
// Default location scanned for event logs when none is configured.
val DEFAULT_LOG_DIR = "file:/tmp/spark-events"
// Placeholder shown when a log has no application name/user recorded yet.
private val NOT_STARTED = "<Not Started>"
private val SPARK_HISTORY_FS_NUM_REPLAY_THREADS = "spark.history.fs.numReplayThreads"
// Raw JSON prefixes used by mergeApplicationListing's events filter to cheaply
// select only application start/end and log-start events during replay.
private val APPL_START_EVENT_PREFIX = "{\\"Event\\":\\"SparkListenerApplicationStart\\""
private val APPL_END_EVENT_PREFIX = "{\\"Event\\":\\"SparkListenerApplicationEnd\\""
private val LOG_START_EVENT_PREFIX = "{\\"Event\\":\\"SparkListenerLogStart\\""
}
/**
* Application attempt information.
*
* @param logPath path to the log file, or, for a legacy log, its directory
* @param name application name
* @param appId application ID
* @param attemptId optional attempt ID
* @param startTime start time (from playback)
* @param endTime end time (from playback). -1 if the application is incomplete.
* @param lastUpdated the modification time of the log file when this entry was built by replaying
* the history.
* @param sparkUser user running the application
* @param completed flag to indicate whether or not the application has completed.
* @param fileSize the size of the log file the last time the file was scanned for changes
*/
private class FsApplicationAttemptInfo(
    val logPath: String,
    val name: String,
    val appId: String,
    attemptId: Option[String],
    startTime: Long,
    endTime: Long,
    lastUpdated: Long,
    sparkUser: String,
    completed: Boolean,
    val fileSize: Long,
    appSparkVersion: String)
  extends ApplicationAttemptInfo(
    attemptId, startTime, endTime, lastUpdated, sparkUser, completed, appSparkVersion) {

  /** extend the superclass string value with the extra attributes of this class */
  override def toString: String = {
    // Bug fix: the rendered string was missing its closing parenthesis.
    s"FsApplicationAttemptInfo($name, $appId," +
      s" ${super.toString}, source=$logPath, size=$fileSize)"
  }
}
/**
* Application history information
* @param id application ID
* @param name application name
* @param attempts list of attempts, most recent first.
*/
// Specializes ApplicationHistoryInfo so the listing carries filesystem-backed
// FsApplicationAttemptInfo entries (which add logPath/fileSize).
private class FsApplicationHistoryInfo(
id: String,
override val name: String,
override val attempts: List[FsApplicationAttemptInfo])
extends ApplicationHistoryInfo(id, name, attempts)
| VigneshMohan1/spark-branch-2.3 | core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala | Scala | apache-2.0 | 31,229 |
package net.sansa_stack.query.spark.dof.sparql
import net.sansa_stack.query.spark.dof.bindings.Result
import net.sansa_stack.query.spark.dof.tensor.Tensor
import org.apache.jena.query.{Query, QueryFactory}
import scala.reflect.ClassTag
object QueryExecutionFactory {

  /** Throws IllegalArgumentException with `msg` when `obj` is null. */
  private def checkNotNull(obj: Object, msg: String) = {
    if (obj == null) {
      throw new IllegalArgumentException(msg)
    }
  }

  // Consistency: the parameter is named `tensor` here and in `make`, matching the
  // public `create` methods (it was previously `model` in the private helpers).
  private def checkArg[R, N: ClassTag, T, A](tensor: Tensor[R, N, T, A]) = checkNotNull(tensor, "Tensor is a null pointer")

  private def checkArg(queryStr: String) = checkNotNull(queryStr, "Query string is null")

  private def checkArg(query: Query) = checkNotNull(query, "Query is null")

  /** Parses a SPARQL string into a Jena [[Query]]. */
  def makeQuery(queryStr: String): Query = QueryFactory.create(queryStr)

  /**
   * Executes `query` against `tensor`.
   * @throws IllegalArgumentException if either argument is null
   */
  def create[R, N: ClassTag, T, A](query: Query, tensor: Tensor[R, N, T, A]): Result[A] = {
    checkArg(query)
    checkArg(tensor)
    make(query, tensor)
  }

  /** Parses `queryStr` and executes the resulting query against `tensor`. */
  def create[R, N: ClassTag, T, A](queryStr: String, tensor: Tensor[R, N, T, A]): Result[A] = {
    checkArg(queryStr)
    create(makeQuery(queryStr), tensor)
  }

  private def make[R, N: ClassTag, T, A](query: Query, tensor: Tensor[R, N, T, A]) = QueryExecution[R, N, T, A](query, tensor)
}
| SANSA-Stack/SANSA-RDF | sansa-query/sansa-query-spark/src/main/scala/net/sansa_stack/query/spark/dof/sparql/QueryExecutionFactory.scala | Scala | apache-2.0 | 1,226 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal.scaladsl.broker.kafka
import akka.util.ByteString
import com.lightbend.lagom.scaladsl.api.deser.MessageSerializer.NegotiatedDeserializer
import com.lightbend.lagom.scaladsl.api.deser.MessageSerializer.NegotiatedSerializer
import org.apache.kafka.common.serialization.Deserializer
import org.apache.kafka.common.serialization.Serializer
/**
* Adapts a Lagom NegotiatedDeserializer into a Kafka Deserializer so that messages
* stored in Kafka can be deserialized into the expected application's type.
*/
private[lagom] class ScaladslKafkaDeserializer[T](deserializer: NegotiatedDeserializer[T, ByteString])
    extends Deserializer[T] {

  // Stateless adapter: nothing to configure.
  override def configure(configs: java.util.Map[String, _], isKey: Boolean): Unit = ()

  /** Wraps the raw Kafka bytes and delegates to the Lagom deserializer. */
  override def deserialize(topic: String, data: Array[Byte]): T = {
    val bytes = ByteString(data)
    deserializer.deserialize(bytes)
  }

  // No resources are held, so closing is a no-op.
  override def close(): Unit = ()
}
/**
* Adapts a Lagom NegotiatedSerializer into a Kafka Serializer so that application's
* messages can be serialized into a byte array and published into Kafka.
*/
private[lagom] class ScaladslKafkaSerializer[T](serializer: NegotiatedSerializer[T, ByteString]) extends Serializer[T] {

  // Stateless adapter: nothing to configure.
  override def configure(configs: java.util.Map[String, _], isKey: Boolean): Unit = ()

  /** Delegates to the Lagom serializer and unwraps the result into a raw byte array. */
  override def serialize(topic: String, data: T): Array[Byte] = {
    val payload = serializer.serialize(data)
    payload.toArray
  }

  // No resources are held, so closing is a no-op.
  override def close(): Unit = ()
}
| rcavalcanti/lagom | service/scaladsl/kafka/client/src/main/scala/com/lightbend/lagom/internal/scaladsl/broker/kafka/KafkaSerializers.scala | Scala | apache-2.0 | 1,588 |
load.ivy("org.eclipse.mylyn.github" % "org.eclipse.egit.github.core" % "2.1.5")
@
import scala.collection.JavaConversions._
import org.eclipse.egit.github.core._
import org.eclipse.egit.github.core.client.GitHubClient
import org.eclipse.egit.github.core.service.RepositoryService
import org.eclipse.egit.github.core.service.ContentsService
import org.eclipse.egit.github.core.service.MarkdownService
import java.util.Base64
// Pipe-forward operator: `value |> f` applies `f` to `value`, enabling
// left-to-right chains such as `b64 |> decode |> render`.
implicit class Apply[A](a: A) {
def |>[B](f: A => B): B = f(a)
}
// Repository tree node: either a single file or a directory with its children.
// `src` keeps the raw GitHub API payload the node was built from.
sealed trait Content { def src: RepositoryContents }
case class File (path: String, src: RepositoryContents) extends Content
case class Dir (path: String, children: Seq[Content], src: RepositoryContents) extends Content
// Shared GitHub client and the service facades built on top of it.
val ghclient = new GitHubClient()
val repoService = new RepositoryService(ghclient)
val contService = new ContentsService(ghclient)
val mdService = new MarkdownService(ghclient)
// SECURITY: a real OAuth token is committed here in plain text -- anyone who can read
// this file can act as the account. Revoke the token and load it from the environment
// instead (e.g. sys.env("GITHUB_TOKEN")).
ghclient.setOAuth2Token("c8a329326458c3cbc4d89b20534060764eba8b5e")
// Forward reference to `class Repo` below -- legal here because the script body is
// compiled as one template, where member definitions are in scope throughout.
val spout = new Repo("eija-johansson", "spout")
// Thin wrapper around the EGit GitHub services for a single repository.
class Repo(userName: String, repoName: String) {
val repo = repoService.getRepository(userName, repoName)
// Lists the entries under `path` (repository root when null), recursing eagerly
// into directories -- one API call per directory.
def list(path: String = null): Seq[Content] = {
// NOTE(review): entries that are neither file nor dir (e.g. submodules/symlinks)
// would hit a MatchError here -- confirm they cannot occur, or add a fallback.
def convert(c: RepositoryContents) = c.getType match {
case RepositoryContents.TYPE_FILE => File(c.getPath, c)
case RepositoryContents.TYPE_DIR => Dir(c.getPath, list(c.getPath), c)
}
val cont = contService.getContents(repo, path)
cont map convert
}
// Flattens the whole repository tree into its leaf files.
def files = {
def unwrap(c: Content): Seq[File] = c match {
case f: File => Seq(f)
case d: Dir => d.children flatMap unwrap
}
list() flatMap unwrap
}
// Fetches a file's content; assumes the contents API returns base64 with embedded
// line breaks (stripped before decoding) -- TODO confirm against the API docs.
// `new String(_)` uses the platform default charset.
def read(file: File): Option[String] = {
def decode(b64: String) =
b64.replaceAll("""\s""","") |> Base64.getDecoder.decode |> (new String(_))
contService.getContents(repo, file.path)
.headOption
.map (_.getContent)
.map (decode(_))
}
// Renders a markdown file to HTML via GitHub's markdown service ("" if unreadable).
def renderMd(file: File): String =
read(file)
.map (renderMd)
.getOrElse ("")
def renderMd(text: String): String =
mdService.getRepositoryHtml(repo, text)
}
| tobias-johansson/gitdoc | gitdoc.scala | Scala | mit | 2,096 |
/*
* (c) Copyright 2014 LinkedIn Corp. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.linkedin.rookboom.schedule.dao
import javax.sql.DataSource
import org.springframework.jdbc.core.namedparam.{SqlParameterSource, MapSqlParameterSource, NamedParameterJdbcTemplate}
import org.springframework.jdbc.core.RowMapper
import java.sql.ResultSet
import scala.collection.JavaConversions._
import com.linkedin.rookboom.schedule.TimeSlot
import com.linkedin.rookboom.util.NullSafe.anyToOption
/**
* An implementation of the ScheduleDao class that uses JDBC to get data from an SQL database.
* @author Dmitriy Yefremov
*/
class InternalScheduleDaoImpl(val dataSource: DataSource) extends InternalScheduleDao {
private val jdbcTemplate = new NamedParameterJdbcTemplate(dataSource)
// Maps a joined event/appointment row. Appointment columns may be NULL (LEFT JOIN),
// which is detected via rs.wasNull() after reading app_id.
// NOTE(review): InternalAppointment's Option fields appear to be filled through the
// implicit NullSafe.anyToOption conversion imported at the top of the file (raw
// strings are passed where Options are expected) -- confirm.
private val eventRowMapper = new RowMapper[InternalEvent] {
def mapRow(rs: ResultSet, rowNum: Int) = {
// event properties
val eventId = rs.getLong("event_id")
val mailbox = rs.getString("mailbox")
val start = rs.getLong("start")
val end = rs.getLong("end")
val eventExtId = rs.getString("event_ext_id")
// appointment properties
// getLong returns 0 for SQL NULL; wasNull() distinguishes "no appointment".
val appId = rs.getLong("app_id")
val appointment = if (rs.wasNull()) {
None
} else {
val appExtId = rs.getString("app_ext_id")
val organizer = rs.getString("organizer")
Some(InternalAppointment(appId, appExtId, organizer))
}
InternalEvent(eventId, mailbox, TimeSlot(start, end), eventExtId, appointment)
}
}
// Maps a bare appointment row (no join).
private val appointmentRowMapper = new RowMapper[InternalAppointment] {
def mapRow(rs: ResultSet, rowNum: Int) = {
val appId = rs.getLong("id")
val appExtId = rs.getString("ext_id")
val organizer = rs.getString("organizer")
InternalAppointment(appId, appExtId, organizer)
}
}
// Fetches events (with their optional appointment) by primary key, keyed by id.
override def getEventsById(ids: Set[Long]): Map[Long, InternalEvent] = {
if (ids.isEmpty) {
return Map.empty
}
val sql = "SELECT e.id event_id, mailbox, start, end, e.ext_id event_ext_id, a.id app_id, a.ext_id app_ext_id, organizer " +
"FROM event e LEFT JOIN appointment a ON e.appointment_id = a.id " +
"WHERE e.id IN (:ids)"
val params = Map("ids" -> setAsJavaSet(ids))
val events = jdbcTemplate.query(sql, params, eventRowMapper)
events.map(e => (e.id, e)).toMap
}
// Fetches all events for the given mailboxes overlapping the time slot
// (end > from AND start < to, i.e. any intersection counts).
override def getEvents(mailboxes: Set[String], time: TimeSlot): Seq[InternalEvent] = {
if (mailboxes.isEmpty) {
return Seq.empty
}
val sql = "SELECT e.id event_id, mailbox, start, end, e.ext_id event_ext_id, a.id app_id, a.ext_id app_ext_id, organizer " +
"FROM event e LEFT JOIN appointment a ON e.appointment_id = a.id " +
"WHERE e.mailbox IN (:mailboxes) AND e.end > :from AND e.start < :to"
val params = Map(
"mailboxes" -> setAsJavaSet(mailboxes),
"from" -> time.begin,
"to" -> time.end
)
jdbcTemplate.query(sql, params, eventRowMapper).toSeq
}
// Batch-inserts events. NOTE(review): `.getOrElse(null)` widens the value to Any to
// feed the JDBC parameter map; `.orNull` would be the idiom for the String fields,
// but the Option[Long] appointment id cannot use orNull directly (AnyVal).
override def addEvents(events: Seq[InternalEvent]) {
if (events.isEmpty) {
return
}
val sql = "INSERT INTO event (mailbox, start, end, ext_id, appointment_id) " +
"VALUES (:mailbox, :start, :end, :ext_id, :appointment_id)"
val batchParams = events.map(event => {
val params = Map(
"mailbox" -> event.mailbox,
"start" -> event.time.begin,
"end" -> event.time.end,
"ext_id" -> event.extId.getOrElse(null),
"appointment_id" -> event.appointment.map(_.id).getOrElse(null)
)
new MapSqlParameterSource(params)
})
jdbcTemplate.batchUpdate(sql, batchParams.toArray[SqlParameterSource])
}
// Deletes events by primary key; no-op on an empty set.
override def deleteEvents(ids: Set[Long]) {
if (ids.isEmpty) {
return
}
val sql = "DELETE FROM event WHERE id IN (:ids)"
val params = Map("ids" -> setAsJavaSet(ids))
jdbcTemplate.update(sql, params)
}
// Batch-links events to appointments: map of event id -> appointment id.
override def updateAppointmentIds(ids: Map[Long, Long]) {
if (ids.isEmpty) {
return
}
val sql = "UPDATE event SET appointment_id = :appointment_id WHERE id = :id"
val batchParams = ids.map {
case (eventId, appId) => {
val params = Map(
"id" -> eventId,
"appointment_id" -> appId
)
new MapSqlParameterSource(params)
}
}
jdbcTemplate.batchUpdate(sql, batchParams.toArray[SqlParameterSource])
}
// Batch-inserts appointments; INSERT IGNORE silently skips duplicates (MySQL syntax).
override def addAppointments(appointments: Seq[InternalAppointment]) {
if (appointments.isEmpty) {
return
}
val sql = "INSERT IGNORE INTO appointment (ext_id, organizer) VALUES (:ext_id, :organizer)"
val batchParams = appointments.map(app => {
val params = Map(
"ext_id" -> app.extId.getOrElse(null),
"organizer" -> app.organizer.getOrElse(null)
)
new MapSqlParameterSource(params)
})
jdbcTemplate.batchUpdate(sql, batchParams.toArray[SqlParameterSource])
}
// Fetches appointments by primary key, keyed by id.
override def getAppointmentsById(ids: Set[Long]): Map[Long, InternalAppointment] = {
if (ids.isEmpty) {
return Map.empty
}
val sql = "SELECT * FROM appointment WHERE id IN (:ids)"
val params = Map("ids" -> setAsJavaSet(ids))
val appointments = jdbcTemplate.query(sql, params, appointmentRowMapper)
appointments.map(a => (a.id, a)).toMap
}
// Fetches appointments by external id, keyed by that id.
// NOTE(review): `a.extId.get` assumes every row matched by ext_id has a non-null
// ext_id -- true for this query, but fragile if the SQL changes.
override def getAppointmentsByExtId(extIds: Set[String]): Map[String, InternalAppointment] = {
if (extIds.isEmpty) {
return Map.empty
}
val sql = "SELECT * FROM appointment WHERE ext_id IN (:ext_ids)"
val params = Map("ext_ids" -> setAsJavaSet(extIds))
val appointments = jdbcTemplate.query(sql, params, appointmentRowMapper)
appointments.map(a => (a.extId.get, a)).toMap
}
}
| linkedin/RookBoom | services/src/main/scala/com/linkedin/rookboom/schedule/dao/InternalScheduleDaoImpl.scala | Scala | apache-2.0 | 6,234 |
package BIDMat
import DMat._
import IMat._
import FMat._
import scala.compat.Platform._
object TestDops {
  /** Micro-benchmark: times `m` additions of two n x n IMats and prints a GFLOPS figure. */
  def main(args: Array[String]): Unit = {
    val n = 2000
    val a = IMat(n, n)
    val b = IMat(n, n)
    val startMillis = currentTime
    val m = 1000
    println("starting up")
    (0 until m).foreach { _ =>
      val c = a + b
    }
    val elapsed = currentTime - startMillis
    println("time=" + elapsed + " msec, gflops=" + (n.doubleValue * n * m / elapsed / 1e6))
  }
}
| codeaudit/BIDMat | src/test/scala/BIDMat/TestDops.scala | Scala | bsd-3-clause | 442 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.catalyst.plans.logical.{Filter, LeafNode, LogicalPlan, Project}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.datasources.orc.OrcFileFormat
import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat
import org.apache.spark.sql.types.{ArrayType, DataType, MapType, StructType}
import org.apache.spark.sql.util.SchemaUtils._
/**
* Prunes unnecessary physical columns given a [[PhysicalOperation]] over a data source relation.
* By "physical column", we mean a column as defined in the data source format like Parquet format
* or ORC format. For example, in Spark SQL, a root-level Parquet column corresponds to a SQL
* column, and a nested Parquet column corresponds to a [[StructField]].
*/
object SchemaPruning extends Rule[LogicalPlan] {
import org.apache.spark.sql.catalyst.expressions.SchemaPruning._
// Nested-column pruning is gated behind a conf flag; pass the plan through untouched
// when it is disabled.
override def apply(plan: LogicalPlan): LogicalPlan =
  if (conf.nestedSchemaPruningEnabled) apply0(plan) else plan
private def apply0(plan: LogicalPlan): LogicalPlan =
plan transformDown {
// Match a project/filter chain over a file-based relation whose format supports
// nested-column pruning (Parquet or ORC v1).
case op @ PhysicalOperation(projects, filters,
l @ LogicalRelation(hadoopFsRelation: HadoopFsRelation, _, _, _))
if canPruneRelation(hadoopFsRelation) =>
// Rebuild the relation around the pruned data schema; fall back to the
// original operator when no pruning is possible.
prunePhysicalColumns(l.output, projects, filters, hadoopFsRelation.dataSchema,
prunedDataSchema => {
val prunedHadoopRelation =
hadoopFsRelation.copy(dataSchema = prunedDataSchema)(hadoopFsRelation.sparkSession)
buildPrunedRelation(l, prunedHadoopRelation)
}).getOrElse(op)
}
/**
* This method returns optional logical plan. `None` is returned if no nested field is required or
* all nested fields are required.
*/
private def prunePhysicalColumns(
    output: Seq[AttributeReference],
    projects: Seq[NamedExpression],
    filters: Seq[Expression],
    dataSchema: StructType,
    leafNodeBuilder: StructType => LeafNode): Option[LogicalPlan] = {
  val (normalizedProjects, normalizedFilters) =
    normalizeAttributeRefNames(output, projects, filters)
  val requestedRootFields = identifyRootFields(normalizedProjects, normalizedFilters)

  // Bail out unless at least one requested root field is a nested field.
  if (!requestedRootFields.exists(!_.derivedFromAtt)) {
    None
  } else {
    val prunedDataSchema = pruneDataSchema(dataSchema, requestedRootFields)
    // Rewrite only when pruning actually removed leaf fields. Comparing leaf counts
    // works because prunedDataSchema's fields are a subset of dataSchema's.
    if (countLeaves(dataSchema) > countLeaves(prunedDataSchema)) {
      val prunedRelation = leafNodeBuilder(prunedDataSchema)
      val projectionOverSchema = ProjectionOverSchema(prunedDataSchema)
      Some(buildNewProjection(projects, normalizedProjects, normalizedFilters,
        prunedRelation, projectionOverSchema))
    } else {
      None
    }
  }
}
/**
* Checks to see if the given relation can be pruned. Currently we support Parquet and ORC v1.
*/
private def canPruneRelation(fsRelation: HadoopFsRelation) =
fsRelation.fileFormat.isInstanceOf[ParquetFileFormat] ||
fsRelation.fileFormat.isInstanceOf[OrcFileFormat]
/**
* Normalizes the names of the attribute references in the given projects and filters to reflect
* the names in the given logical relation. This makes it possible to compare attributes and
* fields by name. Returns a tuple with the normalized projects and filters, respectively.
*/
private def normalizeAttributeRefNames(
output: Seq[AttributeReference],
projects: Seq[NamedExpression],
filters: Seq[Expression]): (Seq[NamedExpression], Seq[Expression]) = {
val normalizedAttNameMap = output.map(att => (att.exprId, att.name)).toMap
val normalizedProjects = projects.map(_.transform {
case att: AttributeReference if normalizedAttNameMap.contains(att.exprId) =>
att.withName(normalizedAttNameMap(att.exprId))
}).map { case expr: NamedExpression => expr }
val normalizedFilters = filters.map(_.transform {
case att: AttributeReference if normalizedAttNameMap.contains(att.exprId) =>
att.withName(normalizedAttNameMap(att.exprId))
})
(normalizedProjects, normalizedFilters)
}
/**
* Builds the new output [[Project]] Spark SQL operator that has the `leafNode`.
*/
private def buildNewProjection(
projects: Seq[NamedExpression],
normalizedProjects: Seq[NamedExpression],
filters: Seq[Expression],
leafNode: LeafNode,
projectionOverSchema: ProjectionOverSchema): Project = {
// Construct a new target for our projection by rewriting and
// including the original filters where available
val projectionChild =
if (filters.nonEmpty) {
val projectedFilters = filters.map(_.transformDown {
case projectionOverSchema(expr) => expr
})
val newFilterCondition = projectedFilters.reduce(And)
Filter(newFilterCondition, leafNode)
} else {
leafNode
}
// Construct the new projections of our Project by
// rewriting the original projections
val newProjects = normalizedProjects.map(_.transformDown {
case projectionOverSchema(expr) => expr
}).map { case expr: NamedExpression => expr }
if (log.isDebugEnabled) {
logDebug(s"New projects:\\n${newProjects.map(_.treeString).mkString("\\n")}")
}
Project(restoreOriginalOutputNames(newProjects, projects.map(_.name)), projectionChild)
}
/**
* Builds a pruned logical relation from the output of the output relation and the schema of the
* pruned base relation.
*/
private def buildPrunedRelation(
outputRelation: LogicalRelation,
prunedBaseRelation: HadoopFsRelation) = {
val prunedOutput = getPrunedOutput(outputRelation.output, prunedBaseRelation.schema)
// also add the metadata output if any
// TODO: should be able to prune the metadata schema
val metaOutput = outputRelation.output.collect {
case MetadataAttribute(attr) => attr
}
outputRelation.copy(relation = prunedBaseRelation, output = prunedOutput ++ metaOutput)
}
// Prune the given output to make it consistent with `requiredSchema`.
private def getPrunedOutput(
output: Seq[AttributeReference],
requiredSchema: StructType): Seq[AttributeReference] = {
// We need to replace the expression ids of the pruned relation output attributes
// with the expression ids of the original relation output attributes so that
// references to the original relation's output are not broken
val outputIdMap = output.map(att => (att.name, att.exprId)).toMap
requiredSchema
.toAttributes
.map {
case att if outputIdMap.contains(att.name) =>
att.withExprId(outputIdMap(att.name))
case att => att
}
}
/**
* Counts the "leaf" fields of the given dataType. Informally, this is the
* number of fields of non-complex data type in the tree representation of
* [[DataType]].
*/
private def countLeaves(dataType: DataType): Int = {
dataType match {
case array: ArrayType => countLeaves(array.elementType)
case map: MapType => countLeaves(map.keyType) + countLeaves(map.valueType)
case struct: StructType =>
struct.map(field => countLeaves(field.dataType)).sum
case _ => 1
}
}
}
| holdenk/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SchemaPruning.scala | Scala | apache-2.0 | 8,665 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import scala.collection.mutable
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, AttributeSet, Expression, NamedExpression, PredicateHelper, SchemaPruning}
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.util.CharVarcharUtils
import org.apache.spark.sql.connector.expressions.FieldReference
import org.apache.spark.sql.connector.expressions.aggregate.Aggregation
import org.apache.spark.sql.connector.expressions.filter.{Filter => V2Filter}
import org.apache.spark.sql.connector.read.{Scan, ScanBuilder, SupportsPushDownAggregates, SupportsPushDownFilters, SupportsPushDownRequiredColumns, SupportsPushDownV2Filters}
import org.apache.spark.sql.execution.datasources.{DataSourceStrategy, PushableColumnWithoutNestedColumn}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources
import org.apache.spark.sql.types.StructType
object PushDownUtils extends PredicateHelper {
  /**
   * Pushes down filters to the data source reader
   *
   * @return pushed filter and post-scan filters.
   */
  def pushFilters(
      scanBuilder: ScanBuilder,
      filters: Seq[Expression]): (Either[Seq[sources.Filter], Seq[V2Filter]], Seq[Expression]) = {
    scanBuilder match {
      case r: SupportsPushDownFilters =>
        // A map from translated data source leaf node filters to original catalyst filter
        // expressions. For a `And`/`Or` predicate, it is possible that the predicate is partially
        // pushed down. This map can be used to construct a catalyst filter expression from the
        // input filter, or a superset(partial push down filter) of the input filter.
        val translatedFilterToExpr = mutable.HashMap.empty[sources.Filter, Expression]
        val translatedFilters = mutable.ArrayBuffer.empty[sources.Filter]
        // Catalyst filter expression that can't be translated to data source filters.
        val untranslatableExprs = mutable.ArrayBuffer.empty[Expression]
        for (filterExpr <- filters) {
          val translated =
            DataSourceStrategy.translateFilterWithMapping(filterExpr, Some(translatedFilterToExpr),
              nestedPredicatePushdownEnabled = true)
          if (translated.isEmpty) {
            untranslatableExprs += filterExpr
          } else {
            translatedFilters += translated.get
          }
        }
        // Data source filters that need to be evaluated again after scanning. which means
        // the data source cannot guarantee the rows returned can pass these filters.
        // As a result we must return it so Spark can plan an extra filter operator.
        val postScanFilters = r.pushFilters(translatedFilters.toArray).map { filter =>
          DataSourceStrategy.rebuildExpressionFromFilter(filter, translatedFilterToExpr)
        }
        (Left(r.pushedFilters()), (untranslatableExprs ++ postScanFilters).toSeq)
      case r: SupportsPushDownV2Filters =>
        // Same flow as the v1 branch above, but using the V2Filter API.
        // A map from translated data source leaf node filters to original catalyst filter
        // expressions. For a `And`/`Or` predicate, it is possible that the predicate is partially
        // pushed down. This map can be used to construct a catalyst filter expression from the
        // input filter, or a superset(partial push down filter) of the input filter.
        val translatedFilterToExpr = mutable.HashMap.empty[V2Filter, Expression]
        val translatedFilters = mutable.ArrayBuffer.empty[V2Filter]
        // Catalyst filter expression that can't be translated to data source filters.
        val untranslatableExprs = mutable.ArrayBuffer.empty[Expression]
        for (filterExpr <- filters) {
          val translated =
            DataSourceV2Strategy.translateFilterV2WithMapping(
              filterExpr, Some(translatedFilterToExpr), nestedPredicatePushdownEnabled = true)
          if (translated.isEmpty) {
            untranslatableExprs += filterExpr
          } else {
            translatedFilters += translated.get
          }
        }
        // Data source filters that need to be evaluated again after scanning. which means
        // the data source cannot guarantee the rows returned can pass these filters.
        // As a result we must return it so Spark can plan an extra filter operator.
        val postScanFilters = r.pushFilters(translatedFilters.toArray).map { filter =>
          DataSourceV2Strategy.rebuildExpressionFromFilter(filter, translatedFilterToExpr)
        }
        (Right(r.pushedFilters), (untranslatableExprs ++ postScanFilters).toSeq)
      case f: FileScanBuilder =>
        // FileScanBuilder accepts catalyst expressions directly; no translation step needed.
        val postScanFilters = f.pushFilters(filters)
        (Left(f.pushedFilters), postScanFilters)
      // Builder does not support filter pushdown: nothing pushed, all filters post-scan.
      case _ => (Left(Nil), filters)
    }
  }

  /**
   * Pushes down aggregates to the data source reader
   *
   * @return pushed aggregation.
   */
  def pushAggregates(
      scanBuilder: ScanBuilder,
      aggregates: Seq[AggregateExpression],
      groupBy: Seq[Expression]): Option[Aggregation] = {
    // Only top-level (non-nested) columns are supported as group-by keys here.
    def columnAsString(e: Expression): Option[FieldReference] = e match {
      case PushableColumnWithoutNestedColumn(name) =>
        Some(FieldReference(name).asInstanceOf[FieldReference])
      case _ => None
    }
    scanBuilder match {
      case r: SupportsPushDownAggregates if aggregates.nonEmpty =>
        val translatedAggregates = aggregates.flatMap(DataSourceStrategy.translateAggregate)
        val translatedGroupBys = groupBy.flatMap(columnAsString)
        // Aggregation pushdown is all-or-nothing: bail out if any aggregate or
        // group-by expression failed to translate.
        if (translatedAggregates.length != aggregates.length ||
          translatedGroupBys.length != groupBy.length) {
          return None
        }
        val agg = new Aggregation(translatedAggregates.toArray, translatedGroupBys.toArray)
        // Only return the aggregation if the source actually accepts it.
        Some(agg).filter(r.pushAggregation)
      case _ => None
    }
  }

  /**
   * Applies column pruning to the data source, w.r.t. the references of the given expressions.
   *
   * @return the `Scan` instance (since column pruning is the last step of operator pushdown),
   *         and new output attributes after column pruning.
   */
  def pruneColumns(
      scanBuilder: ScanBuilder,
      relation: DataSourceV2Relation,
      projects: Seq[NamedExpression],
      filters: Seq[Expression]): (Scan, Seq[AttributeReference]) = {
    val exprs = projects ++ filters
    val requiredColumns = AttributeSet(exprs.flatMap(_.references))
    val neededOutput = relation.output.filter(requiredColumns.contains)
    scanBuilder match {
      case r: SupportsPushDownRequiredColumns if SQLConf.get.nestedSchemaPruningEnabled =>
        // Nested pruning: compute the minimal schema covering the requested
        // root fields, then restrict it to the columns actually referenced.
        val rootFields = SchemaPruning.identifyRootFields(projects, filters)
        val prunedSchema = if (rootFields.nonEmpty) {
          SchemaPruning.pruneDataSchema(relation.schema, rootFields)
        } else {
          new StructType()
        }
        val neededFieldNames = neededOutput.map(_.name).toSet
        r.pruneColumns(StructType(prunedSchema.filter(f => neededFieldNames.contains(f.name))))
        val scan = r.build()
        scan -> toOutputAttrs(scan.readSchema(), relation)
      case r: SupportsPushDownRequiredColumns =>
        // Top-level pruning only: pass the needed attributes straight through.
        r.pruneColumns(neededOutput.toStructType)
        val scan = r.build()
        // always project, in case the relation's output has been updated and doesn't match
        // the underlying table schema
        scan -> toOutputAttrs(scan.readSchema(), relation)
      case _ => scanBuilder.build() -> relation.output
    }
  }

  // Converts the scan's read schema back into attributes, reusing the
  // relation's original expression ids so upstream references stay valid.
  private def toOutputAttrs(
      schema: StructType,
      relation: DataSourceV2Relation): Seq[AttributeReference] = {
    val nameToAttr = relation.output.map(_.name).zip(relation.output).toMap
    val cleaned = CharVarcharUtils.replaceCharVarcharWithStringInSchema(schema)
    cleaned.toAttributes.map {
      // we have to keep the attribute id during transformation
      a => a.withExprId(nameToAttr(a.name).exprId)
    }
  }
}
| taroplus/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/PushDownUtils.scala | Scala | apache-2.0 | 8,757 |
package edu.cmu.lti.oaqa.bagpipes.controller
import edu.cmu.lti.oaqa.bagpipes.space.Explorer
import edu.cmu.lti.oaqa.bagpipes.executor.Executor
import edu.cmu.lti.oaqa.bagpipes.executor.DataCache
import edu.cmu.lti.oaqa.bagpipes.executor.ExecutableComponent
import edu.cmu.lti.oaqa.bagpipes.executor.ExecutorTypes
import edu.cmu.lti.oaqa.bagpipes.executor._
import edu.cmu.lti.oaqa.bagpipes.space._
import edu.cmu.lti.oaqa.bagpipes.configuration.Descriptors.ConfigurationDescriptor
import edu.cmu.lti.oaqa.bagpipes.space.ConfigurationSpace
import edu.cmu.lti.oaqa.bagpipes.configuration.Descriptors.ExecutableConf
import edu.cmu.lti.oaqa.bagpipes.configuration.Descriptors.CollectionReaderDescriptor
import edu.cmu.lti.oaqa.bagpipes.configuration.Descriptors.ComponentDescriptor
import edu.cmu.lti.oaqa.bagpipes.space.test._
// NOTE(review): this file is auto-generated by the Scala IDE worksheet runtime
// (see the WorksheetSupport import); the $skip(...) calls encode character
// offsets back into the original worksheet source. Do not edit by hand.
object controller extends confTrees {
  object SimpleExecutionController extends ExecutionController(DepthExplorer, SimpleExecutor);import org.scalaide.worksheet.runtime.library.WorksheetSupport._; def main(args: Array[String])=$execute{;$skip(1001);
  val controller = SimpleExecutionController;System.out.println("""controller : edu.cmu.lti.oaqa.bagpipes.controller.controller.SimpleExecutionController.type = """ + $show(controller ));$skip(34);
  controller.execute(confTree4)()}
}
| Ander-MZ/CSE | .worksheet/src/edu.cmu.lti.oaqa.bagpipes.controller.controller.scala | Scala | apache-2.0 | 1,314 |
/*
* Copyright (c) 2012-2016 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.schemaguru
import cli.{ CommandContainer, DdlCommand, SchemaCommand }
import cli.Parser._
object Main extends App {
  // Parse the command line into a CommandContainer, then dispatch on the
  // selected subcommand; anything unrecognised prints the usage as an error.
  parser.parse(args, CommandContainer(None)).flatMap(_.command) match {
    case Some(command: SchemaCommand) => command.processSchema()
    case Some(command: DdlCommand) => command.processDdl()
    case _ => parser.showUsageAsError()
  }
} | snowplow/schema-guru | src/main/scala/com.snowplowanalytics/schemaguru/Main.scala | Scala | apache-2.0 | 1,125 |
/*
* AudioFileIn.scala
* (FScape)
*
* Copyright (c) 2001-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.fscape.lucre
package graph
import java.net.URI
import de.sciss.audiofile.AudioFile
import de.sciss.fscape.Graph.{ProductReader, RefMapIn}
import de.sciss.fscape.UGen.Adjunct
import de.sciss.fscape.graph.{ConstantD, ConstantL, SampleRate => _SampleRate}
import de.sciss.fscape.lucre.UGenGraphBuilder.Input
import de.sciss.fscape.lucre.graph.impl.FutureConstant
import de.sciss.fscape.stream.{BufD, BufL, StreamIn, StreamOut, Builder => SBuilder}
import de.sciss.fscape.{GE, UGen, UGenGraph, UGenIn, UGenInLike, UGenSource}
import de.sciss.lucre.Artifact
import de.sciss.synth.UGenSource.Vec
import de.sciss.proc.AudioCue
object AudioFileIn extends AudioFileInPlatform with ProductReader[AudioFileIn] {
  // Deserializer for NumFrames elements (single string argument: the attribute key).
  object NumFrames extends ProductReader[NumFrames] {
    override def read(in: RefMapIn, key: String, arity: Int): NumFrames = {
      require (arity == 1)
      val _key = in.readString()
      new NumFrames(_key)
    }
  }
  /** Graph element yielding the number of frames of the audio input
    * referenced by attribute `key`. Resolved synchronously from an `AudioCue`,
    * or asynchronously from the file's spec when only an artifact URI is given.
    */
  final case class NumFrames(key: String) extends GE.Lazy {
    override def productPrefix = s"AudioFileIn$$NumFrames"
    protected def makeUGens(implicit b: UGenGraph.Builder): UGenInLike = {
      val cueTr = AudioFileIn.getCue(key, b)
      cueTr match {
        case Left (cue) => ConstantL(cue.numFrames)
        case Right(uri) => FutureConstant[Long, BufL](Adjunct.FileIn(uri), { ctrl =>
          import ctrl.config.executionContext
          AudioFile.readSpecAsync(uri).map(_.numFrames)
        })
      }
    }
  }
  // Deserializer for SampleRate elements (single string argument: the attribute key).
  object SampleRate extends ProductReader[SampleRate] {
    override def read(in: RefMapIn, key: String, arity: Int): SampleRate = {
      require (arity == 1)
      val _key = in.readString()
      new SampleRate(_key)
    }
  }
  /** Graph element yielding the sample rate of the audio input referenced by
    * attribute `key`. Mirrors `NumFrames`: constant for an `AudioCue`,
    * asynchronously read from the file spec for a plain artifact URI.
    */
  final case class SampleRate(key: String) extends _SampleRate with GE.Lazy {
    override def productPrefix = s"AudioFileIn$$SampleRate"
    protected def makeUGens(implicit b: UGenGraph.Builder): UGenInLike = {
      val cueTr = AudioFileIn.getCue(key, b)
      cueTr match {
        case Left (cue) => ConstantD(cue.sampleRate)
        case Right(uri) => FutureConstant[Double, BufD](Adjunct.FileIn(uri), { ctrl =>
          import ctrl.config.executionContext
          AudioFile.readSpecAsync(uri).map(_.sampleRate)
        })
      }
    }
  }
  // Resolves attribute `key` to either a fully specified `AudioCue` (Left)
  // or a bare artifact URI (Right). Any other attribute value is an error.
  private def getCue(key: String, b: UGenGraph.Builder): Either[AudioCue, Artifact.Value] = {
    val ub = UGenGraphBuilder.get(b)
    val v = ub.requestInput(Input.Attribute(key)).peer.getOrElse(sys.error(s"AudioFileIn missing attribute $key"))
    v match {
      case a: AudioCue => Left(a)
      case f: Artifact.Value => Right(f)
      case other => sys.error(s"AudioFileIn - requires AudioCue or Artifact value, found $other")
    }
  }
  // Deserializer for WithCue elements (uri, offset, gain, numChannels).
  object WithCue extends ProductReader[WithCue] {
    override def read(in: RefMapIn, key: String, arity: Int): WithCue = {
      require (arity == 4)
      val _uri = in.readURI()
      val _offset = in.readLong()
      val _gain = in.readDouble()
      val _numChannels = in.readInt()
      new WithCue(_uri, _offset, _gain, _numChannels)
    }
  }
  /** Multi-channel UGen that actually reads the audio file, expanding to one
    * output per channel. File location, offset and gain are passed along as
    * adjuncts rather than inputs.
    */
  final case class WithCue(uri: URI, offset: Long, gain: Double, numChannels: Int)
    extends UGenSource.MultiOut {
    protected def makeUGens(implicit b: UGenGraph.Builder): UGenInLike =
      makeUGen(Vector.empty)
    protected def makeUGen(args: Vec[UGenIn])(implicit b: UGenGraph.Builder): UGenInLike = {
      val adjuncts =
        Adjunct.FileIn(uri) :: /*Adjunct.AudioFileSpec(cue.spec) ::*/
        Adjunct.Long(offset) :: Adjunct.Double(gain) :: Nil
      UGen.MultiOut(this, args, numOutputs = numChannels, adjuncts = adjuncts)
    }
    private[fscape] def makeStream(args: Vec[StreamIn])(implicit b: SBuilder): Vec[StreamOut] = {
      stream.AudioFileIn(uri = uri, offset = offset, gain = gain, numChannels = numChannels)
    }
    override def productPrefix: String = s"AudioFileIn$$WithCue"
  }
  // Deserializer for the top-level AudioFileIn element (single string key).
  override def read(in: RefMapIn, key: String, arity: Int): AudioFileIn = {
    require (arity == 1)
    val _key = in.readString()
    new AudioFileIn(_key)
  }
}
/** Reads the audio input referenced by attribute `key`. Expands either to a
  * `WithCue` UGen (when the attribute resolves to an `AudioCue`) or to the
  * platform-specific fallback `mkCue` (when only an artifact URI is known).
  */
final case class AudioFileIn(key: String) extends GE.Lazy {
  protected def makeUGens(implicit b: UGenGraph.Builder): UGenInLike = {
    val cueTr = AudioFileIn.getCue(key, b)
    cueTr match {
      case Left (cue) =>
        AudioFileIn.WithCue(cue.artifact, cue.offset, cue.gain, cue.spec.numChannels)
      case Right(uri) =>
        // XXX TODO -- we do have to find a way to determine the number of channels
        // before expanding the UGen
        // - could be synchronous on JVM and yet unsupported on JS
        // - should have an asynchronous `prepare` stage like AuralProc
        // AudioFileIn.WithCue(uri, offset = 0L, gain = 1.0, numChannels = 1)
        AudioFileIn.mkCue(uri)
    }
  }
  // Convenience accessors for companion graph elements on the same attribute key.
  def numFrames : GE = AudioFileIn.NumFrames (key)
  def sampleRate: _SampleRate = AudioFileIn.SampleRate(key)
} | Sciss/FScape-next | lucre/shared/src/main/scala/de/sciss/fscape/lucre/graph/AudioFileIn.scala | Scala | agpl-3.0 | 5,191 |
package scala.test.plugins
import scala.tools.nsc
import nsc.Global
import nsc.Phase
import nsc.plugins.Plugin
import nsc.plugins.PluginComponent
class ThePlugin(val global: Global) extends Plugin {
  import global._

  /** Plugin identifier; also reused as the name of its single phase. */
  val name = "beforeparser"
  val description = "Declares one plugin that wants to be before the parser phase"
  val components = List[PluginComponent](component)

  /** The single component, constrained to run before the "parser" phase. */
  private object component extends PluginComponent {
    val global = ThePlugin.this.global
    val runsAfter = List.empty[String]
    override val runsBefore = List("parser")
    val phaseName = ThePlugin.this.name
    def newPhase(prev: Phase) = new NoOpPhase(prev)
  }

  /** A phase that performs no work when run. */
  private class NoOpPhase(prev: Phase) extends Phase(prev) {
    def name = ThePlugin.this.name
    def run: Unit = ()
  }
}
| yusuke2255/dotty | tests/untried/neg/t7494-before-parser/ThePlugin.scala | Scala | bsd-3-clause | 786 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.subscribers
import monix.execution.Callback
import monix.execution.Ack.{Continue, Stop}
import scala.util.control.NonFatal
import monix.execution.{Ack, Scheduler}
import monix.reactive.observers.Subscriber
/** Subscriber implementation for `Observable.foreach` */
private[reactive] final class ForeachSubscriber[A](f: A => Unit, onFinish: Callback[Throwable, Unit], s: Scheduler)
  extends Subscriber.Sync[A] {

  implicit val scheduler: Scheduler = s

  // Ensures the terminal callback fires at most once, whether via
  // onError or onComplete.
  private[this] var isDone = false

  def onNext(elem: A): Ack =
    try {
      f(elem)
      Continue
    } catch {
      case e if NonFatal(e) =>
        // Report the failure and stop the stream; fatal errors propagate.
        onError(e)
        Stop
    }

  def onError(ex: Throwable): Unit = {
    if (!isDone) {
      isDone = true
      onFinish.onError(ex)
    }
  }

  def onComplete(): Unit = {
    if (!isDone) {
      isDone = true
      onFinish.onSuccess(())
    }
  }
}
| monix/monix | monix-reactive/shared/src/main/scala/monix/reactive/internal/subscribers/ForeachSubscriber.scala | Scala | apache-2.0 | 1,560 |
/*
* Copyright 1998-2019 Linux.org.ru
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ru.org.linux.util.markdown
import java.util
import com.vladsch.flexmark.html.HtmlRenderer
import com.vladsch.flexmark.html.renderer.{NodeRenderer, NodeRenderingHandler}
import com.vladsch.flexmark.parser.Parser
import com.vladsch.flexmark.util.ast.Node
import com.vladsch.flexmark.util.options.{DataHolder, DataKey, MutableDataHolder}
import scala.collection.JavaConverters._
object CutExtension {
  // When true, the cut body is collapsed to a "read more" link instead of
  // being rendered inline.
  val CutCollapsed = new DataKey[Boolean]("CutCollapsed", false)
  // Base URL that the "read more" link points at; the cut's anchor id is
  // appended to it as a fragment.
  val CutLink = new DataKey[String]("CutLink", "")
}
/** Flexmark extension wiring the LOR "cut" feature into both the parser and
  * the HTML renderer.
  */
class CutExtension extends Parser.ParserExtension with HtmlRenderer.HtmlRendererExtension {
  override def parserOptions(mutableDataHolder: MutableDataHolder): Unit = ()

  // Register the block parser that recognises cut markers.
  override def extend(builder: Parser.Builder): Unit =
    builder.customBlockParserFactory(new LorCutParser.Factory)

  override def rendererOptions(mutableDataHolder: MutableDataHolder): Unit = ()

  // Register the renderer responsible for CutNode elements.
  override def extend(builder: HtmlRenderer.Builder, renderType: String): Unit =
    builder.nodeRendererFactory(new CutRenderer(_))
}
// Renders CutNode elements: either a collapsed "( читать дальше... )" link
// pointing at the full page, or an anchored div containing the cut body.
class CutRenderer(options: DataHolder) extends NodeRenderer {
  override def getNodeRenderingHandlers: util.Set[NodeRenderingHandler[_ <: Node]] = Set(
    new NodeRenderingHandler[CutNode](classOf[CutNode], (node, ctx, html) => {
      // Stable node id: used both as the link fragment and as the div anchor.
      val id = ctx.getNodeId(node)
      if (options.get(CutExtension.CutCollapsed)) {
        // Collapsed mode: emit a paragraph with a link to CutLink + "#" + id.
        html.tag("p")
        html.text("( ")
        html
          .withAttr()
          .attr("href", options.get(CutExtension.CutLink) + "#" + id)
          .tag("a")
          .text("читать дальше...")
          .closeTag("a")
        html.text(" )")
        html.closeTag("p")
      } else {
        // Expanded mode: wrap the children in a div carrying the anchor id.
        html.withAttr.attr("id", id).tagLineIndent("div", () => {
          ctx.renderChildren(node)
        })
      }
    })).asJava.asInstanceOf[java.util.Set[NodeRenderingHandler[_]]]
}
| fat0troll/lorsource | src/main/scala/ru/org/linux/util/markdown/CutExtension.scala | Scala | apache-2.0 | 2,514 |
package net.sansa_stack.owl.spark.rdd
import com.typesafe.scalalogging.Logger
import net.sansa_stack.owl.common.parsing.FunctionalSyntaxParsing
import org.apache.spark.sql.SparkSession
import org.semanticweb.owlapi.io.OWLParserException
object FunctionalSyntaxOWLAxiomsRDDBuilder extends FunctionalSyntaxParsing {
  private val logger = Logger(this.getClass)

  /**
   * Reads the file at `filePath` and parses its functional-syntax expressions
   * into an RDD of OWL axioms.
   */
  def build(spark: SparkSession, filePath: String): OWLAxiomsRDD = {
    build(spark, FunctionalSyntaxOWLExpressionsRDDBuilder.build(spark, filePath))
  }

  // FIXME: It has to be ensured that expressionsRDD is in functional syntax
  /**
   * Parses each functional-syntax expression into an OWL axiom. Expressions
   * that fail to parse (or yield no axiom) are logged and dropped.
   */
  def build(spark: SparkSession, expressionsRDD: OWLExpressionsRDD): OWLAxiomsRDD = {
    expressionsRDD.flatMap { expression =>
      try {
        // Option(...) discards a possible null result of makeAxiom, matching
        // the previous map-to-null-then-filter behaviour without a null sentinel.
        Option(makeAxiom(expression))
      } catch {
        case exception: OWLParserException =>
          logger.warn("Parser error for line " + expression + ": " + exception.getMessage)
          None
      }
    }
  }
}
| SANSA-Stack/SANSA-RDF | sansa-owl/sansa-owl-spark/src/main/scala/net/sansa_stack/owl/spark/rdd/FunctionalSyntaxOWLAxiomsRDDBuilder.scala | Scala | apache-2.0 | 958 |
package org.template.recommendation
import org.apache.predictionio.controller.PPreparator
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
class Preparator
  extends PPreparator[TrainingData, PreparedData] {

  /** No-op preparation step: wraps the training data unchanged. */
  def prepare(sc: SparkContext, trainingData: TrainingData): PreparedData =
    new PreparedData(trainingData.items, trainingData.ratings)
}
// Immutable container handed from the preparator to the algorithm:
// the items and ratings RDDs taken verbatim from the training data.
class PreparedData(
  val items: RDD[Item],
  val ratings: RDD[Rating]
) extends Serializable | alex9311/PredictionIO | examples/scala-parallel-recommendation/filter-by-category/src/main/scala/Preparator.scala | Scala | apache-2.0 | 530 |
//package nz.wicker.autoencoder.demo
//
//import scala.Array.canBuildFrom
//
//import org.jfree.chart.ChartPanel
//import org.jfree.chart.JFreeChart
//import org.jfree.chart.axis.NumberAxis
//import org.jfree.chart.plot.XYPlot
//import org.jfree.chart.renderer.xy.XYSplineRenderer
//import org.jfree.data.xy.DefaultXYDataset
//import org.jfree.ui.ApplicationFrame
//import nz.wicker.autoencoder.math.matrix.Mat
//import nz.wicker.autoencoder.neuralnet.FullBipartiteConnection
//import nz.wicker.autoencoder.neuralnet.rbm.BernoulliUnitLayer
//import nz.wicker.autoencoder.neuralnet.rbm.DefaultRbmTrainingConfiguration
//import nz.wicker.autoencoder.neuralnet.rbm.Rbm
//import nz.wicker.autoencoder.neuralnet.rbm.RbmStack
//
//object SingleRbmExample {
//
// def randomStepFunction(steps: Int): (Double => Double) = {
// val onOff = (0 to steps).map{ x => if (math.random > 0.5) 1d else 0d }
// (x => onOff((x * steps).floor.toInt))
// }
//
// def interval(dataDim: Int) = (for (i <- 0 until dataDim) yield {
// i.toDouble / dataDim
// }).toArray
//
// def arrToRow(arr: Array[Double]) = {
// val res = new Mat(1, arr.size, 0)
// for (i <- 0 until arr.size) res(0, i) = arr(i)
// res
// }
//
// def rowToArr(row: Mat) = {
// val res = new Array[Double](row.width)
// for (i <- 0 until row.width) {
// res(i) = row(0, i)
// }
// res
// }
//
// def main(args: Array[String]) {
//
// // generate data
// val steps = 10
// val visDim = 1000
// val hidDim = 12
// val numberOfExamples = 32000
// val data = new Mat(numberOfExamples, visDim, 0)
// for (r <- 0 until numberOfExamples) {
// val function = randomStepFunction(steps)
// val rowData = interval(visDim) map function
// for (c <- 0 until visDim) {
// data(r, c) = rowData(c)
// }
// }
//
// // generate a single RBM
// val rbm = new Rbm(
// new BernoulliUnitLayer(visDim),
// new FullBipartiteConnection(visDim, hidDim),
// new BernoulliUnitLayer(hidDim)
// )
//
// // train the single RBM
// rbm.train(data,
// new ConstantConfigurationTrainingStrategy(new DefaultRbmTrainingConfiguration()),
// Nil
// )
//
// // create random examples and see the reconstructions after
// // one confabulation step
//
// for (i <- 0 until 5) {
// val f = randomStepFunction(steps)
// val input = arrToRow(interval(visDim) map f)
// val output = rbm.gibbsSampling(input, 1, true)._1
// val xValues = interval(visDim)
// val pointset = new DefaultXYDataset()
// pointset.addSeries("input", Array(xValues, rowToArr(input)))
// pointset.addSeries("output", Array(xValues, rowToArr(output)))
// val splineRenderer = new XYSplineRenderer()
// val xAxis = new NumberAxis("foo")
// val yAxis = new NumberAxis("bar")
// val plot = new XYPlot(pointset, xAxis, yAxis, splineRenderer)
// val chart = new JFreeChart(plot)
// val frame = new ApplicationFrame("Example...")
// val chartPanel = new ChartPanel(chart)
// frame.setContentPane(chartPanel)
// frame.pack()
// frame.setVisible(true)
// }
// }
//} | joergwicker/autoencoder | src/main/scala/nz/wicker/autoencoder/demo/SingleRbmExample.scala | Scala | gpl-3.0 | 3,203 |
package com.twitter.app
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
// Test fixture: an App whose error exit is captured instead of terminating
// the JVM, so tests can assert on the reason.
class TestApp(f: () => Unit) extends App {
  // Set when exitOnError fires; None means the error path was never taken.
  var reason: Option[String] = None

  protected override def exitOnError(reason: String): Unit = {
    this.reason = Some(reason)
  }

  def main(): Unit = f()
}
// Deliberately broken fixture: the field initialiser throws, so the first
// reference to this object fails during static initialisation (the test
// expects an ExceptionInInitializerError). Do not "fix" this.
object VeryBadApp extends App {
  var reason: String = throwRuntime()
  protected def throwRuntime(): String = {
    throw new RuntimeException("this is a bad app")
  }
  def main() = {
  }
}
@RunWith(classOf[JUnitRunner])
class AppTest extends FunSuite {

  test("App: make sure system.exit called on exception from main") {
    val failing = new TestApp(() => throw new RuntimeException("simulate main failing"))
    failing.main(Array())
    assert(failing.reason == Some("Exception thrown in main on startup"))
  }

  test("App: propagate underlying exception from fields in app") {
    // Referencing VeryBadApp triggers its throwing field initialiser.
    intercept[ExceptionInInitializerError] {
      VeryBadApp.main(Array.empty)
    }
  }

  test("App: register on main call, last App wins") {
    val first = new TestApp(() => ())
    val second = new TestApp(() => ())
    // Neither instance is registered before main has run on it.
    assert(App.registered != Some(first))
    assert(App.registered != Some(second))
    first.main(Array.empty)
    assert(App.registered === Some(first))
    second.main(Array.empty)
    assert(App.registered === Some(second))
  }

  test("App: pass in bad args and expect usage") {
    val badArgs = new TestApp(() => ())
    badArgs.main(Array("-environment=staging", "-environment=staging"))
    badArgs.reason match {
      case Some(message) =>
        assert(message.contains("""Error parsing flag "environment""""))
      case None =>
        fail("There should have been a usage printed and was not")
    }
  }
}
| travisbrown/util | util-app/src/test/scala/com/twitter/app/AppTest.scala | Scala | apache-2.0 | 1,724 |
package avrohugger.filesorter
import spray.json.DefaultJsonProtocol._
import spray.json._
/**
* Code adapted from https://github.com/ch4mpy/sbt-avro/blob/master/src/main/scala/com/c4soft/sbtavro/SbtAvro.scala
* by Jerome Wascongne
*/
object ReferredTypeFinder {

  // Field names and type tags used by the Avro schema JSON format.
  object Keys {
    val Fields = "fields"
    val Type = "type"
    val Items = "items"
    val Values = "values"
    val Array = "array"
    val Map = "map"
    val Enum = "enum"
    val Record = "record"
    val Name = "name"
  }

  /**
   * Collects the names of all types referred to by the given Avro schema
   * fragment, descending into arrays, maps, records and unions.
   */
  def findReferredTypes(json: JsValue): List[String] = {
    // Handles a JSON object: dispatch on its "type" tag and recurse into the
    // part of the definition that may carry further type references.
    def referredByComplexType(fields: Map[String, JsValue]): List[String] =
      fields(Keys.Type) match {
        case JsString(Keys.Array) => findReferredTypes(fields(Keys.Items))
        case JsString(Keys.Enum) => List(fields(Keys.Name).convertTo[String])
        case JsString(Keys.Record) => findReferredTypes(fields(Keys.Fields))
        case JsString(Keys.Map) => findReferredTypes(fields(Keys.Values))
        case nestedDefinition => findReferredTypes(nestedDefinition)
      }

    json match {
      // A bare string is a direct type reference (named type or primitive).
      case JsString(name) => List(name)
      // A JSON array models a union: gather references from every member.
      case JsArray(members) => members.toList.flatMap(findReferredTypes)
      case JsObject(fields) => referredByComplexType(fields)
      case _ => Nil
    }
  }
}
| julianpeeters/avrohugger | avrohugger-filesorter/src/main/scala/com/julianpeeters/avrohugger/filesorter/ReferredTypeFinder.scala | Scala | apache-2.0 | 1,334 |
package pspz3
package context
import com.microsoft.z3._
/** Scala operator syntax for Z3 bit-vector expressions.
  *
  * Signed and unsigned bit-vectors get separate enrichment classes because Z3
  * distinguishes signed from unsigned comparison, division and remainder;
  * sign-agnostic operations live in the shared [[ZBitsOps]] base.
  */
trait BitVecs extends AnyRef with HasContext {
  import ctx._
  // Z3 operations not yet surfaced through this DSL:
  // mkBVNegNoOverflow
  // mkBVAddNoOverflow
  // mkBVMulNoOverflow
  // mkBVSubNoOverflow
  // mkBVSDivNoOverflow
  // mkBVAddNoUnderflow
  // mkBVMulNoUnderflow
  // mkBVSubNoUnderflow
  // mkBVRedAND
  // mkBVRedOR
  /** Sign-dependent operators for bit-vectors tagged as signed. */
  implicit class SignedZBitsOps[A <: ZBits with Tagged[Signed]](val lhs: A) extends ZBitsOps[A] {
    def <(rhs: A): ZBool = mkBVSLT(lhs, rhs)
    def <=(rhs: A): ZBool = mkBVSLE(lhs, rhs)
    def >(rhs: A): ZBool = mkBVSGT(lhs, rhs)
    def >=(rhs: A): ZBool = mkBVSGE(lhs, rhs)
    def /(rhs: A): A = mkBVSDiv(lhs, rhs)
    def %(rhs: A): A = mkBVSRem(lhs, rhs)
    def unary_- : A = mkBVNeg(lhs)
  }
  /** Sign-dependent operators for bit-vectors tagged as unsigned. */
  implicit class UnsignedZBitsOps[A <: ZBits with Tagged[Unsigned]](val lhs: A) extends ZBitsOps[A] {
    def <(rhs: A): ZBool = mkBVULT(lhs, rhs)
    def <=(rhs: A): ZBool = mkBVULE(lhs, rhs)
    def >(rhs: A): ZBool = mkBVUGT(lhs, rhs)
    def >=(rhs: A): ZBool = mkBVUGE(lhs, rhs)
    def /(rhs: A): A = mkBVUDiv(lhs, rhs)
    def %(rhs: A): A = mkBVURem(lhs, rhs)
    // no unary minus on unsigned types
  }
  /** Operators whose Z3 encoding does not depend on signedness. */
  trait ZBitsOps[A <: ZBits] {
    val lhs: A
    /** Z3 builders return untyped bit-vector expressions; re-tag them as A. */
    protected implicit def mkA(x: ZBits): A = x.as[A]
    def <(rhs: A): ZBool
    def <=(rhs: A): ZBool
    def >(rhs: A): ZBool
    def >=(rhs: A): ZBool
    def /(rhs: A): A
    def %(rhs: A): A
    def +(rhs: A): A = mkBVAdd(lhs, rhs)
    def -(rhs: A): A = mkBVSub(lhs, rhs)
    def *(rhs: A): A = mkBVMul(lhs, rhs)
    def &(rhs: A): A = mkBVAND(lhs, rhs)
    def |(rhs: A): A = mkBVOR(lhs, rhs)
    def nand(rhs: A): A = mkBVNAND(lhs, rhs)
    def nor(rhs: A): A = mkBVNOR(lhs, rhs)
    def xnor(rhs: A): A = mkBVXNOR(lhs, rhs)
    def xor(rhs: A): A = mkBVXOR(lhs, rhs)
    def toInt: ZInt = mkBV2Int(lhs, true) // signed = true
    def toUnsignedInt: ZInt = mkBV2Int(lhs, false)
    def unary_~ : A = mkBVNot(lhs)
    def <<(n: A): A = mkBVSHL(lhs, n)
    // FIX: in Scala/Java, `>>` is the arithmetic (sign-extending) shift and
    // `>>>` is the logical (zero-filling) shift. The previous mapping was
    // reversed (`>>` -> mkBVLSHR, `>>>` -> mkBVASHR), which would silently
    // give wrong semantics to any caller expecting Java conventions.
    def >>(n: A): A = mkBVASHR(lhs, n)
    def >>>(n: A): A = mkBVLSHR(lhs, n)
    def rotl(n: A): A = mkBVRotateLeft(lhs, n)
    def rotr(n: A): A = mkBVRotateRight(lhs, n)
  }
}
| paulp/pspz3 | src/main/scala/context/bitvecs.scala | Scala | mit | 2,205 |
package edu.gemini.sp.vcs.log
import edu.gemini.spModel.core.SPProgramID
import edu.gemini.util.security.principal.GeminiPrincipal
case class VcsEvent(id:Int, op: VcsOp, timestamp:Long, pid:SPProgramID, principals:Set[GeminiPrincipal])
| arturog8m/ocs | bundle/edu.gemini.sp.vcs.log/src/main/scala/edu/gemini/sp/vcs/log/VcsEvent.scala | Scala | bsd-3-clause | 239 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.tensor.Tensor
import org.scalatest.{FlatSpec, Matchers}
import com.intel.analytics.bigdl.utils.RandomGenerator._
import com.intel.analytics.bigdl.utils.Table
import com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest
import scala.util.Random
@com.intel.analytics.bigdl.tags.Parallel
class CosineSpec extends FlatSpec with Matchers {
  // setScaleW must only scale the accumulated weight gradient: the forward
  // output and the input gradient have to stay identical to the unscaled layer.
  "A CosineSpec with scaleW" should "work correctly" in {
    // Fixed seed so both layers start from the same random weights; the
    // assertions below depend on this exact initialization order.
    val seed = 100
    RNG.setSeed(seed)
    val input = Tensor[Double](1).apply1(_ => Random.nextDouble())
    val gradOutput = Tensor[Double](2).apply1(_ => Random.nextDouble())
    val layer1 = new Cosine[Double](1, 2)
    val layer2 = new Cosine[Double](1, 2)
    // Copy layer1's weights into layer2 so the only difference is the scale.
    val (weights, grad) = layer1.getParameters()
    val (w, g) = layer2.getParameters()
    w.copy(weights)
    layer2.setScaleW(2)
    val output1 = layer1.forward(input)
    val output2 = layer2.forward(input)
    val gradInput1 = layer1.backward(input, gradOutput)
    val gradInput2 = layer2.backward(input, gradOutput)
    output1 should be (output2)
    gradInput1 should be (gradInput2)
    // NOTE: mul(2) mutates layer1.gradWeight in place; it is evaluated before
    // the comparison, so layer2's gradient is checked against the doubled value.
    layer2.gradWeight should be (layer1.gradWeight.mul(2))
  }
}
/** Round-trips a Cosine module through serialization and verifies that the
  * restored module behaves the same on a random input. */
class CosineSerialTest extends ModuleSerializationTest {
  override def test(): Unit = {
    val module = Cosine[Float](5, 5).setName("cosine")
    val sample = Tensor[Float](5).apply1(_ => Random.nextFloat())
    runSerializationTest(module, sample)
  }
}
| yiheng/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/CosineSpec.scala | Scala | apache-2.0 | 2,068 |
package models.cfs
import java.nio.ByteBuffer
import java.util.UUID
import akka.util._
import com.websudos.phantom.dsl._
import helpers._
import models.cassandra._
import play.api.libs.iteratee._
import scala.concurrent.Future
import scala.concurrent.duration._
/**
* @author zepeng.li@gmail.com
*/
/** One chunk of stored file content, keyed by the indirect block that owns it
  * and its offset (in bytes, presumably - confirm against writers) within
  * that indirect block. */
case class Block(
  indirect_block_id: UUID,
  offset: Long,
  data: ByteBuffer
)
// Supplies the canonical name ("blocks") shared by the table and its companion.
trait BlockCanonicalNamed extends CanonicalNamed {
  override val basicName = "blocks"
}
// phantom table mapping: PRIMARY KEY ((indirect_block_id), offset ASC), so all
// blocks of one indirect block share a partition and are ordered by offset.
sealed class BlockTable
  extends NamedCassandraTable[BlockTable, Block]
  with BlockCanonicalNamed {
  // Partition key: the indirect block that owns this chunk.
  object indirect_block_id
    extends TimeUUIDColumn(this)
    with PartitionKey[UUID]
  // Clustering key: byte offset of the chunk, ascending for sequential reads.
  object offset
    extends LongColumn(this)
    with ClusteringOrder[Long] with Ascending
  object data
    extends BlobColumn(this)
  // Maps a fetched row back to the Block case class.
  override def fromRow(r: Row): Block = {
    Block(indirect_block_id(r), offset(r), data(r))
  }
}
object Block extends BlockCanonicalNamed {
  // A block's payload as handed to/from callers: an immutable byte string.
  type BLK = ByteString
}
/**
 * Data-access layer for file-content blocks: streams a file's bytes back as an
 * Enumerator of ByteStrings and stores one block per Cassandra row.
 */
class Blocks(
  implicit
  val basicPlayApi: BasicPlayApi,
  val keySpaceDef: KeySpaceDef
) extends BlockTable
  with ExtCQL[BlockTable, Block]
  with BasicPlayComponents
  with CassandraComponents
  with BootingProcess
  with Logging {
  // Create the table (if missing) before the service accepts traffic.
  onStart(CQL(create.ifNotExists).future())
  import Block._
  // Stream every block of the given indirect block, in offset order.
  def read(ind_blk_id: UUID): Enumerator[BLK] = {
    select(_.data)
      .where(_.indirect_block_id eqs ind_blk_id)
      .fetchEnumerator().map(ByteString.fromByteBuffer)
  }
  // Stream from `offset` onward: fetch the block containing `offset` (aligned
  // down to a multiple of blk_sz), drop the intra-block prefix, then append
  // every later block. NOTE(review): if the aligned block is absent the head
  // is empty but later blocks are still streamed - confirm that is intended.
  def read(ind_blk_id: UUID, offset: Long, blk_sz: Int): Enumerator[BLK] = {
    Enumerator.flatten(
      select(_.data)
        .where(_.indirect_block_id eqs ind_blk_id)
        .and(_.offset eqs offset - offset % blk_sz)
        .one().map(_.map(ByteString.fromByteBuffer))
        .map {
          case None => Enumerator.empty[BLK]
          case Some(blk) => Enumerator(blk.drop((offset % blk_sz).toInt))
        }.map {
          _ >>> select(_.data)
            .where(_.indirect_block_id eqs ind_blk_id)
            .and(_.offset gt offset - offset % blk_sz)
            .fetchEnumerator().map(ByteString.fromByteBuffer)
        }
    )
  }
  // Insert one block; a finite ttl makes the row expire (used for temp data,
  // presumably - confirm against callers). Duration.Inf stores it permanently.
  def write(
    ind_blk_id: UUID, blk_id: Long, blk: BLK, ttl: Duration = Duration.Inf
  ): Future[ResultSet] = {
    val cql =
      insert
        .value(_.indirect_block_id, ind_blk_id)
        .value(_.offset, blk_id)
        .value(_.data, blk.toByteBuffer)
    (ttl match {
      case t: FiniteDuration => cql.ttl(t)
      case _ => cql
    }).future()
  }
  // Delete every block belonging to the given indirect block.
  def purge(ind_blk_id: UUID): Future[ResultSet] = {
    delete.where(_.indirect_block_id eqs ind_blk_id).future()
  }
}
} | lizepeng/app.io | modules/models/app/models/cfs/Block.scala | Scala | apache-2.0 | 2,613 |
package singleton.ops
import org.scalacheck.Properties
import shapeless.test.illTyped
import singleton.TestUtils._
/** Exhaustive type-level checks for the singleton `/` operator: numeric
  * operand pairs must compute 12 / 2 = 6 in the appropriate result type,
  * while String and Boolean operands must fail to compile (asserted with
  * shapeless's illTyped, which proves the quoted expression does not typecheck). */
class DivSpec extends Properties("/") {
  type OP[L,R] = /[L,R]
  // Left operands: the value 12 in every supported literal type.
  type leftNat = shapeless.Nat._12
  type leftChar = W.`'\\u000C'`.T
  type leftInt = W.`12`.T
  type leftLong = W.`12L`.T
  type leftFloat = W.`12.0f`.T
  type leftDouble = W.`12.0`.T
  type leftString = W.`"Something"`.T
  type leftBoolean = True
  // Right operands: the value 2 in every supported literal type.
  type rightNat = shapeless.Nat._2
  type rightChar = W.`'\\u0002'`.T
  type rightInt = W.`2`.T
  type rightLong = W.`2L`.T
  type rightFloat = W.`2.0f`.T
  type rightDouble = W.`2.0`.T
  type rightString = W.`"Else"`.T
  type rightBoolean = False
  // Expected quotients, one per result type.
  type resultInt = W.`6`.T
  type resultLong = W.`6L`.T
  type resultFloat = W.`6.0f`.T
  type resultDouble = W.`6.0`.T
  ////////////////////////////////////////////////////////////////////////
  // Nat op XXX
  ////////////////////////////////////////////////////////////////////////
  property("Nat, Nat arguments") = verifyOp2Args[OP,leftNat,rightNat,resultInt]
  property("Nat, Int arguments") = verifyOp2Args[OP,leftNat,rightInt,resultInt]
  property("Nat, String arguments") = {illTyped("""implicitly[OP[leftNat,rightString]]"""); true}
  property("Nat, Boolean arguments") = {illTyped("""implicitly[OP[leftNat,rightBoolean]]"""); true}
  ////////////////////////////////////////////////////////////////////////
  ////////////////////////////////////////////////////////////////////////
  // Char op XXX
  ////////////////////////////////////////////////////////////////////////
  property("Char, Char arguments") = verifyOp2Args[OP,leftChar,rightChar,resultInt]
  property("Char, String arguments") = {illTyped("""implicitly[OP[leftChar,rightString]]"""); true}
  property("Char, Boolean arguments") = {illTyped("""implicitly[OP[leftChar,rightBoolean]]"""); true}
  ////////////////////////////////////////////////////////////////////////
  ////////////////////////////////////////////////////////////////////////
  // Int op XXX
  ////////////////////////////////////////////////////////////////////////
  property("Int, Nat arguments") = verifyOp2Args[OP,leftInt,rightNat,resultInt]
  property("Int, Int arguments") = verifyOp2Args[OP,leftInt,rightInt,resultInt]
  property("Int, String arguments") = {illTyped("""implicitly[OP[leftInt,rightString]]"""); true}
  property("Int, Boolean arguments") = {illTyped("""implicitly[OP[leftInt,rightBoolean]]"""); true}
  ////////////////////////////////////////////////////////////////////////
  ////////////////////////////////////////////////////////////////////////
  // Long op XXX
  ////////////////////////////////////////////////////////////////////////
  property("Long, Long arguments") = verifyOp2Args[OP,leftLong,rightLong,resultLong]
  property("Long, String arguments") = {illTyped("""implicitly[OP[leftLong,rightString]]"""); true}
  property("Long, Boolean arguments") = {illTyped("""implicitly[OP[leftLong,rightBoolean]]"""); true}
  ////////////////////////////////////////////////////////////////////////
  ////////////////////////////////////////////////////////////////////////
  // Float op XXX
  ////////////////////////////////////////////////////////////////////////
  property("Float, Float arguments") = verifyOp2Args[OP,leftFloat,rightFloat,resultFloat]
  property("Float, String arguments") = {illTyped("""implicitly[OP[leftFloat,rightString]]"""); true}
  property("Float, Boolean arguments") = {illTyped("""implicitly[OP[leftFloat,rightBoolean]]"""); true}
  ////////////////////////////////////////////////////////////////////////
  ////////////////////////////////////////////////////////////////////////
  // Double op XXX
  ////////////////////////////////////////////////////////////////////////
  property("Double, Double arguments") = verifyOp2Args[OP,leftDouble,rightDouble,resultDouble]
  property("Double, String arguments") = {illTyped("""implicitly[OP[leftDouble,rightString]]"""); true}
  property("Double, Boolean arguments") = {illTyped("""implicitly[OP[leftDouble,rightBoolean]]"""); true}
  ////////////////////////////////////////////////////////////////////////
  ////////////////////////////////////////////////////////////////////////
  // String op XXX - division is never defined on strings.
  ////////////////////////////////////////////////////////////////////////
  property("String, Nat arguments") = {illTyped("""implicitly[OP[leftString,rightNat]]"""); true}
  property("String, Char arguments") = {illTyped("""implicitly[OP[leftString,rightChar]]"""); true}
  property("String, Int arguments") = {illTyped("""implicitly[OP[leftString,rightInt]]"""); true}
  property("String, Long arguments") = {illTyped("""implicitly[OP[leftString,rightLong]]"""); true}
  property("String, Float arguments") = {illTyped("""implicitly[OP[leftString,rightFloat]]"""); true}
  property("String, Double arguments") = {illTyped("""implicitly[OP[leftString,rightDouble]]"""); true}
  property("String, String arguments") = {illTyped("""implicitly[OP[leftString,rightString]]"""); true}
  property("String, Boolean arguments") = {illTyped("""implicitly[OP[leftString,rightBoolean]]"""); true}
  ////////////////////////////////////////////////////////////////////////
  ////////////////////////////////////////////////////////////////////////
  // Boolean op XXX - division is never defined on booleans.
  ////////////////////////////////////////////////////////////////////////
  property("Boolean, Nat arguments") = {illTyped("""implicitly[OP[leftBoolean,rightNat]]"""); true}
  property("Boolean, Char arguments") = {illTyped("""implicitly[OP[leftBoolean,rightChar]]"""); true}
  property("Boolean, Int arguments") = {illTyped("""implicitly[OP[leftBoolean,rightInt]]"""); true}
  property("Boolean, Long arguments") = {illTyped("""implicitly[OP[leftBoolean,rightLong]]"""); true}
  property("Boolean, Float arguments") = {illTyped("""implicitly[OP[leftBoolean,rightFloat]]"""); true}
  property("Boolean, Double arguments") = {illTyped("""implicitly[OP[leftBoolean,rightDouble]]"""); true}
  property("Boolean, String arguments") = {illTyped("""implicitly[OP[leftBoolean,rightString]]"""); true}
  property("Boolean, Boolean arguments") = {illTyped("""implicitly[OP[leftBoolean,rightBoolean]]"""); true}
  ////////////////////////////////////////////////////////////////////////
}
| soronpo/singleton-ops | src/test/scala/singleton/ops/DivSpec.scala | Scala | apache-2.0 | 6,280 |
// This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
package ducttape.hyperdag.meta
import collection._
import ducttape.hyperdag._
/** See [[ducttape.hyperdag.meta.MetaHyperDag]] for definitions of a MetaHyperDag, etc.
*
* epsilonValue is just a dummy value that will never be given back to the user
* (needed since we can't directly created objects with generic types)
*
* <img src="x.gif" /> */
class MetaHyperDagBuilder[V,M,H,E](epsilonV: V = null, epsilonH: H = null, epsilonE: E = null) {
  // The meta structure is layered on a plain HyperDAG by inserting one epsilon
  // vertex per meta-edge, standing between its hyperedges and the real sink.
  private val delegate = new HyperDagBuilder[V,H,E]
  // Lets build() recover which meta-edge an epsilon vertex stands for.
  private val metaEdgesByEpsilon = new mutable.HashMap[PackedVertex[_], MetaEdge[M,H,E]]
  // Sink -> all epsilon vertices feeding it; consumed in build().
  private val metaEdgeSinks = new mutable.HashMap[PackedVertex[V], mutable.ArrayBuffer[PackedVertex[V]]]
  def addVertex(v: V, comment: Option[String] = None): PackedVertex[V] = delegate.addVertex(v, comment)
  /** hyperEdgeInfo is a sequence of component hyperedges, represented by pairs of (H, he_info)
   * where he_info is a sequence of the component edges of each hyperedge
   * (see [[ducttape.hyperdag.HyperDagBuilder.addEdge]] for more) */
  def addMetaEdge(m: M,
                  hyperEdgeInfo: Seq[(H, Seq[(PackedVertex[V],E)])],
                  sink: PackedVertex[V],
                  comment: Option[String]): MetaEdge[M,H,E] = {
    // TODO: Don't always alternate between normal and epsilon vertices to save some memory
    // All component hyperedges point at this fresh epsilon vertex, not at sink.
    val meEpsilonV = delegate.addVertex(epsilonV, comment=comment)
    val hyperedges: Seq[HyperEdge[H,E]] = hyperEdgeInfo.map { heInfo =>
      val (h, edgeInfo) = heInfo
      edgeInfo.foreach { case (srcV, e) => assert(srcV != sink, "This meta-edge would create a cycle") }
      delegate.addHyperEdge(h, edgeInfo, meEpsilonV)
    }
    // associate this epsilon vertex with its metaedge
    val me = new MetaEdge[M,H,E](meEpsilonV, m, hyperedges)
    metaEdgesByEpsilon += meEpsilonV -> me
    // delay adding the meta-edges to the delegate builder
    // until we know we've accumulated all of them
    metaEdgeSinks.getOrElseUpdate(sink, {new mutable.ArrayBuffer[PackedVertex[V]]}) += meEpsilonV
    me
  }
  // create a usable immutable representation of this MetaHyperDag
  def build() = {
    // add single hyperedge that represents all incoming meta-edges
    // for all non-epsilon vertices
    val epsilonEdges: Set[HyperEdge[H,E]] = metaEdgeSinks.map { case (sink, epsilonVertices) =>
      val epsilonParents = epsilonVertices.map { v => (v, epsilonE) }
      delegate.addHyperEdge(epsilonH, epsilonParents, sink)
    }.toSet
    new MetaHyperDag[V,M,H,E](delegate.build(), metaEdgesByEpsilon, epsilonEdges)
  }
}
| jhclark/ducttape | src/main/scala/ducttape/hyperdag/meta/MetaHyperDagBuilder.scala | Scala | mpl-2.0 | 2,783 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import java.util.Random
import scala.collection.{mutable, Map}
import scala.collection.mutable.ArrayBuffer
import scala.io.Codec
import scala.language.implicitConversions
import scala.ref.WeakReference
import scala.reflect.{classTag, ClassTag}
import scala.util.hashing
import com.clearspring.analytics.stream.cardinality.HyperLogLogPlus
import org.apache.hadoop.io.{BytesWritable, NullWritable, Text}
import org.apache.hadoop.io.compress.CompressionCodec
import org.apache.hadoop.mapred.TextOutputFormat
import org.apache.spark._
import org.apache.spark.Partitioner._
import org.apache.spark.annotation.{DeveloperApi, Experimental, Since}
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.RDD_LIMIT_SCALE_UP_FACTOR
import org.apache.spark.partial.BoundedDouble
import org.apache.spark.partial.CountEvaluator
import org.apache.spark.partial.GroupedCountEvaluator
import org.apache.spark.partial.PartialResult
import org.apache.spark.resource.ResourceProfile
import org.apache.spark.storage.{RDDBlockId, StorageLevel}
import org.apache.spark.util.{BoundedPriorityQueue, Utils}
import org.apache.spark.util.collection.{ExternalAppendOnlyMap, OpenHashMap,
Utils => collectionUtils}
import org.apache.spark.util.random.{BernoulliCellSampler, BernoulliSampler, PoissonSampler,
SamplingUtils}
/**
* A Resilient Distributed Dataset (RDD), the basic abstraction in Spark. Represents an immutable,
* partitioned collection of elements that can be operated on in parallel. This class contains the
* basic operations available on all RDDs, such as `map`, `filter`, and `persist`. In addition,
* [[org.apache.spark.rdd.PairRDDFunctions]] contains operations available only on RDDs of key-value
* pairs, such as `groupByKey` and `join`;
* [[org.apache.spark.rdd.DoubleRDDFunctions]] contains operations available only on RDDs of
* Doubles; and
* [[org.apache.spark.rdd.SequenceFileRDDFunctions]] contains operations available on RDDs that
* can be saved as SequenceFiles.
* All operations are automatically available on any RDD of the right type (e.g. RDD[(Int, Int)])
* through implicit.
*
* Internally, each RDD is characterized by five main properties:
*
* - A list of partitions
* - A function for computing each split
* - A list of dependencies on other RDDs
* - Optionally, a Partitioner for key-value RDDs (e.g. to say that the RDD is hash-partitioned)
* - Optionally, a list of preferred locations to compute each split on (e.g. block locations for
* an HDFS file)
*
* All of the scheduling and execution in Spark is done based on these methods, allowing each RDD
* to implement its own way of computing itself. Indeed, users can implement custom RDDs (e.g. for
* reading data from a new storage system) by overriding these functions. Please refer to the
* <a href="http://people.csail.mit.edu/matei/papers/2012/nsdi_spark.pdf">Spark paper</a>
* for more details on RDD internals.
*/
abstract class RDD[T: ClassTag](
@transient private var _sc: SparkContext,
@transient private var deps: Seq[Dependency[_]]
) extends Serializable with Logging {
if (classOf[RDD[_]].isAssignableFrom(elementClassTag.runtimeClass)) {
// This is a warning instead of an exception in order to avoid breaking user programs that
// might have defined nested RDDs without running jobs with them.
logWarning("Spark does not support nested RDDs (see SPARK-5063)")
}
  // Fails fast with an actionable message when this RDD has lost its driver-side
  // SparkContext (e.g. it was deserialized inside another transformation's
  // closure, or recovered from a streaming checkpoint).
  private def sc: SparkContext = {
    if (_sc == null) {
      throw new SparkException(
        "This RDD lacks a SparkContext. It could happen in the following cases: \\n(1) RDD " +
          "transformations and actions are NOT invoked by the driver, but inside of other " +
          "transformations; for example, rdd1.map(x => rdd2.values.count() * x) is invalid " +
          "because the values transformation and count action cannot be performed inside of the " +
          "rdd1.map transformation. For more information, see SPARK-5063.\\n(2) When a Spark " +
          "Streaming job recovers from checkpoint, this exception will be hit if a reference to " +
          "an RDD not defined by the streaming job is used in DStream operations. For more " +
          "information, See SPARK-13758.")
    }
    _sc
  }
/** Construct an RDD with just a one-to-one dependency on one parent */
def this(@transient oneParent: RDD[_]) =
this(oneParent.context, List(new OneToOneDependency(oneParent)))
private[spark] def conf = sc.conf
// =======================================================================
// Methods that should be implemented by subclasses of RDD
// =======================================================================
/**
* :: DeveloperApi ::
* Implemented by subclasses to compute a given partition.
*/
@DeveloperApi
def compute(split: Partition, context: TaskContext): Iterator[T]
/**
* Implemented by subclasses to return the set of partitions in this RDD. This method will only
* be called once, so it is safe to implement a time-consuming computation in it.
*
* The partitions in this array must satisfy the following property:
* `rdd.partitions.zipWithIndex.forall { case (partition, index) => partition.index == index }`
*/
protected def getPartitions: Array[Partition]
/**
* Implemented by subclasses to return how this RDD depends on parent RDDs. This method will only
* be called once, so it is safe to implement a time-consuming computation in it.
*/
protected def getDependencies: Seq[Dependency[_]] = deps
/**
* Optionally overridden by subclasses to specify placement preferences.
*/
protected def getPreferredLocations(split: Partition): Seq[String] = Nil
/** Optionally overridden by subclasses to specify how they are partitioned. */
@transient val partitioner: Option[Partitioner] = None
// =======================================================================
// Methods and fields available on all RDDs
// =======================================================================
/** The SparkContext that created this RDD. */
def sparkContext: SparkContext = sc
/** A unique ID for this RDD (within its SparkContext). */
val id: Int = sc.newRddId()
/** A friendly name for this RDD */
@transient var name: String = _
/** Assign a name to this RDD */
def setName(_name: String): this.type = {
name = _name
this
}
  /**
   * Mark this RDD for persisting using the specified level.
   *
   * @param newLevel the target storage level
   * @param allowOverride whether to override any existing level with the new one
   *                      (set by localCheckpoint(), which must upgrade the level)
   */
  private def persist(newLevel: StorageLevel, allowOverride: Boolean): this.type = {
    // TODO: Handle changes of StorageLevel
    if (storageLevel != StorageLevel.NONE && newLevel != storageLevel && !allowOverride) {
      throw new UnsupportedOperationException(
        "Cannot change storage level of an RDD after it was already assigned a level")
    }
    // If this is the first time this RDD is marked for persisting, register it
    // with the SparkContext for cleanups and accounting. Do this only once.
    if (storageLevel == StorageLevel.NONE) {
      sc.cleaner.foreach(_.registerRDDForCleanup(this))
      sc.persistRDD(this)
    }
    storageLevel = newLevel
    this
  }
  /**
   * Set this RDD's storage level to persist its values across operations after the first time
   * it is computed. This can only be used to assign a new storage level if the RDD does not
   * have a storage level set yet. Local checkpointing is an exception.
   */
  def persist(newLevel: StorageLevel): this.type = {
    if (isLocallyCheckpointed) {
      // This means the user previously called localCheckpoint(), which should have already
      // marked this RDD for persisting. Here we should override the old storage level with
      // one that is explicitly requested by the user (after adapting it to use disk).
      persist(LocalRDDCheckpointData.transformStorageLevel(newLevel), allowOverride = true)
    } else {
      // Normal path: refuse to silently change an already-assigned level.
      persist(newLevel, allowOverride = false)
    }
  }
/**
* Persist this RDD with the default storage level (`MEMORY_ONLY`).
*/
def persist(): this.type = persist(StorageLevel.MEMORY_ONLY)
/**
* Persist this RDD with the default storage level (`MEMORY_ONLY`).
*/
def cache(): this.type = persist()
  /**
   * Mark the RDD as non-persistent, and remove all blocks for it from memory and disk.
   *
   * @param blocking Whether to block until all blocks are deleted (default: false)
   * @return This RDD.
   */
  def unpersist(blocking: Boolean = false): this.type = {
    logInfo(s"Removing RDD $id from persistence list")
    sc.unpersistRDD(id, blocking)
    // Reset so a later persist() call is allowed to assign a new level.
    storageLevel = StorageLevel.NONE
    this
  }
/** Get the RDD's current storage level, or StorageLevel.NONE if none is set. */
def getStorageLevel: StorageLevel = storageLevel
/**
* Lock for all mutable state of this RDD (persistence, partitions, dependencies, etc.). We do
* not use `this` because RDDs are user-visible, so users might have added their own locking on
* RDDs; sharing that could lead to a deadlock.
*
* One thread might hold the lock on many of these, for a chain of RDD dependencies; but
* because DAGs are acyclic, and we only ever hold locks for one path in that DAG, there is no
* chance of deadlock.
*
* Executors may reference the shared fields (though they should never mutate them,
* that only happens on the driver).
*/
private val stateLock = new Serializable {}
// Our dependencies and partitions will be gotten by calling subclass's methods below, and will
// be overwritten when we're checkpointed
@volatile private var dependencies_ : Seq[Dependency[_]] = _
// When we overwrite the dependencies we keep a weak reference to the old dependencies
// for user controlled cleanup.
@volatile @transient private var legacyDependencies: WeakReference[Seq[Dependency[_]]] = _
@volatile @transient private var partitions_ : Array[Partition] = _
/** An Option holding our checkpoint RDD, if we are checkpointed */
private def checkpointRDD: Option[CheckpointRDD[T]] = checkpointData.flatMap(_.checkpointRDD)
  /**
   * Get the list of dependencies of this RDD, taking into account whether the
   * RDD is checkpointed or not.
   */
  final def dependencies: Seq[Dependency[_]] = {
    // Once checkpointed, the lineage is truncated: the only dependency is a
    // one-to-one edge onto the checkpoint RDD.
    checkpointRDD.map(r => List(new OneToOneDependency(r))).getOrElse {
      // Double-checked locking on stateLock so getDependencies runs at most once.
      if (dependencies_ == null) {
        stateLock.synchronized {
          if (dependencies_ == null) {
            dependencies_ = getDependencies
          }
        }
      }
      dependencies_
    }
  }
  /**
   * Get the list of dependencies of this RDD ignoring checkpointing.
   * Unlike `dependencies`, this never substitutes the checkpoint RDD; after
   * checkpoint truncation the old dependencies are only reachable through the
   * weak reference, so this may return None once they are collected.
   */
  final private def internalDependencies: Option[Seq[Dependency[_]]] = {
    if (legacyDependencies != null) {
      legacyDependencies.get
    } else if (dependencies_ != null) {
      Some(dependencies_)
    } else {
      // This case should be infrequent.
      stateLock.synchronized {
        if (dependencies_ == null) {
          dependencies_ = getDependencies
        }
        Some(dependencies_)
      }
    }
  }
  /**
   * Get the array of partitions of this RDD, taking into account whether the
   * RDD is checkpointed or not.
   */
  final def partitions: Array[Partition] = {
    // A checkpointed RDD delegates to the checkpoint's partitions.
    checkpointRDD.map(_.partitions).getOrElse {
      // Double-checked locking: getPartitions may be expensive, run it once.
      if (partitions_ == null) {
        stateLock.synchronized {
          if (partitions_ == null) {
            partitions_ = getPartitions
            // Enforce the documented invariant that partition i reports index i.
            partitions_.zipWithIndex.foreach { case (partition, index) =>
              require(partition.index == index,
                s"partitions($index).partition == ${partition.index}, but it should equal $index")
            }
          }
        }
      }
      partitions_
    }
  }
/**
* Returns the number of partitions of this RDD.
*/
@Since("1.6.0")
final def getNumPartitions: Int = partitions.length
  /**
   * Get the preferred locations of a partition, taking into account whether the
   * RDD is checkpointed. The checkpoint's locations win because the data now
   * lives where the checkpoint wrote it.
   */
  final def preferredLocations(split: Partition): Seq[String] = {
    checkpointRDD.map(_.getPreferredLocations(split)).getOrElse {
      getPreferredLocations(split)
    }
  }
  /**
   * Internal method to this RDD; will read from cache if applicable, or otherwise compute it.
   * This should ''not'' be called by users directly, but is available for implementors of custom
   * subclasses of RDD.
   */
  final def iterator(split: Partition, context: TaskContext): Iterator[T] = {
    if (storageLevel != StorageLevel.NONE) {
      // Persisted RDD: go through the block manager (cache hit or compute+store).
      getOrCompute(split, context)
    } else {
      // Not persisted: compute directly (or read the checkpoint if materialized).
      computeOrReadCheckpoint(split, context)
    }
  }
/**
* Return the ancestors of the given RDD that are related to it only through a sequence of
* narrow dependencies. This traverses the given RDD's dependency tree using DFS, but maintains
* no ordering on the RDDs returned.
*/
private[spark] def getNarrowAncestors: Seq[RDD[_]] = {
val ancestors = new mutable.HashSet[RDD[_]]
def visit(rdd: RDD[_]): Unit = {
val narrowDependencies = rdd.dependencies.filter(_.isInstanceOf[NarrowDependency[_]])
val narrowParents = narrowDependencies.map(_.rdd)
val narrowParentsNotVisited = narrowParents.filterNot(ancestors.contains)
narrowParentsNotVisited.foreach { parent =>
ancestors.add(parent)
visit(parent)
}
}
visit(this)
// In case there is a cycle, do not include the root itself
ancestors.filterNot(_ == this).toSeq
}
  /**
   * Compute an RDD partition or read it from a checkpoint if the RDD is checkpointing.
   */
  private[spark] def computeOrReadCheckpoint(split: Partition, context: TaskContext): Iterator[T] =
  {
    if (isCheckpointedAndMaterialized) {
      // After materialization the first (and only) parent is the checkpoint RDD.
      firstParent[T].iterator(split, context)
    } else {
      compute(split, context)
    }
  }
  /**
   * Gets or computes an RDD partition. Used by RDD.iterator() when an RDD is cached.
   */
  private[spark] def getOrCompute(partition: Partition, context: TaskContext): Iterator[T] = {
    val blockId = RDDBlockId(id, partition.index)
    // Flipped to false inside the compute closure, so after the call it tells
    // us whether the block manager served an existing block or ran compute.
    var readCachedBlock = true
    // This method is called on executors, so we need call SparkEnv.get instead of sc.env.
    SparkEnv.get.blockManager.getOrElseUpdate(blockId, storageLevel, elementClassTag, () => {
      readCachedBlock = false
      computeOrReadCheckpoint(partition, context)
    }) match {
      // Block hit.
      case Left(blockResult) =>
        if (readCachedBlock) {
          // Genuine cache hit: account for it in the task's input metrics.
          val existingMetrics = context.taskMetrics().inputMetrics
          existingMetrics.incBytesRead(blockResult.bytes)
          new InterruptibleIterator[T](context, blockResult.data.asInstanceOf[Iterator[T]]) {
            override def next(): T = {
              existingMetrics.incRecordsRead(1)
              delegate.next()
            }
          }
        } else {
          // We just computed and stored the block; don't count it as input read.
          new InterruptibleIterator(context, blockResult.data.asInstanceOf[Iterator[T]])
        }
      // Need to compute the block.
      case Right(iter) =>
        new InterruptibleIterator(context, iter.asInstanceOf[Iterator[T]])
    }
  }
/**
* Execute a block of code in a scope such that all new RDDs created in this body will
* be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}.
*
* Note: Return statements are NOT allowed in the given body.
*/
private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](sc)(body)
// Transformations (return a new RDD)
  /**
   * Return a new RDD by applying a function to all elements of this RDD.
   */
  def map[U: ClassTag](f: T => U): RDD[U] = withScope {
    // Clean the closure on the driver so serialization problems fail fast.
    val cleanF = sc.clean(f)
    new MapPartitionsRDD[U, T](this, (_, _, iter) => iter.map(cleanF))
  }
/**
* Return a new RDD by first applying a function to all elements of this
* RDD, and then flattening the results.
*/
def flatMap[U: ClassTag](f: T => TraversableOnce[U]): RDD[U] = withScope {
  // Clean the closure once up front so it can be serialized and shipped to executors.
  val cleaned = sc.clean(f)
  new MapPartitionsRDD[U, T](this, (_, _, elems) => elems.flatMap(cleaned))
}
/**
* Return a new RDD containing only the elements that satisfy a predicate.
*/
def filter(f: T => Boolean): RDD[T] = withScope {
  val cleaned = sc.clean(f)
  // Filtering never moves an element between partitions, so any partitioner is preserved.
  new MapPartitionsRDD[T, T](
    this,
    (_, _, elems) => elems.filter(cleaned),
    preservesPartitioning = true)
}
/**
* Return a new RDD containing the distinct elements in this RDD.
*/
def distinct(numPartitions: Int)(implicit ord: Ordering[T] = null): RDD[T] = withScope {
  // Partition-local dedup that can spill to disk rather than holding the whole
  // partition's key set in memory.
  def removeDuplicatesInPartition(partition: Iterator[T]): Iterator[T] = {
    // Create an instance of external append only map which ignores values.
    val map = new ExternalAppendOnlyMap[T, Null, Null](
      createCombiner = _ => null,
      mergeValue = (a, b) => a,
      mergeCombiners = (a, b) => a)
    map.insertAll(partition.map(_ -> null))
    map.iterator.map(_._1)
  }
  partitioner match {
    // Already partitioned with the requested partition count: equal elements are
    // co-located, so a per-partition dedup suffices and no shuffle is needed.
    case Some(_) if numPartitions == partitions.length =>
      mapPartitions(removeDuplicatesInPartition, preservesPartitioning = true)
    // Otherwise shuffle by value so duplicates meet in the same partition.
    case _ => map(x => (x, null)).reduceByKey((x, _) => x, numPartitions).map(_._1)
  }
}
/**
* Return a new RDD containing the distinct elements in this RDD.
*/
def distinct(): RDD[T] = withScope {
  // Keep the current level of parallelism by default.
  distinct(numPartitions = partitions.length)
}
/**
* Return a new RDD that has exactly numPartitions partitions.
*
* Can increase or decrease the level of parallelism in this RDD. Internally, this uses
* a shuffle to redistribute data.
*
* If you are decreasing the number of partitions in this RDD, consider using `coalesce`,
* which can avoid performing a shuffle.
*/
def repartition(numPartitions: Int)(implicit ord: Ordering[T] = null): RDD[T] = withScope {
  // Always shuffles; callers that only shrink the partition count can call
  // coalesce(..., shuffle = false) directly to avoid the shuffle.
  coalesce(numPartitions, shuffle = true)
}
/**
* Return a new RDD that is reduced into `numPartitions` partitions.
*
* This results in a narrow dependency, e.g. if you go from 1000 partitions
* to 100 partitions, there will not be a shuffle, instead each of the 100
* new partitions will claim 10 of the current partitions. If a larger number
* of partitions is requested, it will stay at the current number of partitions.
*
* However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
* this may result in your computation taking place on fewer nodes than
* you like (e.g. one node in the case of numPartitions = 1). To avoid this,
* you can pass shuffle = true. This will add a shuffle step, but means the
* current upstream partitions will be executed in parallel (per whatever
* the current partitioning is).
*
* @note With shuffle = true, you can actually coalesce to a larger number
* of partitions. This is useful if you have a small number of partitions,
* say 100, potentially with a few partitions being abnormally large. Calling
* coalesce(1000, shuffle = true) will result in 1000 partitions with the
* data distributed using a hash partitioner. The optional partition coalescer
* passed in must be serializable.
*/
def coalesce(numPartitions: Int, shuffle: Boolean = false,
             partitionCoalescer: Option[PartitionCoalescer] = Option.empty)
            (implicit ord: Ordering[T] = null)
    : RDD[T] = withScope {
  require(numPartitions > 0, s"Number of partitions ($numPartitions) must be positive.")
  if (shuffle) {
    /** Distributes elements evenly across output partitions, starting from a random partition. */
    val distributePartition = (index: Int, items: Iterator[T]) => {
      // Seed from a hash of the input partition index: the starting output partition is
      // deterministic per input partition yet spread across the output space.
      var position = new Random(hashing.byteswap32(index)).nextInt(numPartitions)
      items.map { t =>
        // Note that the hash code of the key will just be the key itself. The HashPartitioner
        // will mod it with the number of total partitions.
        position = position + 1
        (position, t)
      }
    } : Iterator[(Int, T)]
    // include a shuffle step so that our upstream tasks are still distributed
    new CoalescedRDD(
      new ShuffledRDD[Int, T, T](
        mapPartitionsWithIndexInternal(distributePartition, isOrderSensitive = true),
        new HashPartitioner(numPartitions)),
      numPartitions,
      partitionCoalescer).values
  } else {
    // Narrow dependency: group existing partitions into numPartitions groups, no shuffle.
    new CoalescedRDD(this, numPartitions, partitionCoalescer)
  }
}
/**
* Return a sampled subset of this RDD.
*
* @param withReplacement can elements be sampled multiple times (replaced when sampled out)
* @param fraction expected size of the sample as a fraction of this RDD's size
* without replacement: probability that each element is chosen; fraction must be [0, 1]
* with replacement: expected number of times each element is chosen; fraction must be greater
* than or equal to 0
* @param seed seed for the random number generator
*
* @note This is NOT guaranteed to provide exactly the fraction of the count
* of the given [[RDD]].
*/
def sample(
    withReplacement: Boolean,
    fraction: Double,
    seed: Long = Utils.random.nextLong): RDD[T] = {
  // Validate eagerly, before entering the operation scope. (Previously the fraction was
  // checked a second time inside withScope with a different message; that inner require
  // was unreachable because this one fires first for any negative fraction.)
  require(fraction >= 0,
    s"Fraction must be nonnegative, but got ${fraction}")
  withScope {
    // Poisson sampling supports replacement (an element may be drawn multiple times);
    // Bernoulli sampling draws each element at most once.
    if (withReplacement) {
      new PartitionwiseSampledRDD[T, T](this, new PoissonSampler[T](fraction), true, seed)
    } else {
      new PartitionwiseSampledRDD[T, T](this, new BernoulliSampler[T](fraction), true, seed)
    }
  }
}
/**
* Randomly splits this RDD with the provided weights.
*
* @param weights weights for splits, will be normalized if they don't sum to 1
* @param seed random seed
*
* @return split RDDs in an array
*/
def randomSplit(
    weights: Array[Double],
    seed: Long = Utils.random.nextLong): Array[RDD[T]] = {
  require(weights.forall(_ >= 0),
    s"Weights must be nonnegative, but got ${weights.mkString("[", ",", "]")}")
  require(weights.sum > 0,
    s"Sum of weights must be positive, but got ${weights.mkString("[", ",", "]")}")
  withScope {
    // Normalize the weights into cumulative boundaries [0, w1, w1+w2, ..., 1] and
    // sample each adjacent [lb, ub) range with a shared seed so the splits partition
    // the RDD without overlap.
    val total = weights.sum
    val cumulative = weights.map(_ / total).scanLeft(0.0d)(_ + _)
    cumulative.sliding(2).map { bounds =>
      randomSampleWithRange(bounds(0), bounds(1), seed)
    }.toArray
  }
}
/**
* Internal method exposed for Random Splits in DataFrames. Samples an RDD given a probability
* range.
* @param lb lower bound to use for the Bernoulli sampler
* @param ub upper bound to use for the Bernoulli sampler
* @param seed the seed for the Random number generator
* @return A random sub-sample of the RDD without replacement.
*/
private[spark] def randomSampleWithRange(lb: Double, ub: Double, seed: Long): RDD[T] = {
  // Each partition seeds its sampler with (base seed + partition index): deterministic
  // overall, yet decorrelated across partitions.
  this.mapPartitionsWithIndex( { (partIndex, rows) =>
    val cellSampler = new BernoulliCellSampler[T](lb, ub)
    cellSampler.setSeed(seed + partIndex)
    cellSampler.sample(rows)
  }, isOrderSensitive = true, preservesPartitioning = true)
}
/**
* Return a fixed-size sampled subset of this RDD in an array
*
* @param withReplacement whether sampling is done with replacement
* @param num size of the returned sample
* @param seed seed for the random number generator
* @return sample of specified size in an array
*
* @note this method should only be used if the resulting array is expected to be small, as
* all the data is loaded into the driver's memory.
*/
def takeSample(
    withReplacement: Boolean,
    num: Int,
    seed: Long = Utils.random.nextLong): Array[T] = withScope {
  // Slack (in standard deviations) kept below Int.MaxValue so the over-sampled
  // intermediate collection cannot exceed array limits.
  val numStDev = 10.0
  require(num >= 0, "Negative number of elements requested")
  require(num <= (Int.MaxValue - (numStDev * math.sqrt(Int.MaxValue)).toInt),
    "Cannot support a sample size > Int.MaxValue - " +
    s"$numStDev * math.sqrt(Int.MaxValue)")
  if (num == 0) {
    new Array[T](0)
  } else {
    val initialCount = this.count()
    if (initialCount == 0) {
      new Array[T](0)
    } else {
      val rand = new Random(seed)
      if (!withReplacement && num >= initialCount) {
        // Requesting at least the whole RDD without replacement: return everything, shuffled.
        Utils.randomizeInPlace(this.collect(), rand)
      } else {
        // Over-sample by a computed fraction, retrying with fresh seeds in the unlikely
        // event the sample comes back smaller than requested.
        val fraction = SamplingUtils.computeFractionForSampleSize(num, initialCount,
          withReplacement)
        var samples = this.sample(withReplacement, fraction, rand.nextInt()).collect()
        // If the first sample didn't turn out large enough, keep trying to take samples;
        // this shouldn't happen often because we use a big multiplier for the initial size
        var numIters = 0
        while (samples.length < num) {
          logWarning(s"Needed to re-sample due to insufficient sample size. Repeat #$numIters")
          samples = this.sample(withReplacement, fraction, rand.nextInt()).collect()
          numIters += 1
        }
        // Shuffle before truncating so the returned prefix is a uniformly random subset.
        Utils.randomizeInPlace(samples, rand).take(num)
      }
    }
  }
}
/**
* Return the union of this RDD and another one. Any identical elements will appear multiple
* times (use `.distinct()` to eliminate them).
*/
def union(other: RDD[T]): RDD[T] = withScope {
  // Delegate to SparkContext.union; duplicates are intentionally kept.
  sc.union(this, other)
}
/**
* Return the union of this RDD and another one. Any identical elements will appear multiple
* times (use `.distinct()` to eliminate them).
*/
def ++(other: RDD[T]): RDD[T] = withScope {
  // Operator alias for `union`.
  this.union(other)
}
/**
* Return this RDD sorted by the given key function.
*/
def sortBy[K](
    f: (T) => K,
    ascending: Boolean = true,
    numPartitions: Int = this.partitions.length)
    (implicit ord: Ordering[K], ctag: ClassTag[K]): RDD[T] = withScope {
  // Key each element by f, sort the keyed RDD, then discard the keys again.
  val keyed = this.keyBy[K](f)
  val sorted = keyed.sortByKey(ascending, numPartitions)
  sorted.values
}
/**
* Return the intersection of this RDD and another one. The output will not contain any duplicate
* elements, even if the input RDDs did.
*
* @note This method performs a shuffle internally.
*/
def intersection(other: RDD[T]): RDD[T] = withScope {
  // Cogroup on the values themselves (with dummy null payloads) and keep only keys
  // that appear on both sides; cogroup's key dedup removes duplicates for free.
  val keyedThis = this.map(v => (v, null))
  val keyedOther = other.map(v => (v, null))
  keyedThis.cogroup(keyedOther)
    .filter { case (_, (lhs, rhs)) => lhs.nonEmpty && rhs.nonEmpty }
    .keys
}
/**
* Return the intersection of this RDD and another one. The output will not contain any duplicate
* elements, even if the input RDDs did.
*
* @note This method performs a shuffle internally.
*
* @param partitioner Partitioner to use for the resulting RDD
*/
def intersection(
    other: RDD[T],
    partitioner: Partitioner)(implicit ord: Ordering[T] = null): RDD[T] = withScope {
  // Same cogroup-based intersection as the no-arg variant, but shuffled with the
  // caller-provided partitioner.
  val keyedThis = this.map(v => (v, null))
  val keyedOther = other.map(v => (v, null))
  keyedThis.cogroup(keyedOther, partitioner)
    .filter { case (_, (lhs, rhs)) => lhs.nonEmpty && rhs.nonEmpty }
    .keys
}
/**
* Return the intersection of this RDD and another one. The output will not contain any duplicate
* elements, even if the input RDDs did. Performs a hash partition across the cluster
*
* @note This method performs a shuffle internally.
*
* @param numPartitions How many partitions to use in the resulting RDD
*/
def intersection(other: RDD[T], numPartitions: Int): RDD[T] = withScope {
  // Hash-partition the underlying shuffle with the requested partition count.
  val part = new HashPartitioner(numPartitions)
  intersection(other, part)
}
/**
* Return an RDD created by coalescing all elements within each partition into an array.
*/
def glom(): RDD[Array[T]] = withScope {
  // Each partition collapses into a single Array element.
  new MapPartitionsRDD[Array[T], T](this, (_, _, elems) => Iterator.single(elems.toArray))
}
/**
* Return the Cartesian product of this RDD and another one, that is, the RDD of all pairs of
* elements (a, b) where a is in `this` and b is in `other`.
*/
def cartesian[U: ClassTag](other: RDD[U]): RDD[(T, U)] = withScope {
  // All pairs (a, b) with a from this RDD and b from `other`.
  new CartesianRDD(sc, this, other)
}
/**
* Return an RDD of grouped items. Each group consists of a key and a sequence of elements
* mapping to that key. The ordering of elements within each group is not guaranteed, and
* may even differ each time the resulting RDD is evaluated.
*
* @note This operation may be very expensive. If you are grouping in order to perform an
* aggregation (such as a sum or average) over each key, using `PairRDDFunctions.aggregateByKey`
* or `PairRDDFunctions.reduceByKey` will provide much better performance.
*/
def groupBy[K](f: T => K)(implicit kt: ClassTag[K]): RDD[(K, Iterable[T])] = withScope {
  // Use the default partitioner derived from this RDD.
  groupBy[K](f, defaultPartitioner(this))
}
/**
* Return an RDD of grouped elements. Each group consists of a key and a sequence of elements
* mapping to that key. The ordering of elements within each group is not guaranteed, and
* may even differ each time the resulting RDD is evaluated.
*
* @note This operation may be very expensive. If you are grouping in order to perform an
* aggregation (such as a sum or average) over each key, using `PairRDDFunctions.aggregateByKey`
* or `PairRDDFunctions.reduceByKey` will provide much better performance.
*/
def groupBy[K](
    f: T => K,
    numPartitions: Int)(implicit kt: ClassTag[K]): RDD[(K, Iterable[T])] = withScope {
  // Hash-partition the groups across the requested number of partitions.
  val part = new HashPartitioner(numPartitions)
  groupBy(f, part)
}
/**
* Return an RDD of grouped items. Each group consists of a key and a sequence of elements
* mapping to that key. The ordering of elements within each group is not guaranteed, and
* may even differ each time the resulting RDD is evaluated.
*
* @note This operation may be very expensive. If you are grouping in order to perform an
* aggregation (such as a sum or average) over each key, using `PairRDDFunctions.aggregateByKey`
* or `PairRDDFunctions.reduceByKey` will provide much better performance.
*/
def groupBy[K](f: T => K, p: Partitioner)(implicit kt: ClassTag[K], ord: Ordering[K] = null)
    : RDD[(K, Iterable[T])] = withScope {
  // Pair each element with its computed key, then group by key with the given partitioner.
  val cleanedKeyFn = sc.clean(f)
  this.map(elem => (cleanedKeyFn(elem), elem)).groupByKey(p)
}
/**
* Return an RDD created by piping elements to a forked external process.
*/
def pipe(command: String): RDD[String] = withScope {
  // Convenience overload: delegates to the Seq[String] variant with no environment.
  // Similar to Runtime.exec(), if we are given a single string, split it into words
  // using a standard StringTokenizer (i.e. by spaces)
  pipe(PipedRDD.tokenize(command))
}
/**
* Return an RDD created by piping elements to a forked external process.
*/
def pipe(command: String, env: Map[String, String]): RDD[String] = withScope {
  // Convenience overload: delegates to the Seq[String] variant with the given environment.
  // Similar to Runtime.exec(), if we are given a single string, split it into words
  // using a standard StringTokenizer (i.e. by spaces)
  pipe(PipedRDD.tokenize(command), env)
}
/**
* Return an RDD created by piping elements to a forked external process. The resulting RDD
* is computed by executing the given process once per partition. All elements
* of each input partition are written to a process's stdin as lines of input separated
* by a newline. The resulting partition consists of the process's stdout output, with
* each line of stdout resulting in one element of the output partition. A process is invoked
* even for empty partitions.
*
* The print behavior can be customized by providing two functions.
*
* @param command command to run in forked process.
* @param env environment variables to set.
* @param printPipeContext Before piping elements, this function is called as an opportunity
* to pipe context data. Print line function (like out.println) will be
* passed as printPipeContext's parameter.
* @param printRDDElement Use this function to customize how to pipe elements. This function
* will be called with each RDD element as the 1st parameter, and the
* print line function (like out.println()) as the 2nd parameter.
* An example of pipe the RDD data of groupBy() in a streaming way,
* instead of constructing a huge String to concat all the elements:
* {{{
* def printRDDElement(record:(String, Seq[String]), f:String=>Unit) =
* for (e <- record._2) {f(e)}
* }}}
* @param separateWorkingDir Use separate working directories for each task.
* @param bufferSize Buffer size for the stdin writer for the piped process.
* @param encoding Char encoding used for interacting (via stdin, stdout and stderr) with
* the piped process
* @return the result RDD
*/
def pipe(
    command: Seq[String],
    env: Map[String, String] = Map(),
    printPipeContext: (String => Unit) => Unit = null,
    printRDDElement: (T, String => Unit) => Unit = null,
    separateWorkingDir: Boolean = false,
    bufferSize: Int = 8192,
    encoding: String = Codec.defaultCharsetCodec.name): RDD[String] = withScope {
  // Only clean the optional callbacks when provided; `null` is a sentinel meaning
  // "use default printing" and must be passed through unchanged.
  new PipedRDD(this, command, env,
    if (printPipeContext ne null) sc.clean(printPipeContext) else null,
    if (printRDDElement ne null) sc.clean(printRDDElement) else null,
    separateWorkingDir,
    bufferSize,
    encoding)
}
/**
* Return a new RDD by applying a function to each partition of this RDD.
*
* `preservesPartitioning` indicates whether the input function preserves the partitioner, which
* should be `false` unless this is a pair RDD and the input function doesn't modify the keys.
*/
def mapPartitions[U: ClassTag](
    f: Iterator[T] => Iterator[U],
    preservesPartitioning: Boolean = false): RDD[U] = withScope {
  // Clean once; the task context and partition index are ignored by this variant.
  val cleaned = sc.clean(f)
  new MapPartitionsRDD(
    this,
    (_: TaskContext, _: Int, elems: Iterator[T]) => cleaned(elems),
    preservesPartitioning)
}
/**
* [performance] Spark's internal mapPartitionsWithIndex method that skips closure cleaning.
* It is a performance API to be used carefully only if we are sure that the RDD elements are
* serializable and don't require closure cleaning.
*
* @param preservesPartitioning indicates whether the input function preserves the partitioner,
* which should be `false` unless this is a pair RDD and the input
* function doesn't modify the keys.
* @param isOrderSensitive whether or not the function is order-sensitive. If it's order
* sensitive, it may return totally different result when the input order
* is changed. Mostly stateful functions are order-sensitive.
*/
private[spark] def mapPartitionsWithIndexInternal[U: ClassTag](
    f: (Int, Iterator[T]) => Iterator[U],
    preservesPartitioning: Boolean = false,
    isOrderSensitive: Boolean = false): RDD[U] = withScope {
  // Deliberately no sc.clean(f): callers of this internal API guarantee the closure
  // is already serializable.
  new MapPartitionsRDD(
    this,
    (_: TaskContext, idx: Int, elems: Iterator[T]) => f(idx, elems),
    preservesPartitioning = preservesPartitioning,
    isOrderSensitive = isOrderSensitive)
}
/**
* [performance] Spark's internal mapPartitions method that skips closure cleaning.
*/
private[spark] def mapPartitionsInternal[U: ClassTag](
    f: Iterator[T] => Iterator[U],
    preservesPartitioning: Boolean = false): RDD[U] = withScope {
  // Deliberately no sc.clean(f): callers of this internal API guarantee the closure
  // is already serializable.
  new MapPartitionsRDD(
    this,
    (_: TaskContext, _: Int, elems: Iterator[T]) => f(elems),
    preservesPartitioning)
}
/**
* Return a new RDD by applying a function to each partition of this RDD, while tracking the index
* of the original partition.
*
* `preservesPartitioning` indicates whether the input function preserves the partitioner, which
* should be `false` unless this is a pair RDD and the input function doesn't modify the keys.
*/
def mapPartitionsWithIndex[U: ClassTag](
    f: (Int, Iterator[T]) => Iterator[U],
    preservesPartitioning: Boolean = false): RDD[U] = withScope {
  // Clean the user closure, then adapt it to the (context, index, iterator) shape
  // expected by MapPartitionsRDD.
  val cleaned = sc.clean(f)
  new MapPartitionsRDD(
    this,
    (_: TaskContext, idx: Int, elems: Iterator[T]) => cleaned(idx, elems),
    preservesPartitioning)
}
/**
* Return a new RDD by applying a function to each partition of this RDD, while tracking the index
* of the original partition.
*
* `preservesPartitioning` indicates whether the input function preserves the partitioner, which
* should be `false` unless this is a pair RDD and the input function doesn't modify the keys.
*
* `isOrderSensitive` indicates whether the function is order-sensitive. If it is order
* sensitive, it may return totally different result when the input order
* is changed. Mostly stateful functions are order-sensitive.
*/
private[spark] def mapPartitionsWithIndex[U: ClassTag](
    f: (Int, Iterator[T]) => Iterator[U],
    preservesPartitioning: Boolean,
    isOrderSensitive: Boolean): RDD[U] = withScope {
  // Same as the public overload, but lets internal callers flag order sensitivity.
  val cleaned = sc.clean(f)
  new MapPartitionsRDD(
    this,
    (_: TaskContext, idx: Int, elems: Iterator[T]) => cleaned(idx, elems),
    preservesPartitioning,
    isOrderSensitive = isOrderSensitive)
}
/**
* Zips this RDD with another one, returning key-value pairs with the first element in each RDD,
* second element in each RDD, etc. Assumes that the two RDDs have the *same number of
* partitions* and the *same number of elements in each partition* (e.g. one was made through
* a map on the other).
*/
def zip[U: ClassTag](other: RDD[U]): RDD[(T, U)] = withScope {
  zipPartitions(other, preservesPartitioning = false) { (thisIter, otherIter) =>
    new Iterator[(T, U)] {
      // Both iterators must run out at exactly the same time; a mismatch means the
      // partitions had different element counts, which zip does not allow.
      def hasNext: Boolean = {
        val left = thisIter.hasNext
        val right = otherIter.hasNext
        if (left != right) {
          throw new SparkException("Can only zip RDDs with " +
            "same number of elements in each partition")
        }
        left
      }
      def next(): (T, U) = (thisIter.next(), otherIter.next())
    }
  }
}
/**
* Zip this RDD's partitions with one (or more) RDD(s) and return a new RDD by
* applying a function to the zipped partitions. Assumes that all the RDDs have the
* *same number of partitions*, but does *not* require them to have the same number
* of elements in each partition.
*/
def zipPartitions[B: ClassTag, V: ClassTag]
    (rdd2: RDD[B], preservesPartitioning: Boolean)
    (f: (Iterator[T], Iterator[B]) => Iterator[V]): RDD[V] = withScope {
  // Clean the zip function before shipping it inside the zipped-partitions RDD.
  val cleanedF = sc.clean(f)
  new ZippedPartitionsRDD2(sc, cleanedF, this, rdd2, preservesPartitioning)
}
def zipPartitions[B: ClassTag, V: ClassTag]
    (rdd2: RDD[B])
    (f: (Iterator[T], Iterator[B]) => Iterator[V]): RDD[V] = withScope {
  // Overload defaulting preservesPartitioning to false.
  zipPartitions(rdd2, preservesPartitioning = false)(f)
}
def zipPartitions[B: ClassTag, C: ClassTag, V: ClassTag]
    (rdd2: RDD[B], rdd3: RDD[C], preservesPartitioning: Boolean)
    (f: (Iterator[T], Iterator[B], Iterator[C]) => Iterator[V]): RDD[V] = withScope {
  // Clean the zip function before shipping it inside the zipped-partitions RDD.
  val cleanedF = sc.clean(f)
  new ZippedPartitionsRDD3(sc, cleanedF, this, rdd2, rdd3, preservesPartitioning)
}
def zipPartitions[B: ClassTag, C: ClassTag, V: ClassTag]
    (rdd2: RDD[B], rdd3: RDD[C])
    (f: (Iterator[T], Iterator[B], Iterator[C]) => Iterator[V]): RDD[V] = withScope {
  // Overload defaulting preservesPartitioning to false.
  zipPartitions(rdd2, rdd3, preservesPartitioning = false)(f)
}
def zipPartitions[B: ClassTag, C: ClassTag, D: ClassTag, V: ClassTag]
    (rdd2: RDD[B], rdd3: RDD[C], rdd4: RDD[D], preservesPartitioning: Boolean)
    (f: (Iterator[T], Iterator[B], Iterator[C], Iterator[D]) => Iterator[V]): RDD[V] = withScope {
  // Clean the zip function before shipping it inside the zipped-partitions RDD.
  val cleanedF = sc.clean(f)
  new ZippedPartitionsRDD4(sc, cleanedF, this, rdd2, rdd3, rdd4, preservesPartitioning)
}
def zipPartitions[B: ClassTag, C: ClassTag, D: ClassTag, V: ClassTag]
    (rdd2: RDD[B], rdd3: RDD[C], rdd4: RDD[D])
    (f: (Iterator[T], Iterator[B], Iterator[C], Iterator[D]) => Iterator[V]): RDD[V] = withScope {
  // Overload defaulting preservesPartitioning to false.
  zipPartitions(rdd2, rdd3, rdd4, preservesPartitioning = false)(f)
}
// Actions (launch a job to return a value to the user program)
/**
* Applies a function f to all elements of this RDD.
*/
def foreach(f: T => Unit): Unit = withScope {
  // Clean the side-effecting closure and run one job applying it to every element.
  val cleaned = sc.clean(f)
  sc.runJob(this, (elems: Iterator[T]) => elems.foreach(cleaned))
}
/**
* Applies a function f to each partition of this RDD.
*/
def foreachPartition(f: Iterator[T] => Unit): Unit = withScope {
  // One task per partition; each task hands its entire iterator to the closure.
  val cleaned = sc.clean(f)
  sc.runJob(this, (elems: Iterator[T]) => cleaned(elems))
}
/**
* Return an array that contains all of the elements in this RDD.
*
* @note This method should only be used if the resulting array is expected to be small, as
* all the data is loaded into the driver's memory.
*/
def collect(): Array[T] = withScope {
  // Materialize each partition as an array on the executors, then concatenate the
  // per-partition arrays on the driver.
  val partitionArrays = sc.runJob(this, (elems: Iterator[T]) => elems.toArray)
  Array.concat(partitionArrays: _*)
}
/**
* Return an iterator that contains all of the elements in this RDD.
*
* The iterator will consume as much memory as the largest partition in this RDD.
*
* @note This results in multiple Spark jobs, and if the input RDD is the result
* of a wide transformation (e.g. join with different partitioners), to avoid
* recomputing the input RDD should be cached first.
*/
def toLocalIterator: Iterator[T] = withScope {
  // Fetch one partition per job, so at most one partition's data is resident on the
  // driver at any time.
  def collectPartition(p: Int): Array[T] = {
    sc.runJob(this, (elems: Iterator[T]) => elems.toArray, Seq(p)).head
  }
  partitions.indices.iterator.flatMap(collectPartition)
}
/**
* Return an RDD that contains all matching values by applying `f`.
*/
def collect[U: ClassTag](f: PartialFunction[T, U]): RDD[U] = withScope {
  // Keep only elements inside the partial function's domain, then apply it.
  val cleaned = sc.clean(f)
  filter(cleaned.isDefinedAt).map(cleaned)
}
/**
* Return an RDD with the elements from `this` that are not in `other`.
*
* Uses `this` partitioner/partition size, because even if `other` is huge, the resulting
* RDD will be <= us.
*/
def subtract(other: RDD[T]): RDD[T] = withScope {
  // Reuse our partitioner if we have one; otherwise hash-partition with our own count.
  subtract(other, partitioner.getOrElse(new HashPartitioner(partitions.length)))
}
/**
* Return an RDD with the elements from `this` that are not in `other`.
*/
def subtract(other: RDD[T], numPartitions: Int): RDD[T] = withScope {
  // Hash-partition the subtraction with the requested partition count.
  val part = new HashPartitioner(numPartitions)
  subtract(other, part)
}
/**
* Return an RDD with the elements from `this` that are not in `other`.
*/
def subtract(
    other: RDD[T],
    p: Partitioner)(implicit ord: Ordering[T] = null): RDD[T] = withScope {
  if (partitioner == Some(p)) {
    // Our partitioner knows how to handle T (which, since we have a partitioner, is
    // really (K, V)) so make a new Partitioner that will de-tuple our fake tuples
    val p2 = new Partitioner() {
      override def numPartitions: Int = p.numPartitions
      override def getPartition(k: Any): Int = p.getPartition(k.asInstanceOf[(Any, _)]._1)
    }
    // Unfortunately, since we're making a new p2, we'll get ShuffleDependencies
    // anyway, and when calling .keys, will not have a partitioner set, even though
    // the SubtractedRDD will, thanks to p2's de-tupled partitioning, already be
    // partitioned by the right/real keys (e.g. p).
    this.map(x => (x, null)).subtractByKey(other.map((_, null)), p2).keys
  } else {
    // No matching partitioner: shuffle both sides keyed by the elements themselves.
    this.map(x => (x, null)).subtractByKey(other.map((_, null)), p).keys
  }
}
/**
* Reduces the elements of this RDD using the specified commutative and
* associative binary operator.
*/
def reduce(f: (T, T) => T): T = withScope {
  val cleanF = sc.clean(f)
  // Executor side: reduce each partition to at most one value (None for an empty partition).
  val reducePartition: Iterator[T] => Option[T] = iter => {
    if (iter.hasNext) {
      Some(iter.reduceLeft(cleanF))
    } else {
      None
    }
  }
  // Driver side: merge per-partition results as tasks complete. Uses the uncleaned `f`
  // because this closure never leaves the driver.
  var jobResult: Option[T] = None
  val mergeResult = (_: Int, taskResult: Option[T]) => {
    if (taskResult.isDefined) {
      jobResult = jobResult match {
        case Some(value) => Some(f(value, taskResult.get))
        case None => taskResult
      }
    }
  }
  sc.runJob(this, reducePartition, mergeResult)
  // Get the final result out of our Option, or throw an exception if the RDD was empty
  jobResult.getOrElse(throw new UnsupportedOperationException("empty collection"))
}
/**
* Reduces the elements of this RDD in a multi-level tree pattern.
*
* @param depth suggested depth of the tree (default: 2)
* @see [[org.apache.spark.rdd.RDD#reduce]]
*/
def treeReduce(f: (T, T) => T, depth: Int = 2): T = withScope {
  require(depth >= 1, s"Depth must be greater than or equal to 1 but got $depth.")
  val cleanF = context.clean(f)
  // Reduce each partition to an Option[T] (None when the partition is empty).
  val reducePartition: Iterator[T] => Option[T] = iter => {
    if (iter.hasNext) {
      Some(iter.reduceLeft(cleanF))
    } else {
      None
    }
  }
  val partiallyReduced = mapPartitions(it => Iterator(reducePartition(it)))
  // Combine two optional partial results; empty partitions (None) contribute nothing.
  val op: (Option[T], Option[T]) => Option[T] = (c, x) => {
    if (c.isDefined && x.isDefined) {
      Some(cleanF(c.get, x.get))
    } else if (c.isDefined) {
      c
    } else if (x.isDefined) {
      x
    } else {
      None
    }
  }
  // Delegate the multi-level merging to treeAggregate over Option[T].
  partiallyReduced.treeAggregate(Option.empty[T])(op, op, depth)
    .getOrElse(throw new UnsupportedOperationException("empty collection"))
}
/**
* Aggregate the elements of each partition, and then the results for all the partitions, using a
* given associative function and a neutral "zero value". The function
* op(t1, t2) is allowed to modify t1 and return it as its result value to avoid object
* allocation; however, it should not modify t2.
*
* This behaves somewhat differently from fold operations implemented for non-distributed
* collections in functional languages like Scala. This fold operation may be applied to
* partitions individually, and then fold those results into the final result, rather than
* apply the fold to each element sequentially in some defined ordering. For functions
* that are not commutative, the result may differ from that of a fold applied to a
* non-distributed collection.
*
* @param zeroValue the initial value for the accumulated result of each partition for the `op`
* operator, and also the initial value for the combine results from different
* partitions for the `op` operator - this will typically be the neutral
* element (e.g. `Nil` for list concatenation or `0` for summation)
* @param op an operator used to both accumulate results within a partition and combine results
* from different partitions
*/
def fold(zeroValue: T)(op: (T, T) => T): T = withScope {
  // Clone the zero value since we will also be serializing it as part of tasks
  var jobResult = Utils.clone(zeroValue, sc.env.closureSerializer.newInstance())
  val cleanOp = sc.clean(op)
  // Executor side: fold each partition starting from (a task-serialized copy of) zeroValue.
  val foldPartition = (iter: Iterator[T]) => iter.fold(zeroValue)(cleanOp)
  // Driver side: merge task results into jobResult as they arrive; uses the uncleaned
  // `op` because this closure never leaves the driver.
  val mergeResult = (_: Int, taskResult: T) => jobResult = op(jobResult, taskResult)
  sc.runJob(this, foldPartition, mergeResult)
  jobResult
}
/**
* Aggregate the elements of each partition, and then the results for all the partitions, using
* given combine functions and a neutral "zero value". This function can return a different result
* type, U, than the type of this RDD, T. Thus, we need one operation for merging a T into an U
* and one operation for merging two U's, as in scala.TraversableOnce. Both of these functions are
* allowed to modify and return their first argument instead of creating a new U to avoid memory
* allocation.
*
* @param zeroValue the initial value for the accumulated result of each partition for the
* `seqOp` operator, and also the initial value for the combine results from
* different partitions for the `combOp` operator - this will typically be the
* neutral element (e.g. `Nil` for list concatenation or `0` for summation)
* @param seqOp an operator used to accumulate results within a partition
* @param combOp an associative operator used to combine results from different partitions
*/
def aggregate[U: ClassTag](zeroValue: U)(seqOp: (U, T) => U, combOp: (U, U) => U): U = withScope {
  // Clone the zero value since we will also be serializing it as part of tasks
  var jobResult = Utils.clone(zeroValue, sc.env.serializer.newInstance())
  val cleanSeqOp = sc.clean(seqOp)
  val cleanCombOp = sc.clean(combOp)
  // Executor side: aggregate within each partition starting from zeroValue.
  val aggregatePartition = (it: Iterator[T]) => it.aggregate(zeroValue)(cleanSeqOp, cleanCombOp)
  // Driver side: combine per-partition results into jobResult as tasks complete; uses the
  // uncleaned `combOp` because this closure never leaves the driver.
  val mergeResult = (_: Int, taskResult: U) => jobResult = combOp(jobResult, taskResult)
  sc.runJob(this, aggregatePartition, mergeResult)
  jobResult
}
/**
* Aggregates the elements of this RDD in a multi-level tree pattern.
* This method is semantically identical to [[org.apache.spark.rdd.RDD#aggregate]].
*
* @param depth suggested depth of the tree (default: 2)
*/
def treeAggregate[U: ClassTag](zeroValue: U)(
    seqOp: (U, T) => U,
    combOp: (U, U) => U,
    depth: Int = 2): U = withScope {
  require(depth >= 1, s"Depth must be greater than or equal to 1 but got $depth.")
  if (partitions.length == 0) {
    // Empty RDD: return a fresh copy of the zero value.
    Utils.clone(zeroValue, context.env.closureSerializer.newInstance())
  } else {
    val cleanSeqOp = context.clean(seqOp)
    val cleanCombOp = context.clean(combOp)
    // First level: aggregate within each partition.
    val aggregatePartition =
      (it: Iterator[T]) => it.aggregate(zeroValue)(cleanSeqOp, cleanCombOp)
    var partiallyAggregated: RDD[U] = mapPartitions(it => Iterator(aggregatePartition(it)))
    var numPartitions = partiallyAggregated.partitions.length
    val scale = math.max(math.ceil(math.pow(numPartitions, 1.0 / depth)).toInt, 2)
    // If creating an extra level doesn't help reduce
    // the wall-clock time, we stop tree aggregation.
    // Don't trigger TreeAggregation when it doesn't save wall-clock time
    while (numPartitions > scale + math.ceil(numPartitions.toDouble / scale)) {
      numPartitions /= scale
      val curNumPartitions = numPartitions
      // Shrink by a factor of `scale` per level: route partial results to fewer
      // partitions by index modulo, folding them together there.
      partiallyAggregated = partiallyAggregated.mapPartitionsWithIndex {
        (i, iter) => iter.map((i % curNumPartitions, _))
      }.foldByKey(zeroValue, new HashPartitioner(curNumPartitions))(cleanCombOp).values
    }
    // Final merge on the driver, starting from a fresh copy of the zero value.
    val copiedZeroValue = Utils.clone(zeroValue, sc.env.closureSerializer.newInstance())
    partiallyAggregated.fold(copiedZeroValue)(cleanCombOp)
  }
}
/**
* Return the number of elements in the RDD.
*/
def count(): Long = sc.runJob(this, Utils.getIteratorSize _).sum
/**
* Approximate version of count() that returns a potentially incomplete result
* within a timeout, even if not all tasks have finished.
*
* The confidence is the probability that the error bounds of the result will
* contain the true value. That is, if countApprox were called repeatedly
* with confidence 0.9, we would expect 90% of the results to contain the
* true count. The confidence must be in the range [0,1] or an exception will
* be thrown.
*
* @param timeout maximum time to wait for the job, in milliseconds
* @param confidence the desired statistical confidence in the result
* @return a potentially incomplete result, with error bounds
*/
def countApprox(
    timeout: Long,
    confidence: Double = 0.95): PartialResult[BoundedDouble] = withScope {
  require(0.0 <= confidence && confidence <= 1.0, s"confidence ($confidence) must be in [0,1]")
  // Per-partition counter: drain the iterator, tallying one per element.
  val countElements: (TaskContext, Iterator[T]) => Long = { (_, iter) =>
    iter.foldLeft(0L)((acc, _) => acc + 1L)
  }
  val evaluator = new CountEvaluator(partitions.length, confidence)
  sc.runApproximateJob(this, countElements, evaluator, timeout)
}
  /**
   * Return the count of each unique value in this RDD as a local map of (value, count) pairs.
   *
   * @note This method should only be used if the resulting map is expected to be small, as
   * the whole thing is loaded into the driver's memory.
   * To handle very large results, consider using
   *
   * {{{
   * rdd.map(x => (x, 1L)).reduceByKey(_ + _)
   * }}}
   *
   * , which returns an RDD[T, Long] instead of a map.
   */
  def countByValue()(implicit ord: Ordering[T] = null): Map[T, Long] = withScope {
    // Pair each value with a dummy payload and reuse countByKey to count occurrences.
    map(value => (value, null)).countByKey()
  }
  /**
   * Approximate version of countByValue().
   *
   * @param timeout maximum time to wait for the job, in milliseconds
   * @param confidence the desired statistical confidence in the result
   * @return a potentially incomplete result, with error bounds
   */
  def countByValueApprox(timeout: Long, confidence: Double = 0.95)
      (implicit ord: Ordering[T] = null)
      : PartialResult[Map[T, BoundedDouble]] = withScope {
    require(0.0 <= confidence && confidence <= 1.0, s"confidence ($confidence) must be in [0,1]")
    if (elementClassTag.runtimeClass.isArray) {
      // Arrays use reference equality/hashing, so grouping them "by value" would be meaningless.
      throw new SparkException("countByValueApprox() does not support arrays")
    }
    // Build a per-partition histogram; the TaskContext argument is unused.
    val countPartition: (TaskContext, Iterator[T]) => OpenHashMap[T, Long] = { (_, iter) =>
      val map = new OpenHashMap[T, Long]
      iter.foreach {
        t => map.changeValue(t, 1L, _ + 1L)
      }
      map
    }
    val evaluator = new GroupedCountEvaluator[T](partitions.length, confidence)
    sc.runApproximateJob(this, countPartition, evaluator, timeout)
  }
/**
* Return approximate number of distinct elements in the RDD.
*
* The algorithm used is based on streamlib's implementation of "HyperLogLog in Practice:
* Algorithmic Engineering of a State of The Art Cardinality Estimation Algorithm", available
* <a href="https://doi.org/10.1145/2452376.2452456">here</a>.
*
* The relative accuracy is approximately `1.054 / sqrt(2^p)`. Setting a nonzero (`sp` is greater
* than `p`) would trigger sparse representation of registers, which may reduce the memory
* consumption and increase accuracy when the cardinality is small.
*
* @param p The precision value for the normal set.
* `p` must be a value between 4 and `sp` if `sp` is not zero (32 max).
* @param sp The precision value for the sparse set, between 0 and 32.
* If `sp` equals 0, the sparse representation is skipped.
*/
  def countApproxDistinct(p: Int, sp: Int): Long = withScope {
    require(p >= 4, s"p ($p) must be >= 4")
    require(sp <= 32, s"sp ($sp) must be <= 32")
    require(sp == 0 || p <= sp, s"p ($p) cannot be greater than sp ($sp)")
    // Each partition folds its elements into a HyperLogLog++ sketch; the sketches are then
    // merged pairwise and the cardinality estimate is read off the combined sketch.
    val zeroCounter = new HyperLogLogPlus(p, sp)
    aggregate(zeroCounter)(
      (hll: HyperLogLogPlus, v: T) => {
        hll.offer(v)
        hll
      },
      (h1: HyperLogLogPlus, h2: HyperLogLogPlus) => {
        h1.addAll(h2)
        h1
      }).cardinality()
  }
/**
* Return approximate number of distinct elements in the RDD.
*
* The algorithm used is based on streamlib's implementation of "HyperLogLog in Practice:
* Algorithmic Engineering of a State of The Art Cardinality Estimation Algorithm", available
* <a href="https://doi.org/10.1145/2452376.2452456">here</a>.
*
* @param relativeSD Relative accuracy. Smaller values create counters that require more space.
* It must be greater than 0.000017.
*/
def countApproxDistinct(relativeSD: Double = 0.05): Long = withScope {
require(relativeSD > 0.000017, s"accuracy ($relativeSD) must be greater than 0.000017")
val p = math.ceil(2.0 * math.log(1.054 / relativeSD) / math.log(2)).toInt
countApproxDistinct(if (p < 4) 4 else p, 0)
}
/**
* Zips this RDD with its element indices. The ordering is first based on the partition index
* and then the ordering of items within each partition. So the first item in the first
* partition gets index 0, and the last item in the last partition receives the largest index.
*
* This is similar to Scala's zipWithIndex but it uses Long instead of Int as the index type.
* This method needs to trigger a spark job when this RDD contains more than one partitions.
*
* @note Some RDDs, such as those returned by groupBy(), do not guarantee order of
* elements in a partition. The index assigned to each element is therefore not guaranteed,
* and may even change if the RDD is reevaluated. If a fixed ordering is required to guarantee
* the same index assignments, you should sort the RDD with sortByKey() or save it to a file.
*/
  def zipWithIndex(): RDD[(T, Long)] = withScope {
    // Delegates to a dedicated RDD that computes the start offset of each partition first
    // (which is why this can trigger a job when there is more than one partition).
    new ZippedWithIndexRDD(this)
  }
  /**
   * Zips this RDD with generated unique Long ids. Items in the kth partition will get ids k, n+k,
   * 2*n+k, ..., where n is the number of partitions. So there may exist gaps, but this method
   * won't trigger a spark job, which is different from [[org.apache.spark.rdd.RDD#zipWithIndex]].
   *
   * @note Some RDDs, such as those returned by groupBy(), do not guarantee order of
   * elements in a partition. The unique ID assigned to each element is therefore not guaranteed,
   * and may even change if the RDD is reevaluated. If a fixed ordering is required to guarantee
   * the same index assignments, you should sort the RDD with sortByKey() or save it to a file.
   */
  def zipWithUniqueId(): RDD[(T, Long)] = withScope {
    val n = this.partitions.length.toLong
    this.mapPartitionsWithIndex { case (k, iter) =>
      // The i-th element of partition k gets id i * n + k: unique across partitions
      // without needing any global coordination.
      Utils.getIteratorZipWithIndex(iter, 0L).map { case (item, i) =>
        (item, i * n + k)
      }
    }
  }
/**
* Take the first num elements of the RDD. It works by first scanning one partition, and use the
* results from that partition to estimate the number of additional partitions needed to satisfy
* the limit.
*
* @note This method should only be used if the resulting array is expected to be small, as
* all the data is loaded into the driver's memory.
*
* @note Due to complications in the internal implementation, this method will raise
* an exception if called on an RDD of `Nothing` or `Null`.
*/
  def take(num: Int): Array[T] = withScope {
    // Factor by which the number of partitions scanned grows each iteration (at least 2).
    val scaleUpFactor = Math.max(conf.get(RDD_LIMIT_SCALE_UP_FACTOR), 2)
    if (num == 0) {
      new Array[T](0)
    } else {
      val buf = new ArrayBuffer[T]
      val totalParts = this.partitions.length
      var partsScanned = 0
      // Scan partitions in waves until we have `num` elements or run out of partitions.
      while (buf.size < num && partsScanned < totalParts) {
        // The number of partitions to try in this iteration. It is ok for this number to be
        // greater than totalParts because we actually cap it at totalParts in runJob.
        var numPartsToTry = 1L
        val left = num - buf.size
        if (partsScanned > 0) {
          // If we didn't find any rows after the previous iteration, quadruple and retry.
          // Otherwise, interpolate the number of partitions we need to try, but overestimate
          // it by 50%. We also cap the estimation in the end.
          if (buf.isEmpty) {
            numPartsToTry = partsScanned * scaleUpFactor
          } else {
            // As left > 0, numPartsToTry is always >= 1
            numPartsToTry = Math.ceil(1.5 * left * partsScanned / buf.size).toInt
            numPartsToTry = Math.min(numPartsToTry, partsScanned * scaleUpFactor)
          }
        }
        val p = partsScanned.until(math.min(partsScanned + numPartsToTry, totalParts).toInt)
        val res = sc.runJob(this, (it: Iterator[T]) => it.take(left).toArray, p)
        // Append per-partition results, never exceeding the requested count.
        res.foreach(buf ++= _.take(num - buf.size))
        partsScanned += p.size
      }
      buf.toArray
    }
  }
/**
* Return the first element in this RDD.
*/
def first(): T = withScope {
take(1) match {
case Array(t) => t
case _ => throw new UnsupportedOperationException("empty collection")
}
}
/**
* Returns the top k (largest) elements from this RDD as defined by the specified
* implicit Ordering[T] and maintains the ordering. This does the opposite of
* [[takeOrdered]]. For example:
* {{{
* sc.parallelize(Seq(10, 4, 2, 12, 3)).top(1)
* // returns Array(12)
*
* sc.parallelize(Seq(2, 3, 4, 5, 6)).top(2)
* // returns Array(6, 5)
* }}}
*
* @note This method should only be used if the resulting array is expected to be small, as
* all the data is loaded into the driver's memory.
*
* @param num k, the number of top elements to return
* @param ord the implicit ordering for T
* @return an array of top elements
*/
  def top(num: Int)(implicit ord: Ordering[T]): Array[T] = withScope {
    // Largest-k is simply smallest-k under the reversed ordering.
    takeOrdered(num)(ord.reverse)
  }
  /**
   * Returns the first k (smallest) elements from this RDD as defined by the specified
   * implicit Ordering[T] and maintains the ordering. This does the opposite of [[top]].
   * For example:
   * {{{
   * sc.parallelize(Seq(10, 4, 2, 12, 3)).takeOrdered(1)
   * // returns Array(2)
   *
   * sc.parallelize(Seq(2, 3, 4, 5, 6)).takeOrdered(2)
   * // returns Array(2, 3)
   * }}}
   *
   * @note This method should only be used if the resulting array is expected to be small, as
   * all the data is loaded into the driver's memory.
   *
   * @param num k, the number of elements to return
   * @param ord the implicit ordering for T
   * @return an array of top elements
   */
  def takeOrdered(num: Int)(implicit ord: Ordering[T]): Array[T] = withScope {
    if (num == 0) {
      Array.empty
    } else {
      // Keep the k smallest elements of each partition in a bounded queue.
      val mapRDDs = mapPartitions { items =>
        // Priority keeps the largest elements, so let's reverse the ordering.
        val queue = new BoundedPriorityQueue[T](num)(ord.reverse)
        queue ++= collectionUtils.takeOrdered(items, num)(ord)
        Iterator.single(queue)
      }
      if (mapRDDs.partitions.length == 0) {
        Array.empty
      } else {
        // Merge the per-partition queues on the driver, then sort the survivors.
        mapRDDs.reduce { (queue1, queue2) =>
          queue1 ++= queue2
          queue1
        }.toArray.sorted(ord)
      }
    }
  }
  /**
   * Returns the max of this RDD as defined by the implicit Ordering[T].
   * @return the maximum element of the RDD
   * */
  def max()(implicit ord: Ordering[T]): T = withScope {
    // reduce fails on an empty RDD, so this method does too.
    this.reduce(ord.max)
  }
  /**
   * Returns the min of this RDD as defined by the implicit Ordering[T].
   * @return the minimum element of the RDD
   * */
  def min()(implicit ord: Ordering[T]): T = withScope {
    // reduce fails on an empty RDD, so this method does too.
    this.reduce(ord.min)
  }
/**
* @note Due to complications in the internal implementation, this method will raise an
* exception if called on an RDD of `Nothing` or `Null`. This may be come up in practice
* because, for example, the type of `parallelize(Seq())` is `RDD[Nothing]`.
* (`parallelize(Seq())` should be avoided anyway in favor of `parallelize(Seq[T]())`.)
* @return true if and only if the RDD contains no elements at all. Note that an RDD
* may be empty even when it has at least 1 partition.
*/
def isEmpty(): Boolean = withScope {
partitions.length == 0 || take(1).length == 0
}
  /**
   * Save this RDD as a text file, using string representations of elements.
   */
  def saveAsTextFile(path: String): Unit = withScope {
    // null codec means no compression.
    saveAsTextFile(path, null)
  }
  /**
   * Save this RDD as a compressed text file, using string representations of elements.
   */
  def saveAsTextFile(path: String, codec: Class[_ <: CompressionCodec]): Unit = withScope {
    this.mapPartitions { iter =>
      // Reuse one Text instance per partition to avoid a per-record allocation.
      val text = new Text()
      iter.map { x =>
        require(x != null, "text files do not allow null rows")
        text.set(x.toString)
        (NullWritable.get(), text)
      }
    }.saveAsHadoopFile[TextOutputFormat[NullWritable, Text]](path, codec)
  }
  /**
   * Save this RDD as a SequenceFile of serialized objects.
   */
  def saveAsObjectFile(path: String): Unit = withScope {
    // Serialize elements in batches of 10 to amortize per-record serialization overhead.
    this.mapPartitions(iter => iter.grouped(10).map(_.toArray))
      .map(x => (NullWritable.get(), new BytesWritable(Utils.serialize(x))))
      .saveAsSequenceFile(path)
  }
  /**
   * Creates tuples of the elements in this RDD by applying `f`.
   */
  def keyBy[K](f: T => K): RDD[(K, T)] = withScope {
    // Clean the closure so it is serializable before shipping it to executors.
    val cleanedF = sc.clean(f)
    map(x => (cleanedF(x), x))
  }
  /** A private method for tests, to look at the contents of each partition */
  private[spark] def collectPartitions(): Array[Array[T]] = withScope {
    // One array per partition, in partition order.
    sc.runJob(this, (iter: Iterator[T]) => iter.toArray)
  }
/**
* Mark this RDD for checkpointing. It will be saved to a file inside the checkpoint
* directory set with `SparkContext#setCheckpointDir` and all references to its parent
* RDDs will be removed. This function must be called before any job has been
* executed on this RDD. It is strongly recommended that this RDD is persisted in
* memory, otherwise saving it on a file will require recomputation.
*/
  def checkpoint(): Unit = RDDCheckpointData.synchronized {
    // NOTE: we use a global lock here due to complexities downstream with ensuring
    // children RDD partitions point to the correct parent partitions. In the future
    // we should revisit this consideration.
    if (context.checkpointDir.isEmpty) {
      throw new SparkException("Checkpoint directory has not been set in the SparkContext")
    } else if (checkpointData.isEmpty) {
      // Only mark once: repeated calls are no-ops once checkpoint data exists.
      checkpointData = Some(new ReliableRDDCheckpointData(this))
    }
  }
/**
* Mark this RDD for local checkpointing using Spark's existing caching layer.
*
* This method is for users who wish to truncate RDD lineages while skipping the expensive
* step of replicating the materialized data in a reliable distributed file system. This is
* useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).
*
* Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
* data is written to ephemeral local storage in the executors instead of to a reliable,
* fault-tolerant storage. The effect is that if an executor fails during the computation,
* the checkpointed data may no longer be accessible, causing an irrecoverable job failure.
*
* This is NOT safe to use with dynamic allocation, which removes executors along
* with their cached blocks. If you must use both features, you are advised to set
* `spark.dynamicAllocation.cachedExecutorIdleTimeout` to a high value.
*
* The checkpoint directory set through `SparkContext#setCheckpointDir` is not used.
*/
  def localCheckpoint(): this.type = RDDCheckpointData.synchronized {
    if (conf.get(DYN_ALLOCATION_ENABLED) &&
        conf.contains(DYN_ALLOCATION_CACHED_EXECUTOR_IDLE_TIMEOUT)) {
      logWarning("Local checkpointing is NOT safe to use with dynamic allocation, " +
        "which removes executors along with their cached blocks. If you must use both " +
        "features, you are advised to set `spark.dynamicAllocation.cachedExecutorIdleTimeout` " +
        "to a high value. E.g. If you plan to use the RDD for 1 hour, set the timeout to " +
        "at least 1 hour.")
    }
    // Note: At this point we do not actually know whether the user will call persist() on
    // this RDD later, so we must explicitly call it here ourselves to ensure the cached
    // blocks are registered for cleanup later in the SparkContext.
    //
    // If, however, the user has already called persist() on this RDD, then we must adapt
    // the storage level he/she specified to one that is appropriate for local checkpointing
    // (i.e. uses disk) to guarantee correctness.
    if (storageLevel == StorageLevel.NONE) {
      persist(LocalRDDCheckpointData.DEFAULT_STORAGE_LEVEL)
    } else {
      // allowOverride is needed because the user already set a level explicitly.
      persist(LocalRDDCheckpointData.transformStorageLevel(storageLevel), allowOverride = true)
    }
    // If this RDD is already checkpointed and materialized, its lineage is already truncated.
    // We must not override our `checkpointData` in this case because it is needed to recover
    // the checkpointed data. If it is overridden, next time materializing on this RDD will
    // cause error.
    if (isCheckpointedAndMaterialized) {
      logWarning("Not marking RDD for local checkpoint because it was already " +
        "checkpointed and materialized")
    } else {
      // Lineage is not truncated yet, so just override any existing checkpoint data with ours
      checkpointData match {
        case Some(_: ReliableRDDCheckpointData[_]) => logWarning(
          "RDD was already marked for reliable checkpointing: overriding with local checkpoint.")
        case _ =>
      }
      checkpointData = Some(new LocalRDDCheckpointData(this))
    }
    this
  }
  /**
   * Return whether this RDD is checkpointed and materialized, either reliably or locally.
   */
  def isCheckpointed: Boolean = isCheckpointedAndMaterialized
  /**
   * Return whether this RDD is checkpointed and materialized, either reliably or locally.
   * This is introduced as an alias for `isCheckpointed` to clarify the semantics of the
   * return value. Exposed for testing.
   */
  // False while merely *marked* for checkpointing; true only after the data is written.
  private[spark] def isCheckpointedAndMaterialized: Boolean =
    checkpointData.exists(_.isCheckpointed)
/**
* Return whether this RDD is marked for local checkpointing.
* Exposed for testing.
*/
private[rdd] def isLocallyCheckpointed: Boolean = {
checkpointData match {
case Some(_: LocalRDDCheckpointData[T]) => true
case _ => false
}
}
  /**
   * Return whether this RDD is reliably checkpointed and materialized.
   */
  private[rdd] def isReliablyCheckpointed: Boolean = {
    checkpointData match {
      // Requires the data to actually have been written, not just marked for checkpointing.
      case Some(reliable: ReliableRDDCheckpointData[_]) if reliable.isCheckpointed => true
      case _ => false
    }
  }
/**
* Gets the name of the directory to which this RDD was checkpointed.
* This is not defined if the RDD is checkpointed locally.
*/
def getCheckpointFile: Option[String] = {
checkpointData match {
case Some(reliable: ReliableRDDCheckpointData[T]) => reliable.getCheckpointDir
case _ => None
}
}
/**
* :: Experimental ::
* Removes an RDD's shuffles and it's non-persisted ancestors.
* When running without a shuffle service, cleaning up shuffle files enables downscaling.
* If you use the RDD after this call, you should checkpoint and materialize it first.
* If you are uncertain of what you are doing, please do not use this feature.
* Additional techniques for mitigating orphaned shuffle files:
* * Tuning the driver GC to be more aggressive, so the regular context cleaner is triggered
* * Setting an appropriate TTL for shuffle files to be auto cleaned
*/
@Experimental
@DeveloperApi
@Since("3.1.0")
def cleanShuffleDependencies(blocking: Boolean = false): Unit = {
sc.cleaner.foreach { cleaner =>
/**
* Clean the shuffles & all of its parents.
*/
def cleanEagerly(dep: Dependency[_]): Unit = {
if (dep.isInstanceOf[ShuffleDependency[_, _, _]]) {
val shuffleId = dep.asInstanceOf[ShuffleDependency[_, _, _]].shuffleId
cleaner.doCleanupShuffle(shuffleId, blocking)
}
val rdd = dep.rdd
val rddDepsOpt = rdd.internalDependencies
if (rdd.getStorageLevel == StorageLevel.NONE) {
rddDepsOpt.foreach(deps => deps.foreach(cleanEagerly))
}
}
internalDependencies.foreach(deps => deps.foreach(cleanEagerly))
}
}
/**
* :: Experimental ::
* Marks the current stage as a barrier stage, where Spark must launch all tasks together.
* In case of a task failure, instead of only restarting the failed task, Spark will abort the
* entire stage and re-launch all tasks for this stage.
* The barrier execution mode feature is experimental and it only handles limited scenarios.
* Please read the linked SPIP and design docs to understand the limitations and future plans.
* @return an [[RDDBarrier]] instance that provides actions within a barrier stage
* @see [[org.apache.spark.BarrierTaskContext]]
* @see <a href="https://jira.apache.org/jira/browse/SPARK-24374">SPIP: Barrier Execution Mode</a>
* @see <a href="https://jira.apache.org/jira/browse/SPARK-24582">Design Doc</a>
*/
  @Experimental
  @Since("2.4.0")
  // Wraps this RDD so subsequent transformations run in barrier execution mode.
  def barrier(): RDDBarrier[T] = withScope(new RDDBarrier[T](this))
  /**
   * Specify a ResourceProfile to use when calculating this RDD. This is only supported on
   * certain cluster managers and currently requires dynamic allocation to be enabled.
   * It will result in new executors with the resources specified being acquired to
   * calculate the RDD.
   */
  @Experimental
  @Since("3.1.0")
  def withResources(rp: ResourceProfile): this.type = {
    // NOTE(review): a null `rp` would make resourceProfile.get throw NoSuchElementException —
    // presumably callers always pass a non-null profile; confirm before relying on this.
    resourceProfile = Option(rp)
    sc.resourceProfileManager.addResourceProfile(resourceProfile.get)
    this
  }
/**
* Get the ResourceProfile specified with this RDD or null if it wasn't specified.
* @return the user specified ResourceProfile or null (for Java compatibility) if
* none was specified
*/
@Experimental
@Since("3.1.0")
def getResourceProfile(): ResourceProfile = resourceProfile.getOrElse(null)
  // =======================================================================
  // Other internal methods and fields
  // =======================================================================
  // Current persistence level; NONE means this RDD is not cached.
  private var storageLevel: StorageLevel = StorageLevel.NONE
  // Optional profile set via withResources(); not serialized with the RDD.
  @transient private var resourceProfile: Option[ResourceProfile] = None
  /** User code that created this RDD (e.g. `textFile`, `parallelize`). */
  @transient private[spark] val creationSite = sc.getCallSite()
  /**
   * The scope associated with the operation that created this RDD.
   *
   * This is more flexible than the call site and can be defined hierarchically. For more
   * detail, see the documentation of {{RDDOperationScope}}. This scope is not defined if the
   * user instantiates this RDD himself without using any Spark operations.
   */
  @transient private[spark] val scope: Option[RDDOperationScope] = {
    Option(sc.getLocalProperty(SparkContext.RDD_SCOPE_KEY)).map(RDDOperationScope.fromJson)
  }
  /** Short form of the creation call site, or "" when unavailable. */
  private[spark] def getCreationSite: String = Option(creationSite).map(_.shortForm).getOrElse("")
  /** Runtime class tag for the element type T. */
  private[spark] def elementClassTag: ClassTag[T] = classTag[T]
  // Set when this RDD is marked for (reliable or local) checkpointing.
  private[spark] var checkpointData: Option[RDDCheckpointData[T]] = None
  // Whether to checkpoint all ancestor RDDs that are marked for checkpointing. By default,
  // we stop as soon as we find the first such RDD, an optimization that allows us to write
  // less data but is not safe for all workloads. E.g. in streaming we may checkpoint both
  // an RDD and its parent in every batch, in which case the parent may never be checkpointed
  // and its lineage never truncated, leading to OOMs in the long run (SPARK-6847).
  private val checkpointAllMarkedAncestors =
    Option(sc.getLocalProperty(RDD.CHECKPOINT_ALL_MARKED_ANCESTORS)).exists(_.toBoolean)
  /** Returns the first parent RDD */
  protected[spark] def firstParent[U: ClassTag]: RDD[U] = {
    // Assumes at least one dependency exists; calling this on a root RDD would fail.
    dependencies.head.rdd.asInstanceOf[RDD[U]]
  }
  /** Returns the jth parent RDD: e.g. rdd.parent[T](0) is equivalent to rdd.firstParent[T] */
  protected[spark] def parent[U: ClassTag](j: Int): RDD[U] = {
    dependencies(j).rdd.asInstanceOf[RDD[U]]
  }
  /** The [[org.apache.spark.SparkContext]] that this RDD was created on. */
  def context: SparkContext = sc
  /**
   * Private API for changing an RDD's ClassTag.
   * Used for internal Java-Scala API compatibility.
   */
  private[spark] def retag(cls: Class[T]): RDD[T] = {
    val classTag: ClassTag[T] = ClassTag.apply(cls)
    this.retag(classTag)
  }
  /**
   * Private API for changing an RDD's ClassTag.
   * Used for internal Java-Scala API compatibility.
   */
  private[spark] def retag(implicit classTag: ClassTag[T]): RDD[T] = {
    // Identity transformation whose only purpose is to carry the new ClassTag.
    this.mapPartitions(identity, preservesPartitioning = true)(classTag)
  }
  // Avoid handling doCheckpoint multiple times to prevent excessive recursion
  @transient private var doCheckpointCalled = false
  /**
   * Performs the checkpointing of this RDD by saving this. It is called after a job using this RDD
   * has completed (therefore the RDD has been materialized and potentially stored in memory).
   * doCheckpoint() is called recursively on the parent RDDs.
   */
  private[spark] def doCheckpoint(): Unit = {
    RDDOperationScope.withScope(sc, "checkpoint", allowNesting = false, ignoreParent = true) {
      if (!doCheckpointCalled) {
        doCheckpointCalled = true
        if (checkpointData.isDefined) {
          if (checkpointAllMarkedAncestors) {
            // TODO We can collect all the RDDs that needs to be checkpointed, and then checkpoint
            // them in parallel.
            // Checkpoint parents first because our lineage will be truncated after we
            // checkpoint ourselves
            dependencies.foreach(_.rdd.doCheckpoint())
          }
          checkpointData.get.checkpoint()
        } else {
          // Not marked ourselves, but recurse so marked ancestors are still checkpointed.
          dependencies.foreach(_.rdd.doCheckpoint())
        }
      }
    }
  }
  /**
   * Changes the dependencies of this RDD from its original parents to a new RDD (`newRDD`)
   * created from the checkpoint file, and forget its old dependencies and partitions.
   */
  private[spark] def markCheckpointed(): Unit = stateLock.synchronized {
    // A WeakReference to the old dependencies — presumably so existing readers can still
    // observe them while allowing the parents to be garbage collected; confirm intent.
    legacyDependencies = new WeakReference(dependencies_)
    clearDependencies()
    partitions_ = null
    deps = null // Forget the constructor argument for dependencies too
  }
  /**
   * Clears the dependencies of this RDD. This method must ensure that all references
   * to the original parent RDDs are removed to enable the parent RDDs to be garbage
   * collected. Subclasses of RDD may override this method for implementing their own cleaning
   * logic. See [[org.apache.spark.rdd.UnionRDD]] for an example.
   */
  protected def clearDependencies(): Unit = stateLock.synchronized {
    dependencies_ = null
  }
/** A description of this RDD and its recursive dependencies for debugging. */
def toDebugString: String = {
// Get a debug description of an rdd without its children
def debugSelf(rdd: RDD[_]): Seq[String] = {
import Utils.bytesToString
val persistence = if (storageLevel != StorageLevel.NONE) storageLevel.description else ""
val storageInfo = rdd.context.getRDDStorageInfo(_.id == rdd.id).map(info =>
" CachedPartitions: %d; MemorySize: %s; DiskSize: %s".format(
info.numCachedPartitions, bytesToString(info.memSize), bytesToString(info.diskSize)))
s"$rdd [$persistence]" +: storageInfo
}
// Apply a different rule to the last child
def debugChildren(rdd: RDD[_], prefix: String): Seq[String] = {
val len = rdd.dependencies.length
len match {
case 0 => Seq.empty
case 1 =>
val d = rdd.dependencies.head
debugString(d.rdd, prefix, d.isInstanceOf[ShuffleDependency[_, _, _]], true)
case _ =>
val frontDeps = rdd.dependencies.take(len - 1)
val frontDepStrings = frontDeps.flatMap(
d => debugString(d.rdd, prefix, d.isInstanceOf[ShuffleDependency[_, _, _]]))
val lastDep = rdd.dependencies.last
val lastDepStrings =
debugString(lastDep.rdd, prefix, lastDep.isInstanceOf[ShuffleDependency[_, _, _]], true)
frontDepStrings ++ lastDepStrings
}
}
// The first RDD in the dependency stack has no parents, so no need for a +-
def firstDebugString(rdd: RDD[_]): Seq[String] = {
val partitionStr = "(" + rdd.partitions.length + ")"
val leftOffset = (partitionStr.length - 1) / 2
val nextPrefix = (" " * leftOffset) + "|" + (" " * (partitionStr.length - leftOffset))
debugSelf(rdd).zipWithIndex.map{
case (desc: String, 0) => s"$partitionStr $desc"
case (desc: String, _) => s"$nextPrefix $desc"
} ++ debugChildren(rdd, nextPrefix)
}
def shuffleDebugString(rdd: RDD[_], prefix: String = "", isLastChild: Boolean): Seq[String] = {
val partitionStr = "(" + rdd.partitions.length + ")"
val leftOffset = (partitionStr.length - 1) / 2
val thisPrefix = prefix.replaceAll("\\\\|\\\\s+$", "")
val nextPrefix = (
thisPrefix
+ (if (isLastChild) " " else "| ")
+ (" " * leftOffset) + "|" + (" " * (partitionStr.length - leftOffset)))
debugSelf(rdd).zipWithIndex.map{
case (desc: String, 0) => s"$thisPrefix+-$partitionStr $desc"
case (desc: String, _) => s"$nextPrefix$desc"
} ++ debugChildren(rdd, nextPrefix)
}
def debugString(
rdd: RDD[_],
prefix: String = "",
isShuffle: Boolean = true,
isLastChild: Boolean = false): Seq[String] = {
if (isShuffle) {
shuffleDebugString(rdd, prefix, isLastChild)
} else {
debugSelf(rdd).map(prefix + _) ++ debugChildren(rdd, prefix)
}
}
firstDebugString(this).mkString("\\n")
}
  // Format: optional name, class simple name, RDD id, and the user-code call site.
  override def toString: String = "%s%s[%d] at %s".format(
    Option(name).map(_ + " ").getOrElse(""), getClass.getSimpleName, id, getCreationSite)
  /** Wrap this RDD for use from the Java API. */
  def toJavaRDD() : JavaRDD[T] = {
    new JavaRDD(this)(elementClassTag)
  }
  /**
   * Whether the RDD is in a barrier stage. Spark must launch all the tasks at the same time for a
   * barrier stage.
   *
   * An RDD is in a barrier stage, if at least one of its parent RDD(s), or itself, are mapped from
   * an [[RDDBarrier]]. This function always returns false for a [[ShuffledRDD]], since a
   * [[ShuffledRDD]] indicates start of a new stage.
   *
   * A [[MapPartitionsRDD]] can be transformed from an [[RDDBarrier]], under that case the
   * [[MapPartitionsRDD]] shall be marked as barrier.
   */
  private[spark] def isBarrier(): Boolean = isBarrier_
  // From performance concern, cache the value to avoid repeatedly compute `isBarrier()` on a long
  // RDD chain.
  // Only narrow (non-shuffle) dependencies propagate barrier-ness: a shuffle starts a new stage.
  @transient protected lazy val isBarrier_ : Boolean =
    dependencies.filter(!_.isInstanceOf[ShuffleDependency[_, _, _]]).exists(_.rdd.isBarrier())
  /**
   * Returns the deterministic level of this RDD's output. Please refer to [[DeterministicLevel]]
   * for the definition.
   *
   * By default, an reliably checkpointed RDD, or RDD without parents(root RDD) is DETERMINATE. For
   * RDDs with parents, we will generate a deterministic level candidate per parent according to
   * the dependency. The deterministic level of the current RDD is the deterministic level
   * candidate that is deterministic least. Please override [[getOutputDeterministicLevel]] to
   * provide custom logic of calculating output deterministic level.
   */
  // TODO: make it public so users can set deterministic level to their custom RDDs.
  // TODO: this can be per-partition. e.g. UnionRDD can have different deterministic level for
  // different partitions.
  private[spark] final lazy val outputDeterministicLevel: DeterministicLevel.Value = {
    if (isReliablyCheckpointed) {
      // Reading back a reliable checkpoint short-circuits the lineage-based computation.
      DeterministicLevel.DETERMINATE
    } else {
      getOutputDeterministicLevel
    }
  }
  @DeveloperApi
  protected def getOutputDeterministicLevel: DeterministicLevel.Value = {
    val deterministicLevelCandidates = dependencies.map {
      // The shuffle is not really happening, treat it like narrow dependency and assume the output
      // deterministic level of current RDD is same as parent.
      case dep: ShuffleDependency[_, _, _] if dep.rdd.partitioner.exists(_ == dep.partitioner) =>
        dep.rdd.outputDeterministicLevel
      case dep: ShuffleDependency[_, _, _] =>
        if (dep.rdd.outputDeterministicLevel == DeterministicLevel.INDETERMINATE) {
          // If map output was indeterminate, shuffle output will be indeterminate as well
          DeterministicLevel.INDETERMINATE
        } else if (dep.keyOrdering.isDefined && dep.aggregator.isDefined) {
          // if aggregator specified (and so unique keys) and key ordering specified - then
          // consistent ordering.
          DeterministicLevel.DETERMINATE
        } else {
          // In Spark, the reducer fetches multiple remote shuffle blocks at the same time, and
          // the arrival order of these shuffle blocks are totally random. Even if the parent map
          // RDD is DETERMINATE, the reduce RDD is always UNORDERED.
          DeterministicLevel.UNORDERED
        }
      // For narrow dependency, assume the output deterministic level of current RDD is same as
      // parent.
      case dep => dep.rdd.outputDeterministicLevel
    }
    if (deterministicLevelCandidates.isEmpty) {
      // By default we assume the root RDD is determinate.
      DeterministicLevel.DETERMINATE
    } else {
      // maxBy(_.id) picks the least deterministic candidate, relying on the enumeration's
      // declaration order: DETERMINATE < UNORDERED < INDETERMINATE.
      deterministicLevelCandidates.maxBy(_.id)
    }
  }
}
/**
 * Defines implicit functions that provide extra functionalities on RDDs of specific types.
 *
 * For example, [[RDD.rddToPairRDDFunctions]] converts an RDD into a [[PairRDDFunctions]] for
 * key-value-pair RDDs, and enabling extra functionalities such as `PairRDDFunctions.reduceByKey`.
 */
object RDD {
  // Local-property key controlling whether all marked ancestors are checkpointed too.
  private[spark] val CHECKPOINT_ALL_MARKED_ANCESTORS =
    "spark.checkpoint.checkpointAllMarkedAncestors"
  // The following implicit functions were in SparkContext before 1.3 and users had to
  // `import SparkContext._` to enable them. Now we move them here to make the compiler find
  // them automatically. However, we still keep the old functions in SparkContext for backward
  // compatibility and forward to the following functions directly.
  // Adds key-value operations (reduceByKey, join, ...) to RDDs of pairs.
  implicit def rddToPairRDDFunctions[K, V](rdd: RDD[(K, V)])
    (implicit kt: ClassTag[K], vt: ClassTag[V], ord: Ordering[K] = null): PairRDDFunctions[K, V] = {
    new PairRDDFunctions(rdd)
  }
  // Adds asynchronous actions (countAsync, collectAsync, ...) to any RDD.
  implicit def rddToAsyncRDDActions[T: ClassTag](rdd: RDD[T]): AsyncRDDActions[T] = {
    new AsyncRDDActions(rdd)
  }
  // Adds saveAsSequenceFile to pair RDDs whose key/value types have Writable factories.
  implicit def rddToSequenceFileRDDFunctions[K, V](rdd: RDD[(K, V)])
    (implicit kt: ClassTag[K], vt: ClassTag[V],
              keyWritableFactory: WritableFactory[K],
              valueWritableFactory: WritableFactory[V])
    : SequenceFileRDDFunctions[K, V] = {
    implicit val keyConverter = keyWritableFactory.convert
    implicit val valueConverter = valueWritableFactory.convert
    new SequenceFileRDDFunctions(rdd,
      keyWritableFactory.writableClass(kt), valueWritableFactory.writableClass(vt))
  }
  // Adds sortByKey and range-partitioned operations to pair RDDs with ordered keys.
  implicit def rddToOrderedRDDFunctions[K : Ordering : ClassTag, V: ClassTag](rdd: RDD[(K, V)])
    : OrderedRDDFunctions[K, V, (K, V)] = {
    new OrderedRDDFunctions[K, V, (K, V)](rdd)
  }
  // Adds numeric aggregations (mean, stdev, histogram, ...) to RDD[Double].
  implicit def doubleRDDToDoubleRDDFunctions(rdd: RDD[Double]): DoubleRDDFunctions = {
    new DoubleRDDFunctions(rdd)
  }
  // Same as above for any numeric element type, converting elements to Double first.
  implicit def numericRDDToDoubleRDDFunctions[T](rdd: RDD[T])(implicit num: Numeric[T])
    : DoubleRDDFunctions = {
    new DoubleRDDFunctions(rdd.map(x => num.toDouble(x)))
  }
}
/**
 * The deterministic level of RDD's output (i.e. what `RDD#compute` returns). This explains how
 * the output will diff when Spark reruns the tasks for the RDD. There are 3 deterministic levels:
 * 1. DETERMINATE: The RDD output is always the same data set in the same order after a rerun.
 * 2. UNORDERED: The RDD output is always the same data set but the order can be different
 * after a rerun.
 * 3. INDETERMINATE. The RDD output can be different after a rerun.
 *
 * Note that, the output of an RDD usually relies on the parent RDDs. When the parent RDD's output
 * is INDETERMINATE, it's very likely the RDD's output is also INDETERMINATE.
 */
private[spark] object DeterministicLevel extends Enumeration {
  // Declaration order matters: the enumeration ids are compared (maxBy(_.id)) to pick the
  // least deterministic level, so keep DETERMINATE < UNORDERED < INDETERMINATE.
  val DETERMINATE, UNORDERED, INDETERMINATE = Value
}
| shuangshuangwang/spark | core/src/main/scala/org/apache/spark/rdd/RDD.scala | Scala | apache-2.0 | 88,801 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.rdd
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
import org.apache.spark.{Logging, Partition, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.execution.command.Partitioner
import org.apache.carbondata.spark.Value
import org.apache.carbondata.spark.util.CarbonQueryUtil
/**
 * RDD intended to trigger Carbon's "clean files" operation for a table.
 * NOTE(review): the actual delete call is still a TODO; as written, compute() returns an
 * iterator that never yields an element (see hasNext below).
 */
class CarbonCleanFilesRDD[V: ClassTag](
  sc: SparkContext,
  valueClass: Value[V],
  databaseName: String,
  tableName: String,
  partitioner: Partitioner)
  extends RDD[V](sc, Nil) with Logging {

  // Runs once on the driver at construction time: schedule this job in the "DDL" pool.
  sc.setLocalProperty("spark.scheduler.pool", "DDL")

  // One partition per table split reported for this database/table.
  override def getPartitions: Array[Partition] = {
    val splits = CarbonQueryUtil.getTableSplits(databaseName, tableName, null, partitioner)
    splits.zipWithIndex.map(s => new CarbonLoadPartition(id, s._2, s._1))
  }

  override def compute(theSplit: Partition, context: TaskContext): Iterator[V] = {
    val iter = new Iterator[(V)] {
      val split = theSplit.asInstanceOf[CarbonLoadPartition]
      logInfo("Input split: " + split.serializableHadoopSplit.value)
      // TODO call CARBON delete API
      var havePair = false
      var finished = false
      // The first call sets finished = true while havePair stays false, so this always
      // returns false: the iterator is effectively empty until the TODO above is done.
      override def hasNext: Boolean = {
        if (!finished && !havePair) {
          finished = true
          havePair = !finished
        }
        !finished
      }
      // Unreachable in practice (hasNext is always false), kept for Iterator's contract.
      override def next(): V = {
        if (!hasNext) {
          throw new java.util.NoSuchElementException("End of stream")
        }
        havePair = false
        valueClass.getValue(null)
      }
    }
    iter
  }

  // Prefer the hosts where the split's data resides.
  // NOTE(review): s.head throws if the split reports no locations — confirm that splits
  // always carry at least one host.
  override def getPreferredLocations(split: Partition): Seq[String] = {
    val theSplit = split.asInstanceOf[CarbonLoadPartition]
    val s = theSplit.serializableHadoopSplit.value.getLocations.asScala
    logInfo("Host Name : " + s.head + s.length)
    s
  }
}
| foryou2030/incubator-carbondata | integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonCleanFilesRDD.scala | Scala | apache-2.0 | 2,665 |
package io.backchat.scapulet
package stanza
import xml._
import StreamErrors._
import org.specs2.Specification
/** Specs for extracting a `bad-format` XMPP stream error via `BadFormat.unapply`. */
class BadFormatSpec extends Specification {

  // Each example feeds a hand-built <stream:error> stanza to BadFormat.unapply and checks
  // the extracted pair (optional human-readable text, application-condition NodeSeq).
  def is =
    "A BadFormat error should" ^
      "match an error without a text and app condition" ! {
        BadFormat.unapply(errorStanza) must beSome((None, NodeSeq.Empty))
      } ^
      "match an error with a text but without an app condition" ! {
        val em = "The error message"
        BadFormat.unapply(errorStanzaWithText(em)) must beSome((Some(em), NodeSeq.Empty))
      } ^
      "match an error without a text but with an app condition" ! {
        val aa = NodeSeq.fromSeq(Seq(<the-condition/>))
        BadFormat.unapply(errorStanzaWithAppCondition(aa)) must beSome((None, aa))
      } ^
      "match an error with a text and app condition" ! {
        val em = "The error message"
        val ac = NodeSeq.fromSeq(Seq(<the-condition/>))
        BadFormat.unapply(errorStanzaWithTextAndAppCondition(em, ac)) must beSome((Some(em), ac))
      } ^
      end

  // Stanza builders. NOTE: whitespace inside these XML literals becomes text nodes in the
  // resulting NodeSeq, so the literals are deliberately left formatted exactly as they are.
  def errorStanzaWithText(text: String): Node = <stream:error>
    <bad-format xmlns={ ns.XmppStream }/>
    <text xmlns={ ns.XmppStream }>{ text }</text>
  </stream:error>

  def errorStanzaWithTextAndAppCondition(text: String, appCond: Seq[Node]): Node = <stream:error>
    <bad-format xmlns={ ns.XmppStream }/>
    <text xmlns={ ns.XmppStream }>{ text }</text>
    { appCond }
  </stream:error>

  def errorStanzaWithAppCondition(appCond: Seq[Node]): Node = <stream:error>
    <bad-format xmlns={ ns.XmppStream }/>
    { appCond }
  </stream:error>

  // Minimal stanza: bad-format condition only, no text, no app condition.
  protected val errorStanza: Node = <stream:error>
    <bad-format xmlns={ ns.XmppStream }/>
  </stream:error>
}
package controllers
import play.api.mvc._
import play.api.libs.json._
import models.Polling
import models.Vote
import scala.concurrent.{Future, ExecutionContext}
import ExecutionContext.Implicits.global
import play.api.Logger
/** REST API for Schulze pollings: create a polling, fetch one by id, and cast votes. */
object Application extends Controller {

  /** Landing page / liveness check. */
  def index = Action {
    Ok("Welcome to Schulze Polling made with Play & Scala")
  }

  /**
   * POST endpoint: validates the JSON body against Polling.inputReads and stores the
   * polling in MongoDB; replies 400 with the validation errors on bad input.
   */
  def createPolling() = Action(parse.tolerantJson) {
    request: Request[JsValue] => {
      request.body.validate[Polling](Polling.inputReads).fold (
        invalid = error => BadRequest(JsError.toFlatJson(error))
        ,valid = {polling:Polling => {
          // -> insert into mongodb
          // NOTE(review): the Future returned by insert() is discarded, so the 200 response
          // does not wait for — or report — a failed write. Confirm fire-and-forget is
          // intended; otherwise switch to Action.async and map over the insert result.
          Polling.pollCollection.insert(Json.toJson(polling))
          // echo the stored polling back to the client
          Ok(Json.toJson(polling))
        }}
      )
    }
  }

  /** GET endpoint: looks a polling up by its id; 404 when unknown. */
  def getPolling(pollingId: String) = Action.async {
    request: Request[AnyContent] => {
      // query by polling id
      val query = Json.obj( "pollingId" -> pollingId )
      // -> mongo select on pollingId
      Polling.pollCollection.find(query).one[Polling].map { pollingOpt:Option[Polling] => {
        // pattern matching on mongodb result
        pollingOpt match {
          case None => NotFound("Invalid Polling ID")
          case Some(polling:Polling) => {
            // return the polling as JSON
            Ok(Json.toJson(polling))
          }
        }
      }}
    }
  }

  /**
   * POST endpoint: validates the JSON body as a Vote, appends it to the polling's vote
   * list and notifies the calculation actor; 404 when the polling id is unknown.
   */
  def castVote(pollingId: String) = Action.async(parse.tolerantJson) {
    request: Request[JsValue] => {
      // validate the JSON body as a Vote
      request.body.validate[Vote](Vote.inputReads).fold (
        invalid = error => Future.successful(BadRequest(JsError.toFlatJson(error)))
        ,valid = { votes:Vote => {
          // query by polling id
          val query = Json.obj( "pollingId" -> pollingId )
          // -> mongo select on pollingId
          Polling.pollCollection.find(query).one[Polling].map { pollingOpt:Option[Polling] => {
            // pattern matching on mongodb result
            pollingOpt match {
              case None => NotFound("Invalid Polling ID")
              case Some(polling:Polling) => {
                // -> push the vote onto the polling's "votes" array
                // NOTE(review): this update() Future is discarded as well — the 200 response
                // does not confirm the vote was persisted.
                val set = Json.obj( "$push" -> Json.obj( "votes" -> Json.toJson(votes)) )
                Polling.pollCollection.update(query, set)
                // fire-and-forget: the actor recomputes the result for this polling id
                actors.calculationActor ! pollingId
                Ok(Json.toJson(votes))
              }
            }
          }}
        }}
      )
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.invoker
import akka.actor.Actor
import whisk.common.Logging
import whisk.core.entity.InstanceId
import whisk.http.BasicRasService
/**
 * Implements web server to handle certain REST API calls.
 * Currently provides a health ping route, only.
 *
 * @param instance identity of this invoker instance
 * @param numberOfInstances total number of invoker instances
 */
class InvokerServer(
  override val instance: InstanceId,
  override val numberOfInstances: Int)(
  override implicit val logging: Logging)
  extends BasicRasService
  with Actor {

  // The HTTP routing layer needs an ActorRefFactory; use this actor's own context.
  override def actorRefFactory = context
}
| lzbj/openwhisk | core/invoker/src/main/scala/whisk/core/invoker/InvokerServer.scala | Scala | apache-2.0 | 1,311 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.mvc.request
import javax.inject.Inject
import play.api.http.HttpConfiguration
import play.api.libs.crypto.CookieSignerProvider
import play.api.libs.typedmap.TypedMap
import play.api.mvc._
import play.core.system.RequestIdProvider
/**
 * A `RequestFactory` provides logic for creating requests.
 */
trait RequestFactory {

  /** Create a `RequestHeader` from its constituent parts. */
  def createRequestHeader(
      connection: RemoteConnection,
      method: String,
      target: RequestTarget,
      version: String,
      headers: Headers,
      attrs: TypedMap
  ): RequestHeader

  /**
   * Creates a `RequestHeader` from the values of an existing one. The factory may
   * transform the copied values, producing a modified `RequestHeader`.
   */
  def copyRequestHeader(rh: RequestHeader): RequestHeader =
    createRequestHeader(rh.connection, rh.method, rh.target, rh.version, rh.headers, rh.attrs)

  /**
   * Create a `Request` carrying a body. The default implementation builds a header
   * via `createRequestHeader` and attaches the body to it.
   */
  def createRequest[A](
      connection: RemoteConnection,
      method: String,
      target: RequestTarget,
      version: String,
      headers: Headers,
      attrs: TypedMap,
      body: A
  ): Request[A] = {
    val header = createRequestHeader(connection, method, target, version, headers, attrs)
    header.withBody(body)
  }

  /**
   * Creates a `Request` from the values of an existing one. The factory may
   * transform the copied values, producing a modified `Request`.
   */
  def copyRequest[A](r: Request[A]): Request[A] =
    createRequest[A](r.connection, r.method, r.target, r.version, r.headers, r.attrs, r.body)
}
object RequestFactory {

  /**
   * A `RequestFactory` that builds each request verbatim from the supplied
   * arguments, with no additional modification.
   */
  val plain: RequestFactory = new RequestFactory {
    override def createRequestHeader(
        connection: RemoteConnection,
        method: String,
        target: RequestTarget,
        version: String,
        headers: Headers,
        attrs: TypedMap
    ): RequestHeader =
      new RequestHeaderImpl(connection, method, target, version, headers, attrs)
  }
}
/**
 * The default [[RequestFactory]] used by a Play application. Adds the following
 * typed attributes to every request it creates:
 *  - request id
 *  - cookies
 *  - session cookie
 *  - flash cookie
 */
class DefaultRequestFactory @Inject() (
    val cookieHeaderEncoding: CookieHeaderEncoding,
    val sessionBaker: SessionCookieBaker,
    val flashBaker: FlashCookieBaker
) extends RequestFactory {

  /** Convenience constructor deriving all collaborators from the HTTP configuration. */
  def this(config: HttpConfiguration) = this(
    new DefaultCookieHeaderEncoding(config.cookies),
    new DefaultSessionCookieBaker(config.session, config.secret, new CookieSignerProvider(config.secret).get),
    new DefaultFlashCookieBaker(config.flash, config.secret, new CookieSignerProvider(config.secret).get)
  )

  override def createRequestHeader(
      connection: RemoteConnection,
      method: String,
      target: RequestTarget,
      version: String,
      headers: Headers,
      attrs: TypedMap
  ): RequestHeader = {
    val requestId: Long = RequestIdProvider.freshId()
    // Cookie parsing is deferred until first access; the session and flash cells parse
    // lazily on top of the cookie cell.
    val lazyCookies = new LazyCell[Cookies] {
      protected override def emptyMarker: Cookies = null
      protected override def create: Cookies =
        cookieHeaderEncoding.fromCookieHeader(headers.get(play.api.http.HeaderNames.COOKIE))
    }
    val lazySession = new LazyCell[Session] {
      protected override def emptyMarker: Session = null
      protected override def create: Session =
        sessionBaker.decodeFromCookie(lazyCookies.value.get(sessionBaker.COOKIE_NAME))
    }
    val lazyFlash = new LazyCell[Flash] {
      protected override def emptyMarker: Flash = null
      protected override def create: Flash =
        flashBaker.decodeFromCookie(lazyCookies.value.get(flashBaker.COOKIE_NAME))
    }
    val enrichedAttrs = attrs + (
      RequestAttrKey.Id -> requestId,
      RequestAttrKey.Cookies -> lazyCookies,
      RequestAttrKey.Session -> lazySession,
      RequestAttrKey.Flash -> lazyFlash
    )
    new RequestHeaderImpl(connection, method, target, version, headers, enrichedAttrs)
  }
}
| benmccann/playframework | core/play/src/main/scala/play/api/mvc/request/RequestFactory.scala | Scala | apache-2.0 | 4,281 |
package org.scrawler.domain
import java.net.URL
import org.scalacheck.Gen
import org.scrawler.Sample._
/** Test-data generator producing [[WebPage]] instances with random content. */
object WebPageGenerator {

  // Five random alphabetic keywords; `sample` (from org.scrawler.Sample._) draws a
  // concrete value from each generator.
  private def keywords: Set[String] = Seq.fill(5)(sample(Gen.alphaStr)).toSet

  // NOTE(review): Gen.alphaStr / Gen.alphaNumStr are passed directly here, while the
  // keywords helper calls sample(...) explicitly — presumably Sample._ provides an
  // implicit Gen => value conversion. Confirm; otherwise these arguments look like a bug.
  def apply(): WebPage = WebPage(Gen.alphaStr, UrlGenerator(), Gen.alphaNumStr, keywords)
}
| slideon/Scrawler | src/test/scala/org/scrawler/domain/WebPageGenerator.scala | Scala | gpl-2.0 | 303 |
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.kernel.protocol.v5.content
import com.ibm.spark.kernel.protocol.v5.KernelMessageContent
import play.api.libs.json._
/** Kernel message content reporting the kernel's execution state (e.g. "busy", "idle"). */
case class KernelStatus(execution_state: String) extends KernelMessageContent {
  // Serialize with the companion's writes passed explicitly, so the rendering does not
  // depend on whatever implicits happen to be in scope at the call site.
  override def content : String = {
    val json = Json.toJson(this)(KernelStatus.kernelStatusWrites)
    json.toString
  }
}
object KernelStatus {
  // Explicit result types keep implicit resolution predictable and avoid macro
  // type-inference surprises; Json.writes yields an OWrites (JSON object writer).
  implicit val kernelStatusReads: Reads[KernelStatus] = Json.reads[KernelStatus]
  implicit val kernelStatusWrites: OWrites[KernelStatus] = Json.writes[KernelStatus]
}
/** Singleton status message for the "busy" execution state. */
object KernelStatusBusy extends KernelStatus("busy") {
  // Renders via the implicit KernelStatus writes from the companion object.
  override def toString(): String = {
    Json.toJson(this).toString
  }
}

/** Singleton status message for the "idle" execution state. */
object KernelStatusIdle extends KernelStatus("idle") {
  // Renders via the implicit KernelStatus writes from the companion object.
  override def toString(): String = {
    Json.toJson(this).toString
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.kinesis
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util.Arrays
import com.amazonaws.services.kinesis.clientlibrary.exceptions._
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason
import com.amazonaws.services.kinesis.model.Record
import org.mockito.ArgumentMatchers.{anyListOf, anyString, eq => meq}
import org.mockito.Mockito.{never, times, verify, when}
import org.scalatest.{BeforeAndAfter, Matchers}
import org.scalatest.mockito.MockitoSugar
import org.apache.spark.streaming.{Duration, TestSuiteBase}
/**
* Suite of Kinesis streaming receiver tests focusing mostly on the KinesisRecordProcessor
*/
/**
 * Suite of Kinesis streaming receiver tests focusing mostly on the KinesisRecordProcessor
 */
class KinesisReceiverSuite extends TestSuiteBase with Matchers with BeforeAndAfter
  with MockitoSugar {

  // Common fixture values shared by all tests.
  val app = "TestKinesisReceiver"
  val stream = "mySparkStream"
  val endpoint = "endpoint-url"
  val workerId = "dummyWorkerId"
  val shardId = "dummyShardId"
  val seqNum = "dummySeqNum"
  val checkpointInterval = Duration(10)
  val someSeqNum = Some(seqNum)

  // A two-record batch used as the processor input in most tests.
  val record1 = new Record()
  record1.setData(ByteBuffer.wrap("Spark In Action".getBytes(StandardCharsets.UTF_8)))
  val record2 = new Record()
  record2.setData(ByteBuffer.wrap("Learning Spark".getBytes(StandardCharsets.UTF_8)))
  val batch = Arrays.asList(record1, record2)

  // Mocks are re-created before every test (see beforeFunction), so verification
  // counts never leak between tests.
  var receiverMock: KinesisReceiver[Array[Byte]] = _
  var checkpointerMock: IRecordProcessorCheckpointer = _

  override def beforeFunction(): Unit = {
    receiverMock = mock[KinesisReceiver[Array[Byte]]]
    checkpointerMock = mock[IRecordProcessorCheckpointer]
  }

  test("process records including store and set checkpointer") {
    when(receiverMock.isStopped()).thenReturn(false)
    when(receiverMock.getCurrentLimit).thenReturn(Int.MaxValue)
    val recordProcessor = new KinesisRecordProcessor(receiverMock, workerId)
    recordProcessor.initialize(shardId)
    recordProcessor.processRecords(batch, checkpointerMock)
    verify(receiverMock, times(1)).isStopped()
    verify(receiverMock, times(1)).addRecords(shardId, batch)
    verify(receiverMock, times(1)).setCheckpointer(shardId, checkpointerMock)
  }

  test("split into multiple processes if a limitation is set") {
    // With a rate limit of 1, the two-record batch must be stored in two sub-batches.
    when(receiverMock.isStopped()).thenReturn(false)
    when(receiverMock.getCurrentLimit).thenReturn(1)
    val recordProcessor = new KinesisRecordProcessor(receiverMock, workerId)
    recordProcessor.initialize(shardId)
    recordProcessor.processRecords(batch, checkpointerMock)
    verify(receiverMock, times(1)).isStopped()
    verify(receiverMock, times(1)).addRecords(shardId, batch.subList(0, 1))
    verify(receiverMock, times(1)).addRecords(shardId, batch.subList(1, 2))
    verify(receiverMock, times(1)).setCheckpointer(shardId, checkpointerMock)
  }

  test("shouldn't store and update checkpointer when receiver is stopped") {
    when(receiverMock.isStopped()).thenReturn(true)
    when(receiverMock.getCurrentLimit).thenReturn(Int.MaxValue)
    val recordProcessor = new KinesisRecordProcessor(receiverMock, workerId)
    recordProcessor.processRecords(batch, checkpointerMock)
    verify(receiverMock, times(1)).isStopped()
    verify(receiverMock, never).addRecords(anyString, anyListOf(classOf[Record]))
    verify(receiverMock, never).setCheckpointer(anyString, meq(checkpointerMock))
  }

  test("shouldn't update checkpointer when exception occurs during store") {
    when(receiverMock.isStopped()).thenReturn(false)
    when(receiverMock.getCurrentLimit).thenReturn(Int.MaxValue)
    when(
      receiverMock.addRecords(anyString, anyListOf(classOf[Record]))
    ).thenThrow(new RuntimeException())
    intercept[RuntimeException] {
      val recordProcessor = new KinesisRecordProcessor(receiverMock, workerId)
      recordProcessor.initialize(shardId)
      recordProcessor.processRecords(batch, checkpointerMock)
    }
    verify(receiverMock, times(1)).isStopped()
    verify(receiverMock, times(1)).addRecords(shardId, batch)
    verify(receiverMock, never).setCheckpointer(anyString, meq(checkpointerMock))
  }

  test("shutdown should checkpoint if the reason is TERMINATE") {
    when(receiverMock.getLatestSeqNumToCheckpoint(shardId)).thenReturn(someSeqNum)
    val recordProcessor = new KinesisRecordProcessor(receiverMock, workerId)
    recordProcessor.initialize(shardId)
    recordProcessor.shutdown(checkpointerMock, ShutdownReason.TERMINATE)
    verify(receiverMock, times(1)).removeCheckpointer(meq(shardId), meq(checkpointerMock))
  }

  test("shutdown should not checkpoint if the reason is something other than TERMINATE") {
    when(receiverMock.getLatestSeqNumToCheckpoint(shardId)).thenReturn(someSeqNum)
    val recordProcessor = new KinesisRecordProcessor(receiverMock, workerId)
    recordProcessor.initialize(shardId)
    recordProcessor.shutdown(checkpointerMock, ShutdownReason.ZOMBIE)
    recordProcessor.shutdown(checkpointerMock, null)
    // A null checkpointer is expected for non-TERMINATE shutdowns.
    verify(receiverMock, times(2)).removeCheckpointer(meq(shardId),
      meq[IRecordProcessorCheckpointer](null))
  }

  // The remaining tests exercise KinesisRecordProcessor.retryRandom: transient Kinesis
  // exceptions (throttling, dependency) are retried; shutdown/invalid-state/unexpected
  // exceptions propagate immediately; retries give up after the configured attempts.
  test("retry success on first attempt") {
    val expectedIsStopped = false
    when(receiverMock.isStopped()).thenReturn(expectedIsStopped)
    val actualVal = KinesisRecordProcessor.retryRandom(receiverMock.isStopped(), 2, 100)
    assert(actualVal == expectedIsStopped)
    verify(receiverMock, times(1)).isStopped()
  }

  test("retry success on second attempt after a Kinesis throttling exception") {
    val expectedIsStopped = false
    when(receiverMock.isStopped())
      .thenThrow(new ThrottlingException("error message"))
      .thenReturn(expectedIsStopped)
    val actualVal = KinesisRecordProcessor.retryRandom(receiverMock.isStopped(), 2, 100)
    assert(actualVal == expectedIsStopped)
    verify(receiverMock, times(2)).isStopped()
  }

  test("retry success on second attempt after a Kinesis dependency exception") {
    val expectedIsStopped = false
    when(receiverMock.isStopped())
      .thenThrow(new KinesisClientLibDependencyException("error message"))
      .thenReturn(expectedIsStopped)
    val actualVal = KinesisRecordProcessor.retryRandom(receiverMock.isStopped(), 2, 100)
    assert(actualVal == expectedIsStopped)
    verify(receiverMock, times(2)).isStopped()
  }

  test("retry failed after a shutdown exception") {
    when(checkpointerMock.checkpoint()).thenThrow(new ShutdownException("error message"))
    intercept[ShutdownException] {
      KinesisRecordProcessor.retryRandom(checkpointerMock.checkpoint(), 2, 100)
    }
    verify(checkpointerMock, times(1)).checkpoint()
  }

  test("retry failed after an invalid state exception") {
    when(checkpointerMock.checkpoint()).thenThrow(new InvalidStateException("error message"))
    intercept[InvalidStateException] {
      KinesisRecordProcessor.retryRandom(checkpointerMock.checkpoint(), 2, 100)
    }
    verify(checkpointerMock, times(1)).checkpoint()
  }

  test("retry failed after unexpected exception") {
    when(checkpointerMock.checkpoint()).thenThrow(new RuntimeException("error message"))
    intercept[RuntimeException] {
      KinesisRecordProcessor.retryRandom(checkpointerMock.checkpoint(), 2, 100)
    }
    verify(checkpointerMock, times(1)).checkpoint()
  }

  test("retry failed after exhausting all retries") {
    val expectedErrorMessage = "final try error message"
    when(checkpointerMock.checkpoint())
      .thenThrow(new ThrottlingException("error message"))
      .thenThrow(new ThrottlingException(expectedErrorMessage))
    val exception = intercept[RuntimeException] {
      KinesisRecordProcessor.retryRandom(checkpointerMock.checkpoint(), 2, 100)
    }
    // The surfaced exception must carry the message of the *last* failed attempt.
    exception.getMessage().shouldBe(expectedErrorMessage)
    verify(checkpointerMock, times(2)).checkpoint()
  }
}
| WindCanDie/spark | external/kinesis-asl/src/test/scala/org/apache/spark/streaming/kinesis/KinesisReceiverSuite.scala | Scala | apache-2.0 | 8,687 |
/*
* Copyright 2012-2014 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.comcast.xfinity.sirius.uberstore.common
/**
 * Trait supplying checksumming capabilities.
 */
trait Checksummer {
  /**
   * Given an array of bytes will calculate a Long checksum
   * value for them, covering the entire array.
   *
   * @param bytes Array[Byte] to checksum
   *
   * @return the checksum as a Long
   */
  def checksum(bytes: Array[Byte]): Long
}
| weggert/sirius | src/main/scala/com/comcast/xfinity/sirius/uberstore/common/Checksummer.scala | Scala | apache-2.0 | 993 |
package com.seremis.geninfusion.genetics
import com.seremis.geninfusion.api.genetics.{IChromosome, IGene}
import com.seremis.geninfusion.api.util.GeneName
import scala.util.Random
/**
 * Default [[IGene]] implementation.
 *
 * @param geneName     identifying name of the gene
 * @param defaultValue chromosome used when no inherited value is available
 */
class Gene[A](geneName: GeneName[A], defaultValue: IChromosome[A]) extends IGene[A] {

  lazy val rand = new Random

  // When false (set via noMutations()), mutate(data) returns its input unchanged.
  var mutate = true

  override def getGeneName: GeneName[A] = geneName

  /**
   * Builds a child chromosome by independently picking the active allele from either
   * parent and the passive allele from either parent, each with probability 1/2.
   */
  override def inherit(parent1: IChromosome[A], parent2: IChromosome[A]): IChromosome[A] = {
    val allele1 = if (rand.nextBoolean()) parent1.getActiveAllele.copy() else parent2.getActiveAllele.copy()
    val allele2 = if (rand.nextBoolean()) parent1.getPassiveAllele.copy() else parent2.getPassiveAllele.copy()
    Chromosome(allele1, allele2)
  }

  override def noMutations(): Unit = mutate = false

  /**
   * Randomly mutates either the active or the passive allele of the given chromosome,
   * preserving each allele's dominance.
   *
   * BUGFIX: the `mutate` flag set by noMutations() was previously never consulted;
   * mutation is now skipped entirely when it is false.
   */
  override def mutate(data: IChromosome[A]): IChromosome[A] = {
    if (!mutate) {
      data
    } else {
      var active: A = data.getActiveAllele.getData
      var passive: A = data.getPassiveAllele.getData
      if (rand.nextBoolean()) {
        active = mutateData(active).asInstanceOf[A]
      } else {
        passive = mutateData(passive).asInstanceOf[A]
      }
      Chromosome(new Allele(active, data.getActiveAllele.isDominant), new Allele(passive, data.getPassiveAllele.isDominant))
    }
  }

  /**
   * Mutates a single raw value: booleans are flipped, numeric values are scaled by a
   * uniform random factor in [0, 2). The result has the same runtime type as the input.
   *
   * BUGFIX: the integral branches used to return a boxed Float (e.g. Int * Float => Float),
   * which made the asInstanceOf[A] cast in mutate() fail with a ClassCastException for
   * every Byte/Short/Int/Long gene. Each branch now converts back to the input's type.
   * Unsupported types are returned unchanged instead of raising a MatchError.
   */
  def mutateData(data: A): Any = {
    data match {
      case d: Boolean => !d
      case d: Byte    => (d * rand.nextFloat() * 2).toByte
      case d: Short   => (d * rand.nextFloat() * 2).toShort
      case d: Int     => (d * rand.nextFloat() * 2).toInt
      case d: Float   => d * rand.nextFloat() * 2
      case d: Double  => d * rand.nextFloat() * 2
      case d: Long    => (d * rand.nextFloat() * 2).toLong
      case other      => other
    }
  }

  override def getDefaultValue: IChromosome[A] = defaultValue
}
| Seremis/Genetic-Infusion | src/main/scala/com/seremis/geninfusion/genetics/Gene.scala | Scala | gpl-3.0 | 1,799 |
package com.datastax.spark.connector.cql
import com.datastax.spark.connector._
import com.datastax.spark.connector.mapper.ColumnMapper
import org.apache.spark.Logging
import scala.collection.JavaConversions._
import scala.language.existentials
import com.datastax.driver.core.{ColumnMetadata, Metadata, TableMetadata, KeyspaceMetadata}
import com.datastax.spark.connector.types.{CounterType, ColumnType}
import com.datastax.spark.connector.util.Quote._
/** Abstract column / field definition.
  * Common to tables and user-defined types */
trait FieldDef extends Serializable {
  /** Reference usable to select this field. */
  def ref: ColumnRef
  /** Name of the field (column). */
  def columnName: String
  /** Connector-side type of the field's values. */
  def columnType: ColumnType[_]
}
/** Cassandra structure that contains columnar information, e.g. a table or a user defined type.
  * This trait allows `ColumnMapper` to work on tables and user defined types.
  * Cassandra tables and user defined types are similar in a way data are extracted from them,
  * therefore a common interface to describe their metadata is handy. */
trait StructDef extends Serializable {

  /** Concrete column type used by subclasses so that `columns` and `columnByName` return the
    * most specific type. Table columns may carry more information than those of user defined
    * types. */
  type Column <: FieldDef

  /** Human-readable name (e.g. a table or type name); used in lookup error messages. */
  def name: String

  /** Column definitions, in implementation-defined order. */
  def columns: IndexedSeq[Column]

  /** References to all columns, in the same order as `columns`. */
  lazy val columnRefs: IndexedSeq[ColumnRef] =
    columns.map(_.ref)

  /** Column names, in the same order as `columns`. */
  def columnNames: IndexedSeq[String] =
    columns.map(_.columnName)

  /** Column types, in the same order as `columns`. */
  def columnTypes: IndexedSeq[ColumnType[_]] =
    columns.map(_.columnType)

  /** Lookup of a column definition by name. Accessing a missing name throws a
    * `NoSuchElementException` mentioning both the column and this structure. */
  def columnByName: Map[String, Column] = {
    val byName = columns.map(c => (c.columnName, c)).toMap
    byName.withDefault { columnName =>
      throw new NoSuchElementException(s"Column $columnName not found in $name")
    }
  }

  /** Lookup of a column definition by position. An out-of-range index fails the `require`
    * check with an `IllegalArgumentException` naming the index and this structure. */
  def columnByIndex(index: Int): Column = {
    require(index >= 0 && index < columns.length, s"Column index $index out of bounds for $name")
    columns(index)
  }

  /** Returns the given column references that are not present in this structure. */
  def missingColumns(columnsToCheck: Seq[ColumnRef]): Seq[ColumnRef] =
    columnsToCheck.filterNot(c => columnByName.contains(c.columnName))
}
/** Role a column plays within its table's schema. */
sealed trait ColumnRole
/** Column is part of the partition key. */
case object PartitionKeyColumn extends ColumnRole
/** Clustering column at position `index` within the clustering key. */
case class ClusteringColumn(index: Int) extends ColumnRole
/** Static column. */
case object StaticColumn extends ColumnRole
/** Plain data column (not part of the primary key, not static). */
case object RegularColumn extends ColumnRole
/** A Cassandra column metadata that can be serialized. */
case class ColumnDef(
    columnName: String,
    columnRole: ColumnRole,
    columnType: ColumnType[_],
    indexed: Boolean = false) extends FieldDef {

  /** Reference to this column by name. */
  def ref: ColumnRef = ColumnName(columnName)

  def isStatic = columnRole == StaticColumn
  def isCollection = columnType.isCollection
  def isPartitionKeyColumn = columnRole == PartitionKeyColumn
  def isClusteringColumn = columnRole.isInstanceOf[ClusteringColumn]
  def isPrimaryKeyColumn = isClusteringColumn || isPartitionKeyColumn
  def isCounterColumn = columnType == CounterType
  def isIndexedColumn = indexed

  /** Position within the clustering key; defined only for clustering columns. */
  def componentIndex: Option[Int] = columnRole match {
    case ClusteringColumn(i) => Some(i)
    case _ => None
  }

  /** Column clause for a CQL `CREATE TABLE` statement. */
  def cql = s"${quote(columnName)} ${columnType.cqlTypeName}"
}
object ColumnDef {
  /** Builds a serializable [[ColumnDef]] from Java-driver column metadata. */
  def apply(column: ColumnMetadata, columnRole: ColumnRole): ColumnDef = {
    val isIndexed = column.getIndex != null
    ColumnDef(column.getName, columnRole, ColumnType.fromDriverType(column.getType), isIndexed)
  }
}
/** A Cassandra table metadata that can be serialized. */
case class TableDef(
  keyspaceName: String,
  tableName: String,
  partitionKey: Seq[ColumnDef],
  clusteringColumns: Seq[ColumnDef],
  regularColumns: Seq[ColumnDef]) extends StructDef {

  // Constructor invariants: every column must land in the section matching its role.
  require(partitionKey.forall(_.isPartitionKeyColumn), "All partition key columns must have role PartitionKeyColumn")
  require(clusteringColumns.forall(_.isClusteringColumn), "All clustering columns must have role ClusteringColumn")
  require(regularColumns.forall(!_.isPrimaryKeyColumn), "Regular columns cannot have role PrimaryKeyColumn")

  override type Column = ColumnDef

  /** Fully qualified `keyspace.table` name. */
  override def name: String = s"$keyspaceName.$tableName"

  /** Primary key = partition key followed by clustering columns, in order. */
  lazy val primaryKey: IndexedSeq[ColumnDef] =
    (partitionKey ++ clusteringColumns).toIndexedSeq

  /** All columns: primary key first, then regular columns. */
  override lazy val columns: IndexedSeq[ColumnDef] =
    (primaryKey ++ regularColumns).toIndexedSeq

  // Cached: the trait's version rebuilds the map on every access.
  override lazy val columnByName: Map[String, ColumnDef] =
    super.columnByName

  /** CQL `CREATE TABLE` statement reconstructing this table definition. */
  def cql = {
    // NOTE(review): ",\\n  " inserts a literal backslash-n, not a newline, between column
    // clauses in the generated CQL — confirm this is intended (it may be an artifact of
    // how this file was extracted; the usual separator would be ",\n  ").
    val columnList = columns.map(_.cql).mkString(",\\n  ")
    val partitionKeyClause = partitionKey.map(_.columnName).map(quote).mkString("(", ", ", ")")
    val clusteringColumnNames = clusteringColumns.map(_.columnName).map(quote)
    val primaryKeyClause = (partitionKeyClause +: clusteringColumnNames).mkString(", ")
    s"""CREATE TABLE ${quote(keyspaceName)}.${quote(tableName)} (
       |  $columnList,
       |  PRIMARY KEY ($primaryKeyClause)
       |)""".stripMargin
  }

  /** Selects a subset of columns.
    * Columns are returned in the order specified in the `ColumnSelector`. */
  // Note the shape: the method body is the `{ match }` block with `.toIndexedSeq` applied to
  // its result, converting the Seq produced by PartitionKeyColumns / SomeColumns.
  def select(selector: ColumnSelector): IndexedSeq[ColumnDef] = {
    selector match {
      case AllColumns => columns
      case PartitionKeyColumns => partitionKey
      case SomeColumns(names @ _*) => names.map {
        case ColumnName(columnName, _) =>
          columnByName(columnName)
        case columnRef =>
          throw new IllegalArgumentException(s"Invalid column reference $columnRef for table $keyspaceName.$tableName")
      }
    }
  }.toIndexedSeq
}
object TableDef {
  /** Constructs a table definition based on the mapping provided by
    * appropriate [[com.datastax.spark.connector.mapper.ColumnMapper]] for the given type.
    * The mapper instance is resolved implicitly via the `T : ColumnMapper` context bound. */
  def fromType[T : ColumnMapper](keyspaceName: String, tableName: String): TableDef =
    implicitly[ColumnMapper[T]].newTable(keyspaceName, tableName)
}
/** A Cassandra keyspace metadata that can be serialized. */
case class KeyspaceDef(keyspaceName: String, tables: Set[TableDef]) {
  /** Fast lookup of a table definition by table name. */
  lazy val tableByName: Map[String, TableDef] =
    tables.map(t => t.tableName -> t).toMap
}
/** Serializable snapshot of a cluster's schema. */
case class Schema(clusterName: String, keyspaces: Set[KeyspaceDef]) {

  /** Returns a map from keyspace name to keyspace metadata */
  lazy val keyspaceByName: Map[String, KeyspaceDef] =
    keyspaces.map(k => k.keyspaceName -> k).toMap

  /** All tables from all keyspaces */
  lazy val tables: Set[TableDef] =
    keyspaces.flatMap(_.tables)
}
object Schema extends Logging {

  // The for-comprehensions over Java-driver collections below rely on the implicit
  // scala.collection.JavaConversions._ wrappers imported at the top of this file.

  /** Partition-key columns of the table, in key order. */
  private def fetchPartitionKey(table: TableMetadata): Seq[ColumnDef] =
    for (column <- table.getPartitionKey) yield
      ColumnDef(column, PartitionKeyColumn)

  /** Clustering columns of the table, tagged with their position in the clustering key. */
  private def fetchClusteringColumns(table: TableMetadata): Seq[ColumnDef] =
    for ((column, index) <- table.getClusteringColumns.zipWithIndex) yield
      ColumnDef(column, ClusteringColumn(index))

  /** All non-primary-key columns, classified as static or regular. */
  private def fetchRegularColumns(table: TableMetadata) = {
    val primaryKey = table.getPrimaryKey.toSet
    val regularColumns = table.getColumns.filterNot(primaryKey.contains)
    for (column <- regularColumns) yield
      if (column.isStatic)
        ColumnDef(column, StaticColumn)
      else
        ColumnDef(column, RegularColumn)
  }

  /** Fetches database schema from Cassandra. Provides access to keyspace, table and column metadata.
    * @param keyspaceName if defined, fetches only metadata of the given keyspace
    * @param tableName if defined, fetches only metadata of the given table
    */
  def fromCassandra(connector: CassandraConnector, keyspaceName: Option[String] = None, tableName: Option[String] = None): Schema = {

    // None acts as a wildcard for both filters below.
    def isKeyspaceSelected(keyspace: KeyspaceMetadata): Boolean =
      keyspaceName match {
        case None => true
        case Some(name) => keyspace.getName == name
      }

    def isTableSelected(table: TableMetadata): Boolean =
      tableName match {
        case None => true
        case Some(name) => table.getName == name
      }

    def fetchTables(keyspace: KeyspaceMetadata): Set[TableDef] =
      for (table <- keyspace.getTables.toSet if isTableSelected(table)) yield {
        val partitionKey = fetchPartitionKey(table)
        val clusteringColumns = fetchClusteringColumns(table)
        val regularColumns = fetchRegularColumns(table)
        TableDef(keyspace.getName, table.getName, partitionKey, clusteringColumns, regularColumns)
      }

    def fetchKeyspaces(metadata: Metadata): Set[KeyspaceDef] =
      for (keyspace <- metadata.getKeyspaces.toSet if isKeyspaceSelected(keyspace)) yield
        KeyspaceDef(keyspace.getName, fetchTables(keyspace))

    // Metadata is read inside withClusterDo so the driver connection lifecycle is managed
    // by the connector.
    connector.withClusterDo { cluster =>
      val clusterName = cluster.getMetadata.getClusterName
      logDebug(s"Retrieving database schema from cluster $clusterName...")
      val keyspaces = fetchKeyspaces(cluster.getMetadata)
      logDebug(s"${keyspaces.size} keyspaces fetched from cluster $clusterName: " +
        s"${keyspaces.map(_.keyspaceName).mkString("{", ",", "}")}")
      Schema(clusterName, keyspaces)
    }
  }
}
| boneill42/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/cql/Schema.scala | Scala | apache-2.0 | 9,907 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.internal.io.cloud
import java.io.IOException
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce.TaskAttemptContext
import org.apache.hadoop.mapreduce.lib.output.{FileOutputCommitter, PathOutputCommitter, PathOutputCommitterFactory}
import org.apache.spark.internal.io.FileNameSpec
import org.apache.spark.internal.io.HadoopMapReduceCommitProtocol
/**
 * Spark commit protocol backed by Hadoop's `PathOutputCommitter` API.
 * This committer will work with the `FileOutputCommitter` and subclasses.
 * All implementations *must* be serializable.
 *
 * Rather than asking the `FileOutputFormat` for a committer, the committer is
 * obtained through the
 * `org.apache.hadoop.mapreduce.lib.output.PathOutputCommitterFactory` factory
 * API: `setupCommitter` identifies and instantiates the factory, which then
 * creates the actual committer implementation.
 *
 * @constructor Instantiate. Dynamic partition overwrite is not supported,
 *              so that committers for stores which do not support rename
 *              will not get confused.
 * @param jobId job
 * @param dest destination
 * @param dynamicPartitionOverwrite does the caller want support for dynamic
 *                                  partition overwrite. If so, it will be
 *                                  refused.
 */
class PathOutputCommitProtocol(
    jobId: String,
    dest: String,
    dynamicPartitionOverwrite: Boolean = false)
  extends HadoopMapReduceCommitProtocol(jobId, dest, false) with Serializable {

  if (dynamicPartitionOverwrite) {
    // until there's explicit extensions to the PathOutputCommitProtocols
    // to support the spark mechanism, it's left to the individual committer
    // choice to handle partitioning.
    throw new IOException(PathOutputCommitProtocol.UNSUPPORTED)
  }

  /** The committer created by setupCommitter(); unset until then. */
  @transient private var committer: PathOutputCommitter = _

  require(dest != null, "Null destination specified")

  private[cloud] val destination: String = dest

  /** The destination path. This is serializable in Hadoop 3. */
  private[cloud] val destPath: Path = new Path(destination)

  logTrace(s"Instantiated committer with job ID=$jobId;" +
    s" destination=$destPath;" +
    s" dynamicPartitionOverwrite=$dynamicPartitionOverwrite")

  import PathOutputCommitProtocol._

  /**
   * Set up the committer.
   * This creates it by talking directly to the Hadoop factories, instead
   * of the V1 `mapred.FileOutputFormat` methods.
   * @param context task attempt
   * @return the committer to use. This will always be a subclass of
   *         `PathOutputCommitter`.
   */
  override protected def setupCommitter(context: TaskAttemptContext): PathOutputCommitter = {
    logTrace(s"Setting up committer for path $destination")
    committer = PathOutputCommitterFactory.createCommitter(destPath, context)

    // Special feature to force out the FileOutputCommitter, so as to guarantee
    // that the binding is working properly.
    val rejectFileOutput = context.getConfiguration
      .getBoolean(REJECT_FILE_OUTPUT, REJECT_FILE_OUTPUT_DEFVAL)
    if (rejectFileOutput && committer.isInstanceOf[FileOutputCommitter]) {
      // the output format returned a file output format committer, which
      // is exactly what we do not want. So switch back to the factory.
      val factory = PathOutputCommitterFactory.getCommitterFactory(
        destPath,
        context.getConfiguration)
      logTrace(s"Using committer factory $factory")
      committer = factory.createOutputCommitter(destPath, context)
    }

    logTrace(s"Using committer ${committer.getClass}")
    logTrace(s"Committer details: $committer")

    committer match {
      case fileCommitter: FileOutputCommitter =>
        require(!rejectFileOutput,
          s"Committer created is the FileOutputCommitter $committer")
        if (fileCommitter.isCommitJobRepeatable(context)) {
          // If FileOutputCommitter says its job commit is repeatable, it means
          // it is using the v2 algorithm, which is not safe for task commit
          // failures. Warn
          logTrace(s"Committer $committer may not be tolerant of task commit failures")
        }
      case _ => // a non-file committer; nothing further to validate here
    }
    committer
  }

  /**
   * Create a temporary file for a task.
   *
   * @param taskContext task context
   * @param dir optional subdirectory
   * @param spec file naming specification
   * @return a path as a string
   */
  override def newTaskTempFile(
      taskContext: TaskAttemptContext,
      dir: Option[String],
      spec: FileNameSpec): String = {
    val workDir = committer.getWorkPath
    // Resolve the parent directory: the optional subdirectory under the work
    // path, or the work path itself when none was given.
    val parent = dir.fold(workDir)(new Path(workDir, _))
    val file = new Path(parent, getFilename(taskContext, spec))
    logTrace(s"Creating task file $file for dir $dir and spec $spec")
    file.toString
  }
}
/** Configuration keys and shared constants for [[PathOutputCommitProtocol]]. */
object PathOutputCommitProtocol {
  /**
   * Hadoop configuration option.
   * Fail fast if the resolved committer is (still) a `FileOutputCommitter`.
   * This option can be used to catch configuration issues early.
   *
   * It's mostly relevant when testing/diagnostics, as it can be used to
   * enforce that schema-specific options are triggering a switch
   * to a new committer.
   */
  val REJECT_FILE_OUTPUT = "pathoutputcommit.reject.fileoutput"
  /**
   * Default behavior: accept the file output.
   */
  val REJECT_FILE_OUTPUT_DEFVAL = false
  /** Error message raised when dynamic partition overwrite is requested; also checked by tests. */
  private[cloud] val UNSUPPORTED: String = "PathOutputCommitProtocol does not support" +
    " dynamicPartitionOverwrite"
}
| mahak/spark | hadoop-cloud/src/hadoop-3/main/scala/org/apache/spark/internal/io/cloud/PathOutputCommitProtocol.scala | Scala | apache-2.0 | 6,385 |
/*
ASIB - A Scala IRC Bot
Copyright (C) 2012 Iain Cambridge
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package asib.mocks.command
import asib.command.PrivMsg
import asib.util.Message
/** Test double for [[PrivMsg]] that records the most recent message it handled
  * before delegating to the real handler.
  */
class MockPrivMsg extends PrivMsg {
  // Last message passed to handle(); starts out as an empty Message.
  var lastMessage: Message = new Message
  override def handle(line: Message) = {
    lastMessage = line
    super.handle(line)
  }
} | icambridge-old/asib | src/test/scala/asib/mocks/command/MockPrivMsg.scala | Scala | gpl-3.0 | 961 |
package core.guice.injection
import java.lang.annotation.Annotation
import net.codingwell.scalaguice.InjectorExtensions._
import com.google.inject.{Inject, Injector}
/** Static holder for the Guice [[Injector]], with convenience accessors for
  * resolving bindings by type (optionally qualified by an annotation).
  */
object InjectorProvider {
  // Populated by Guice via member injection; null until injection has run.
  @Inject
  var injector: Injector = _

  /** Resolves the binding for type `T`. */
  def inject[T: Manifest]: T = injector.instance[T]

  /** Resolves the binding for type `T` qualified by the given annotation. */
  def inject[T: Manifest](ann: Annotation): T = injector.instance[T](ann)
}
| sysgears/apollo-universal-starter-kit | modules/core/server-scala/src/main/scala/core/guice/injection/InjectorProvider.scala | Scala | mit | 385 |
/*
* Copyright 2011-2017 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eu.cdevreeze.yaidom.convert
/**
 * Conversions between yaidom nodes and DOM nodes.
 *
 * These conversions are used in implementations of yaidom XML parsers and printers. They are also useful
 * in application code. One scenario in which these conversions are useful is as follows:
 * {{{
 * val dbf = DocumentBuilderFactory.newInstance()
 * val db = dbf.newDocumentBuilder
 * val domDoc = db.parse(inputFile)
 *
 * editDomTreeInPlace(domDoc)
 *
 * val doc = DomConversions.convertToDocument(domDoc)
 *
 * useImmutableDoc(doc)
 * }}}
 *
 * @author Chris de Vreeze
 */
// Both conversion directions come from the mixed-in traits:
// YaidomToDomConversions (yaidom -> DOM) and DomToYaidomConversions (DOM -> yaidom).
object DomConversions extends YaidomToDomConversions with DomToYaidomConversions
| dvreeze/yaidom | jvm/src/main/scala/eu/cdevreeze/yaidom/convert/DomConversions.scala | Scala | apache-2.0 | 1,259 |
/*
* Accio is a platform to launch computer science experiments.
* Copyright (C) 2016-2018 Vincent Primault <v.primault@ucl.ac.uk>
*
* Accio is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Accio is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Accio. If not, see <http://www.gnu.org/licenses/>.
*/
package fr.cnrs.liris.sparkle
import com.google.common.base.MoreObjects
import fr.cnrs.liris.util.random.XORShiftRandom
import scala.util.Random
/** A [[DataFrame]] decorator that deterministically samples roughly `fraction`
  * of the elements of every key. Each key gets its own sub-seed derived from
  * `seed`, so sampling a given key is reproducible across calls.
  */
private[sparkle] class SampleDataFrame[T](inner: DataFrame[T], fraction: Double, seed: Long)
  extends DataFrame[T] {

  require(fraction >= 0.0, s"Negative fraction value: $fraction")

  // Per-key deterministic sub-seeds, all derived from the master seed.
  private[this] val seeds: Map[String, Long] = {
    val random = new Random(seed)
    inner.keys.map(_ -> random.nextLong).toMap
  }

  override def toString: String = MoreObjects.toStringHelper(this).addValue(inner).toString

  override private[sparkle] def keys: Seq[String] = inner.keys

  override private[sparkle] def load(key: String): Iterable[T] = {
    // Load first (as the original does), then decide how much of it to keep.
    val elements = inner.load(key)
    if (fraction >= 1.0) {
      elements
    } else if (fraction <= 0.0) {
      Iterable.empty
    } else {
      val random = new XORShiftRandom(seeds(key))
      elements.filter(_ => random.nextDouble() <= fraction)
    }
  }

  override private[sparkle] def env = inner.env

  override private[sparkle] def encoder = inner.encoder
} | privamov/accio | accio/java/fr/cnrs/liris/sparkle/SampleDataFrame.scala | Scala | gpl-3.0 | 1,814 |
package models.db
import models.join.MasterRemodelWithName
import scalikejdbc._
import com.ponkotuy.data
import scala.util.Try
/** A master record of a remodel recipe: the resources needed to upgrade slot
  * item `slotitemId` at level `slotitemLevel` with ship `secondShipId` assisting.
  */
case class MasterRemodel(
  slotitemId: Int,
  slotitemLevel: Int,
  secondShipId: Int,
  develop: Int,
  remodel: Int,
  certainDevelop: Int,
  certainRemodel: Int,
  useSlotitemId: Int,
  useSlotitemNum: Int,
  changeFlag: Boolean) {

  /** Persists this record via the companion DAO. */
  def save()(implicit session: DBSession = MasterRemodel.autoSession): MasterRemodel =
    MasterRemodel.save(this)(session)

  /** Deletes this record via the companion DAO. */
  def destroy()(implicit session: DBSession = MasterRemodel.autoSession): Unit =
    MasterRemodel.destroy(this)(session)

  /** Total count of development/remodel kits and consumed items this recipe requires. */
  def sumKit: Int =
    Seq(develop, remodel, certainDevelop, certainRemodel, useSlotitemNum).sum
}
/** DAO for the `master_remodel` table: remodel recipes keyed by
  * (slot item, item level, assisting second ship).
  */
object MasterRemodel extends SQLSyntaxSupport[MasterRemodel] {
  override val tableName = "master_remodel"
  override val columns = Seq("slotitem_id", "slotitem_level", "second_ship_id", "develop", "remodel", "certain_develop", "certain_remodel", "use_slotitem_id", "use_slotitem_num", "change_flag")
  def apply(mr: SyntaxProvider[MasterRemodel])(rs: WrappedResultSet): MasterRemodel = apply(mr.resultName)(rs)
  def apply(mr: ResultName[MasterRemodel])(rs: WrappedResultSet): MasterRemodel = autoConstruct(rs, mr)
  // Query DSL aliases: mr = this table, msi1 = the upgraded item,
  // msi2 = the consumed item, ms = the assisting second ship.
  val mr = MasterRemodel.syntax("mr")
  val msi1 = MasterSlotItem.syntax("msi1")
  val msi2 = MasterSlotItem.syntax("msi2")
  val ms = MasterShipBase.syntax("ms")
  override val autoSession = AutoSession
  // Looks up a single recipe by its composite key (item, level, second ship).
  def find(slotitemId: Int, slotitemLevel: Int, secondShipId: Int)(implicit session: DBSession = autoSession): Option[MasterRemodel] = {
    withSQL {
      select.from(MasterRemodel as mr)
        .where.eq(mr.slotitemId, slotitemId)
        .and.eq(mr.slotitemLevel, slotitemLevel)
        .and.eq(mr.secondShipId, secondShipId)
    }.map(MasterRemodel(mr.resultName)).single().apply()
  }
  def findAll()(implicit session: DBSession = autoSession): List[MasterRemodel] = {
    withSQL(select.from(MasterRemodel as mr)).map(MasterRemodel(mr.resultName)).list().apply()
  }
  def countAll()(implicit session: DBSession = autoSession): Long = {
    withSQL(select(sqls"count(1)").from(MasterRemodel as mr)).map(rs => rs.long(1)).single().apply().get
  }
  // Recipes matching an arbitrary WHERE clause, ordered by item level.
  def findAllBy(where: SQLSyntax)(implicit session: DBSession = autoSession): List[MasterRemodel] = {
    withSQL {
      select.from(MasterRemodel as mr).where.append(sqls"${where}").orderBy(mr.slotitemLevel)
    }.map(MasterRemodel(mr.resultName)).list().apply()
  }
  // Like findAllBy, but joins in item/ship names for display. The consumed item
  // (msi2) is LEFT-joined because a recipe may consume nothing; the Try guards
  // against the missing joined row in that case.
  def findAllByWithName(where: SQLSyntax)(implicit session: DBSession = autoSession): List[MasterRemodelWithName] = {
    withSQL {
      select.from(MasterRemodel as mr)
        .innerJoin(MasterSlotItem as msi1).on(mr.slotitemId, msi1.id)
        .leftJoin(MasterSlotItem as msi2).on(mr.useSlotitemId, msi2.id)
        .innerJoin(MasterShipBase as ms).on(mr.secondShipId, ms.id)
        .where(where).orderBy(mr.slotitemLevel)
    }.map { rs =>
      val use = Try { MasterSlotItem(msi2)(rs) }.toOption
      MasterRemodelWithName(MasterRemodel(mr)(rs), MasterSlotItem(msi1)(rs), use, MasterShipBase(ms)(rs))
    }.list().apply()
  }
  def countBy(where: SQLSyntax)(implicit session: DBSession = autoSession): Long = {
    withSQL {
      select(sqls"count(1)").from(MasterRemodel as mr).where.append(sqls"${where}")
    }.map(_.long(1)).single().apply().get
  }
  /** Registers a remodel recipe observed in client data.
    * @return false when nothing was processed (unknown item/ship, or an
    *         equally cheap or cheaper recipe is already recorded)
    */
  def createFromData(x: data.master.MasterRemodel, memberId: Long)(implicit session: DBSession = autoSession): Boolean = {
    val isExec = for {
      item <- SlotItem.find(x.origSlotId, memberId)
      secondShip <- Ship.find(memberId, x.secondShipId)
    } yield {
      val orig = find(item.slotitemId, item.level, secondShip.shipId)
      // The master row may have been registered by mistake while the item level
      // was lower, so delete the original data when it requires more resources.
      val isDestroy = orig.filter(_.sumKit > x.sumKit).map(_.destroy()).isDefined
      if(orig.isEmpty || isDestroy) {
        create(
          item.slotitemId,
          item.level,
          secondShip.shipId,
          x.develop,
          x.remodel,
          x.certainDevelop,
          x.certainRemodel,
          x.slotitemId,
          x.slotitemNum,
          x.changeFlag
        )
        true
      } else false
    }
    isExec.getOrElse(false)
  }
  // Inserts a new recipe row and returns the in-memory representation.
  def create(
    slotitemId: Int,
    slotitemLevel: Int,
    secondShipId: Int,
    develop: Int,
    remodel: Int,
    certainDevelop: Int,
    certainRemodel: Int,
    useSlotitemId: Int,
    useSlotitemNum: Int,
    changeFlag: Boolean)(implicit session: DBSession = autoSession): MasterRemodel = {
    withSQL {
      insert.into(MasterRemodel).columns(
        column.slotitemId,
        column.slotitemLevel,
        column.secondShipId,
        column.develop,
        column.remodel,
        column.certainDevelop,
        column.certainRemodel,
        column.useSlotitemId,
        column.useSlotitemNum,
        column.changeFlag
      ).values(
        slotitemId,
        slotitemLevel,
        secondShipId,
        develop,
        remodel,
        certainDevelop,
        certainRemodel,
        useSlotitemId,
        useSlotitemNum,
        changeFlag
      )
    }.update().apply()
    MasterRemodel(
      slotitemId = slotitemId,
      slotitemLevel = slotitemLevel,
      secondShipId = secondShipId,
      develop = develop,
      remodel = remodel,
      certainDevelop = certainDevelop,
      certainRemodel = certainRemodel,
      useSlotitemId = useSlotitemId,
      useSlotitemNum = useSlotitemNum,
      changeFlag = changeFlag)
  }
  // Updates a row identified by (slotitemId, slotitemLevel).
  // NOTE(review): the WHERE clause omits secondShipId even though find() treats
  // it as part of the key — confirm whether updates are meant to affect the
  // rows for every second ship of this item/level.
  def save(entity: MasterRemodel)(implicit session: DBSession = autoSession): MasterRemodel = {
    withSQL {
      update(MasterRemodel).set(
        column.slotitemId -> entity.slotitemId,
        column.slotitemLevel -> entity.slotitemLevel,
        column.secondShipId -> entity.secondShipId,
        column.develop -> entity.develop,
        column.remodel -> entity.remodel,
        column.certainDevelop -> entity.certainDevelop,
        column.certainRemodel -> entity.certainRemodel,
        column.useSlotitemId -> entity.useSlotitemId,
        column.useSlotitemNum -> entity.useSlotitemNum,
        column.changeFlag -> entity.changeFlag
      ).where.eq(column.slotitemId, entity.slotitemId).and.eq(column.slotitemLevel, entity.slotitemLevel)
    }.update().apply()
    entity
  }
  // Deletes by (slotitemId, slotitemLevel); same key caveat as save() above.
  def destroy(entity: MasterRemodel)(implicit session: DBSession = autoSession): Unit = {
    withSQL {
      delete.from(MasterRemodel).where.eq(column.slotitemId, entity.slotitemId).and.eq(column.slotitemLevel, entity.slotitemLevel)
    }.update().apply()
  }
}
| b-wind/MyFleetGirls | server/app/models/db/MasterRemodel.scala | Scala | mit | 6,711 |
/*
* Copyright 2017 Simeon Simeonov and Swoop, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.swoop.scala_util
/** Simple utility for managing bit flags backed by a single `Int` (32 bits).
  * State is mutable; every mutator returns `this` so calls can be chained.
  *
  * @param value initial value for the bit flags
  */
class BitFlags(protected var value: Int = 0) extends Serializable {

  /** Resets every bit to 0.
    *
    * @return the updated object
    */
  def clear(): this.type = {
    value = 0
    this
  }

  /** Turns on every bit that is 1 in the given mask (bitwise OR).
    *
    * @param mask the bit mask to add
    * @return the updated object
    */
  def add(mask: Int): this.type = {
    value = value | mask
    this
  }

  /** Turns off every bit that is 1 in the given mask.
    *
    * @param mask the bit mask whose 1 bits will be cleared
    * @return the updated object
    */
  def remove(mask: Int): this.type = {
    value = value & ~mask
    this
  }

  /** Returns `true` when every 1 bit of the mask is also set in the value.
    *
    * @param mask the mask to check
    */
  def containsAll(mask: Int): Boolean = (value & mask) == mask

  /** Returns `true` when at least one 1 bit of the mask is set in the value.
    *
    * @param mask the mask to check
    */
  def containsSome(mask: Int): Boolean = (value & mask) != 0

  /** Returns `true` when no 1 bit of the mask is set in the value.
    *
    * @param mask the mask to check
    */
  def containsNone(mask: Int): Boolean = !containsSome(mask)

  /** Returns the underlying value. */
  def toInt: Int = value
}
| swoop-inc/spark-records | src/main/scala/com/swoop/scala_util/BitFlags.scala | Scala | apache-2.0 | 2,163 |
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 Matthias Langer (t3l@threelights.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.blaze.modules.jvm
import edu.latrobe._
import edu.latrobe.blaze.modules.{ReQU, ReQUBuilder}
/** JVM backend variant of [[ReQU]]: materializes tensors as
  * [[RealArrayTensor]]s, then delegates to the concrete in-place kernels.
  */
abstract class ReQU_JVM
  extends ReQU
    with MapLayer_JVM[ReQUBuilder] {

  // ---------------------------------------------------------------------------
  //    Forward propagation related.
  // ---------------------------------------------------------------------------
  final override protected def doPredict(input: Tensor)
  : RealArrayTensor = {
    // Always work on a copy so the caller's tensor is left untouched.
    val output = input.toRealArrayTensor
    doPredict(output)
    output
  }

  /** In-place forward pass on the materialized tensor. */
  protected def doPredict(output: RealArrayTensor): Unit

  // ---------------------------------------------------------------------------
  //    Back propagation related.
  // ---------------------------------------------------------------------------
  final override protected def doDeriveInputError(input: Tensor,
                                                  error: Tensor)
  : RealArrayTensor = {
    val inputTensor = input.asOrToRealArrayTensor
    val errorTensor = error.asOrToRealArrayTensor

    doDeriveInputError(inputTensor, errorTensor)

    // Release the temporary copy if the input had to be converted; the error
    // tensor is handed back to the caller and must stay open.
    if (inputTensor ne input) {
      inputTensor.close()
    }
    errorTensor
  }

  /** In-place backward pass; mutates `error` to hold the derived input error. */
  protected def doDeriveInputError(input: RealArrayTensor,
                                   error: RealArrayTensor)
  : Unit
}
| bashimao/ltudl | blaze/src/main/scala/edu/latrobe/blaze/modules/jvm/ReQU_JVM.scala | Scala | apache-2.0 | 1,939 |
package com.sksamuel.elastic4s.streams
import akka.actor.ActorRefFactory
import com.sksamuel.elastic4s.{ElasticClient, ElasticDsl, IndexAndTypes, SearchDefinition}
import scala.concurrent.duration._
import scala.language.implicitConversions
object ReactiveElastic {

  /** Enriches [[ElasticClient]] with factory methods for Reactive Streams
    * publishers (scroll searches) and subscribers (bulk indexing).
    */
  implicit class ReactiveElastic(client: ElasticClient) {

    import ElasticDsl._

    /** Creates a bulk-indexing subscriber from an explicit configuration. */
    def subscriber[T](config: SubscriberConfig)
                     (implicit builder: RequestBuilder[T], actorRefFactory: ActorRefFactory): BulkIndexingSubscriber[T] = {
      new BulkIndexingSubscriber[T](client, builder, config)
    }

    /** Creates a bulk-indexing subscriber from individual settings; see
      * [[SubscriberConfig]] for the meaning of each parameter.
      */
    def subscriber[T](batchSize: Int = 100,
                      concurrentRequests: Int = 5,
                      refreshAfterOp: Boolean = false,
                      listener: ResponseListener = ResponseListener.noop,
                      completionFn: () => Unit = () => (),
                      errorFn: Throwable => Unit = _ => (),
                      flushInterval: Option[FiniteDuration] = None,
                      flushAfter: Option[FiniteDuration] = None,
                      failureWait: FiniteDuration = 2.seconds,
                      maxAttempts: Int = 5)
                     (implicit builder: RequestBuilder[T], actorRefFactory: ActorRefFactory): BulkIndexingSubscriber[T] = {
      val config = SubscriberConfig(
        batchSize = batchSize,
        concurrentRequests = concurrentRequests,
        refreshAfterOp = refreshAfterOp,
        listener = listener,
        completionFn = completionFn,
        errorFn = errorFn,
        failureWait = failureWait,
        flushInterval = flushInterval,
        flushAfter = flushAfter,
        maxAttempts = maxAttempts
      )
      subscriber(config)
    }

    /** Creates a publisher over all documents of the given index/type.
      *
      * @param elements  maximum number of elements to emit before completing
      * @param keepAlive scroll keep-alive window passed to Elasticsearch
      */
    def publisher(indexType: IndexAndTypes, elements: Long = Long.MaxValue, keepAlive: String = "1m")
                 (implicit actorRefFactory: ActorRefFactory): ScrollPublisher = {
      // Bug fix: `elements` was previously ignored (the single-argument overload
      // hard-codes Long.MaxValue); pass the requested limit through.
      publisher(search in indexType query "*:*" scroll keepAlive, elements)
    }

    /** Creates an unbounded publisher for the given scroll query. */
    def publisher(q: SearchDefinition)(implicit actorRefFactory: ActorRefFactory): ScrollPublisher = publisher(q, Long.MaxValue)

    /** Creates a publisher for the given scroll query, emitting at most `elements` documents. */
    def publisher(q: SearchDefinition, elements: Long)
                 (implicit actorRefFactory: ActorRefFactory): ScrollPublisher = {
      new ScrollPublisher(client, q, elements)
    }
  }
}
| sjoerdmulder/elastic4s | elastic4s-streams/src/main/scala/com/sksamuel/elastic4s/streams/ReactiveElastic.scala | Scala | apache-2.0 | 2,318 |
package org.bitcoins.crypto
import scodec.bits.ByteVector
import java.math.BigInteger
/** Represents a point on the secp256k1 elliptic curve: either a finite point
  * or the point at infinity (the group identity).
  */
sealed trait SecpPoint extends NetworkElement {

  /** Returns the group sum of this point and the input. */
  def add(point: SecpPoint): SecpPoint = CryptoUtil.add(this, point)
}
/** The point at infinity, this is the secp256k1 group identity element meaning
  * p + 0x00 = 0x00 + p = p for any point p and
  * p + (-p) = 0x00.
  *
  * Note that this does not correspond to a valid ECPublicKey just like
  * FieldElement.zero does not correspond to a valid private key (and in fact
  * 0x00 = FieldElement.zero*G).
  */
case object SecpPointInfinity extends SecpPoint {
  // Serialized as the single byte 0x00 (not a valid SEC point encoding).
  override val bytes: ByteVector = ByteVector(0x00)
}
/** A non-identity point, (x, y), on the secp256k1 elliptic curve. */
case class SecpPointFinite(x: CurveCoordinate, y: CurveCoordinate)
    extends SecpPoint {

  /** Uncompressed encoding: the 0x04 prefix byte followed by x then y. */
  override def bytes: ByteVector =
    ByteVector(0x04) ++ x.bytes ++ y.bytes

  /** Converts this point into its public-key representation. */
  def toPublicKey: ECPublicKey = ECPublicKey(bytes)
}
/** Factory methods for constructing finite secp256k1 points from various
  * coordinate representations.
  */
object SecpPoint {
  // Splits the uncompressed key encoding (after the leading prefix byte) into
  // the 32-byte x and y coordinates.
  def fromPublicKey(key: ECPublicKey): SecpPointFinite = {
    val (x, y) = key.decompressedBytes.tail.splitAt(32)
    SecpPointFinite(CurveCoordinate.fromBytes(x), CurveCoordinate.fromBytes(y))
  }
  def apply(x: ByteVector, y: ByteVector): SecpPointFinite =
    SecpPointFinite(CurveCoordinate.fromBytes(x), CurveCoordinate.fromBytes(y))
  def apply(x: Array[Byte], y: Array[Byte]): SecpPointFinite =
    SecpPointFinite(CurveCoordinate.fromByteArray(x),
                    CurveCoordinate.fromByteArray(y))
  def apply(x: BigInteger, y: BigInteger): SecpPointFinite =
    SecpPointFinite(CurveCoordinate(x), CurveCoordinate(y))
  def apply(x: BigInt, y: BigInt): SecpPointFinite =
    SecpPointFinite(CurveCoordinate(x), CurveCoordinate(y))
  def apply(x: String, y: String): SecpPointFinite =
    SecpPointFinite(CurveCoordinate.fromHex(x), CurveCoordinate.fromHex(y))
}
| bitcoin-s/bitcoin-s | crypto/src/main/scala/org/bitcoins/crypto/SecpPoint.scala | Scala | mit | 2,000 |
package controllers
import play.api._
import play.api.mvc._
import play.api.data._
import play.api.data.Forms._
import play.api.data.validation.Constraints._
import models._
import views._
import anorm._
import anorm.SqlParser._
import java.io.File
import play.libs.Json;
import play.Logger
/** CRUD controller for [[User]] administration pages. All actions except
  * `init` require an authenticated session (see the Secured trait).
  */
object Users extends Controller with Secured{
  // Renders the list of all users.
  def list = IsAuthenticated{user => implicit request =>
    Ok(views.html.admin.user.list(User.list))
  }
  // Creates a user from the submitted form without authentication.
  // NOTE(review): responds with a plain "Hello World" body on success — looks
  // like bootstrap/debug code; confirm whether this route should ship.
  def init = Action { implicit request =>
    myForm.bindFromRequest.fold(
      errors => BadRequest(views.html.admin.user.init(errors, 0)),
      values => {
        User.insertOrUpdate(values)
        Ok("Hello World")
      }
    )
  }
  // Renders an empty form for creating a new user.
  def add = IsAuthenticated{user => _ =>
    Ok(views.html.admin.user.edit(myForm, 0))
  }
  // Persists a new user; re-renders the form with errors on invalid input.
  def insert = IsAuthenticated{user => implicit request =>
    myForm.bindFromRequest.fold(
      errors => BadRequest(views.html.admin.user.edit(errors, 0)),
      values => {
        User.insertOrUpdate(values)
        Redirect(routes.Users.list)
      }
    )
  }
  // Renders the edit form pre-filled with the user identified by id.
  def edit(id: Long) = IsAuthenticated{user => _ =>
    Ok(views.html.admin.user.edit(myForm.fill(User.edit(id)), id))
  }
  // Applies submitted form changes to the user identified by id.
  def update(id: Long) = IsAuthenticated{user => implicit request =>
    myForm.bindFromRequest.fold(
      errors => BadRequest(views.html.admin.user.edit(errors, id)),
      values => {
        User.insertOrUpdate(values, id)
        Redirect(routes.Users.list)
      }
    )
  }
  // Deletes the user and returns to the list.
  def delete(id: Long) = IsAuthenticated{user => _ =>
    User.delete(id)
    Redirect(routes.Users.list)
  }
  // Form mapping for creating/updating a user; the id is database-assigned.
  val myForm = {
    import java.util.Date
    Form(
      mapping(
        "id" -> ignored(NotAssigned:Pk[Long]),
        "name" -> nonEmptyText,
        "email" -> email
      )
      (User_e.apply)(User_e.unapply)
    )}
  // Password-change form: both fields required and must be identical.
  val passwordForm = Form(
    tuple(
      "password" -> nonEmptyText,
      "password2" -> nonEmptyText
    ) verifying("passwords not identical", result => result match{
      case (p1, p2) => p1 == p2
    })
  )
  // Renders the password-change form for the authenticated user.
  def changePassword = IsAuthenticated{user => _ =>
    Ok(views.html.admin.user.changePassword(passwordForm))
  }
  // Stores the new password for the authenticated user.
  // NOTE(review): validation errors respond with 200 Ok rather than BadRequest,
  // unlike the other actions in this controller — confirm whether intentional.
  def insertPassword = IsAuthenticated{user => implicit request =>
    passwordForm.bindFromRequest.fold(
      errors => Ok(views.html.admin.user.changePassword(errors)),
      values => {
        User.changePassword(user, values._1)
        Redirect(routes.Users.list)
          .flashing(
            "success" -> "password.changed.successfully"
          )
      }
    )
  }
} | musethno/MGS | app/controllers/Users.scala | Scala | mit | 2,362 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference, AttributeSet}
import org.apache.spark.sql.catalyst.optimizer.SimpleTestOptimizer
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.catalyst.plans.logical.{EventTimeWatermark, Filter, LeafNode}
import org.apache.spark.sql.types.{IntegerType, MetadataBuilder, TimestampType}
class StreamingJoinHelperSuite extends AnalysisTest {
test("extract watermark from time condition") {
val attributesToFindConstraintFor = Seq(
AttributeReference("leftTime", TimestampType)(),
AttributeReference("leftOther", IntegerType)())
val metadataWithWatermark = new MetadataBuilder()
.putLong(EventTimeWatermark.delayKey, 1000)
.build()
val attributesWithWatermark = Seq(
AttributeReference("rightTime", TimestampType, metadata = metadataWithWatermark)(),
AttributeReference("rightOther", IntegerType)())
case class DummyLeafNode() extends LeafNode {
override def output: Seq[Attribute] =
attributesToFindConstraintFor ++ attributesWithWatermark
}
def watermarkFrom(
conditionStr: String,
rightWatermark: Option[Long] = Some(10000)): Option[Long] = {
val conditionExpr = Some(conditionStr).map { str =>
val plan =
Filter(
CatalystSqlParser.parseExpression(str),
DummyLeafNode())
val optimized = SimpleTestOptimizer.execute(SimpleAnalyzer.execute(plan))
optimized.asInstanceOf[Filter].condition
}
StreamingJoinHelper.getStateValueWatermark(
AttributeSet(attributesToFindConstraintFor), AttributeSet(attributesWithWatermark),
conditionExpr, rightWatermark)
}
// Test comparison directionality. E.g. if leftTime < rightTime and rightTime > watermark,
// then cannot define constraint on leftTime.
assert(watermarkFrom("leftTime > rightTime") === Some(10000))
assert(watermarkFrom("leftTime >= rightTime") === Some(9999))
assert(watermarkFrom("leftTime < rightTime") === None)
assert(watermarkFrom("leftTime <= rightTime") === None)
assert(watermarkFrom("rightTime > leftTime") === None)
assert(watermarkFrom("rightTime >= leftTime") === None)
assert(watermarkFrom("rightTime < leftTime") === Some(10000))
assert(watermarkFrom("rightTime <= leftTime") === Some(9999))
// Test type conversions
assert(watermarkFrom("CAST(leftTime AS LONG) > CAST(rightTime AS LONG)") === Some(10000))
assert(watermarkFrom("CAST(leftTime AS LONG) < CAST(rightTime AS LONG)") === None)
assert(watermarkFrom("CAST(leftTime AS DOUBLE) > CAST(rightTime AS DOUBLE)") === Some(10000))
assert(watermarkFrom("CAST(leftTime AS LONG) > CAST(rightTime AS DOUBLE)") === Some(10000))
assert(watermarkFrom("CAST(leftTime AS LONG) > CAST(rightTime AS FLOAT)") === Some(10000))
assert(watermarkFrom("CAST(leftTime AS DOUBLE) > CAST(rightTime AS FLOAT)") === Some(10000))
assert(watermarkFrom("CAST(leftTime AS STRING) > CAST(rightTime AS STRING)") === None)
// Test with timestamp type + calendar interval on either side of equation
// Note: timestamptype and calendar interval don't commute, so less valid combinations to test.
assert(watermarkFrom("leftTime > rightTime + interval 1 second") === Some(11000))
assert(watermarkFrom("leftTime + interval 2 seconds > rightTime ") === Some(8000))
assert(watermarkFrom("leftTime > rightTime - interval 3 second") === Some(7000))
assert(watermarkFrom("rightTime < leftTime - interval 3 second") === Some(13000))
assert(watermarkFrom("rightTime - interval 1 second < leftTime - interval 3 second")
=== Some(12000))
assert(watermarkFrom("leftTime > rightTime + interval '0 00:00:01' day to second")
=== Some(11000))
assert(watermarkFrom("leftTime + interval '00:00:02' hour to second > rightTime ")
=== Some(8000))
assert(watermarkFrom("leftTime > rightTime - interval '00:03' minute to second")
=== Some(7000))
assert(watermarkFrom("rightTime < leftTime - interval '1 20:30:40' day to second")
=== Some(160250000))
assert(watermarkFrom(
"rightTime - interval 1 second < leftTime - interval '20:15:32' hour to second")
=== Some(72941000))
// Test with casted long type + constants on either side of equation
// Note: long type and constants commute, so more combinations to test.
// -- Constants on the right
assert(watermarkFrom("CAST(leftTime AS LONG) > CAST(rightTime AS LONG) + 1") === Some(11000))
assert(watermarkFrom("CAST(leftTime AS LONG) > CAST(rightTime AS LONG) - 1") === Some(9000))
assert(watermarkFrom("CAST(leftTime AS LONG) > CAST((rightTime + interval 1 second) AS LONG)")
=== Some(11000))
assert(watermarkFrom("CAST(leftTime AS LONG) > 2 + CAST(rightTime AS LONG)") === Some(12000))
assert(watermarkFrom("CAST(leftTime AS LONG) > -0.5 + CAST(rightTime AS LONG)") === Some(9500))
assert(watermarkFrom("CAST(leftTime AS LONG) - CAST(rightTime AS LONG) > 2") === Some(12000))
assert(watermarkFrom("-CAST(rightTime AS DOUBLE) + CAST(leftTime AS LONG) > 0.1")
=== Some(10100))
assert(watermarkFrom("0 > CAST(rightTime AS LONG) - CAST(leftTime AS LONG) + 0.2")
=== Some(10200))
// -- Constants on the left
assert(watermarkFrom("CAST(leftTime AS LONG) + 2 > CAST(rightTime AS LONG)") === Some(8000))
assert(watermarkFrom("1 + CAST(leftTime AS LONG) > CAST(rightTime AS LONG)") === Some(9000))
assert(watermarkFrom("CAST((leftTime + interval 3 second) AS LONG) > CAST(rightTime AS LONG)")
=== Some(7000))
assert(watermarkFrom("CAST(leftTime AS LONG) - 2 > CAST(rightTime AS LONG)") === Some(12000))
assert(watermarkFrom("CAST(leftTime AS LONG) + 0.5 > CAST(rightTime AS LONG)") === Some(9500))
assert(watermarkFrom("CAST(leftTime AS LONG) - CAST(rightTime AS LONG) - 2 > 0")
=== Some(12000))
assert(watermarkFrom("-CAST(rightTime AS LONG) + CAST(leftTime AS LONG) - 0.1 > 0")
=== Some(10100))
// -- Constants on both sides, mixed types
assert(watermarkFrom("CAST(leftTime AS LONG) - 2.0 > CAST(rightTime AS LONG) + 1")
=== Some(13000))
// Test multiple conditions, should return minimum watermark
assert(watermarkFrom(
"leftTime > rightTime - interval 3 second AND rightTime < leftTime + interval 2 seconds") ===
Some(7000)) // first condition wins
assert(watermarkFrom(
"leftTime > rightTime - interval 3 second AND rightTime < leftTime + interval 4 seconds") ===
Some(6000)) // second condition wins
// Test invalid comparisons
assert(watermarkFrom("cast(leftTime AS LONG) > leftOther") === None) // non-time attributes
assert(watermarkFrom("leftOther > rightOther") === None) // non-time attributes
assert(watermarkFrom("leftOther > rightOther AND leftTime > rightTime") === Some(10000))
assert(watermarkFrom("cast(rightTime AS DOUBLE) < rightOther") === None) // non-time attributes
assert(watermarkFrom("leftTime > rightTime + interval 1 month") === None) // month not allowed
// Test static comparisons
assert(watermarkFrom("cast(leftTime AS LONG) > 10") === Some(10000))
// Test non-positive results
assert(watermarkFrom("CAST(leftTime AS LONG) > CAST(rightTime AS LONG) - 10") === Some(0))
assert(watermarkFrom("CAST(leftTime AS LONG) > CAST(rightTime AS LONG) - 100") === Some(-90000))
}
}
| mahak/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/StreamingJoinHelperSuite.scala | Scala | apache-2.0 | 8,361 |
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.lsp.api.commands
import org.ensime.lsp.api.types._
import spray.json._
import scalaz.deriving
// Wire constants for the LSP `textDocumentSync` server capability.
object TextDocumentSyncKind {

  /**
   * Documents should not be synced at all.
   */
  final val None = 0

  /**
   * Documents are synced by always sending the full content
   * of the document.
   */
  final val Full = 1

  /**
   * Documents are synced by sending the full content on open.
   * After that only incremental updates to the document are
   * sent.
   */
  final val Incremental = 2
}

// Wire constants for LSP window/showMessage and window/logMessage levels.
object MessageType {

  /** An error message. */
  final val Error = 1

  /** A warning message. */
  final val Warning = 2

  /** An information message. */
  final val Info = 3

  /** A log message. */
  final val Log = 4
}

// Root of everything exchanged over the LSP wire.
sealed trait Message
// A request handled by the server (sent by the client), e.g. InitializeParams.
sealed trait ServerCommand extends Message
// A request handled by the client (sent by the server), e.g. ShowMessageRequestParams.
sealed trait ClientCommand extends Message
// Any reply to a request.
sealed trait Response extends Message
// A reply carrying a result payload.
sealed trait ResultResponse extends Response
// A one-way message that requires no reply.
sealed trait Notification extends Message
/**
 * Parameters and types used in the `initialize` message.
 */
@deriving(JsReader, JsWriter)
final case class InitializeParams(
  // The process Id of the parent process that started the server.
  processId: Long,
  // The rootPath of the workspace. Is null if no folder is open.
  rootPath: String,
  // The capabilities provided by the client (editor).
  capabilities: ClientCapabilities)
    extends ServerCommand

// NOTE(review): unlike its siblings this class carries no
// @deriving(JsReader, JsWriter) — confirm it is serialized elsewhere or
// intentionally never put on the wire.
final case class InitializeError(retry: Boolean)

// Capabilities advertised by the client in `initialize` (currently empty).
@deriving(JsReader, JsWriter)
final case class ClientCapabilities()

// Capabilities the server reports back in the `initialize` response.
// NOTE(review): `completionProvider` is the only field without a default —
// confirm whether `= None` was intended for consistency with its siblings.
@deriving(JsReader, JsWriter)
final case class ServerCapabilities(
  // Defines how text documents are synced.
  textDocumentSync: Int = TextDocumentSyncKind.Full,
  // The server provides hover support.
  hoverProvider: Boolean = false,
  // The server provides completion support.
  completionProvider: Option[CompletionOptions],
  // The server provides signature help support.
  signatureHelpProvider: Option[SignatureHelpOptions] = None,
  // The server provides goto definition support.
  definitionProvider: Boolean = false,
  // The server provides find references support.
  referencesProvider: Boolean = false,
  // The server provides document highlight support.
  documentHighlightProvider: Boolean = false,
  // The server provides document symbol support.
  documentSymbolProvider: Boolean = false,
  // The server provides workspace symbol support.
  workspaceSymbolProvider: Boolean = false,
  // The server provides code actions.
  codeActionProvider: Boolean = false,
  // The server provides code lens.
  codeLensProvider: Option[CodeLensOptions] = None,
  // The server provides document formatting.
  documentFormattingProvider: Boolean = false,
  // The server provides document range formatting.
  documentRangeFormattingProvider: Boolean = false,
  // The server provides document formatting on typing.
  documentOnTypeFormattingProvider: Option[DocumentOnTypeFormattingOptions] =
    None,
  // The server provides rename support.
  renameProvider: Boolean = false
)

@deriving(JsReader, JsWriter)
final case class CompletionOptions(resolveProvider: Boolean,
                                   triggerCharacters: Seq[String])

@deriving(JsReader, JsWriter)
final case class SignatureHelpOptions(triggerCharacters: Seq[String])

@deriving(JsReader, JsWriter)
final case class CodeLensOptions(resolveProvider: Boolean = false)

@deriving(JsReader, JsWriter)
final case class DocumentOnTypeFormattingOptions(
  firstTriggerCharacter: String,
  moreTriggerCharacters: Seq[String]
)
// Result of textDocument/completion.
@deriving(JsReader, JsWriter)
final case class CompletionList(isIncomplete: Boolean,
                                items: Seq[CompletionItem])
    extends ResultResponse

// Result of the `initialize` handshake.
@deriving(JsReader, JsWriter)
final case class InitializeResult(capabilities: ServerCapabilities)
    extends ResultResponse

@deriving(JsReader, JsWriter)
final case class Shutdown() extends ServerCommand

// NOTE(review): no @deriving here — confirm ShutdownResult is never
// serialized directly; `dummy` appears to exist only to give the case class
// a field.
final case class ShutdownResult(dummy: Int) extends ResultResponse

@deriving(JsReader, JsWriter)
final case class ShowMessageRequestParams(
  // The message type. @see MessageType
  tpe: Long,
  // The actual message.
  message: String,
  // The message action items to present.
  actions: Seq[MessageActionItem])
    extends ClientCommand

/**
 * A short title like 'Retry', 'Open Log' etc.
 */
@deriving(JsReader, JsWriter)
final case class MessageActionItem(title: String)

@deriving(JsReader, JsWriter)
final case class TextDocumentPositionParams(
  textDocument: TextDocumentIdentifier,
  position: Position
)

@deriving(JsReader, JsWriter)
final case class DocumentSymbolParams(textDocument: TextDocumentIdentifier)
    extends ServerCommand

// Wrapper whose JSON form is exactly its TextDocumentPositionParams payload
// (see the contramap/map instances in the companion).
final case class TextDocumentCompletionRequest(
  params: TextDocumentPositionParams
) extends ServerCommand

object TextDocumentCompletionRequest {
  implicit val jsWriter: JsWriter[TextDocumentCompletionRequest] =
    JsWriter[TextDocumentPositionParams].contramap(_.params)
  implicit val jsReader: JsReader[TextDocumentCompletionRequest] =
    JsReader[TextDocumentPositionParams].map(TextDocumentCompletionRequest(_))
}
// Wrapper whose JSON form is exactly its TextDocumentPositionParams payload.
final case class TextDocumentDefinitionRequest(
  params: TextDocumentPositionParams
) extends ServerCommand

object TextDocumentDefinitionRequest {
  implicit val jsWriter: JsWriter[TextDocumentDefinitionRequest] =
    JsWriter[TextDocumentPositionParams].contramap(_.params)
  implicit val jsReader: JsReader[TextDocumentDefinitionRequest] =
    JsReader[TextDocumentPositionParams].map(TextDocumentDefinitionRequest(_))
}

// Wrapper whose JSON form is exactly its TextDocumentPositionParams payload.
final case class TextDocumentHoverRequest(params: TextDocumentPositionParams)
    extends ServerCommand

object TextDocumentHoverRequest {
  implicit val jsWriter: JsWriter[TextDocumentHoverRequest] =
    JsWriter[TextDocumentPositionParams].contramap(_.params)
  implicit val jsReader: JsReader[TextDocumentHoverRequest] =
    JsReader[TextDocumentPositionParams].map(TextDocumentHoverRequest(_))
}

// Result of textDocument/hover.
@deriving(JsReader, JsWriter)
final case class Hover(contents: Seq[MarkedString], range: Option[Range])
    extends ResultResponse
///////////////////////////// Notifications ///////////////////////////////

// From server to client.
@deriving(JsReader, JsWriter)
final case class ShowMessageParams(tpe: Int, message: String)
    extends Notification

@deriving(JsReader, JsWriter)
final case class LogMessageParams(tpe: Int, message: String)
    extends Notification

@deriving(JsReader, JsWriter)
final case class PublishDiagnostics(uri: String, diagnostics: Seq[Diagnostic])
    extends Notification

// From client to server.
final case class ExitNotification() extends Notification

@deriving(JsReader, JsWriter)
final case class DidOpenTextDocumentParams(textDocument: TextDocumentItem)
    extends Notification

@deriving(JsReader, JsWriter)
final case class DidChangeTextDocumentParams(
  textDocument: VersionedTextDocumentIdentifier,
  contentChanges: Seq[TextDocumentContentChangeEvent]
) extends Notification

@deriving(JsReader, JsWriter)
final case class DidCloseTextDocumentParams(
  textDocument: TextDocumentIdentifier
) extends Notification

@deriving(JsReader, JsWriter)
final case class DidSaveTextDocumentParams(textDocument: TextDocumentIdentifier)
    extends Notification

@deriving(JsReader, JsWriter)
final case class DidChangeWatchedFiles(changes: Seq[FileEvent])
    extends Notification

@deriving(JsReader, JsWriter)
final case class Initialized() extends Notification

@deriving(JsReader, JsWriter)
final case class CancelRequest(id: Int) extends Notification

@deriving(JsReader, JsWriter)
final case class FileEvent(uri: String, `type`: Int)

// Wire constants for FileEvent.`type`.
object FileChangeType {
  final val Created = 1
  final val Changed = 2
  final val Deleted = 3
}

// Result whose JSON form is just the underlying Seq[SymbolInformation].
final case class DocumentSymbolResult(params: Seq[SymbolInformation])
    extends ResultResponse

object DocumentSymbolResult {
  implicit val jsWriter: JsWriter[DocumentSymbolResult] =
    JsWriter[Seq[SymbolInformation]].contramap(_.params)
  implicit val jsReader: JsReader[DocumentSymbolResult] =
    JsReader[Seq[SymbolInformation]].map(DocumentSymbolResult(_))
}

// Result whose JSON form is just the underlying Seq[Location].
final case class DefinitionResult(params: Seq[Location]) extends ResultResponse

object DefinitionResult {
  implicit val jsWriter: JsWriter[DefinitionResult] =
    JsWriter[Seq[Location]].contramap(_.params)
  implicit val jsReader: JsReader[DefinitionResult] =
    JsReader[Seq[Location]].map(DefinitionResult(_))
}
| fommil/ensime-server | lsp/src/main/scala/org/ensime/lsp/api/commands.scala | Scala | gpl-3.0 | 8,905 |
package org.automanlang.core.question
import org.automanlang.core.question.confidence.ConfidenceInterval
// Factory for Dimension values; `mean` is the default estimator.
object Dim {

  // Arithmetic mean of the sample. An empty sequence yields Double.NaN
  // (0.0 / 0), exactly as before.
  def mean(X: Seq[Double]) = X.foldLeft(0.0)(_ + _) / X.length

  def apply(id: Symbol,
            confidence_interval: ConfidenceInterval,
            min: Option[Double] = None,
            max: Option[Double] = None,
            estimator: Seq[Double] => Double = mean
           ): Dimension =
    Dimension(
      id = id,
      confidence_interval = confidence_interval,
      min = min,
      max = max,
      estimator = estimator
    )
}
// Immutable description of one estimation dimension.
//   id                  - symbolic identifier of the dimension
//   confidence_interval - confidence interval used for the estimate
//   min / max           - optional bounds on admissible values
//   estimator           - reduces a sample of Doubles to a point estimate
//                         (defaults to Dim.mean when built via Dim.apply)
case class Dimension(id: Symbol,
                     confidence_interval: ConfidenceInterval,
                     min: Option[Double],
                     max: Option[Double],
                     estimator: Seq[Double] => Double) | dbarowy/AutoMan | libautoman/src/main/scala/org/automanlang/core/question/Dimension.scala | Scala | gpl-2.0 | 704 |
package com.twitter.hashing
import _root_.java.io.{BufferedReader, InputStreamReader}
import scala.collection.mutable
import org.junit.runner.RunWith
import org.scalatest.WordSpec
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class KetamaDistributorTest extends WordSpec {
  "KetamaDistributor" should {
    val nodes = Seq(
      KetamaNode("10.0.1.1", 600, 1),
      KetamaNode("10.0.1.2", 300, 2),
      KetamaNode("10.0.1.3", 200, 3),
      KetamaNode("10.0.1.4", 350, 4),
      KetamaNode("10.0.1.5", 1000, 5),
      KetamaNode("10.0.1.6", 800, 6),
      KetamaNode("10.0.1.7", 950, 7),
      KetamaNode("10.0.1.8", 100, 8)
    )

    // 160 is the hard-coded points-per-server value used by libmemcached,
    // which is where the known-good data below comes from.
    val ketamaDistributor = new KetamaDistributor(nodes, 160)
    val ketamaDistributorInoldLibMemcachedVersionComplianceMode = new KetamaDistributor(nodes, 160, true)

    "pick the correct node with ketama hash function" in {
      // Test from Smile's KetamaNodeLocatorSpec.scala.
      // Load known good results (key, hash, continuum ceiling, IP).
      val stream = getClass.getClassLoader.getResourceAsStream("ketama_results")
      val reader = new BufferedReader(new InputStreamReader(stream))
      val expected = new mutable.ListBuffer[Array[String]]
      try {
        // Idiomatic replacement for the original do/while read loop.
        Iterator.continually(reader.readLine).takeWhile(_ != null).foreach { line =>
          val segments = line.split(" ")
          assert(segments.length === 4)
          expected += segments
        }
      } finally {
        // Fix: the reader (and the underlying resource stream) was previously
        // never closed.
        reader.close()
      }
      assert(expected.size === 99)

      // Test that each key maps to the expected IP, in both normal and
      // old-libmemcached-compliance modes.
      val handleToIp = nodes.map { n => n.handle -> n.identifier }.toMap
      for (testcase <- expected) {
        val hash = KeyHasher.KETAMA.hashKey(testcase(0).getBytes)
        val handle = ketamaDistributor.nodeForHash(hash)
        val handle2 = ketamaDistributorInoldLibMemcachedVersionComplianceMode.nodeForHash(hash)
        val resultIp = handleToIp(handle)
        val resultIp2 = handleToIp(handle2)
        assert(testcase(3) === resultIp)
        assert(testcase(3) === resultIp2)
      }
    }

    "pick the correct node with 64-bit hash values" in {
      // Known-good (hash -> node handle) pairs exercising negative 64-bit hashes.
      val knownGoodValues = Map(
        -166124121512512L -> 5,
        8796093022208L -> 3,
        4312515125124L -> 2,
        -8192481414141L -> 1,
        -9515121512312L -> 5
      )
      knownGoodValues foreach { case (key, node) =>
        val handle = ketamaDistributor.nodeForHash(key)
        assert(handle === node)
      }
    }
  }
}
| travisbrown/util | util-hashing/src/test/scala/com/twitter/hashing/KetamaDistributorTest.scala | Scala | apache-2.0 | 2,586 |
package scredis.protocol
import com.typesafe.scalalogging.LazyLogging
import akka.actor.ActorRef
import akka.util.ByteString
import scredis._
import scredis.exceptions._
import scredis.serialization.UTF8StringReader
import scredis.util.BufferPool
import scala.collection.mutable.{ ArrayBuilder, ListBuffer, Stack }
import scala.util.{ Try, Success, Failure }
import scala.concurrent.{ ExecutionContext, Future, Promise, Await }
import scala.concurrent.duration.Duration
import scala.annotation.tailrec
import java.nio.{ ByteBuffer, CharBuffer }
import java.util.concurrent.Semaphore
import scala.language.higherKinds
/**
* This object implements various aspects of the `Redis` protocol.
*/
object Protocol {
// Mutable cursor used while counting RESP arrays: tracks how many of the
// expected `size` elements have been consumed so far.
private case class ArrayState(
  val size: Int,
  var count: Int
) {
  def increment(): Unit = count += 1
  // True once every expected element of the array has been seen.
  def isCompleted = (count == size)
}
// Charset used for all command serialization.
private val Encoding = "UTF-8"

// NOTE(review): `CfByte` actually holds LF ('\n'), not a "CF" byte, and
// neither CrByte nor CfByte appears to be referenced in this file — confirm
// before removing.
private val CrByte = '\r'.toByte
private val CfByte = '\n'.toByte

// RESP type-marker bytes: '+' simple string, '-' error, ':' integer,
// '$' bulk string, '*' array; the *Length vals are the marker widths (1 byte).
private val SimpleStringResponseByte = '+'.toByte
private val ErrorResponseByte = '-'.toByte
private val IntegerResponseByte = ':'.toByte
private val BulkStringResponseByte = '$'.toByte
private val BulkStringResponseLength = 1
private val ArrayResponseByte = '*'.toByte
private val ArrayResponseLength = 1
private val CrLf = "\r\n".getBytes(Encoding)
private val CrLfLength = CrLf.length

// Pool of reusable encode buffers, bounded by global configuration.
private val bufferPool = new BufferPool(
  maxCapacity = RedisConfigDefaults.Global.EncodeBufferPool.PoolMaxCapacity,
  maxBufferSize = RedisConfigDefaults.Global.EncodeBufferPool.BufferMaxSize
)
// Optional global throttle on in-flight requests: when
// `MaxConcurrentRequestsOpt` is configured, a semaphore sized by that value
// guards request submission (see `aquire`/`release`). The Boolean flag selects
// blocking (true) vs. fail-fast (false) acquisition in `aquire`.
private val concurrentOpt: Option[(Semaphore, Boolean)] = {
  RedisConfigDefaults.Global.MaxConcurrentRequestsOpt.map { concurrent =>
    // Bug fix: the configured maximum was previously ignored and the
    // semaphore was hard-coded to 30000 permits.
    (new Semaphore(concurrent), true)
  }
}
// Acquires `count` permits from the optional concurrency semaphore before a
// request is submitted; a no-op when throttling is not configured.
// In non-blocking mode (flag == false) a failed tryAcquire surfaces as a
// generic "Busy" exception.
// (Name is a historical misspelling of "acquire"; kept because the `send`
// methods in this file call it.)
private def aquire(count: Int = 1): Unit = concurrentOpt.foreach {
  case (semaphore, true) => semaphore.acquire(count)
  case (semaphore, false) => if (!semaphore.tryAcquire(count)) {
    throw new Exception("Busy")
  }
}
// Reads a signed base-10 integer terminated by CRLF from `buffer`,
// consuming the terminator. The buffer is assumed to contain a complete,
// well-formed RESP integer line.
private def parseInt(buffer: ByteBuffer): Int = {
  var result = 0
  var first = buffer.get()
  val negative = first == '-'
  if (negative) {
    first = buffer.get()
  }
  var current = first
  while (current != '\r') {
    result = result * 10 + (current - '0')
    current = buffer.get()
  }
  buffer.get() // consume the trailing '\n'
  if (negative) -result else result
}
// Reads a signed base-10 Long terminated by CRLF from `buffer`,
// consuming the terminator. Long-valued twin of `parseInt`.
private def parseLong(buffer: ByteBuffer): Long = {
  var result = 0L
  var first = buffer.get()
  val negative = first == '-'
  if (negative) {
    first = buffer.get()
  }
  var current = first
  while (current != '\r') {
    result = result * 10 + (current - '0')
    current = buffer.get()
  }
  buffer.get() // consume the trailing '\n'
  if (negative) -result else result
}
// Reads a CR-terminated run of bytes from `buffer`, consumes the trailing
// LF, and decodes the collected bytes as UTF-8.
private def parseString(buffer: ByteBuffer): String = {
  val bytes = new ArrayBuilder.ofByte()
  // Fix: removed the unused local `var count = 0` present in the original.
  var char = buffer.get()
  while (char != '\r') {
    bytes += char
    char = buffer.get()
  }
  buffer.get() // consume the trailing '\n'
  new String(bytes.result(), "UTF-8")
}
// Decodes a RESP bulk string whose '$' marker has already been consumed:
// reads the declared byte length, then that many payload bytes plus CRLF.
// A negative length (RESP null bulk string, "$-1") yields None.
private def decodeBulkStringResponse(buffer: ByteBuffer): BulkStringResponse = {
  val length = parseInt(buffer)
  val valueOpt = if (length >= 0) {
    val array = new Array[Byte](length)
    buffer.get(array)
    // skip the trailing CRLF
    buffer.get()
    buffer.get()
    Some(array)
  } else {
    None
  }
  BulkStringResponse(valueOpt)
}
// Returns one permit to the optional concurrency semaphore (pair of `aquire`).
private[scredis] def release(): Unit = concurrentOpt.foreach {
  case (semaphore, _) => semaphore.release()
}

// Hands an encode buffer back to the pool for reuse.
private[scredis] def releaseBuffer(buffer: ByteBuffer): Unit = bufferPool.release(buffer)

// Encodes a command made only of name words into a standalone byte array,
// returning the pooled intermediate buffer before handing the copy back.
private[scredis] def encodeZeroArgCommand(names: Seq[String]): Array[Byte] = {
  val buffer = encode(names)
  val bytes = new Array[Byte](buffer.remaining)
  buffer.get(bytes)
  bufferPool.release(buffer)
  bytes
}
// Encodes `args` as a RESP array of bulk strings into a pooled ByteBuffer.
// First pass: serialize every argument and total the exact byte length.
// Second pass: write markers, sizes and payloads into a buffer acquired for
// that length. The caller must return the buffer via `releaseBuffer`.
private[scredis] def encode(args: Seq[Any]): ByteBuffer = {
  val argsSize = args.size.toString.getBytes(Encoding)
  var length = ArrayResponseLength + argsSize.length + CrLfLength
  val serializedArgs = args.map { arg =>
    val serializedArg = arg match {
      case null => throw new NullPointerException(args.mkString(" "))
      // Byte arrays pass through untouched; everything else goes via toString.
      // (Fix: the second binding was previously named `x` but the code used
      // the outer `arg`, which was confusing though equivalent.)
      case bytes: Array[Byte] => bytes
      case other => other.toString.getBytes(Encoding)
    }
    val serializedArgSize = serializedArg.length.toString.getBytes(Encoding)
    length += ArrayResponseLength +
      serializedArgSize.length +
      CrLfLength +
      serializedArg.length +
      CrLfLength
    (serializedArg, serializedArgSize)
  }
  val buffer = bufferPool.acquire(length)
  buffer.put(ArrayResponseByte).put(argsSize).put(CrLf)
  for ((serializedArg, serializedArgSize) <- serializedArgs) {
    buffer
      .put(BulkStringResponseByte)
      .put(serializedArgSize)
      .put(CrLf)
      .put(serializedArg)
      .put(CrLf)
  }
  buffer.flip()
  buffer
}
// Counts how many complete RESP responses are available in `buffer`.
// If the last response is fragmented (cut off mid-message), the buffer's
// position is rewound to the start of that partial response — the start of
// the outermost enclosing array if we were inside one — so the caller can
// retry once more bytes arrive.
private[scredis] def count(buffer: ByteBuffer): Int = {
  var char: Byte = 0
  var requests = 0
  // Start position of the (non-array) response currently being scanned.
  var position = -1
  // Start position of the outermost array currently being scanned, if any.
  var arrayPosition = -1
  // Nesting of in-progress arrays with their remaining element counts.
  val arrayStack = Stack[ArrayState]()
  var isFragmented = false

  // Marks one element complete, unwinding any arrays that thereby become
  // complete; a completed top-level element counts as one full request.
  @inline @tailrec
  def increment(): Unit = if (arrayStack.isEmpty) {
    arrayPosition = -1
    requests += 1
  } else {
    val array = arrayStack.top
    array.increment()
    if (array.isCompleted) {
      arrayStack.pop()
      increment()
    }
  }

  @inline
  def stop(): Unit = isFragmented = true

  while (buffer.remaining > 0 && !isFragmented) {
    char = buffer.get()
    if (
      char == ErrorResponseByte ||
      char == SimpleStringResponseByte ||
      char == IntegerResponseByte
    ) {
      // Line-terminated responses: scan forward to the trailing '\n'.
      position = buffer.position - 1
      while (buffer.remaining > 0 && char != '\n') {
        char = buffer.get()
      }
      if (char == '\n') {
        increment()
      } else {
        stop()
      }
    } else if (char == BulkStringResponseByte) {
      position = buffer.position - 1
      try {
        // "$-1" (null bulk string) has no payload; otherwise payload + CRLF.
        val length = parseInt(buffer) match {
          case -1 => 0
          case x => x + 2
        }
        if (buffer.remaining >= length) {
          buffer.position(buffer.position + length)
          increment()
        } else {
          stop()
        }
      } catch {
        // The length header itself was cut off.
        case e: java.nio.BufferUnderflowException => stop()
      }
    } else if (char == ArrayResponseByte) {
      if (arrayStack.isEmpty) {
        arrayPosition = buffer.position - 1
      }
      try {
        val length = parseInt(buffer)
        if (length <= 0) {
          // Empty or null array is a complete element by itself.
          increment()
        } else {
          arrayStack.push(ArrayState(length, 0))
        }
      } catch {
        case e: java.nio.BufferUnderflowException => stop()
      }
    }
  }
  // An array still on the stack means its elements were cut off.
  if (!arrayStack.isEmpty) {
    isFragmented = true
  }
  if (isFragmented) {
    if (arrayPosition >= 0) {
      buffer.position(arrayPosition)
    } else {
      buffer.position(position)
    }
  }
  requests
}
// Decodes the next complete RESP response from `buffer`, dispatching on the
// leading type byte. Assumes `count` has verified a full response is present.
private[scredis] def decode(buffer: ByteBuffer): Response = buffer.get() match {
  case ErrorResponseByte => ErrorResponse(parseString(buffer))
  case SimpleStringResponseByte => SimpleStringResponse(parseString(buffer))
  case IntegerResponseByte => IntegerResponse(parseLong(buffer))
  case BulkStringResponseByte => decodeBulkStringResponse(buffer)
  case ArrayResponseByte => ArrayResponse(parseInt(buffer), buffer)
}
// Interprets a raw Response received while in Pub/Sub mode. Redis delivers
// Pub/Sub events as arrays whose first element names the event kind; the
// remaining elements are decoded positionally per kind below.
private[scredis] def decodePubSubResponse(
  response: Response
): Either[ErrorResponse, PubSubMessage] = response match {
  case e: ErrorResponse => Left(e)
  case a: ArrayResponse => Right {
    // Elements are either bulk strings (Option[Array[Byte]]) or integers.
    val vector = a.parsed[Any, Vector] {
      case BulkStringResponse(valueOpt) => valueOpt
      case IntegerResponse(value) => value.toInt
    }
    val kind = UTF8StringReader.read(vector(0).asInstanceOf[Option[Array[Byte]]].get)
    kind match {
      case "subscribe" => {
        val channel = UTF8StringReader.read(vector(1).asInstanceOf[Option[Array[Byte]]].get)
        val channelsCount = vector(2).asInstanceOf[Int]
        PubSubMessage.Subscribe(channel, channelsCount)
      }
      case "psubscribe" => {
        val pattern = UTF8StringReader.read(vector(1).asInstanceOf[Option[Array[Byte]]].get)
        val patternsCount = vector(2).asInstanceOf[Int]
        PubSubMessage.PSubscribe(pattern, patternsCount)
      }
      case "unsubscribe" => {
        // Channel may be absent when unsubscribing from all channels.
        val channelOpt = vector(1).asInstanceOf[Option[Array[Byte]]].map(UTF8StringReader.read)
        val channelsCount = vector(2).asInstanceOf[Int]
        PubSubMessage.Unsubscribe(channelOpt, channelsCount)
      }
      case "punsubscribe" => {
        val patternOpt = vector(1).asInstanceOf[Option[Array[Byte]]].map(UTF8StringReader.read)
        val patternsCount = vector(2).asInstanceOf[Int]
        PubSubMessage.PUnsubscribe(patternOpt, patternsCount)
      }
      case "message" => {
        val channel = UTF8StringReader.read(vector(1).asInstanceOf[Option[Array[Byte]]].get)
        val message = vector(2).asInstanceOf[Option[Array[Byte]]].get
        PubSubMessage.Message(channel, message)
      }
      case "pmessage" => {
        val pattern = UTF8StringReader.read(vector(1).asInstanceOf[Option[Array[Byte]]].get)
        val channel = UTF8StringReader.read(vector(2).asInstanceOf[Option[Array[Byte]]].get)
        val message = vector(3).asInstanceOf[Option[Array[Byte]]].get
        PubSubMessage.PMessage(pattern, channel, message)
      }
      case x => throw RedisProtocolException(
        s"Invalid PubSubMessage type received: $x"
      )
    }
  }
  case x => throw RedisProtocolException(s"Invalid PubSubResponse received: $x")
}
// Submits a single request to the listener actor, first reserving one
// concurrency permit (see `aquire`), and returns the request's future.
private[scredis] def send[A](request: Request[A])(
  implicit listenerActor: ActorRef
): Future[A] = {
  aquire()
  listenerActor ! request
  request.future
}

// Submits a MULTI/EXEC transaction, reserving one permit for the EXEC itself
// plus one per queued request; completion is signalled via the EXEC future.
private[scredis] def send[A](transaction: Transaction)(
  implicit listenerActor: ActorRef
): Future[Vector[Try[Any]]] = {
  aquire(1 + transaction.requests.size)
  listenerActor ! transaction
  transaction.execRequest.future
}
}
| Livestream/scredis | src/main/scala/scredis/protocol/Protocol.scala | Scala | apache-2.0 | 10,492 |
package dotty.tools.dotc
package transform
import core._
import DenotTransformers._
import Phases.Phase
import Contexts.Context
import SymDenotations.SymDenotation
import Types._
import Symbols._
import SymUtils._
import Constants._
import ast.Trees._
import TreeTransforms._
import NameOps._
import Flags._
import Decorators._
/** Provides the implementations of all getters and setters, introducing
* fields to hold the value accessed by them.
* TODO: Make LazyVals a part of this phase?
*
* <accessor> <stable> <mods> def x(): T = e
* --> private val x: T = e
* <accessor> <stable> <mods> def x(): T = x
*
* <accessor> <mods> def x(): T = e
* --> private var x: T = e
* <accessor> <mods> def x(): T = x
*
* <accessor> <mods> def x_=(y: T): Unit = ()
* --> <accessor> <mods> def x_=(y: T): Unit = x = y
*/
class Memoize extends MiniPhaseTransform with IdentityDenotTransformer { thisTransform =>
  import ast.tpd._

  override def phaseName = "memoize"

  /** Should run after mixin so that fields get generated in the
   *  class that contains the concrete getter rather than the trait
   *  that defines it.
   */
  override def runsAfter: Set[Class[_ <: Phase]] = Set(classOf[Mixin])

  // After this phase, every non-lazy, non-deferred DefDef must have a body.
  override def checkPostCondition(tree: Tree)(implicit ctx: Context): Unit = tree match {
    case tree: DefDef if !tree.symbol.is(Lazy | Deferred) =>
      assert(!tree.rhs.isEmpty, i"unimplemented: $tree")
    case _ =>
  }

  override def prepareForDefDef(tree: DefDef)(implicit ctx: Context) = {
    val sym = tree.symbol
    if (sym.isGetter && !sym.is(NoFieldNeeded)) {
      // Allocate the field early so that the initializer has the right owner
      // for subsequent phases in the group.
      val maybeMutable = if (sym is Stable) EmptyFlags else Mutable
      val field = ctx.newSymbol(
        owner = ctx.owner,
        name = sym.name.asTermName.fieldName,
        flags = Private | maybeMutable,
        info = sym.info.resultType,
        coord = tree.pos).enteredAfter(thisTransform)
      tree.rhs.changeOwnerAfter(sym, field, thisTransform)
    }
    this
  }

  override def transformDefDef(tree: DefDef)(implicit ctx: Context, info: TransformerInfo): Tree = {
    val sym = tree.symbol
    // Looks up the backing field created in prepareForDefDef; must exist for
    // any accessor that reaches this point.
    def field = {
      val field = sym.field.asTerm
      assert(field.exists, i"no field for ${sym.showLocated} in ${sym.owner.info.decls.toList.map{_.showDcl}}%; %")
      field
    }
    if (sym.is(Accessor, butNot = NoFieldNeeded))
      if (sym.isGetter) {
        // Getter: emit the field (carrying the old right-hand side) plus a
        // getter that simply reads it.
        val fieldDef = transformFollowing(ValDef(field, tree.rhs))
        val getterDef = cpy.DefDef(tree)(rhs = transformFollowingDeep(ref(field)))
        Thicket(fieldDef, getterDef)
      }
      else if (sym.isSetter) {
        // Non-param-accessor setters are expected to have a `()` body here.
        if (!sym.is(ParamAccessor)) { val Literal(Constant(())) = tree.rhs }
        val initializer = Assign(ref(field), ref(tree.vparamss.head.head.symbol))
        cpy.DefDef(tree)(rhs = transformFollowingDeep(initializer))
      }
      else tree // curiously, some accessors from Scala2 have ' ' suffixes. They count as
                // neither getters nor setters
    else tree
  }

  private val NoFieldNeeded = Lazy | Deferred | JavaDefined
} | AlexSikia/dotty | src/dotty/tools/dotc/transform/Memoize.scala | Scala | bsd-3-clause | 3,200 |
package uk.org.nbn.nbnv.importer.validation
import uk.org.nbn.nbnv.importer.fidelity.{Result, ResultLevel}
import uk.org.nbn.nbnv.importer.records.NbnRecord
// Validates that RecordKey is provided (rule NBNV-163).
// Rule NBNV-163: a record must carry a non-blank RecordKey.
class Nbnv163Validator {

  // Returns a DEBUG-level result when the record key contains any
  // non-whitespace characters, and an ERROR-level result otherwise.
  def validate(record: NbnRecord) = {
    if (record.key.trim.nonEmpty) {
      new Result {
        def level = ResultLevel.DEBUG
        def message = "NBNV-163: RecordKey was present"
        def reference = record.key
      }
    } else {
      new Result {
        def level = ResultLevel.ERROR
        def message = "NBNV-163: RecordKey was not present"
        def reference = "(no record key)"
      }
    }
  }
}
| JNCC-dev-team/nbn-importer | importer/src/main/scala/uk/org/nbn/nbnv/importer/validation/Nbnv163Validator.scala | Scala | apache-2.0 | 911 |
package io.github.edadma.table
// Demo program rendering the same sample table in a variety of styles.
// Converted from `extends App` to an explicit main method to avoid the
// App/DelayedInit initialization-order pitfalls; the printed output is
// unchanged.
object Main {

  def main(args: Array[String]): Unit = {
    println()
    println(
      new TextTable {
        header("one", "two", "three")
        row(null, "this is a string", List(1, 2, 3))
        row(12, 234, 3456)
        1 to 3 foreach rightAlignment
      }
    )
    println(
      new TextTable(markdown = true) {
        header("one", "two", "three")
        row(null, "this is a string", List(1, 2, 3))
        row(12, 234, 3456)
        1 to 3 foreach rightAlignment
      }
    )
    println(
      new TextTable(tabbed = true) {
        header("one", "two", "three")
        row(null, "this is a string", List(1, 2, 3))
        row(12, 234, 3456)
        1 to 3 foreach rightAlignment
      }
    )
    println(
      new TextTable(columnDividers = true, headerLine = true, headerUnderlined = false) {
        header("one", "two", "three")
        row(null, "this is a string", List(1, 2, 3))
        row(12, 234, 3456)
        1 to 3 foreach rightAlignment
      }
    )
    println(
      new TextTable(border = HEAVY, columnDividers = true, headerLine = true, headerUnderlined = false) {
        header("one", "two", "three")
        row(null, "this is a string", List(1, 2, 3))
        row(12, 234, 3456)
        1 to 3 foreach rightAlignment
      }
    )
    println(
      new TextTable(border = LIGHT, columnDividers = true, headerLine = true, headerUnderlined = false) {
        header("one", "two", "three")
        row(null, "this is a string", List(1, 2, 3))
        row(12, 234, 3456)
        1 to 3 foreach rightAlignment
      }
    )
    println(
      new TextTable(border = ASCII, columnDividers = true, headerBold = false, headerUnderlined = false) {
        header("one", "two", "three")
        line()
        row(null, "this is a string", List(1, 2, 3))
        row(12, 234, 3456)
        1 to 3 foreach rightAlignment
      }
    )
    println(
      new TextTable(border = ASCII, columnDividers = true, headerLine = true, headerUnderlined = false) {
        header("one", "two", "three")
        row(null, "this is a string", List(1, 2, 3))
        row(12, 234, 3456)
        1 to 3 foreach rightAlignment
      }
    )
    println(
      new TextTable(matrix = true) {
        row(1, 2, 3)
        row(4, 5, 6)
        row(7, 8, 9)
        1 to 3 foreach rightAlignment
      }
    )
  }
}
| edadma/table | shared/src/test/scala/xyz/hyperreal/table/Main.scala | Scala | mit | 2,193 |
package com.github.mdr.mash.inference
import com.github.mdr.mash.evaluator.{ EvaluationContext, Evaluator }
import com.github.mdr.mash.parser.AbstractSyntax._
import com.github.mdr.mash.runtime._
import com.github.mdr.mash.utils.Utils
/**
* Statically determine the values of expressions, if possible.
*/
object SimpleEvaluator {
// Statically evaluates `expr`, caching the outcome on the AST node's
// `constantValueOpt` field before returning it.
def evaluate(expr: Expr)(implicit context: EvaluationContext): Option[MashValue] = {
  val valueOpt = simplyEvaluate(expr)
  expr.constantValueOpt = valueOpt
  valueOpt
}
// Core static evaluator: returns Some(value) only for expression forms whose
// value is statically known (literals, constant lists, resolvable identifiers,
// field lookups, ...). For forms that can never be constant (invocations,
// lambdas, operators, declarations, ...) it still recurses into children via
// `evaluate` — populating their `constantValueOpt` caches — and returns None.
def simplyEvaluate(expr: Expr)(implicit context: EvaluationContext): Option[MashValue] = expr match {
  case _: Hole | _: PipeExpr | _: HeadlessMemberExpr ⇒ None // Should have been removed from the AST by now
  case _: InterpolatedString | _: MishFunction ⇒ None
  case ImportStatement(expr, _, _) ⇒ evaluate(expr)
  case literal: Literal ⇒ Some(literal.value)
  case stringLiteral: StringLiteral ⇒ Some(Evaluator.evaluateStringLiteral(stringLiteral))
  // A list literal is constant only if every element is.
  case listExpr: ListExpr ⇒ Utils.sequence(listExpr.elements.map(evaluate(_))).map(MashList(_))
  case identifier: Identifier ⇒ context.scopeStack.lookup(identifier.name).map(_.value)
  // Delegates to an ObjectExpr-specific overload — presumably defined later
  // in this file (not visible here); TODO confirm it is not self-recursive.
  case objectExpr: ObjectExpr ⇒ simplyEvaluate(objectExpr)
  // A statement sequence evaluates to its last statement (unit when empty).
  case StatementSeq(statements, _) ⇒ statements.map(evaluate).lastOption getOrElse Some(MashUnit)
  case ParenExpr(body, _) ⇒ evaluate(body)
  case blockExpr: BlockExpr ⇒ evaluate(blockExpr.expr)
  case HelpExpr(body, _) ⇒
    evaluate(body)
    None
  case MinusExpr(subExpr, _) ⇒
    evaluate(subExpr)
    None
  case memberExpr: MemberExpr ⇒
    val MemberExpr(target, name, _, _) = memberExpr
    // We just check for field lookups
    for {
      targetValue ← evaluate(target)
      targetObject ← targetValue.asObject
      fieldValue ← targetObject.get(name)
    } yield fieldValue
  case lookupExpr: LookupExpr ⇒
    // Constant only for integer indexing into a constant list.
    val indexOpt = evaluate(lookupExpr.index)
    val targetOpt = evaluate(lookupExpr.target)
    for {
      targetList ← targetOpt.flatMap(_.asList)
      index ← indexOpt.collect { case MashInteger(i) ⇒ i }
      value ← targetList.elements.lift(index)
    } yield value
  case invocationExpr: InvocationExpr ⇒
    evaluate(invocationExpr.function)
    invocationExpr.arguments.foreach(simplyEvaluate(_))
    None
  case LambdaExpr(params, body, _) ⇒
    evaluate(body)
    params.params.flatMap(_.defaultExprOpt).map(evaluate)
    None
  case binOpExpr: BinOpExpr ⇒
    binOpExpr.children.foreach(evaluate)
    None
  case chainedOpExpr: ChainedOpExpr ⇒
    chainedOpExpr.children.foreach(evaluate)
    None
  case assExpr: AssignmentExpr ⇒
    assExpr.children.foreach(evaluate)
    None
  case assExpr: PatternAssignmentExpr ⇒
    evaluate(assExpr.right)
    None
  case ifExpr: IfExpr ⇒
    ifExpr.children.foreach(evaluate)
    None
  case mishExpr: MishExpr ⇒
    mishExpr.children.foreach(evaluate)
    None
  case interpolationExpr: MishInterpolation ⇒
    interpolationExpr.part match {
      case ExprPart(partExpr) ⇒ evaluate(partExpr)
      case _ ⇒
    }
    None
  case FunctionDeclaration(_, attributes, _, params, body, _) ⇒
    attributes.foreach(simplyEvaluate(_))
    params.params.flatMap(_.defaultExprOpt).map(evaluate)
    evaluate(body)
    None
  case ClassDeclaration(_, attributes, _, params, bodyOpt, _) ⇒
    attributes.foreach(simplyEvaluate(_))
    params.params.flatMap(_.defaultExprOpt).map(evaluate)
    for {
      body ← bodyOpt
      method ← body.methods
    } evaluate(method)
    None
  case ThisExpr(_) ⇒
    None
}
private def simplyEvaluate(attribute: Attribute)(implicit context: EvaluationContext): Unit =
attribute.argumentsOpt.foreach(arguments ⇒ arguments.foreach(simplyEvaluate(_)))
private def simplyEvaluate(arg: Argument)(implicit context: EvaluationContext): Unit = arg match {
case Argument.PositionArg(value, _) ⇒ evaluate(value)
case Argument.LongFlag(_, Some(value), _) ⇒ evaluate(value)
case _ =>
}
  /** Compute a constant object for an object literal, if possible.
    *
    * Succeeds only when every field name resolves to a constant string (a literal
    * identifier, or an expression evaluating to a MashString) AND every field value
    * is itself statically evaluable; otherwise returns None.
    */
  def simplyEvaluate(objectExpr: ObjectExpr)(implicit context: EvaluationContext): Option[MashValue] = {
    // A bare identifier is taken as the field name; anything else must evaluate to a string
    def getFieldName(fieldNameExpr: Expr): Option[String] =
      fieldNameExpr match {
        case Identifier(name, _) ⇒ Some(name)
        case _ ⇒ evaluate(fieldNameExpr) collect {
          case MashString(s, _) ⇒ s
        }
      }
    // Shorthand entries ({ field }) take their value from the enclosing scope
    val fieldPairsOpt =
      objectExpr.fields.map {
        case FullObjectEntry(field, value, _) ⇒ getFieldName(field) -> evaluate(value)
        case ShorthandObjectEntry(field, _) ⇒ Some(field) -> context.scopeStack.lookup(field).map(_.value)
      }.map(pairOfOptionToOptionPair)
    Utils.sequence(fieldPairsOpt).map(MashObject.of(_))
  }
private def pairOfOptionToOptionPair[X, Y](pair: (Option[X], Option[Y])): Option[(X, Y)] =
for (x ← pair._1;
y ← pair._2) yield (x, y)
} | mdr/mash | src/main/scala/com/github/mdr/mash/inference/SimpleEvaluator.scala | Scala | mit | 5,845 |
/*
* DARWIN Genetic Algorithms Framework Project.
* Copyright (c) 2003, 2005, 2007, 2009, 2011, 2016, 2017. Phasmid Software
*
* Originally, developed in Java by Rubecula Software, LLC and hosted by SourceForge.
* Converted to Scala by Phasmid Software and hosted by github at https://github.com/rchillyard/Darwin
*
* This file is part of Darwin.
*
* Darwin is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.phasmid.darwin.genetics
import com.phasmid.darwin.genetics.dna._
import org.scalatest.{FlatSpec, Matchers}
/**
* Created by scalaprof on 5/6/16.
*/
/** Spec for Genome/Genotype: checks that sequences of bases are transcribed into
  * genes with the expected alleles, for single- and multi-chromosome karyotypes.
  */
class GenotypeSpec extends FlatSpec with Matchers {
  // XXX this is a very simple 1:1 mapping from bases to alleles
  private val transcriber = PlainTranscriber[Base, String] { bs => Some(Allele(bs.head.toString)) }
  // Locations on a chromosome; assumes Location(name, offset, length) — TODO confirm
  val hox = Location("hox", 0, 1)
  // C or A
  val hix = Location("hix", 1, 1)
  // G or G
  val hoxB = Location("hoxB", 1, 1)
  val hoxA = Location("hoxA", 0, 1)
  val hoxC = Location("hoxC", 2, 1)
  // All loci are "unknown" (no trait mapping); keyed by location
  val locusMap: (Location) => Locus[String] = Map(
    hox -> UnknownLocus[String](hox),
    hix -> UnknownLocus[String](hix),
    hoxA -> UnknownLocus[String](hoxA),
    hoxB -> UnknownLocus[String](hoxB),
    hoxC -> UnknownLocus[String](hoxC))
  "apply" should "work" in {
    val karyotype = Seq(Chromosome("test", isSex = false, Seq(hox)))
    val g = Genome("test", karyotype, true, transcriber, locusMap)
    val loci = g.loci
    // One chromosome pair: sequence CG on one strand, AG on the other
    val bsss: Seq[Seq[Sequence[Base]]] = Seq(Seq(Sequence(Seq(Cytosine, Guanine)), Sequence(Seq(Adenine, Guanine))))
    println(s"bsss: $bsss")
    val gt: Genotype[String, Boolean] = g(bsss)
    gt.genes.size shouldBe loci
    val gene = gt.genes.head
    gene.name shouldBe "hox"
    // Boolean selects the strand: false -> second sequence (A), true -> first (C)
    gene(false) shouldBe Allele("A")
    gene(true) shouldBe Allele("C")
  }
  it should "work with multiple chromosomes" in {
    val chromosome1 = Chromosome("test1", isSex = false, Seq(hox, hix))
    val chromosome2 = Chromosome("test2", isSex = false, Seq(hoxB))
    val chromosome3 = Chromosome("test3", isSex = false, Seq(hoxA, hoxB, hoxC))
    val karyotype: Seq[Chromosome] = Seq(chromosome1, chromosome2, chromosome3)
    val g = Genome("test", karyotype, true, transcriber, locusMap)
    val loci = g.loci
    // One pair of sequences per chromosome
    val bsss: Nucleus[Base] = Seq(Seq(Sequence("CG"), Sequence("AG")), Seq(Sequence("CT"), Sequence("AG")), Seq(Sequence("CGT"), Sequence("AGA")))
    println(s"bsss: $bsss")
    val gt: Genotype[String, Boolean] = g(bsss)
    gt.genes.size shouldBe loci
    val gene = gt.genes.head
    gene.name shouldBe "hox"
    gene(false) shouldBe Allele("A")
    gene(true) shouldBe Allele("C")
  }
}
| rchillyard/Darwin | src/test/scala/com/phasmid/darwin/genetics/GenotypeSpec.scala | Scala | gpl-3.0 | 3,268 |
package org.monarchinitiative.dosdp.cli
import java.io.{File, PrintWriter}
import com.github.tototoshi.csv.CSVWriter
import org.apache.jena.query.{QueryExecutionFactory, QueryFactory, QuerySolution}
import org.apache.jena.rdf.model.{Model, ModelFactory}
import org.monarchinitiative.dosdp.Utilities.isDirectory
import org.monarchinitiative.dosdp.cli.Config.AxiomKind
import org.monarchinitiative.dosdp.{DOSDP, ExpandedDOSDP, SPARQL, SesameJena}
import org.phenoscape.owlet.Owlet
import org.semanticweb.HermiT.ReasonerFactory
import org.semanticweb.elk.owlapi.ElkReasonerFactory
import org.semanticweb.owlapi.apibinding.OWLManager
import org.semanticweb.owlapi.model.OWLOntology
import org.semanticweb.owlapi.reasoner.{OWLReasoner, OWLReasonerFactory}
import uk.ac.manchester.cs.jfact.JFactFactory
import zio._
import zio.blocking.Blocking
import scala.jdk.CollectionConverters._
/** Implements the `query` command: turns DOSDP pattern files into SPARQL queries
  * and either prints the query text or executes it against a Jena model built
  * from the input ontology (optionally expanded through an OWL reasoner).
  */
object Query {

  /** Top-level command flow: resolve targets, build optional reasoner and model,
    * then process every pattern file. The reasoner is managed (disposed on exit).
    */
  def run(config: QueryConfig): ZIO[ZEnv, DOSDPError, Unit] = {
    // Map the --reasoner argument (if given) to a concrete OWL reasoner factory
    val reasonerFactoryOptZ = ZIO.foreach(config.reasoner) { reasonerArg =>
      reasonerArg.toLowerCase match {
        case "elk" => ZIO.succeed(new ElkReasonerFactory())
        case "hermit" => ZIO.succeed(new ReasonerFactory())
        case "jfact" => ZIO.succeed(new JFactFactory())
        case other => ZIO.fail(DOSDPError(s"Reasoner $other not supported. Options are ELK, HermiT, or JFact"))
      }
    }
    for {
      targets <- determineTargets(config).mapError(e => DOSDPError("Failure to configure input or output", e))
      reasonerFactoryOpt <- reasonerFactoryOptZ
      ontologyOpt <- config.common.ontologyOpt
      modelOpt <- ZIO.foreach(ontologyOpt)(makeModel)
      _ <- makeOptionalReasoner(ontologyOpt, reasonerFactoryOpt).use { reasonerOpt =>
        ZIO.foreach(targets) { target =>
          ZIO.effectTotal(scribe.info(s"Processing pattern ${target.templateFile}")) *>
            createQuery(target, config, reasonerOpt).flatMap(processTarget(target, config, _, modelOpt))
        }
      }
    } yield ()
  }

  /** Build a Jena model containing all axioms from the ontology's imports closure,
    * converted to RDF triples.
    */
  def makeModel(ont: OWLOntology): ZIO[Any, DOSDPError, Model] =
    for {
      model <- ZIO.effectTotal(ModelFactory.createDefaultModel())
      // Flatten the imports closure into a single axiom set
      allAxioms = for {
        completeOnt <- ont.getImportsClosure.asScala.to(Set)
        axiom <- completeOnt.getAxioms().asScala.to(Set)
      } yield axiom
      manager <- ZIO.effectTotal(OWLManager.createOWLOntologyManager())
      finalOnt <- ZIO.effectTotal(manager.createOntology(allAxioms.asJava))
      triples = SesameJena.ontologyAsTriples(finalOnt)
      _ <- ZIO.effectTotal(model.add(triples.toList.asJava))
    } yield model

  /** Create a reasoner only when both an ontology and a factory are available;
    * managed so the reasoner is disposed when the scope closes.
    */
  private def makeOptionalReasoner(ontologyOpt: Option[OWLOntology], factoryOpt: Option[OWLReasonerFactory]): ZManaged[Any, DOSDPError, Option[OWLReasoner]] =
    ZManaged.foreach(
      for {
        ontology <- ontologyOpt
        factory <- factoryOpt
      } yield ZIO
        .effect(factory.createReasoner(ontology))
        .mapError(e => DOSDPError(s"Failed to create reasoner for ontology $ontology", e))
        .toManaged(o => ZIO.effectTotal(o.dispose()))
    )(identity)

  /** Load the pattern file for a target and turn it into a (possibly reasoner-expanded)
    * SPARQL query string.
    */
  private def createQuery(target: QueryTarget, config: QueryConfig, reasonerOpt: Option[OWLReasoner]): ZIO[Any, DOSDPError, String] =
    for {
      dosdp <- Config.inputDOSDPFrom(target.templateFile)
      prefixes <- config.common.prefixesMap
      query <- ZIO.fromEither(makeProcessedQuery(dosdp, prefixes, config.restrictAxiomsTo, reasonerOpt))
    } yield query

  /** Generate the SPARQL query for a pattern; when a reasoner is provided, Owlet
    * expands class expressions in the query into enumerations of inferred subclasses.
    */
  def makeProcessedQuery(dosdp: DOSDP, prefixes: PartialFunction[String, String], axiomKind: AxiomKind, reasonerOpt: Option[OWLReasoner]): Either[DOSDPError, String] = {
    val maybeSparqlQuery = SPARQL.queryFor(ExpandedDOSDP(dosdp, prefixes), axiomKind)
    for {
      sparqlQuery <- maybeSparqlQuery
    } yield {
      reasonerOpt
        .map { reasoner =>
          new Owlet(reasoner).expandQueryString(sparqlQuery)
        }
        .getOrElse(sparqlQuery)
    }
  }

  /** Either write the query text to the output file (--print-query) or execute it
    * against the model and write the results as a delimited table.
    */
  private def processTarget(target: QueryTarget,
                            config: QueryConfig,
                            processedQuery: String,
                            modelOpt: Option[Model]): ZIO[Any, DOSDPError, Unit] = {
    // bracketAuto closes the writer even if printing fails
    val doPrintQuery = ZIO
      .effect(new PrintWriter(new File(target.outputFile), "utf-8"))
      .bracketAuto(w => ZIO.effect(w.print(processedQuery)))
    val doPerformQuery = for {
      model <- ZIO.fromOption(modelOpt).orElseFail(DOSDPError("Can't run query; no ontology provided."))
      (columns, results) <- performQuery(processedQuery, model)
      sepFormat <- ZIO.fromEither(Config.tabularFormat(config.common.tableFormat))
      _ <-
        ZIO
          .effect(CSVWriter.open(target.outputFile, "utf-8")(sepFormat))
          .bracketAuto(w => writeQueryResults(w, columns, results))
    } yield ()
    (if (config.printQuery.bool) doPrintQuery else doPerformQuery).mapError(e => DOSDPError("Failure performing query command", e))
  }

  // Header row first, then one row per solution; missing bindings become empty cells
  private def writeQueryResults(writer: CSVWriter, columns: List[String], results: List[QuerySolution]) =
    ZIO.effect(writer.writeRow(columns)) *> ZIO.foreach(results) { qs =>
      ZIO.effect(writer.writeRow(columns.map(variable => Option(qs.get(variable)).map(_.toString).getOrElse(""))))
    }

  /** In batch mode, template/outfile must be directories and one target is produced
    * per pattern name; otherwise template/outfile are used directly as files.
    */
  private def determineTargets(config: QueryConfig): RIO[Blocking, List[QueryTarget]] = {
    val patternNames = config.common.batchPatterns.items
    if (patternNames.nonEmpty) for {
      _ <- ZIO.effectTotal(scribe.info("Running in batch mode"))
      _ <- ZIO.ifM(isDirectory(config.common.template))(ZIO.unit, ZIO.fail(DOSDPError("\"--template must be a directory in batch mode\"")))
      _ <- ZIO.ifM(isDirectory(config.common.outfile))(ZIO.unit, ZIO.fail(DOSDPError("\"--outfile must be a directory in batch mode\"")))
    } yield patternNames.map { pattern =>
      val templateFileName = s"${config.common.template}/$pattern.yaml"
      // Query text gets an .rq extension; results use the table format's extension
      val suffix =
        if (config.printQuery.bool) "rq"
        else config.common.tableFormat.toLowerCase
      val outFileName = s"${config.common.outfile}/$pattern.$suffix"
      QueryTarget(templateFileName, outFileName)
    }
    else ZIO.succeed(List(QueryTarget(config.common.template, config.common.outfile)))
  }

  /** Execute a SELECT query against the model, materializing columns and rows before
    * the query execution is closed (bracketAuto).
    */
  def performQuery(sparql: String, model: Model): Task[(List[String], List[QuerySolution])] =
    for {
      query <- ZIO.effect(QueryFactory.create(sparql))
      results <- ZIO.effect(QueryExecutionFactory.create(query, model)).bracketAuto { qe =>
        ZIO.effect {
          val resultSet = qe.execSelect()
          val columns = resultSet.getResultVars.asScala.toList
          val results = resultSet.asScala.toList
          (columns, results)
        }
      }
    } yield results

  // A single unit of work: one pattern file in, one query/result file out
  final private case class QueryTarget(templateFile: String, outputFile: String)
}
| balhoff/dosdp-scala | src/main/scala/org/monarchinitiative/dosdp/cli/Query.scala | Scala | mit | 6,770 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package runtime
/** Enriches `Long` with integral-proxy operations; implemented as a value class,
  * so most call sites incur no boxing.
  */
final class RichLong(val self: Long) extends AnyVal with IntegralProxy[Long] {
  protected def num = scala.math.Numeric.LongIsIntegral
  protected def ord = scala.math.Ordering.Long

  override def doubleValue = self.toDouble
  override def floatValue = self.toFloat
  override def longValue = self
  override def intValue = self.toInt
  override def byteValue = self.toByte
  override def shortValue = self.toShort

  // A Long fits exactly in a narrower type iff it lies within that type's range.
  override def isValidByte = Byte.MinValue <= self && self <= Byte.MaxValue
  override def isValidShort = Short.MinValue <= self && self <= Short.MaxValue
  override def isValidChar = Char.MinValue <= self && self <= Char.MaxValue
  override def isValidInt = Int.MinValue <= self && self <= Int.MaxValue
  def isValidLong = true
  // Float/Double validity checks intentionally not overridden here
  // (conversion through floating point loses precision near Long.MaxValue).

  override def abs: Long = math.abs(self)
  override def max(that: Long): Long = math.max(self, that)
  override def min(that: Long): Long = math.min(self, that)

  /** There is no reason to round a `Long`, but this method is provided to avoid accidental conversion to `Int` through `Float`. */
  @deprecated("this is an integer type; there is no reason to round it. Perhaps you meant to call this on a floating-point value?", "2.11.0")
  def round: Long = self

  def toBinaryString: String = java.lang.Long.toBinaryString(self)
  def toHexString: String = java.lang.Long.toHexString(self)
  def toOctalString: String = java.lang.Long.toOctalString(self)
}
| martijnhoekstra/scala | src/library/scala/runtime/RichLong.scala | Scala | apache-2.0 | 1,860 |
package scalaxb.servlet.model
import java.net.{URI}
import java.io.{InputStream, OutputStream, ByteArrayOutputStream, ByteArrayInputStream, StringWriter, PrintWriter}
/** An in-memory "file" of generated Scala source: text is accumulated through
  * `printout` and can later be read back as a string, a stream, or a zip entry.
  *
  * @param fileName name the content is stored under when zipped
  */
class ScalaFile(val fileName: String) {
  val out = new StringWriter
  val printout = new PrintWriter(out)

  /** Everything written to `printout` so far. */
  def content = out.toString

  /** A fresh stream over the current content.
    * NOTE(review): encodes with the platform default charset, as the original
    * did — confirm UTF-8 is not required by consumers.
    */
  def inputStream: InputStream =
    new ByteArrayInputStream(out.toString.getBytes)

  /** Copy the current content into `out`; the caller retains ownership of `out`
    * and is responsible for closing it.
    */
  def write(out: OutputStream): Unit = {
    val in = inputStream
    try {
      val buffer = new Array[Byte](1024)
      Iterator.continually(in.read(buffer))
        .takeWhile(_ != -1)
        .foreach { out.write(buffer, 0, _) }
    }
    finally {
      in.close()
    }
  }
}

object ScalaFile {
  import java.util.zip.{ZipOutputStream, ZipEntry}

  /** Pack the given files into a zip archive (one entry per file, named by
    * `fileName`) and return the archive's bytes.
    */
  def zip(files: Seq[ScalaFile]): Array[Byte] = {
    val out = new ByteArrayOutputStream()
    val zout = new ZipOutputStream(out)
    // foreach, not map: we iterate purely for the side effect of writing entries
    files.foreach { file =>
      zout.putNextEntry(new ZipEntry(file.fileName))
      file.write(zout)
    }
    zout.close()
    out.toByteArray
  }
}
| eed3si9n/scalaxb | web/src/main/scala/scalaxb/servlet/model/ScalaFile.scala | Scala | mit | 1,025 |
package org.openurp.edu.eams.teach.grade.lesson.web.action
import org.apache.commons.collections.CollectionUtils
import org.beangle.commons.bean.comparators.PropertyComparator
import org.beangle.commons.collection.Collections
import org.beangle.commons.collection.Order
import org.beangle.data.jpa.dao.OqlBuilder
import org.beangle.commons.lang.Objects
import org.beangle.commons.lang.Strings
import org.beangle.struts2.convention.route.Action
import org.openurp.base.Semester
import org.openurp.edu.base.Course
import org.openurp.edu.eams.teach.Grade
import org.openurp.edu.teach.code.ExamStatus
import org.openurp.edu.eams.teach.code.industry.ExamType
import org.openurp.edu.teach.code.GradeType
import org.openurp.edu.eams.teach.grade.course.service.CourseGradeComparator
import org.openurp.edu.eams.teach.grade.course.service.MakeupStdStrategy
import org.openurp.edu.eams.teach.grade.course.web.action.AdminAction
import org.openurp.edu.eams.teach.grade.course.web.helper.TeachClassGradeHelper
import org.openurp.edu.eams.teach.grade.lesson.service.GradeSegStats
import org.openurp.edu.eams.teach.grade.lesson.service.LessonGradeService
import org.openurp.edu.eams.teach.grade.model.CourseGradeSetting
import org.openurp.edu.eams.teach.grade.model.GradeRateConfig
import org.openurp.edu.eams.teach.grade.service.CourseGradeService
import org.openurp.edu.eams.teach.grade.service.CourseGradeSettings
import org.openurp.edu.teach.grade.CourseGrade
import org.openurp.edu.teach.grade.model.CourseGradeState
import org.openurp.edu.teach.lesson.CourseTake
import org.openurp.edu.teach.grade.ExamGrade
import org.openurp.edu.teach.exam.ExamTake
import org.openurp.edu.eams.teach.lesson.GradeTypeConstants
import org.openurp.edu.teach.lesson.Lesson
import org.openurp.edu.eams.teach.lesson.helper.LessonSearchHelper
import org.openurp.edu.eams.teach.lesson.service.LessonService
import org.openurp.edu.eams.web.action.common.SemesterSupportAction
/** Struts action for grade reports per lesson/semester: listing published grades,
  * unpassed students, printable report/blank tables, and segment statistics.
  * Services are injected by the container into the public vars below.
  */
class ReportAction extends SemesterSupportAction {
  var lessonService: LessonService = _
  var courseGradeService: CourseGradeService = _
  var lessonGradeService: LessonGradeService = _
  var teachClassGradeHelper: TeachClassGradeHelper = _
  var lessonSearchHelper: LessonSearchHelper = _
  var settings: CourseGradeSettings = _
  var makeupStdStrategy: MakeupStdStrategy = _
  /** Entry page: populate course-type and department dropdowns for the semester. */
  def index(): String = {
    setSemesterDataRealm(hasStdTypeDepart)
    put("courseTypes", lessonService.courseTypesOfSemester(getProjects, getDeparts, getAttribute("semester").asInstanceOf[Semester]))
    put("teachDepartList", lessonService.teachDepartsOfSemester(getProjects, getDeparts, getAttribute("semester").asInstanceOf[Semester]))
    put("departmentList", lessonService.teachDepartsOfSemester(getProjects, getDeparts, getAttribute("semester").asInstanceOf[Semester]))
    forward()
  }
  /** List lessons with PUBLISHED grade state, plus a per-lesson count of failing students. */
  def search(): String = {
    val builder = lessonSearchHelper.buildQuery()
    builder.where("exists (select cgs.lesson.id from " + classOf[CourseGradeState].getName +
      " cgs where lesson.id = cgs.lesson.id and cgs.status = :status)", Grade.Status.PUBLISHED)
    val lessons = entityDao.search(builder)
    val unpassedMap = Collections.newMap[Any]
    if (!lessons.isEmpty) {
      // Aggregate failing-grade counts grouped by lesson id
      val builder2 = OqlBuilder.from(classOf[CourseGrade], "cg")
      builder2.where("cg.lesson in(:lessons) and cg.passed=false", lessons)
      builder2.select("cg.lesson.id,count(*)").groupBy("cg.lesson.id")
      val rs = entityDao.search(builder2)
      for (data <- rs) {
        val datas = data.asInstanceOf[Array[Any]]
        unpassedMap.put(datas(0).asInstanceOf[java.lang.Long], datas(1).asInstanceOf[Number])
      }
    }
    put("unpassedMap", unpassedMap)
    put("lessons", lessons)
    put("FINAL", baseCodeService.getCode(classOf[GradeType], GradeTypeConstants.FINAL_ID))
    forward()
  }
  /** Show the failing students of one lesson, sortable by any grade column. */
  def unpassed(): String = {
    val lesson = getEntity(classOf[Lesson], "lesson")
    val query = OqlBuilder.from(classOf[CourseGrade], "courseGrade")
    query.where("courseGrade.lesson = :lesson and courseGrade.passed=false", lesson)
    val grades = entityDao.search(query)
    // Collect the distinct grade types actually present among these grades
    val gradeTypes = Collections.newBuffer[Any]
    val exited = Collections.newSet[Any]
    for (grade <- grades; eg <- grade.getExamGrades) exited.add(eg.gradeType)
    gradeTypes.addAll(exited)
    var orderBy = get("orderBy")
    if (Strings.isEmpty(orderBy)) {
      orderBy = "std.code"
    } else {
      // Strip the query-alias prefix so the comparator sees a bare property path
      if (orderBy.startsWith("courseGrade.")) orderBy = Strings.substringAfter(orderBy, "courseGrade.")
    }
    val orders = Order.parse(orderBy)
    if (Collections.isNotEmpty(orders)) {
      val order = orders.get(0)
      Collections.sort(grades, new CourseGradeComparator(order.getProperty, order.isAscending, gradeTypes))
    }
    put("gradeTypes", gradeTypes)
    put("grades", grades)
    put("NORMAL", baseCodeService.getCode(classOf[ExamStatus], ExamStatus.NORMAL))
    put("FINAL", baseCodeService.getCode(classOf[GradeType], GradeTypeConstants.FINAL_ID))
    put("lesson", lesson)
    forward()
  }
  /** Render the printable grade report for the selected lessons; chooses the makeup
    * or the regular (GA) layout depending on the requested grade types.
    */
  def report(): String = {
    val lessonIds = getLongIds("lesson")
    if (null == lessonIds || lessonIds.length == 0) {
      return forwardError("error.parameters.needed")
    }
    var gradeTypeIdArray = getIntIds("gradeType")
    if (null == gradeTypeIdArray) {
      // Default layout: usual/middle/end/GA columns
      gradeTypeIdArray = Array(GradeTypeConstants.USUAL_ID, GradeTypeConstants.MIDDLE_ID, GradeTypeConstants.END_ID, GradeTypeConstants.GA_ID)
    }
    val gradeTypeIds = Collections.newHashSet(gradeTypeIdArray)
    val makeupIds = Collections.newHashSet(GradeTypeConstants.MAKEUP_ID, GradeTypeConstants.DELAY_ID)
    // Requesting either makeup or delay pulls in both
    val isMakeup = CollectionUtils.containsAny(gradeTypeIds, makeupIds)
    if (isMakeup) gradeTypeIds.addAll(makeupIds)
    teachClassGradeHelper.report(entityDao.get(classOf[Lesson], lessonIds), gradeTypeIds.toArray(Array.ofDim[Integer](gradeTypeIds.size)))
    // Score-mark-style id -> rate config, used by the view for score rendering
    val query = OqlBuilder.from(classOf[GradeRateConfig], "config")
      .where("config.project=:project", getProject)
    val gradeConfigMap = Collections.newMap[Any]
    for (config <- entityDao.search(query)) {
      gradeConfigMap.put(String.valueOf(config.getScoreMarkStyle.id), config)
    }
    put("MIDDLE_ID", GradeTypeConstants.MIDDLE_ID)
    put("NORMAL", ExamStatus.NORMAL)
    put("GA_ID", GradeTypeConstants.GA_ID)
    put("gradeConfigMap", gradeConfigMap)
    if (isMakeup) "reportMakeup" else "reportGa"
  }
  /** Render an empty (fill-in) grade table for the selected lessons, either for
    * makeup/delay exams or for the regular GA grade elements.
    */
  def blank(): String = {
    val lessonIds = getLongIds("lesson")
    val lessons = entityDao.get(classOf[Lesson], lessonIds)
    put("lessons", lessons)
    var gradeTypes = Collections.newBuffer[Any]
    val courseTakes = Collections.newMap[Any]
    var makeup = getBool("makeup")
    val gradeTypeId = getInt("gradeType.id")
    if (null != gradeTypeId &&
      (gradeTypeId == GradeTypeConstants.MAKEUP_ID || gradeTypeId == GradeTypeConstants.DELAY_ID)) {
      makeup = true
    }
    if (makeup) {
      gradeTypes = baseCodeService.getCodes(classOf[GradeType], GradeTypeConstants.DELAY_ID, GradeTypeConstants.MAKEUP_ID)
      // Makeup rosters come from the makeup strategy, not the full teach class
      for (lesson <- lessons) courseTakes.put(lesson, makeupStdStrategy.getCourseTakes(lesson))
      val examTypes = Collections.newSet[Any]
      for (`type` <- gradeTypes) examTypes.add(`type`.getExamType)
      put("stdExamTakeMap", getStdExamTakeMap(lessons, examTypes))
    } else {
      for (lesson <- lessons) {
        val takes = Collections.newBuffer[Any](lesson.getTeachClass.getCourseTakes)
        courseTakes.put(lesson, takes)
      }
      // Columns: the project's configured GA element types plus the GA total
      val setting = settings.getSetting(getProject)
      for (gradeType <- setting.getGaElementTypes) {
        val freshedGradeType = entityDao.get(classOf[GradeType], gradeType.id)
        if (null != freshedGradeType) gradeTypes.add(freshedGradeType)
      }
      val ga = entityDao.get(classOf[GradeType], GradeTypeConstants.GA_ID)
      put("GA", ga)
      gradeTypes.add(ga)
    }
    put("courseTakeMap", courseTakes)
    Collections.sort(gradeTypes, new PropertyComparator("code"))
    put("gradeTypes", gradeTypes)
    if (makeup) "blankMakeuptable" else "blankGatable"
  }
  /** Map "lessonId_stdId" -> ExamTake for the given lessons, restricted to the
    * given exam types when non-empty.
    */
  protected def getStdExamTakeMap(lessons: List[Lesson], examTypes: Set[ExamType]): Map[String, ExamTake] = {
    if (examTypes.isEmpty) {
      return Collections.newMap[Any]
    }
    val query = OqlBuilder.from(classOf[ExamTake], "examTake").where("examTake.lesson in(:lessons)",
      lessons)
    if (Collections.isNotEmpty(examTypes)) query.where("examTake.examType in (:examTypes)", examTypes)
    val stdExamTypeMap = Collections.newMap[Any]
    val examTakes = entityDao.search(query)
    for (examTake <- examTakes) {
      stdExamTypeMap.put(examTake.getLesson.id + "_" + examTake.getStd.id, examTake)
    }
    stdExamTypeMap
  }
  /** Grade-segment statistics, either per lesson (default) or per course. */
  def stat(): String = {
    val gradeTypeIds = Array(GradeTypeConstants.END_ID, GradeTypeConstants.GA_ID)
    if (Objects.!=(get("kind"), GradeSegStats.COURSE)) {
      val lessonIdSeq = getLongIds("lesson")
      if (null == lessonIdSeq || lessonIdSeq.length == 0) {
        return forwardError("error.parameters.needed")
      }
      val lessons = entityDao.get(classOf[Lesson], lessonIdSeq)
      teachClassGradeHelper.statLesson(lessons, gradeTypeIds)
      put("kind", GradeSegStats.LESSON)
    } else {
      val lessonIdSeq = get("lesson.ids")
      var courses: List[Course] = null
      var semester: Semester = null
      if (Strings.isNotEmpty(lessonIdSeq)) {
        // Derive the distinct courses and (single) semester from the selected lessons
        var query1 = OqlBuilder.from(classOf[Lesson], "lesson")
        query1.where("lesson.id in (:lessonIds)", Strings.splitToLong(lessonIdSeq))
        query1.select("distinct lesson.course")
        courses = entityDao.search(query1)
        query1 = OqlBuilder.from(classOf[Lesson], "lesson")
        query1.where("lesson.id in (:lessonIds)", Strings.splitToLong(lessonIdSeq))
        query1.select("distinct lesson.semester")
        semester = entityDao.search(query1).iterator().next().asInstanceOf[Semester]
      }
      teachClassGradeHelper.statCourse(courses, semester, gradeTypeIds)
      put("kind", GradeSegStats.COURSE)
    }
    forward()
  }
  /** Delegate to the exam report of the admin action. */
  def reportForExam(): String = {
    forward(new Action(classOf[AdminAction], "reportForExam"))
  }
  /** Rows for export: either the current published-grade search or the explicitly selected lessons. */
  protected def getExportDatas(): Iterable[_] = {
    val lessonIdSeq = get("lessonIds")
    if (Strings.isEmpty(lessonIdSeq)) {
      val builder = lessonSearchHelper.buildQuery()
      builder.where("exists (select cgs.lesson.id from " + classOf[CourseGradeState].getName +
        " cgs where lesson.id = cgs.lesson.id and cgs.status = :status)", Grade.Status.PUBLISHED)
      populateConditions(builder)
      builder.orderBy(get(Order.ORDER_STR))
      return entityDao.search(builder)
    }
    entityDao.get(classOf[Lesson], Strings.splitToLong(lessonIdSeq))
  }
  /** Delegate to the duty student list of the teach-task action. */
  def printStdListForDuty(): String = {
    forward(new Action("teachTask", "printStdListForDuty"))
  }
}
| openurp/edu-eams-webapp | grade/src/main/scala/org/openurp/edu/eams/teach/grade/lesson/web/action/ReportAction.scala | Scala | gpl-3.0 | 10,828 |
/*
* Copyright (C) 2013 Alcatel-Lucent.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Licensed to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package molecule.examples.core
import molecule._
import molecule.stream._
import platform.Platform, channel.{ Console, Timer }
import java.util.concurrent.TimeUnit
object Join extends App {
def timeFeed[A: Message](as: Array[A], length: Int, period_ms: Int): IChan[A] =
Timer.every(period_ms, TimeUnit.MILLISECONDS).map(i => as(i % as.length)).take(length)
/**
* The 'connect' method below takes a platform as implicit argument because it must
* create a lightweight Connector process to send all data from an input to an output.
*/
val p = Platform("clock")
println("Two transitions on the left for one transition on the right")
val stream = timeFeed(Array("a", "b"), 20, 50).join(timeFeed(Array(1, 2), 10, 100)) connect Console.logOut[(String, Int)]("log:")
p.launch(stream).get_!
} | molecule-labs/molecule | molecule-core-examples/src/main/scala/molecule/examples/core/Join.scala | Scala | apache-2.0 | 1,537 |
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\\
* @ @ *
* # # # # (c) 2017 CAB *
* # # # # # # *
* # # # # # # # # # # # # *
* # # # # # # # # # *
* # # # # # # # # # # # # # # # # # # *
* # # # # # # # # # # # # # # # # # *
* # # # # # # # # # # # # # *
* # # # # # # # # # # # # # # *
* # # # # # # # # # # # # # # # # # # *
* @ @ *
\\* * http://github.com/alexcab * * * * * * * * * * * * * * * * * * * * * * * * * */
package mathact.tools
import mathact.core.bricks.blocks.{Block, BlockContext}
/** Base class for tall tools.
* Created by CAB on 07.05.2016.
*/
/** Base class for tall tools: carries an optional user-assigned display name and
  * feeds name/image into the Block UI callbacks.
  */
private[mathact] abstract class Tool(
  blockContext: BlockContext,
  toolTypeName: String,
  toolImgPath: String)
extends Block(blockContext){
  //State: display name, None until assigned (setting "" clears it)
  private var _name: Option[String] = None
  //DSL accessors
  def name_=(v: String): Unit = { _name = if (v == "") None else Some(v) }
  def name = _name
  //Callbacks invoked by the system after the sketch has been constructed
  private[mathact] def blockName: Option[String] =
    Some(_name.fold(toolTypeName)(n ⇒ toolTypeName + " - " + n))
  private[mathact] def blockImagePath: Option[String] = Some(toolImgPath)
}
| AlexCAB/MathAct | mathact_tools/src/main/scala/mathact/tools/Tool.scala | Scala | mit | 1,875 |
package com.softwaremill.codebrag.service.comments
import com.typesafe.scalalogging.slf4j.Logging
import com.softwaremill.codebrag.domain.{CommitInfo, Like}
import org.bson.types.ObjectId
import com.softwaremill.codebrag.dao.user.UserDAO
import com.softwaremill.codebrag.dao.commitinfo.CommitInfoDAO
import com.softwaremill.codebrag.dao.reaction.LikeDAO
/** Validates like/unlike operations against the business rules: a user may not
  * like own code, may not like the same thread twice, and may only remove a
  * like he authored.
  */
class LikeValidator(val commitDao: CommitInfoDAO, val likeDao: LikeDAO, val userDao: UserDAO)
  extends UserAlreadyLikedItCheck
  with UserIsCommitAuthorCheck
  with UserIsLikeAuthor {

  import LikeValidator._

  /** @return Right(()) when the like may be stored, Left(reason) otherwise. */
  def isLikeValid(like: Like): Either[String, Unit] = {
    if (userIsCommitAuthor(like)) {
      Left(UserCantLikeOwnCode)
    } else if (userAlreadyLikedThat(like)) {
      Left(UserCantLikeMultipleTimes)
    } else {
      // Explicit unit value: bare Right() compiles only via deprecated auto-adaptation
      Right(())
    }
  }

  /** @return Right(()) when `userId` authored the like, Left(reason) otherwise. */
  def canUserDoUnlike(userId: ObjectId, likeId: ObjectId): Either[String, Unit] = {
    if (userIsLikeAuthor(userId, likeId)) {
      Right(())
    } else {
      Left(LikeValidator.UserIsNotLikeAuthor)
    }
  }
}
object LikeValidator {
  // Human-readable rejection reasons returned in the Left side of validation results
  val UserCantLikeMultipleTimes = "User can't like the same code multiple times"
  val UserCantLikeOwnCode = "User can't like own code"
  val UserIsNotLikeAuthor = "User is not like's author or like doesn't exist"
}
/** Check: is the like's author also the author of the liked commit? */
trait UserIsCommitAuthorCheck extends Logging {
  def userDao: UserDAO
  def commitDao: CommitInfoDAO
  // NOTE(review): the irrefutable pattern below throws MatchError when the commit
  // does not exist — callers appear to guarantee existence; confirm.
  def userIsCommitAuthor(like: Like) = {
    val Some(commit) = commitDao.findByCommitId(like.commitId)
    isUserSameAsAuthor(commit, like)
  }
  private def isUserSameAsAuthor(commit: CommitInfo, like: Like): Boolean = {
    val userOpt = userDao.findCommitAuthor(commit)
    // getOrElse is used here only for its side effect: log (debug) when no user matches
    userOpt.getOrElse(logger.debug(s"Cannot find user: ${commit.authorName}/${commit.authorEmail}"))
    userOpt.exists(_.id == like.authorId)
  }
}
/** Check: has this user already liked the same thread? */
trait UserAlreadyLikedItCheck {
  def likeDao: LikeDAO
  /** True when the author of `like` already has a like recorded on the same thread. */
  def userAlreadyLikedThat(like: Like): Boolean = {
    val allForTheSameCode = likeDao.findAllLikesForThread(like.threadId)
    // exists states the intent directly (and short-circuits) vs find(...).nonEmpty
    allForTheSameCode.exists(_.authorId == like.authorId)
  }
}
trait UserIsLikeAuthor extends Logging {
def likeDao: LikeDAO
  /** True when the like exists and was authored by `userId`; a missing like is
    * treated as "not the author" (with a warning logged).
    */
  def userIsLikeAuthor(userId: ObjectId, likeId: ObjectId): Boolean = {
    likeDao.findById(likeId) match {
      case Some(like) => like.authorId.equals(userId)
      case None => {
        logger.warn(s"Can't find like with id ${likeId.toString}")
        false
      }
    }
  }
} | softwaremill/codebrag | codebrag-service/src/main/scala/com/softwaremill/codebrag/service/comments/LikeValidator.scala | Scala | agpl-3.0 | 2,400 |
package io.finch
import java.nio.charset.Charset
import com.twitter.finagle.http.Method
import com.twitter.io.Buf
import io.finch.internal.BufText
class InputSpec extends FinchSpec {

  behavior of "Input"

  it should "properly construct Inputs using factories with params for the different methods" in {
    // Validates that a constructed Input carries the expected method, path, params, and segments.
    def validateInput(
      input: Input,
      method: Method,
      segments: Seq[String],
      params: Map[String, String]
    ): Boolean =
      input.request.method === method &&
        input.request.path === "/" + segments.mkString("/") &&
        input.request.params === params &&
        input.path === segments

    check { (ps: Params, p: Path) =>
      val segments = p.p.split("/").toList.drop(1)
      // BUG FIX: previously these four checks were separate statements, so only the
      // last (delete) result was returned as the property — the get/put/patch results
      // were silently discarded. Chain them so all four must hold.
      validateInput(Input.get(p.p, ps.p.toSeq: _*), Method.Get, segments, ps.p) &&
        validateInput(Input.put(p.p, ps.p.toSeq: _*), Method.Put, segments, ps.p) &&
        validateInput(Input.patch(p.p, ps.p.toSeq: _*), Method.Patch, segments, ps.p) &&
        validateInput(Input.delete(p.p, ps.p.toSeq: _*), Method.Delete, segments, ps.p)
    }
  }

  it should "add content through withBody" in {
    check { (i: Input, b: Buf) =>
      i.withBody[Text.Plain](b).request.content === b
    }
  }

  it should "add content corresponding to a class through withBody[JSON]" in {
    check { (i: Input, s: String, cs: Charset) =>
      val input = i.withBody[Application.Json](new Exception(s), Some(cs))
      input.request.content === BufText(s"""{"message":"$s"}""", cs) &&
        input.request.contentType === Some(s"application/json;charset=${cs.displayName.toLowerCase}")
    }
  }

  it should "add headers through withHeaders" in {
    check { (i: Input, hs: Headers) =>
      val hm = i.withHeaders(hs.m.toSeq: _*).request.headerMap
      hs.m.forall { case (k, v) => hm.contains(k) && hm(k) === v }
    }
  }

  it should "add form elements through withForm" in {
    check { (i: Input, ps: Params) =>
      // An empty form is vacuously fine; otherwise every pair must appear in the body
      ps.p.isEmpty || {
        val input = i.withForm(ps.p.toSeq: _*)
        val contentString = input.request.contentString
        ps.p.forall { case (k, v) => contentString.contains(s"$k=$v") } &&
          input.request.contentType === Some("application/x-www-form-urlencoded;charset=utf-8")
      }
    }
  }
}
| ilya-murzinov/finch | core/src/test/scala/io/finch/InputSpec.scala | Scala | apache-2.0 | 2,232 |
package spark
import java.net.URL
import java.io.EOFException
import java.io.ObjectInputStream
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.HashMap
// Describes how one parent RDD contributes its data to a co-group partition:
// either by reading the parent's partition directly (narrow dependency) or by
// fetching map outputs of a shuffle identified by `shuffleId`.
// Serializable because these descriptors travel to executors inside splits.
sealed trait CoGroupSplitDep extends Serializable
case class NarrowCoGroupSplitDep(rdd: RDD[_], split: Split) extends CoGroupSplitDep
case class ShuffleCoGroupSplitDep(shuffleId: Int) extends CoGroupSplitDep
// One partition of a CoGroupedRDD. `deps` holds, for each parent RDD (in order),
// how the data for this partition is obtained (narrow read or shuffle fetch).
class CoGroupSplit(idx: Int, val deps: Seq[CoGroupSplitDep]) extends Split with Serializable {
// Index of this partition within the RDD.
override val index = idx
// NOTE(review): hashCode is overridden without a matching equals, so two splits
// with the same index hash alike but still compare by reference — confirm this
// matches the intended Split equality contract.
override def hashCode(): Int = idx
}
/**
 * Aggregator used by CoGroupedRDD that gathers every value seen for a key
 * into a single ArrayBuffer: create a one-element buffer for the first value,
 * append subsequent values, and concatenate buffers when merging combiners.
 */
class CoGroupAggregator
  extends Aggregator[Any, Any, ArrayBuffer[Any]](
    (value: Any) => ArrayBuffer(value),
    (buf: ArrayBuffer[Any], value: Any) => buf += value,
    (left: ArrayBuffer[Any], right: ArrayBuffer[Any]) => left ++ right)
  with Serializable
/**
 * An RDD that co-groups several parent RDDs of key/value pairs by key using the
 * partitioner `part`. Each output record pairs a key with one value-sequence per
 * parent RDD (in the order the parents were given).
 */
class CoGroupedRDD[K](rdds: Seq[RDD[(_, _)]], part: Partitioner)
extends RDD[(K, Seq[Seq[_]])](rdds.head.context) with Logging {
// Collects all values for a key into a single ArrayBuffer during the shuffle.
val aggr = new CoGroupAggregator
// One dependency per parent, in parent order: one-to-one when the parent is
// already partitioned by `part`, otherwise a shuffle dependency.
override val dependencies = {
val deps = new ArrayBuffer[Dependency[_]]
for ((rdd, index) <- rdds.zipWithIndex) {
if (rdd.partitioner == Some(part)) {
logInfo("Adding one-to-one dependency with " + rdd)
deps += new OneToOneDependency(rdd)
} else {
logInfo("Adding shuffle dependency with " + rdd)
deps += new ShuffleDependency[Any, Any, ArrayBuffer[Any]](
context.newShuffleId, rdd, aggr, part)
}
}
deps.toList
}
// Splits are marked @transient so they are rebuilt rather than serialized with
// the RDD; each split records, per parent, whether to read the parent partition
// directly or fetch shuffle outputs (matching `dependencies` by position).
@transient
val splits_ : Array[Split] = {
val firstRdd = rdds.head
val array = new Array[Split](part.numPartitions)
for (i <- 0 until array.size) {
array(i) = new CoGroupSplit(i, rdds.zipWithIndex.map { case (r, j) =>
dependencies(j) match {
case s: ShuffleDependency[_, _, _] =>
new ShuffleCoGroupSplitDep(s.shuffleId): CoGroupSplitDep
case _ =>
new NarrowCoGroupSplitDep(r, r.splits(i)): CoGroupSplitDep
}
}.toList)
}
array
}
override def splits = splits_
override val partitioner = Some(part)
// No locality preference is expressed for co-grouped partitions.
override def preferredLocations(s: Split) = Nil
// Materializes one partition: merges the values from every parent dependency
// into a per-key array of buffers (one buffer per parent), then iterates it.
override def compute(s: Split): Iterator[(K, Seq[Seq[_]])] = {
val split = s.asInstanceOf[CoGroupSplit]
val map = new HashMap[K, Seq[ArrayBuffer[Any]]]
// Lazily creates, per key, one empty buffer per parent RDD.
// Array.fill evaluates its argument per element, so each slot gets a
// distinct ArrayBuffer instance.
def getSeq(k: K): Seq[ArrayBuffer[Any]] = {
map.getOrElseUpdate(k, Array.fill(rdds.size)(new ArrayBuffer[Any]))
}
// `depNum` is the parent's position and selects the buffer slot to fill.
for ((dep, depNum) <- split.deps.zipWithIndex) dep match {
case NarrowCoGroupSplitDep(rdd, itsSplit) => {
// Read them from the parent
for ((k, v) <- rdd.iterator(itsSplit)) {
getSeq(k.asInstanceOf[K])(depNum) += v
}
}
case ShuffleCoGroupSplitDep(shuffleId) => {
// Read map outputs of shuffle
def mergePair(k: K, vs: Seq[Any]) {
val mySeq = getSeq(k)
for (v <- vs)
mySeq(depNum) += v
}
val fetcher = SparkEnv.get.shuffleFetcher
fetcher.fetch[K, Seq[Any]](shuffleId, split.index, mergePair)
}
}
map.iterator
}
}
| javelinjs/spark | core/src/main/scala/spark/CoGroupedRDD.scala | Scala | bsd-3-clause | 3,006 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset and retrieves a sample of code snippets that match specific criteria, offering a basic overview of the dataset's contents without deeper analysis.