code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.broadcast
import scala.reflect.ClassTag
import org.apache.spark.{SecurityManager, SparkConf}
/**
* A [[org.apache.spark.broadcast.BroadcastFactory]] implementation that uses a
* HTTP server as the broadcast mechanism. Refer to
* [[org.apache.spark.broadcast.HttpBroadcast]] for more details about this mechanism.
*/
class HttpBroadcastFactory extends BroadcastFactory {
  /** Set up the shared HTTP broadcast machinery (server on the driver, client state on executors). */
  override def initialize(isDriver: Boolean, conf: SparkConf, securityMgr: SecurityManager): Unit =
    HttpBroadcast.initialize(isDriver, conf, securityMgr)

  /** Create a new HTTP-based broadcast variable wrapping `value_`. */
  override def newBroadcast[T: ClassTag](value_ : T, isLocal: Boolean, id: Long): Broadcast[T] =
    new HttpBroadcast[T](value_, isLocal, id)

  /** Shut down the shared HTTP broadcast machinery. */
  override def stop(): Unit = HttpBroadcast.stop()

  /**
   * Remove all persisted state associated with the HTTP broadcast with the given ID.
   *
   * @param removeFromDriver Whether to remove state from the driver
   * @param blocking Whether to block until unbroadcasted
   */
  override def unbroadcast(id: Long, removeFromDriver: Boolean, blocking: Boolean): Unit =
    HttpBroadcast.unpersist(id, removeFromDriver, blocking)
}
| tophua/spark1.52 | core/src/main/scala/org/apache/spark/broadcast/HttpBroadcastFactory.scala | Scala | apache-2.0 | 2,028 |
package controllers
import java.util.concurrent.ThreadLocalRandom
import scala.concurrent.{Future, ExecutionContext}
import play.api.libs.json.{JsObject, Json, JsValue}
import play.api.mvc._
import play.mvc.Http
import reactivemongo.api.ReadPreference
import reactivemongo.play.json.collection.JSONCollection
import play.modules.reactivemongo.{
ReactiveMongoApi, ReactiveMongoComponents, MongoController
}
import play.modules.reactivemongo.json._
/**
 * TechEmpower-style benchmark controller backed by MongoDB through ReactiveMongo.
 * Implements the "db", "queries", "fortunes" and "updates" test endpoints.
 */
class Application (val controllerComponents: ControllerComponents, reactiveMongoApi: ReactiveMongoApi)(implicit ec: ExecutionContext)
extends BaseController {
// Collections are resolved on each access rather than cached at construction.
private def worldCollection: JSONCollection = reactiveMongoApi.db.collection[JSONCollection]("world")
private def fortuneCollection: JSONCollection = reactiveMongoApi.db.collection[JSONCollection]("fortune")
// Projection that strips Mongo's internal _id field from returned documents.
private val projection = Json.obj("_id" -> 0)
/**
 * Issue `queries` independent single-document lookups in parallel; each lookup
 * may come back empty (None) if the random id matches no document.
 * NOTE(review): this queries by "_id" while getRandomWorld below queries by
 * "id" — confirm which field the world collection is actually keyed on.
 */
def getRandomWorlds(queries: Int): Future[Seq[Option[JsObject]]] = {
val futureWorlds: Seq[Future[Option[JsObject]]] = for {
_ <- 1 to queries
} yield { worldCollection
.find(Json.obj("_id" -> getNextRandom), projection)
.one[JsObject]
}
Future.sequence(futureWorlds)
}
// Fetch one random world document, queried by its "id" field (_id projected out).
def getRandomWorld = {
val futureWorld = worldCollection
.find(Json.obj("id" -> getNextRandom), projection)
.one[JsValue]
futureWorld
}
// Read every document from the "fortune" collection.
def getFortunes: Future[List[JsObject]] = {
val futureFortunes: Future[List[JsObject]] =
fortuneCollection.find(Json.obj())
.cursor[JsObject](ReadPreference.primaryPreferred, false).collect[List]()
futureFortunes
}
/**
 * Fetch `queries` random worlds, assign each a fresh randomNumber and write it
 * back. NOTE(review): the update Futures are fire-and-forget (never awaited),
 * so a failed write is not reflected in the returned documents.
 */
def updateWorlds(queries: Int): Future[Seq[Option[JsObject]]] = {
val futureWorlds: Future[Seq[Option[JsObject]]] = getRandomWorlds(queries)
val futureNewWorlds: Future[Seq[Option[JsObject]]] = futureWorlds.map( worlds => {
worlds.map(worldOption => {
worldOption.map(world => {
val newWorld = world ++ Json.obj("randomNumber" -> getNextRandom)
worldCollection.update(world, newWorld)
newWorld
})
})
})
futureNewWorlds
}
// Uniform random id in [1, TestDatabaseRows].
def getNextRandom: Int = {
ThreadLocalRandom.current().nextInt(TestDatabaseRows) + 1
}
// Semi-Common code between Scala database code
protected val TestDatabaseRows = 10000
// GET /db — a single random world as JSON.
// NOTE(review): worlds.head throws if the lookup returned None — confirm the
// benchmark data set guarantees a hit for every generated id.
def db = Action.async {
getRandomWorld.map { worlds =>
Ok(Json.toJson(worlds.head))
}
}
// GET /queries — n random worlds as a JSON array (n clamped to [1, 500]).
def queries(countString: String) = Action.async {
val n = parseCount(countString)
getRandomWorlds(n).map { worlds =>
Ok(Json.toJson(worlds))
}
}
// Sort key extractor: the "message" field of a fortune document.
private def byMessage(item: JsValue): String = {
(item \\ "message").as[String]
}
// GET /fortunes — all fortunes plus one synthetic entry, sorted by message.
def fortunes() = Action.async {
getFortunes.map { dbFortunes =>
val appendedFortunes = Json.obj("_id" -> 0, "message" -> "Additional fortune added at request time.") :: dbFortunes
val sorted = appendedFortunes.sortBy(byMessage(_))
Ok(views.html.fortune(sorted))
}
}
// GET /updates — update n random worlds and return the updated documents.
def update(queries: String) = Action.async {
val n = parseCount(queries)
updateWorlds(n).map { worlds =>
Ok(Json.toJson(worlds))
}
}
// Parse the count parameter, clamping to [1, 500]; malformed input maps to 1.
private def parseCount(s: String): Int = {
try {
val parsed = java.lang.Integer.parseInt(s, 10)
parsed match {
case i if i < 1 => 1
case i if i > 500 => 500
case i => i
}
} catch {
case _: NumberFormatException => 1
}
}
}
| steveklabnik/FrameworkBenchmarks | frameworks/Scala/play2-scala/play2-scala-reactivemongo/app/controllers/Application.scala | Scala | bsd-3-clause | 3,384 |
/*
* ScalaQCMS -- Scala Quantum Circuit Model Simulator
*
* Copyright (c) 2012 Antti Vikman
*/
package models
import solvers.{SolverStatistics, Solver, DummySolver}
/**
* Solvers is a global container of all known solvers
* @todo adding and removing of solvers on runtime
*/
/**
 * Solvers is a global container of all known solvers
 * @todo adding and removing of solvers on runtime
 */
object Solvers {
  // Registry of all known solvers, keyed by their public name.
  val _solvers = Map[String, Solver](
    "dummy" -> DummySolver
  )

  /** Names of every registered solver. */
  def getSolvers = _solvers.keySet

  /**
   * Collect runtime statistics for the requested solvers.
   *
   * @param solvers names to report on (defaults to all registered solvers);
   *                unknown names are silently ignored
   * @return statistics keyed by solver name
   */
  def getStatsOfSolvers(solvers: Set[String] = getSolvers): Map[String, SolverStatistics] =
    _solvers.collect { case (name, solver) if solvers.contains(name) => name -> solver.statistics }

  /**
   * Solve the given equation with the named solver.
   *
   * @throws NullPointerException if `equation` is null
   * @throws MatchError if no solver is registered under `solver`
   */
  def solve(equation: EquationEntity, solver: String = "dummy"): EquationEntity = {
    if (equation == null)
      throw new NullPointerException("Variable equation can not be null")
    if (!_solvers.contains(solver))
      throw new MatchError("No such solver as \"" + solver + "\"")
    _solvers(solver).solve(equation)
  }
}
| n-a-g-r-o-m/ScaQCMS | app/models/Solvers.scala | Scala | mit | 1,019 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.carbondata.spark.testsuite.dataload
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.spark.sql.common.util.CarbonHiveContext._
import org.apache.spark.sql.common.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
/**
* This class will test data load in which number of columns in data are more than
* the number of columns in schema
*/
class TestDataLoadWithColumnsMoreThanSchema extends QueryTest with BeforeAndAfterAll {
// Create one CarbonData table and one equivalent Hive table and load matching
// CSV data into both, so each test can compare counts against the Hive baseline.
override def beforeAll {
sql("DROP TABLE IF EXISTS char_test")
sql("DROP TABLE IF EXISTS hive_char_test")
sql("CREATE TABLE char_test (imei string,age int,task bigint,num double,level decimal(10,3),productdate timestamp,mark int,name string)STORED BY 'org.apache.carbondata.format'")
sql("CREATE TABLE hive_char_test (imei string,age int,task bigint,num double,level decimal(10,3),productdate timestamp,mark int,name string)row format delimited fields terminated by ','")
sql("LOAD DATA LOCAL INPATH './src/test/resources/character_carbon.csv' into table char_test")
sql("LOAD DATA local inpath './src/test/resources/character_hive.csv' INTO table hive_char_test")
}
// Carbon row count must equal the Hive row count, i.e. no rows dropped on load.
test("test count(*) to check for data loss") {
checkAnswer(sql("select count(*) from char_test"),
sql("select count(*) from hive_char_test"))
}
// A non-numeric MAXCOLUMNS option must make the load fail.
// NOTE(review): a bare `case _ =>` in a Scala catch matches every Throwable,
// including fatal errors; `case NonFatal(_)` would be safer in these tests.
test("test for invalid value of maxColumns") {
sql("DROP TABLE IF EXISTS max_columns_test")
sql("CREATE TABLE max_columns_test (imei string,age int,task bigint,num double,level decimal(10,3),productdate timestamp,mark int,name string)STORED BY 'org.apache.carbondata.format'")
try {
sql("LOAD DATA LOCAL INPATH './src/test/resources/character_carbon.csv' into table max_columns_test options('MAXCOLUMNS'='avfgd')")
assert(false)
} catch {
case _ => assert(true)
}
}
// A valid numeric MAXCOLUMNS value must load all rows.
test("test for valid value of maxColumns") {
sql("DROP TABLE IF EXISTS valid_max_columns_test")
sql("CREATE TABLE valid_max_columns_test (imei string,age int,task bigint,num double,level decimal(10,3),productdate timestamp,mark int,name string)STORED BY 'org.apache.carbondata.format'")
try {
sql("LOAD DATA LOCAL INPATH './src/test/resources/character_carbon.csv' into table valid_max_columns_test options('MAXCOLUMNS'='400')")
checkAnswer(sql("select count(*) from valid_max_columns_test"),
sql("select count(*) from hive_char_test"))
} catch {
case _ => assert(false)
}
}
// Expects the load itself to fail (malformed FILEHEADER option plus
// MAXCOLUMNS=2). If the load unexpectedly succeeds, a
// MalformedCarbonCommandException is thrown by hand and caught by the first
// case, which fails the test; any other load failure passes it.
test("test with invalid maxColumns value") {
sql(
"CREATE TABLE max_columns_value_test (imei string,age int,task bigint,num double,level " +
"decimal(10,3),productdate timestamp,mark int,name string) STORED BY 'org.apache.carbondata" +
".format'")
try {
sql(
"LOAD DATA LOCAL INPATH './src/test/resources/character_carbon.csv' into table " +
"max_columns_value_test options('FILEHEADER='imei,age','MAXCOLUMNS'='2')")
throw new MalformedCarbonCommandException("Invalid")
} catch {
case me: MalformedCarbonCommandException =>
assert(false)
case _ => assert(true)
}
}
// A MAXCOLUMNS value above the engine's internal threshold should still load.
test("test for maxcolumns option value greater than threshold value for maxcolumns") {
sql("DROP TABLE IF EXISTS valid_max_columns_test")
sql("CREATE TABLE valid_max_columns_test (imei string,age int,task bigint,num double,level decimal(10,3),productdate timestamp,mark int,name string)STORED BY 'org.apache.carbondata.format'")
try {
sql("LOAD DATA LOCAL INPATH './src/test/resources/character_carbon.csv' into table valid_max_columns_test options('MAXCOLUMNS'='22000')")
checkAnswer(sql("select count(*) from valid_max_columns_test"),
sql("select count(*) from hive_char_test"))
} catch {
case _ => assert(false)
}
}
// MAXCOLUMNS exactly matching the CSV's column count must load without error.
test("test for boundary value for maxcolumns") {
sql("DROP TABLE IF EXISTS boundary_max_columns_test")
sql("CREATE TABLE boundary_max_columns_test (empno string, empname String, designation String, doj String, " +
"workgroupcategory string, workgroupcategoryname String, deptno string, deptname String, " +
"projectcode string, projectjoindate String, projectenddate String,attendance double," +
"utilization double,salary double) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES" +
"('DICTIONARY_EXCLUDE'='empno,empname,designation,doj,workgroupcategory," +
"workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate')")
try {
sql("LOAD DATA LOCAL INPATH './src/test/resources/data.csv' into table boundary_max_columns_test options('MAXCOLUMNS'='14')")
assert(true)
} catch {
case _ => assert(false)
}
}
// MAXCOLUMNS one below the CSV header width is still expected to succeed.
// NOTE(review): same success expectation as the previous test — confirm that
// the engine is meant to bump an undersized MAXCOLUMNS automatically.
test("test for maxcolumns value less than columns in 1st line of csv file") {
sql("DROP TABLE IF EXISTS boundary_max_columns_test")
sql("CREATE TABLE boundary_max_columns_test (empno string, empname String, designation String, doj String, " +
"workgroupcategory string, workgroupcategoryname String, deptno string, deptname String, " +
"projectcode string, projectjoindate String, projectenddate String,attendance double," +
"utilization double,salary double) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES" +
"('DICTIONARY_EXCLUDE'='empno,empname,designation,doj,workgroupcategory," +
"workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate')")
try {
sql("LOAD DATA LOCAL INPATH './src/test/resources/data.csv' into table boundary_max_columns_test options('MAXCOLUMNS'='13')")
assert(true)
} catch {
case _ => assert(false)
}
}
// Drop the tables created in beforeAll and by the invalid-value test.
// NOTE(review): valid_max_columns_test and boundary_max_columns_test are left
// behind — confirm whether they should be dropped here too.
override def afterAll {
sql("DROP TABLE IF EXISTS char_test")
sql("DROP TABLE IF EXISTS hive_char_test")
sql("DROP TABLE IF EXISTS max_columns_value_test")
}
}
| Zhangshunyu/incubator-carbondata | integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataLoadWithColumnsMoreThanSchema.scala | Scala | apache-2.0 | 6,920 |
package net.seabears.hockey.util
import scala.util.Random
/**
 * Supplies a user-agent string chosen uniformly at random from a fixed pool of
 * real-world browser agent strings.
 */
class UserAgentFactory extends (() => String) {
  // Pool of agent strings. IndexedSeq gives O(1) positional access; the
  // original List made every random lookup O(n).
  private[this] val agents = IndexedSeq(
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FSL 7.0.6.01001)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FSL 7.0.7.01001)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FSL 7.0.5.01003)",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0",
    "Mozilla/5.0 (X11; U; Linux x86_64; de; rv:1.9.2.8) Gecko/20100723 Ubuntu/10.04 (lucid) Firefox/3.6.8",
    "Mozilla/5.0 (Windows NT 5.1; rv:13.0) Gecko/20100101 Firefox/13.0.1",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:11.0) Gecko/20100101 Firefox/11.0",
    "Mozilla/5.0 (X11; U; Linux x86_64; de; rv:1.9.2.8) Gecko/20100723 Ubuntu/10.04 (lucid) Firefox/3.6.8",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; .NET CLR 1.0.3705)",
    "Mozilla/5.0 (Windows NT 5.1; rv:13.0) Gecko/20100101 Firefox/13.0.1",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:13.0) Gecko/20100101 Firefox/13.0.1",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)",
    "Opera/9.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.01",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727)",
    "Mozilla/5.0 (Windows NT 5.1; rv:5.0.1) Gecko/20100101 Firefox/5.0.1",
    "Mozilla/5.0 (Windows NT 6.1; rv:5.0) Gecko/20100101 Firefox/5.02",
    "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.112 Safari/535.1",
    "Mozilla/4.0 (compatible; MSIE 6.0; MSIE 5.5; Windows NT 5.0) Opera 7.02 Bork-edition [en]")

  /** @return a user-agent string picked uniformly at random from the pool */
  def apply(): String = agents(Random.nextInt(agents.size))
}
| cberes/hockey-stats-loader | src/main/scala/net/seabears/hockey/util/user_agents.scala | Scala | gpl-3.0 | 1,835 |
package com.sksamuel.elastic4s.requests.searches.aggs.responses.metrics
import com.fasterxml.jackson.annotation.JsonProperty
import com.sksamuel.elastic4s.requests.common.DocumentRef
import com.sksamuel.elastic4s.requests.searches.Total
import com.sksamuel.elastic4s.requests.searches.aggs.responses.{AggSerde, JacksonSupport, MetricAggregation, Transformable}
/** Result of a top_hits metric aggregation: total count, best score and the hits themselves. */
case class TopHits(name: String,
total: Total,
@JsonProperty("max_score") maxScore: Option[Double],
hits: Seq[TopHit]) extends MetricAggregation
/** One hit inside a top_hits aggregation; `source` holds the raw _source document map. */
case class TopHit(@JsonProperty("_index") index: String,
@JsonProperty("_type") `type`: String,
@JsonProperty("_id") id: String,
@JsonProperty("_score") score: Option[Double],
sort: Seq[String],
@JsonProperty("_source") source: Map[String, Any]) extends Transformable {
@deprecated("types are deprecated in elasticsearch", "7.7")
def ref: DocumentRef = DocumentRef(index, `type`, id)
// Transformable operates on the hit's _source map.
override private[elastic4s] val data = source
}
object TopHits {
// Serde instance so TopHits can be extracted from a parsed aggregations map.
implicit object TopHitsAggSerde extends AggSerde[TopHits] {
override def read(name: String, data: Map[String, Any]): TopHits = apply(name, data)
}
// Deserialize the "hits" sub-map through Jackson (round-trip via bytes), then
// stamp in the aggregation's name, which is not part of the JSON payload.
def apply(name: String, data: Map[String, Any]): TopHits = {
val hits = data("hits").asInstanceOf[Map[String, Any]]
val result = JacksonSupport.mapper.readValue[TopHits](JacksonSupport.mapper.writeValueAsBytes(hits))
result.copy(name = name)
}
}
| sksamuel/elastic4s | elastic4s-domain/src/main/scala/com/sksamuel/elastic4s/requests/searches/aggs/responses/metrics/TopHits.scala | Scala | apache-2.0 | 1,532 |
package com.github.tminglei.slickpg
import slick.driver.PostgresDriver
import java.sql.{Date, Timestamp}
import slick.jdbc.{PositionedResult, JdbcType}
// Edge (bound inclusiveness) of a range, named after PostgreSQL's textual
// range notation. `[_,_)` is the default used by Range below.
sealed trait EdgeType
// lower bound inclusive, upper bound exclusive
case object `[_,_)` extends EdgeType
// lower bound exclusive, upper bound inclusive
case object `(_,_]` extends EdgeType
// both bounds exclusive
case object `(_,_)` extends EdgeType
// both bounds inclusive
case object `[_,_]` extends EdgeType
/** A typed range with explicit bound inclusiveness, mirroring PostgreSQL range values. */
case class Range[T](start: T, end: T, edge: EdgeType = `[_,_)`) {
  /** Map both bounds through `convert`, keeping the same edge type. */
  def as[A](convert: (T => A)): Range[A] =
    Range[A](convert(start), convert(end), edge)

  /** Render in PostgreSQL's textual range notation, e.g. "[1,5)". */
  override def toString = {
    val (open, close) = edge match {
      case `[_,_)` => ("[", ")")
      case `(_,_]` => ("(", "]")
      case `(_,_)` => ("(", ")")
      case `[_,_]` => ("[", "]")
    }
    s"$open$start,$end$close"
  }
}
/**
* simple range support; if all you want is just getting from / saving to db, and using pg range operations/methods, it should be enough
*/
trait PgRangeSupport extends range.PgRangeExtensions with utils.PgCommonJdbcTypes { driver: PostgresDriver =>
  import driver.api._
  import PgRangeSupportUtils._

  // Parsers for the bound values of timestamp/date ranges.
  private def toTimestamp(str: String) = Timestamp.valueOf(str)
  private def toSQLDate(str: String) = Date.valueOf(str)

  /// alias
  trait RangeImplicits extends SimpleRangeImplicits

  /** Column-level JDBC mappings and extension methods for the built-in PG range types. */
  trait SimpleRangeImplicits {
    implicit val simpleIntRangeTypeMapper = new GenericJdbcType[Range[Int]]("int4range", mkRangeFn(_.toInt))
    implicit val simpleLongRangeTypeMapper = new GenericJdbcType[Range[Long]]("int8range", mkRangeFn(_.toLong))
    implicit val simpleFloatRangeTypeMapper = new GenericJdbcType[Range[Float]]("numrange", mkRangeFn(_.toFloat))
    implicit val simpleTimestampRangeTypeMapper = new GenericJdbcType[Range[Timestamp]]("tsrange", mkRangeFn(toTimestamp))
    implicit val simpleDateRangeTypeMapper = new GenericJdbcType[Range[Date]]("daterange", mkRangeFn(toSQLDate))

    implicit def simpleRangeColumnExtensionMethods[B0](c: Rep[Range[B0]])(
      implicit tm: JdbcType[B0], tm1: JdbcType[Range[B0]]) = {
      new RangeColumnExtensionMethods[Range[B0], B0, Range[B0]](c)
    }
    implicit def simpleRangeOptionColumnExtensionMethods[B0](c: Rep[Option[Range[B0]]])(
      implicit tm: JdbcType[B0], tm1: JdbcType[Range[B0]]) = {
      new RangeColumnExtensionMethods[Range[B0], B0, Option[Range[B0]]](c)
    }
  }

  /** Plain-SQL support: PositionedResult readers plus GetResult/SetParameter instances. */
  trait SimpleRangePlainImplicits {
    import utils.PlainSQLUtils._

    {
      addNextArrayConverter((r) => utils.SimpleArrayUtils.fromString(mkRangeFn(_.toInt))(r.nextString()))
      addNextArrayConverter((r) => utils.SimpleArrayUtils.fromString(mkRangeFn(_.toLong))(r.nextString()))
      addNextArrayConverter((r) => utils.SimpleArrayUtils.fromString(mkRangeFn(_.toFloat))(r.nextString()))
      addNextArrayConverter((r) => utils.SimpleArrayUtils.fromString(mkRangeFn(toTimestamp))(r.nextString()))
      addNextArrayConverter((r) => utils.SimpleArrayUtils.fromString(mkRangeFn(toSQLDate))(r.nextString()))
    }

    implicit class PgRangePositionedResult(r: PositionedResult) {
      def nextIntRange() = nextIntRangeOption().orNull
      def nextIntRangeOption() = r.nextStringOption().map(mkRangeFn(_.toInt))
      def nextLongRange() = nextLongRangeOption().orNull
      def nextLongRangeOption() = r.nextStringOption().map(mkRangeFn(_.toLong))
      def nextFloatRange() = nextFloatRangeOption().orNull
      def nextFloatRangeOption() = r.nextStringOption().map(mkRangeFn(_.toFloat))
      def nextTimestampRange() = nextTimestampRangeOption().orNull
      def nextTimestampRangeOption() = r.nextStringOption().map(mkRangeFn(toTimestamp))
      def nextDateRange() = nextDateRangeOption().orNull
      // Fix: parse the raw column string into a Range[Date]; previously this
      // mapped with toSQLDate alone, yielding a bare Date instead of a range.
      def nextDateRangeOption() = r.nextStringOption().map(mkRangeFn(toSQLDate))
    }

    ////////////////////////////////////////////////////////////////////
    implicit val getIntRange = mkGetResult(_.nextIntRange())
    implicit val getIntRangeOption = mkGetResult(_.nextIntRangeOption())
    implicit val setIntRange = mkSetParameter[Range[Int]]("int4range")
    implicit val setIntRangeOption = mkOptionSetParameter[Range[Int]]("int4range")

    implicit val getLongRange = mkGetResult(_.nextLongRange())
    implicit val getLongRangeOption = mkGetResult(_.nextLongRangeOption())
    implicit val setLongRange = mkSetParameter[Range[Long]]("int8range")
    implicit val setLongRangeOption = mkOptionSetParameter[Range[Long]]("int8range")

    implicit val getFloatRange = mkGetResult(_.nextFloatRange())
    implicit val getFloatRangeOption = mkGetResult(_.nextFloatRangeOption())
    implicit val setFloatRange = mkSetParameter[Range[Float]]("numrange")
    implicit val setFloatRangeOption = mkOptionSetParameter[Range[Float]]("numrange")

    // Fix: read a whole timestamp range; previously this called nextTimestamp()
    // and produced a GetResult of the wrong element type (Timestamp, not
    // Range[Timestamp]).
    implicit val getTimestampRange = mkGetResult(_.nextTimestampRange())
    implicit val getTimestampRangeOption = mkGetResult(_.nextTimestampRangeOption())
    implicit val setTimestampRange = mkSetParameter[Range[Timestamp]]("tsrange")
    implicit val setTimestampRangeOption = mkOptionSetParameter[Range[Timestamp]]("tsrange")

    implicit val getDateRange = mkGetResult(_.nextDateRange())
    implicit val getDateRangeOption = mkGetResult(_.nextDateRangeOption())
    implicit val setDateRange = mkSetParameter[Range[Date]]("daterange")
    implicit val setDateRangeOption = mkOptionSetParameter[Range[Date]]("daterange")
  }
}
object PgRangeSupportUtils {
// regular expr matchers to range string (optionally double-quoted bounds)
val `[_,_)Range` = """\\["?([^,"]*)"?,[ ]*"?([^,"]*)"?\\)""".r // matches: [_,_)
val `(_,_]Range` = """\\("?([^,"]*)"?,[ ]*"?([^,"]*)"?\\]""".r // matches: (_,_]
val `(_,_)Range` = """\\("?([^,"]*)"?,[ ]*"?([^,"]*)"?\\)""".r // matches: (_,_)
val `[_,_]Range` = """\\["?([^,"]*)"?,[ ]*"?([^,"]*)"?\\]""".r // matches: [_,_]
// Build a parser from PostgreSQL's textual range form to Range[T], using
// `convert` for both bounds.
// NOTE(review): input matching none of the four patterns (e.g. "empty" or an
// unbounded range) throws a MatchError — confirm callers never pass those.
def mkRangeFn[T](convert: (String => T)): (String => Range[T]) =
(str: String) => str match {
case `[_,_)Range`(start, end) => Range(convert(start), convert(end), `[_,_)`)
case `(_,_]Range`(start, end) => Range(convert(start), convert(end), `(_,_]`)
case `(_,_)Range`(start, end) => Range(convert(start), convert(end), `(_,_)`)
case `[_,_]Range`(start, end) => Range(convert(start), convert(end), `[_,_]`)
}
// Render a Range[T] back to PostgreSQL's textual form, formatting each bound
// with `toString`.
def toStringFn[T](toString: (T => String)): (Range[T] => String) =
(r: Range[T]) => r.edge match {
case `[_,_)` => s"[${toString(r.start)},${toString(r.end)})"
case `(_,_]` => s"(${toString(r.start)},${toString(r.end)}]"
case `(_,_)` => s"(${toString(r.start)},${toString(r.end)})"
case `[_,_]` => s"[${toString(r.start)},${toString(r.end)}]"
}
///
// Build a range [start, start + length).
// NOTE(review): start.asInstanceOf[Double] only succeeds when T is Double at
// runtime; any other numeric T fails with a ClassCastException — confirm
// intended usage.
def mkWithLength[T](start: T, length: Double, edge: EdgeType = `[_,_)`) = {
val upper = (start.asInstanceOf[Double] + length).asInstanceOf[T]
new Range[T](start, upper, edge)
}
// Build a date/timestamp range [start, start + interval).
def mkWithInterval[T <: java.util.Date](start: T, interval: Interval, edge: EdgeType = `[_,_)`) = {
val end = (start +: interval).asInstanceOf[T]
new Range[T](start, end, edge)
}
}
| bearrito/slick-pg | src/main/scala/com/github/tminglei/slickpg/PgRangeSupport.scala | Scala | bsd-2-clause | 6,872 |
/** Copyright 2015 TappingStone, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prediction.controller
import io.prediction.core.BaseEvaluator
import io.prediction.core.BaseEvaluatorResult
import io.prediction.data.storage.Storage
import io.prediction.workflow.NameParamsSerializer
import com.github.nscala_time.time.Imports.DateTime
import grizzled.slf4j.Logger
import io.prediction.workflow.WorkflowParams
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.json4s.native.Serialization.write
import org.json4s.native.Serialization.writePretty
import scala.language.existentials
import _root_.java.io.PrintWriter
import _root_.java.io.File
// Scores for one engine-params candidate: the primary metric's value plus the
// values of any auxiliary metrics (in the same order as their headers).
case class MetricScores[R](
score: R,
otherScores: Seq[Any])
/**
 * Outcome of a metric evaluation: the best-scoring engine params, its scores,
 * and the full per-candidate score list, renderable as text, JSON or HTML.
 */
case class MetricEvaluatorResult[R](
bestScore: MetricScores[R],
bestEngineParams: EngineParams,
bestIdx: Int,
metricHeader: String,
otherMetricHeaders: Seq[String],
engineParamsScores: Seq[(EngineParams, MetricScores[R])],
outputPath: Option[String])
extends BaseEvaluatorResult {
// One-line console summary.
// NOTE(review): recomputes the index of bestEngineParams instead of using the
// bestIdx field — confirm the two are always equal.
override def toOneLiner(): String = {
val idx = engineParamsScores.map(_._1).indexOf(bestEngineParams)
s"Best Params Index: $idx Score: ${bestScore.score}"
}
// JSON rendering; NameParamsSerializer handles the (name, params) pairs.
override def toJSON(): String = {
implicit lazy val formats = Utils.json4sDefaultFormats +
new NameParamsSerializer
write(this)
}
// HTML rendering via the bundled template.
override def toHTML(): String = html.metric_evaluator().toString
// Multi-line human-readable evaluation report.
override def toString: String = {
implicit lazy val formats = Utils.json4sDefaultFormats +
new NameParamsSerializer
val bestEPStr = writePretty(bestEngineParams)
val strings = (
Seq(
"MetricEvaluatorResult:",
s" # engine params evaluated: ${engineParamsScores.size}") ++
Seq(
"Optimal Engine Params:",
s" $bestEPStr",
"Metrics:",
s" $metricHeader: ${bestScore.score}") ++
otherMetricHeaders.zip(bestScore.otherScores).map {
case (h, s) => s" $h: $s"
} ++
outputPath.toSeq.map {
p => s"The best variant params can be found in $p"
}
)
strings.mkString("\\n")
}
}
object MetricEvaluator {
// Factory: primary metric plus auxiliary metrics, persisting the best variant
// to outputPath.
def apply[EI, Q, P, A, R](
metric: Metric[EI, Q, P, A, R],
otherMetrics: Seq[Metric[EI, Q, P, A, _]],
outputPath: String): MetricEvaluator[EI, Q, P, A, R] = {
new MetricEvaluator[EI, Q, P, A, R](
metric,
otherMetrics,
Some(outputPath))
}
// Factory: primary metric plus auxiliary metrics, no on-disk output.
def apply[EI, Q, P, A, R](
metric: Metric[EI, Q, P, A, R],
otherMetrics: Seq[Metric[EI, Q, P, A, _]])
: MetricEvaluator[EI, Q, P, A, R] = {
new MetricEvaluator[EI, Q, P, A, R](
metric,
otherMetrics,
None)
}
// Factory: primary metric only.
def apply[EI, Q, P, A, R](metric: Metric[EI, Q, P, A, R])
: MetricEvaluator[EI, Q, P, A, R] = {
new MetricEvaluator[EI, Q, P, A, R](
metric,
Seq[Metric[EI, Q, P, A, _]](),
None)
}
// A (name, params) pair as it appears in the engine-variant JSON.
case class NameParams(name: String, params: Params) {
def this(np: (String, Params)) = this(np._1, np._2)
}
// JSON shape of an engine-variant file describing one set of engine params.
case class EngineVariant(
id: String,
description: String,
engineFactory: String,
datasource: NameParams,
preparator: NameParams,
algorithms: Seq[NameParams],
serving: NameParams) {
// Build a variant (with empty id/description) from an evaluation's params.
def this(evaluation: Evaluation, engineParams: EngineParams) = this(
id = "",
description = "",
engineFactory = evaluation.getClass.getName,
datasource = new NameParams(engineParams.dataSourceParams),
preparator = new NameParams(engineParams.preparatorParams),
algorithms = engineParams.algorithmParamsList.map(np => new NameParams(np)),
serving = new NameParams(engineParams.servingParams))
}
}
/**
 * Evaluator that scores every candidate EngineParams with a primary Metric
 * (plus optional auxiliary metrics), logs each result, and picks the
 * best-scoring candidate according to the metric's ordering.
 */
private[prediction] class MetricEvaluator[EI, Q, P, A, R] (
val metric: Metric[EI, Q, P, A, R],
val otherMetrics: Seq[Metric[EI, Q, P, A, _]],
val outputPath: Option[String])
extends BaseEvaluator[EI, Q, P, A, MetricEvaluatorResult[R]] {
@transient lazy val logger = Logger[this.type]
@transient val engineInstances = Storage.getMetaDataEngineInstances
// Serialize the winning engine params as an engine-variant JSON file.
// NOTE(review): the PrintWriter is not closed in a finally block — an
// exception while writing would leak the file handle.
def saveEngineJson(
evaluation: Evaluation,
engineParams: EngineParams,
outputPath: String) {
val now = DateTime.now
val evalClassName = evaluation.getClass.getName
val variant = MetricEvaluator.EngineVariant(
id = s"$evalClassName $now",
description = "",
engineFactory = evalClassName,
datasource = new MetricEvaluator.NameParams(engineParams.dataSourceParams),
preparator = new MetricEvaluator.NameParams(engineParams.preparatorParams),
algorithms = engineParams.algorithmParamsList.map(np => new MetricEvaluator.NameParams(np)),
serving = new MetricEvaluator.NameParams(engineParams.servingParams))
implicit lazy val formats = Utils.json4sDefaultFormats
logger.info(s"Writing best variant params to disk ($outputPath)...")
val writer = new PrintWriter(new File(outputPath))
writer.write(writePretty(variant))
writer.close
}
// Score all candidates (in parallel via .par), pick the maximum according to
// metric.compare, and optionally persist the winner's params to disk.
def evaluateBase(
sc: SparkContext,
evaluation: Evaluation,
engineEvalDataSet: Seq[(EngineParams, Seq[(EI, RDD[(Q, P, A)])])],
params: WorkflowParams): MetricEvaluatorResult[R] = {
// Evaluate each candidate's primary and auxiliary metrics.
val evalResultList: Seq[(EngineParams, MetricScores[R])] = engineEvalDataSet
.zipWithIndex
.par
.map { case ((engineParams, evalDataSet), idx) =>
val metricScores = MetricScores[R](
metric.calculate(sc, evalDataSet),
otherMetrics.map(_.calculate(sc, evalDataSet)))
(engineParams, metricScores)
}
.seq
implicit lazy val formats = Utils.json4sDefaultFormats +
new NameParamsSerializer
// Log every candidate's params and scores for inspection.
evalResultList.zipWithIndex.foreach { case ((ep, r), idx) => {
logger.info(s"Iteration $idx")
logger.info(s"EngineParams: ${write(ep)}")
logger.info(s"Result: $r")
}}
// use max. take implicit from Metric.
val ((bestEngineParams, bestScore), bestIdx) = evalResultList
.zipWithIndex
.reduce { (x, y) =>
(if (metric.compare(x._1._2.score, y._1._2.score) >= 0) x else y)
}
// save engine params if it is set.
outputPath.foreach { path => saveEngineJson(evaluation, bestEngineParams, path) }
MetricEvaluatorResult(
bestScore = bestScore,
bestEngineParams = bestEngineParams,
bestIdx = bestIdx,
metricHeader = metric.header,
otherMetricHeaders = otherMetrics.map(_.header),
engineParamsScores = evalResultList,
outputPath = outputPath)
}
}
| ydanilenko/PredictionIO | core/src/main/scala/io/prediction/controller/MetricEvaluator.scala | Scala | apache-2.0 | 7,000 |
package com.github.sweb.marsrover
/**
* Created by Florian on 18.10.2014.
*/
import Direction._
/**
 * Immutable rover on a wrapping (toroidal) grid. Each command produces a new
 * Rover instance; coordinates wrap modulo the grid's width and height.
 */
case class Rover(x: Int, y: Int, d: Direction, grid: Grid) {
// Number of compass directions; used for modular rotation arithmetic.
val numberOfDirections = 4
// Per-direction movement deltas, applied with a sign flip in move().
// NOTE(review): E maps to (-1,0) and W to (1,0); combined with the negative
// factor for forward moves this makes 'f' while facing E increase x — confirm
// the intended orientation against the kata spec.
val (horMovement, vertMovement) = {
d match {
case N => (0,1)
case S => (0,-1)
case E => (-1,0)
case W => (1,0)
}
}
// Entry point: apply a whole array of commands ('f','b','l','r') left to right.
def receiveMoves(moves: Array[Char]): Rover = {
processMoves(moves.toList)
}
// Recursively fold the command list into successive rover states.
def processMoves(moves: List[Char]): Rover = {
moves match {
case Nil => this
case h :: t => processMove(h).processMoves(t)
}
}
// Dispatch a single command character to a move or a rotation.
private def processMove(c: Char): Rover = {
c match {
case 'f' => move()
case 'b' => move(isBackingUp = true)
case 'l' => rotate(toRight = false)
case 'r' => rotate(toRight = true)
}
}
// Step one cell forward (or backward), wrapping around the grid edges; adding
// the grid dimension before taking the modulo keeps the result non-negative.
private def move(isBackingUp: Boolean = false): Rover = {
// May be counter-intuitive but common for 2D programming - Moving up is a negative movement, reducing y
val f = if (isBackingUp) 1 else -1
val newX = (x + horMovement * f + grid.width) % grid.width
val newY = (y + vertMovement * f + grid.height) % grid.height
Rover(newX, newY, d, grid)
}
// Rotate 90 degrees; Direction is treated as an integer modulo 4.
private def rotate(toRight: Boolean): Rover = {
val leftOrRight = if (toRight) 1 else -1
val newD: Direction = ((d + leftOrRight) + numberOfDirections) % numberOfDirections
Rover(x, y, newD, grid)
}
}
| sweb/scala_rover_kata | src/main/scala/com/github/sweb/marsrover/Rover.scala | Scala | mit | 1,396 |
package org.vitrivr.adampro.query.query
import org.vitrivr.adampro.data.index.Index.IndexTypeName
import org.vitrivr.adampro.data.index.structures.IndexTypes._
/**
* adamtwo
*
* Ivan Giangreco
* November 2015
*/
/**
 * Hints that can be attached to a query to steer the execution strategy, e.g.
 * force a sequential scan, a specific index structure, or an exact answer.
 */
object QueryHints {
  sealed abstract class QueryHint
  sealed abstract class SimpleQueryHint extends QueryHint
  /** Requests the use of one concrete index structure. */
  sealed abstract class IndexQueryHint(val structureType : IndexTypeName) extends SimpleQueryHint
  /** Ordered list of simple hints to consider. */
  sealed abstract class ComplexQueryHint(val hints : Seq[SimpleQueryHint]) extends QueryHint
  /** Delegates the choice to an empirical optimizer, identified by name. */
  sealed abstract class EmpiricalQueryHint(val optimizerName : String = "svm") extends SimpleQueryHint

  case object SEQUENTIAL_QUERY extends SimpleQueryHint
  case object INDEX_QUERY extends ComplexQueryHint(Seq(VAF_INDEX_QUERY, VAV_INDEX_QUERY, VAP_INDEX_QUERY, PQ_INDEX_QUERY, ECP_INDEX_QUERY, SH_INDEX_QUERY, LSH_INDEX_QUERY))
  case object INEXACT_QUERY extends ComplexQueryHint(Seq(PQ_INDEX_QUERY, ECP_INDEX_QUERY, SH_INDEX_QUERY, LSH_INDEX_QUERY))
  case object ECP_INDEX_QUERY extends IndexQueryHint(ECPINDEX)
  case object LSH_INDEX_QUERY extends IndexQueryHint(LSHINDEX)
  case object MI_INDEX_QUERY extends IndexQueryHint(MIINDEX)
  case object PQ_INDEX_QUERY extends IndexQueryHint(PQINDEX)
  case object SH_INDEX_QUERY extends IndexQueryHint(SHINDEX)
  case object EXACT_QUERY extends ComplexQueryHint(Seq(VAF_INDEX_QUERY, VAV_INDEX_QUERY, SEQUENTIAL_QUERY))
  case object VA_INDEX_QUERY extends ComplexQueryHint(Seq(VAF_INDEX_QUERY, VAV_INDEX_QUERY, VAP_INDEX_QUERY))
  case object VAF_INDEX_QUERY extends IndexQueryHint(VAFINDEX)
  case object VAV_INDEX_QUERY extends IndexQueryHint(VAVINDEX)
  case object VAP_INDEX_QUERY extends IndexQueryHint(VAPLUSINDEX)
  case object EMPIRICAL_QUERY extends EmpiricalQueryHint()
  case object EMPIRICAL_SVM_QUERY extends EmpiricalQueryHint("svm")
  case object EMPIRICAL_NAIVE_QUERY extends EmpiricalQueryHint("naive")
  case object EMPIRICAL_LR_QUERY extends EmpiricalQueryHint("lr")
  case object SCORED extends EmpiricalQueryHint("naive")

  /** Hint used when no usable hint was supplied. */
  val FALLBACK_HINTS : QueryHint = EXACT_QUERY

  /**
   * Resolve a sequence of hint names; unknown names are silently dropped and a
   * null sequence (e.g. from Java/RPC callers) yields an empty result.
   */
  def withName(s : Seq[String]) : Seq[QueryHint] = {
    if (s != null) {
      // flatMap over the Option results keeps only the resolvable names.
      s.flatMap(withName(_))
    } else {
      Seq()
    }
  }

  /** Resolve a single hint name, or None if the name is unknown. */
  def withName(s : String) : Option[QueryHint] = s match {
    case "sequential" => Some(SEQUENTIAL_QUERY)
    case "index" => Some(INDEX_QUERY)
    case "inexact" => Some(INEXACT_QUERY)
    case "exact" => Some(EXACT_QUERY)
    case "empirical" => Some(EMPIRICAL_QUERY)
    case "empirical_lr" => Some(EMPIRICAL_LR_QUERY)
    case "empirical_svm" => Some(EMPIRICAL_SVM_QUERY)
    case "empirical_naive" => Some(EMPIRICAL_NAIVE_QUERY)
    case "scored" => Some(SCORED)
    case "ecp" => Some(ECP_INDEX_QUERY)
    case "lsh" => Some(LSH_INDEX_QUERY)
    case "mi" => Some(MI_INDEX_QUERY)
    case "pq" => Some(PQ_INDEX_QUERY)
    case "sh" => Some(SH_INDEX_QUERY)
    case "va" => Some(VA_INDEX_QUERY)
    case "vaf" => Some(VAF_INDEX_QUERY)
    case "vav" => Some(VAV_INDEX_QUERY)
    case "vap" => Some(VAP_INDEX_QUERY)
    case _ => None
  }
}
| dbisUnibas/ADAMpro | src/main/scala/org/vitrivr/adampro/query/query/QueryHints.scala | Scala | mit | 3,078 |
import leon.lang._
import leon.lang.synthesis._
// Leon synthesis benchmark: `concat`'s body is a `choose` hole whose postcondition
// is stated over `content`; the Leon tool is expected to synthesize the body.
object ListOperations {
  // Minimal integer cons-list ADT used as the synthesis domain.
  sealed abstract class List
  case class Cons(head: Int, tail: List) extends List
  case class Nil() extends List

  // Specification helper: the set of all element values in the list.
  def content(l: List) : Set[Int] = l match {
    case Nil() => Set.empty
    case Cons(head, tail) => Set(head) ++ content(tail)
  }

  // True iff the list has no elements.
  def isEmpty(l: List) = l match {
    case Nil() => true
    case Cons(head, tail) => false
  }

  // Deliberately incorrect predicate (always true) — presumably kept as a negative
  // example for the synthesis/verification tooling; do not "fix" it.
  def isEmptyBad(l: List) = l match {
    case Nil() => true
    case Cons(head, tail) => true
  }

  // True iff the list has at least one element.
  def hasContent(l: List) = l match {
    case Nil() => false
    case Cons(head, tail) => true
  }

  // !content(l).isEmpty
  // Synthesis hole: find a body whose content equals the union of both inputs' contents.
  def concat(l1: List, l2: List) : List = choose {
    (out : List) =>
      content(out) == content(l1) ++ content(l2)
  }
}
| epfl-lara/leon | testcases/synthesis/condabd/test/lesynth/ListConcatWithEmpty.scala | Scala | gpl-3.0 | 833 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl._
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.{T, Table}
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
/**
 * [[Recurrent]] module is a container of rnn cells
 * Different types of rnn cells can be added using add() function
 *
 * The container clones the added cell once per time step (see `extend`) and
 * shares weights, biases and their gradients across all clones (see `share`),
 * so memory for parameters is allocated only once.
 */
class Recurrent[T : ClassTag]()
  (implicit ev: TensorNumeric[T]) extends Container[Tensor[T], Tensor[T], T] {

  // Hidden state shared across the recurrence; shape is decided by the cell's hidResize.
  private var hidden: Activity = null
  // Reuses `hidden` as an empty placeholder during backward (see `extend`).
  private var gradHidden: Activity = null
  private var hiddenShape: Array[Int] = null
  // Table fed to each cell: T(inputDim -> step input, hidDim -> previous hidden).
  private val currentInput = T()
  private val currentGradOutput = T()
  private var gradInputCell = Tensor[T]()
  // Output of preTopology (or the raw input) over all time steps.
  private var outputCell = Tensor[T]()
  private val _input = T()
  // 1-based tensor dimensions of the input: [batch, time, ...].
  private val batchDim = 1
  private val timeDim = 2
  // 1-based keys into the T() tables exchanged with the cell.
  private val inputDim = 1
  private val hidDim = 2
  // Index from which cell outputs still need to be copied into `output`/`gradInputCell`.
  private var cellAppendStartIdx = 0
  private var preBatchSize = 0
  private var (batchSize, times) = (0, 0)
  private var topology: Cell[T] = null
  private var preTopology: AbstractModule[Activity, Activity, T] = null
  // Dropout layers found in each cloned cell; noise is shared so all steps drop identically.
  private val dropouts: ArrayBuffer[Array[Dropout[T]]] =
    new ArrayBuffer[Array[Dropout[T]]]

  /**
   *
   * modules: -- preTopology
   * |- topology (cell)
   *
   * The topology (or cell) will be cloned for N times w.r.t the time dimension.
   * The preTopology will be execute only once before the recurrence.
   *
   * @param module module to be add
   * @return this container
   */
  override def add(module: AbstractModule[_ <: Activity, _ <: Activity, T]): Recurrent.this.type = {
    require(module.isInstanceOf[Cell[T]],
      "Recurrent: contained module should be Cell type")
    topology = module.asInstanceOf[Cell[T]]
    preTopology = topology.preTopology
    if (preTopology != null) {
      modules += preTopology
    }
    modules += topology
    this
  }

  // list of cell modules cloned from added modules
  private val cells: ArrayBuffer[Cell[T]]
  = ArrayBuffer[Cell[T]]()

  /**
   * Clone N models; N depends on the time dimension of the input
   * @param times
   * @param batchSize
   * @param hiddenSize
   */
  private def extend(times: Int, batchSize: Int, hiddenSize: Int,
    rows: Int = 1, columns: Int = 1): Unit = {
    if (hidden == null) {
      require((preTopology == null && modules.length == 1) ||
        (topology != null && preTopology != null && modules.length == 2),
        "Recurrent extend: should contain only one cell or plus a pre-topology" +
          " to process input")
      cells.clear()
      cells += topology
      val cell = cells.head
      // The cell will help initialize or resize the hidden variable.
      hidden = cell.hidResize(hidden = null, size = batchSize, rows, columns)
      /*
       * Since the gradHidden is only used as an empty Tensor or Table during
       * backward operations. We can reuse the hidden variable by pointing the
       * gradHidden to it.
       */
      gradHidden = hidden
    } else {
      cells.head.hidResize(hidden = hidden, size = batchSize, rows, columns)
      gradHidden = hidden
    }
    var t = cells.length
    if (t < times) {
      // Clone from an intermediate cell with detached (empty) parameter storage so the
      // per-step clones don't duplicate the weights; `share` re-links them afterwards.
      val cloneCell = cells.head.cloneModule()
      cloneCell.parameters()._1.map(_.set())
      cloneCell.parameters()._2.map(_.set())
      while (t < times) {
        cells += cloneCell.cloneModule()
          .asInstanceOf[Cell[T]]
        t += 1
      }
      share(cells)
    }
  }

  /**
   * set the cells' output and gradInput to recurrent's output and gradInput
   * to decrease the copy expense.
   * @param src
   * @param dst
   */
  private def set(src: ArrayBuffer[Tensor[T]], dst: Tensor[T], offset: Int): Unit = {
    // First copy each step tensor into its slice of dst ...
    var t = 1
    while ((t + offset) <= times) {
      dst.select(timeDim, t + offset).copy(src(t - 1))
      t += 1
    }
    // ... then re-point the step tensors at those slices so future writes are zero-copy.
    t = 1
    while ((t + offset) <= times) {
      src(t - 1).set(dst.select(timeDim, t + offset))
      t += 1
    }
  }

  /**
   * Sharing weights, bias, gradWeights across all the cells in time dim
   * @param cells
   */
  def share(cells: ArrayBuffer[Cell[T]]): Unit = {
    val params = cells.head.parameters()
    cells.foreach(c => {
      if (!c.parameters().eq(params)) {
        // Point this clone's weight and gradient storages at the head cell's storages.
        var i = 0
        while (i < c.parameters()._1.length) {
          c.parameters()._1(i).set(params._1(i))
          i += 1
        }
        i = 0
        while (i < c.parameters()._2.length) {
          c.parameters()._2(i).set(params._2(i))
          i += 1
        }
        dropouts.append(findDropouts(c))
      }
    })
    // Share dropout noise across steps and disable resampling in the clones so that
    // every time step applies the same dropout mask.
    val stepLength = dropouts.length
    for (i <- dropouts.head.indices) {
      val head = dropouts.head(i)
      val noise = head.noise
      for (j <- 1 until stepLength) {
        val current = dropouts(j)(i)
        current.noise = noise
        current.isResampling = false
      }
    }
  }

  /**
   * Collects all Dropout submodules of the given cell (null for non-container cells).
   */
  def findDropouts(cell: Cell[T]): Array[Dropout[T]] = {
    var result: Array[Dropout[T]] = null
    cell.cell match {
      case container: Container[_, _, T] =>
        result = container
          .findModules("Dropout")
          .toArray
          .map(_.asInstanceOf[Dropout[T]])
      case _ =>
    }
    result
  }

  // Detaches the per-step tensors from the shared buffer and resets the buffer; used
  // when the batch size changes so old storage addresses are not reused.
  private def reset(src1: ArrayBuffer[Tensor[T]], src2: Tensor[T]): Unit = {
    cellAppendStartIdx = 0
    src1.foreach(x => x.set(Tensor[T](1)))
    src2.set(Tensor[T](1))
  }

  override def updateOutput(input: Tensor[T]): Tensor[T] = {
    require(input.dim == 3 || input.dim == 5,
      "Recurrent: input should be a 3D or 5D Tensor, e.g [batch, times, nDim], " +
        s"current input.dim = ${input.dim}")
    batchSize = input.size(batchDim)
    times = input.size(timeDim)

    /**
     * get previous batchsize.
     * If current batchSize is not equal to previous batchSize,
     * reset recurrent's output and cells' output to avoid
     * address conflicts.
     */
    preBatchSize = if (!cells.isEmpty) {
      cells.head.output.toTable[Tensor[T]](inputDim).size(batchDim)
    } else {
      0
    }
    if (preBatchSize > 0 && preBatchSize != batchSize) {
      reset(cells.map(x => x.output.toTable[Tensor[T]](inputDim)), output)
    }

    // Run the (optional) pre-topology once over the whole sequence.
    outputCell = if (preTopology != null) {
      preTopology.updateOutput(input).toTensor[T]
    } else {
      input
    }

    val hiddenSize = topology.hiddensShape(0)
    if (input.dim() == 3) {
      output.resize(batchSize, times, hiddenSize)
      // Clone N modules along the sequence dimension.
      extend(times, batchSize, hiddenSize)
    } else if (input.dim() == 5) {
      output.resize(batchSize, times, hiddenSize, input.size(4), input.size(5))
      // Clone N modules along the sequence dimension.
      extend(times, batchSize, hiddenSize, input.size(4), input.size(5))
    }

    /**
     * currentInput forms a T() type. It contains two elements, hidden and input.
     * Each time it will feed the cell with T(hidden, input) (or T(input, hidden) depends on
     * your hidDim and inputDim), and the cell will give a table output containing two
     * identical elements T(output, output). One of the elements from the cell output is
     * the updated hidden. Thus the currentInput will update its hidden element with this output.
     */
    currentInput(hidDim) = hidden
    var i = 1
    while (i <= times) {
      currentInput(inputDim) = outputCell.select(timeDim, i)
      cells(i - 1).updateOutput(currentInput)
      currentInput(hidDim) = cells(i - 1).output.toTable(hidDim)
      i += 1
    }

    // Copy (and then alias) any not-yet-linked step outputs into the shared output tensor.
    if (cellAppendStartIdx == 0 || cellAppendStartIdx < times) {
      set(cells.slice(cellAppendStartIdx, times)
        .map(x => x.output.toTable[Tensor[T]](inputDim)),
        output,
        cellAppendStartIdx)
    }
    output
  }

  override def accGradParameters(input: Tensor[T], gradOutput: Tensor[T]): Unit = {
    cellAppendStartIdx = cells.length
    currentGradOutput(hidDim) = gradHidden
    /**
     * Since we clone module along the time dimension, the output of each
     * iteration have been recorded by the cloned modules. Thus, we can
     * reuse these outputs during the backward operations by copying the
     * outputs to _input variable.
     *
     * The output of Cell(i-1) should be one of the elements fed to the inputs
     * of Cell(i)
     * The first module in the cells array accepts zero hidden parameter.
     */
    var i = times
    while (i >= 1) {
      currentGradOutput(inputDim) = gradOutput.select(timeDim, i)
      _input(hidDim) = if (i > 1) cells(i - 2).output.toTable(hidDim)
      else hidden
      _input(inputDim) = outputCell.select(timeDim, i)
      // `regluarized` (sic) is the Cell API spelling; enable regularization only at step 1
      // so it is accumulated once per sequence, not once per time step.
      if (i == 1) {
        cells(i - 1).regluarized(true)
      } else {
        cells(i - 1).regluarized(false)
      }
      cells(i - 1).accGradParameters(_input, currentGradOutput)
      currentGradOutput(hidDim) = cells(i - 1).gradInput.toTable(hidDim)
      i -= 1
    }
    if (preTopology != null) {
      preTopology.accGradParameters(input, gradInputCell)
    }
  }

  override def updateGradInput(input: Tensor[T], gradOutput: Tensor[T]): Tensor[T] = {
    /**
     * get previous batchsize.
     * If current batchSize is not equal to previous batchSize,
     * reset recurrent's gradInput and cells' gradInput to avoid
     * address conflicts.
     */
    if (preBatchSize > 0 && preBatchSize != batchSize ) {
      reset(cells.map(x => x.gradInput.toTable[Tensor[T]](inputDim)), gradInputCell)
    }

    gradInput = if (preTopology != null) {
      /**
       * if preTopology is Sequential, it has not created gradInput.
       * Thus, it needs to create a new Tensor.
       */
      if (preTopology.gradInput == null) {
        preTopology.gradInput = Tensor[T]()
      }
      preTopology.gradInput.toTensor[T]
    } else {
      gradInputCell
    }
    gradInputCell.resizeAs(outputCell)

    // Backward pass through time, from the last step to the first.
    currentGradOutput(hidDim) = gradHidden
    var i = times
    while (i >= 1) {
      currentGradOutput(inputDim) = gradOutput.select(timeDim, i)
      _input(hidDim) = if (i > 1) cells(i - 2).output.toTable(hidDim)
      else hidden
      _input(inputDim) = outputCell.select(timeDim, i)
      cells(i - 1).updateGradInput(_input, currentGradOutput)
      currentGradOutput(hidDim) = cells(i - 1).gradInput.toTable(hidDim)
      i -= 1
    }

    // Copy (and then alias) any not-yet-linked step gradients into the shared buffer.
    if (cellAppendStartIdx == 0 || cellAppendStartIdx < times) {
      set(cells.slice(cellAppendStartIdx, times)
        .map(x => x.gradInput.toTable[Tensor[T]](inputDim)),
        gradInputCell,
        cellAppendStartIdx)
    }

    if (preTopology != null) {
      gradInput = preTopology.updateGradInput(input, gradInputCell).toTensor[T]
    }
    gradInput
  }

  override def clearState() : this.type = {
    super.clearState()
    hidden = null
    gradHidden = null
    hiddenShape = null
    gradInputCell.set()
    outputCell.set()
    currentInput.clear()
    currentGradOutput.clear()
    _input.clear()
    // Detach step tensors before clearing the cells so no stale storage is referenced.
    reset(cells.map(x => x.output.toTable[Tensor[T]](inputDim)), output)
    reset(cells.map(x => x.gradInput.toTable[Tensor[T]](inputDim)), gradInputCell)
    cells.foreach(x => x.clearState())
    cells.clear()
    this
  }

  override def reset(): Unit = {
    require((preTopology == null && modules.length == 1) ||
      (topology != null && preTopology != null && modules.length == 2),
      "Recurrent extend: should contain only one cell or plus a pre-topology" +
        " to process input.")
    require(topology.isInstanceOf[Cell[T]],
      "Recurrent: should contain module with Cell type")

    modules.foreach(_.reset())
    cells.clear()
  }

  override def canEqual(other: Any): Boolean = other.isInstanceOf[Recurrent[T]]

  override def equals(other: Any): Boolean = other match {
    case that: Recurrent[T] =>
      super.equals(that) &&
        (that canEqual this) &&
        cells == that.cells
    case _ => false
  }

  override def hashCode(): Int = {
    val state = Seq(super.hashCode(), cells)
    state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
  }
}
object Recurrent {
  /** Creates an empty [[Recurrent]] container; a cell must be added via `add` before use. */
  def apply[@specialized(Float, Double) T: ClassTag]()
    (implicit ev: TensorNumeric[T]) : Recurrent[T] = {
    new Recurrent[T]()
  }
}
| JerryYanWan/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/Recurrent.scala | Scala | apache-2.0 | 12,837 |
package it.unibo.drescue.view
import javafx.stage.WindowEvent
import it.unibo.drescue.controller._
import scalafx.application.JFXApp.PrimaryStage
import scalafx.application.{JFXApp, Platform}
import scalafx.scene.Scene
/**
 * Main window of the civil protection application; hosts the individual grids
 * (login, home, enroll team, manage rescues) and swaps between them.
 *
 * @param loginGrid the login grid shown on startup
 * @param loginController controller backing the login grid
 * @param controller the main application controller
 * @param homeController controller backing the home grid
 * @param enrollTeamControllerImpl controller backing the enroll-team grid
 * @param manageRescuesController controller backing the manage-rescues grid
 */
class MainView(loginGrid: LoginGrid,
               loginController: LoginControllerImpl,
               controller: MainControllerImpl,
               homeController: HomeControllerImpl,
               enrollTeamControllerImpl: EnrollTeamControllerImpl,
               manageRescuesController: ManageRescuesControllerImpl) extends JFXApp {

  var login = new LoginGrid(loginController)
  var home = new HomeGrid(homeController)
  var team = new EnrollTeamGrid(enrollTeamControllerImpl)
  var manage = new ManageRescuesGrid(manageRescuesController, "Stop", controller.alertInManage)

  /**
   * Initialises the primary stage: fixed-size window titled "D-rescue" showing the
   * login scene, terminating the whole application when the window is closed.
   */
  def setStage(): Unit = {
    stage = new PrimaryStage {
      title = "D-rescue"
      resizable = false
      scene = new Scene {
        content = loginGrid.grid
      }
      onCloseRequest_=((closeEvent: WindowEvent) => {
        closeEvent.consume()
        Platform.exit()
        System.exit(0)
      })
    }
  }

  /**
   * Rebuilds the requested grid, wraps it in a fresh scene and swaps it onto the
   * stage; an unknown identifier pops up an error dialog instead.
   *
   * @param view identifier of the grid to show next
   */
  def changeView(view: String): Unit = {
    val replacementScene = new Scene {
      view match {
        case "Login" =>
          login = new LoginGrid(loginController)
          content = login.grid
        case "Home" =>
          home = new HomeGrid(homeController)
          content = home.grid
        case "NewTeam" =>
          team = new EnrollTeamGrid(enrollTeamControllerImpl)
          content = team.grid
        case "ManageRescues" =>
          manage = new ManageRescuesGrid(manageRescuesController, controller.sendOrStop, controller.alertInManage)
          content = manage.grid
        case _ =>
          new CustomDialog(controller).createDialog(CustomDialog.Error).showAndWait()
      }
    }
    _stage.hide()
    _stage.scene_=(replacementScene)
    _stage.centerOnScreen()
    _stage.show()
  }

  /**
   * @return the primary stage of the main view
   */
  def _stage = stage
}
| SofiaRosetti/S3-16-d-rescue | civilprotection/src/main/scala/it/unibo/drescue/view/MainView.scala | Scala | gpl-3.0 | 2,982 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.metastore
import quasar.contrib.pathy.APath
import quasar.fs.mount.{ConnectionUri, MountType}
/** A mount's type and connection URI together with the path it is mounted at. */
final case class PathedMountConfig(path: APath, mt: MountType, uri: ConnectionUri)
| jedesah/Quasar | core/src/main/scala/quasar/metastore/PathedMountConfig.scala | Scala | apache-2.0 | 801 |
package us.feliscat.ir.query
import us.feliscat.ir.Query
import us.feliscat.text.StringOption
/**
 * A retrieval query built around a single keyword.
 *
 * <pre>
 * Created on 6/1/15.
 * </pre>
 * @param keyword keyword the query is based on
 * @author K.Sakamoto
 */
abstract class KeywordQuery(val keyword: StringOption) extends Query {
  // Concrete subclasses must supply the actual query string.
  override val query: StringOption
}
| ktr-skmt/FelisCatusZero-multilingual | libraries/src/main/scala/us/feliscat/ir/query/KeywordQuery.scala | Scala | apache-2.0 | 301 |
package com.avsystem.commons
package serialization.cbor
import com.avsystem.commons.misc.{AbstractValueEnum, AbstractValueEnumCompanion, EnumCtx}
import com.avsystem.commons.serialization.{InputMetadata, IntWrapperCompanion}
/** [[https://tools.ietf.org/html/rfc7049#section-2.1]] */
final class MajorType(implicit enumCtx: EnumCtx) extends AbstractValueEnum {
  // Pairs this major type with a 5-bit "additional information" value.
  def withInfo(info: Int): InitialByte =
    InitialByte(this, info)
}

object MajorType extends AbstractValueEnumCompanion[MajorType] {
  // Declaration order matters: each value's ordinal must equal its CBOR major type number (0-7).
  final val Unsigned, Negative, ByteString, TextString, Array, Map, Tag, Simple: Value = new MajorType
}
/** [[https://tools.ietf.org/html/rfc7049#section-2]] */
final class InitialByte(val value: Byte) extends AnyVal {
  // High 3 bits of the byte encode the major type.
  def majorType: MajorType = MajorType.values((value & 0xFF) >>> 5)
  // Low 5 bits encode the additional information.
  def additionalInfo: Int = value & 0x1F
  override def toString: String = s"major type $majorType with info $additionalInfo"
}

object InitialByte extends InputMetadata[InitialByte] {

  import MajorType._

  /** Packs major type (high 3 bits) and additional info (low 5 bits) into one byte. */
  def apply(major: MajorType, info: Int): InitialByte = {
    require(info >= 0 && info < 32)
    InitialByte(((major.ordinal << 5) | info).toByte)
  }

  def apply(byte: Byte): InitialByte =
    new InitialByte(byte)

  def unapply(ib: InitialByte): Opt[(MajorType, Int)] =
    Opt((ib.majorType, ib.additionalInfo))

  // Additional-info values that signal how the item's length/value is encoded.
  final val SingleByteValueInfo = 24
  final val TwoBytesValueInfo = 25
  final val FourBytesValueInfo = 26
  final val EightBytesValueInfo = 27
  final val IndefiniteLengthInfo = 31

  // Extractor for indefinite-length items; only ByteString..Map may be indefinite.
  object IndefiniteLength {
    def apply(major: MajorType): InitialByte = {
      require(major.ordinal >= MajorType.ByteString.ordinal && major.ordinal <= MajorType.Map.ordinal)
      InitialByte(major, IndefiniteLengthInfo)
    }

    def unapply(byte: InitialByte): Opt[MajorType] =
      if (byte.additionalInfo == IndefiniteLengthInfo &&
        byte.majorType.ordinal >= MajorType.ByteString.ordinal && byte.majorType.ordinal <= MajorType.Map.ordinal)
        Opt(byte.majorType)
      else Opt.Empty
  }

  // https://tools.ietf.org/html/rfc7049#section-2.3
  final val False = Simple.withInfo(20)
  final val True = Simple.withInfo(21)
  final val Null = Simple.withInfo(22)
  final val Undefined = Simple.withInfo(23)
  final val HalfPrecisionFloat = Simple.withInfo(25)
  final val SinglePrecisionFloat = Simple.withInfo(26)
  final val DoublePrecisionFloat = Simple.withInfo(27)
  // Terminates an indefinite-length item.
  final val Break = Simple.withInfo(31)
}
/**
 * [[https://tools.ietf.org/html/rfc7049#section-2.4]]
 * [[https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml]]
 */
case class Tag(value: Int) extends AnyVal

object Tag extends IntWrapperCompanion[Tag] {
  // Well-known tag numbers from the IANA CBOR tags registry.
  final val StandardDateTime = Tag(0)
  final val EpochDateTime = Tag(1)
  final val PositiveBignum = Tag(2)
  final val NegativeBignum = Tag(3)
  final val DecimalFraction = Tag(4)
  final val Bigfloat = Tag(5)
  final val ExpectedBase64Url = Tag(21)
  final val ExpectedBase64 = Tag(22)
  final val ExpectedBase16 = Tag(23)
  final val EncodedDataItem = Tag(24)
  final val Uri = Tag(32)
  final val Base64Url = Tag(33)
  final val Base64 = Tag(34)
  final val Regexp = Tag(35)
  final val MimeMessage = Tag(36)
  final val SelfDescribe = Tag(55799)
}

// Input-metadata key for the full list of tags attached to a data item.
object Tags extends InputMetadata[List[Tag]]
| AVSystem/scala-commons | commons-core/src/main/scala/com/avsystem/commons/serialization/cbor/definitions.scala | Scala | mit | 3,253 |
package net.ssanj.dabble
// Parsed "history" sub-command; `term` optionally narrows the history search.
final case class HistoryCommand(term: Option[String])

/**
 * Accumulated result of command-line parsing.
 *
 * @param dependencies dependency expressions collected from the positional arguments
 * @param resolvers additional repositories to resolve dependencies from
 * @param macroParadiseVersion version of the macro paradise plugin, if requested
 * @param historyCommand present when the history sub-command was invoked
 */
final case class DabbleRunConfig(dependencies: Seq[String] = Seq.empty,
                                 resolvers: Seq[String] = Seq.empty,
                                 macroParadiseVersion: Option[String] = None,
                                 historyCommand: Option[HistoryCommand] = None) {

  // Appends one dependency token to this configuration.
  def %(dep: String): DabbleRunConfig = this.copy(dependencies = dependencies :+ dep)
}
/**
 * Builds the scopt command-line parsers for Dabble. All user-visible help text is
 * assembled here; `newline`/`newlineAndTab` are presumably provided by the package —
 * TODO confirm where they are defined.
 */
trait TerminalSupport {

  // Registers the positional dependency arguments ("dep1 + dep2 + ... + depN").
  private def dependencies(op: scopt.OptionParser[DabbleRunConfig]): Unit = {
    op.arg[String]("<dep1> + <dep2> + ... <depn>").
      unbounded().
      optional().
      action { (dep, config) => config % dep }.
      text("""The list of dependencies to include.""" + newlineAndTab +
        """Multiple dependencies should be separated by a + sign.""" + newline + newlineAndTab +
        "Format is one of:" + newlineAndTab +
        """"org1" % "name1" % "version1"""" + newlineAndTab +
        """"org2" %% "name2" % "version2"""" + newlineAndTab +
        """"org3" %% "name3" % "version3 % "config""""" + newlineAndTab +
        """"org1" %% "name1" % "version1" + "org2" %% "name2" % "version2"""" + newline + newlineAndTab +
        """Example:""" + newlineAndTab +
        """"com.github.scopt" %% "scopt" % "3.4.0" + "org.scalaz" %% "scalaz-core" % "7.2.2"""" + newline
      )
  }

  // Registers the -r/--resolvers option (comma-separated repository list).
  private def resolvers(op: scopt.OptionParser[DabbleRunConfig]): Unit = {
    op.opt[Seq[String]]('r', "resolvers").
      valueName(""""<res1>,<res2>, .... <resn>"""").
      action { (resolvers, config) => config.copy(resolvers = resolvers.map(_.trim)) }.
      text("""The list of additional repositories to resolve dependencies from.""" + newlineAndTab +
        """Multiple dependencies should be separated by commas.""" + newline + newlineAndTab +
        "Format is one of:" + newlineAndTab +
        """(sonatype|typesafe|typesafeIvy|sbtPlugin):[s|r]""" + newlineAndTab +
        """(maven2|jcenter)""" + newlineAndTab +
        """bintray(owner:repo)""" + newlineAndTab +
        """name@repo_url""" + newline + newlineAndTab +
        """sonatype:s -- loads only snapshot repo""" + newlineAndTab +
        """sonatype:r -- loads only release repo""" + newlineAndTab +
        """sonatype -- loads both snapshot and release repos""" + newlineAndTab +
        """maven2 -- loads the maven2 resolver""" + newlineAndTab +
        """bintray:user:repo -- loads the bintray resolver for user/repo""" + newlineAndTab +
        """your repo name @ https://your.repo.com/release/maven -- loads a custom resolver""" + newline + newlineAndTab +
        """Example:""" + newlineAndTab +
        """"bintray:oncue:releases, sonatype:r"""" + newline
      )
  }

  // Registers the --macro-paradise/-mp option (compiler plugin version).
  private def macroParadise(op: scopt.OptionParser[DabbleRunConfig]): Unit = {
    op.opt[String]("macro-paradise").
      abbr("mp").
      action { (version, config) => config.copy(macroParadiseVersion = Option(version)) }.
      valueName("""<version>""").
      text("""Includes the macro paradise compiler plugin with the supplied version.""" + newline + newlineAndTab +
        s"""Example:${newlineAndTab}""" +
        """2.1.0"""
      )
  }

  // Registers a flag that runs a side effect and then terminates parsing (used for --help/--version).
  private def toggle(name: String, shortName: Option[String])(f: DabbleRunConfig => Unit)(op: scopt.OptionParser[DabbleRunConfig]): Unit = {
    val opDef = op.opt[Unit](name)
    shortName.map(opDef.abbr).getOrElse(opDef) action { (_, c) =>
      f(c)
      op.terminate(Right(()))
      c
    }
  }

  // Registers the "history" sub-command with its optional --term search filter.
  private def historyCommand(op: scopt.OptionParser[DabbleRunConfig]): Unit = {
    op.cmd("history").
      abbr("hi").
      action { (_, c) => c.copy(historyCommand = Option(HistoryCommand(None))) }.
      children {
        op.opt[String]("term").
          abbr("t").
          valueName("""<search term>""").
          text("""The term to search through history for""").
          action { (term, config) =>
            // A blank term is treated as "no term": keep the unfiltered history command.
            if (term.trim.nonEmpty) config.copy(historyCommand = Option(HistoryCommand(Option(term))))
            else config
          }
      }.
      text("command history.")
  }

  // Full parser: options, positional dependencies, validation and the history sub-command.
  lazy val parser = new scopt.OptionParser[DabbleRunConfig]("Dabble") {
    head(s"$title")
    toggle("help", Option("h"))(_ => showUsage)(this)
    toggle("version", Option("v"))(_ => println(s"$title"))(this)
    dependencies(this)
    resolvers(this)
    macroParadise(this)
    showUsageOnError
    note(s"${newline}Please see https://github.com/ssanj/dabble for more examples.")
    checkConfig{ c =>
      // A valid dependency expression has at least 5 tokens ("org" % "name" % "version"),
      // so fewer than 5 collected arguments (and no history command) is a format error.
      if (c.dependencies.length < 5 && c.historyCommand.isEmpty) {
        failure("Invalid format for dependencies. Please see accepted formats below.")
      } else success
    }
    historyCommand(this)
  }

  //We turn off all checks, and documentation as we don't need them if this fails
  lazy val historyParser = new scopt.OptionParser[DabbleRunConfig]("Dabble-History") {
    dependencies(this)
    resolvers(this)
    macroParadise(this)
  }
}
// Default instance for callers that do not mix the trait in.
object TerminalSupport extends TerminalSupport
/** Simple entry point that parses the arguments and reports the result. */
object Terminal extends App with TerminalSupport {
  parser.parse(args, DabbleRunConfig()).fold(println("Could not parse arguments")) { conf =>
    println(s"you got conf: $conf")
  }
}
| ssanj/dabble | src/main/scala/net/ssanj/dabble/TerminalSupport.scala | Scala | mit | 5,253 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.frontend.v2_3.perty.recipe
import org.neo4j.cypher.internal.frontend.v2_3.perty.helpers.{LazyVal, StrictVal, TypedVal}
import org.neo4j.cypher.internal.frontend.v2_3.perty.step._
import org.neo4j.cypher.internal.frontend.v2_3.perty.{BreakingDoc, DocLiteral, Doc, DocRecipe}
import scala.reflect.runtime.universe._
// DSL for the easy and well-formed construction of DocRecipes
//
// DocRecipes are flat representations of Docs that are suitable for
// composition without hitting stack-depth problems. In particular,
// recipes can contain abstract content that needs to be replaced
// with actually printable content (PrintableDocSteps instead
// of regular DocSteps).
//
// However, building up DocRecipes is not as straightforward
// as using Docs directly and does not ensure structural well-formedness.
//
// This gap is filled by Pretty. Pretty is a DSL for building up
// a tree of RecipeAppenders. Applying Pretty to a RecipeAppender
// yields a DocRecipe, a flat representation of the tree described
// by the DSL.
//
// Usually Pretty will only be used to render one "layer" (parent node
// without it's children) of pretty printing and thus building a tree of
// RecipeAppenders does not create a stack-depth problem. Multiple layers of
// pretty-printing can then be composed safely by replacing abstract content
// with DocRecipes from other layers. This is exactly what happens
// in DocRecipe.strategyExpander.
//
// Additionally, Pretty contains quite a few helper methods for easing
// the construction of DocRecipes.
//
// To use Pretty, import Pretty._ where needed and call DSL helpers
// from inside a call to Pretty.apply.
//
class Pretty[T : TypeTag] extends LowPriorityPrettyImplicits[T] {
def apply(appender: RecipeAppender[T]): DocRecipe[T] = appender(Seq.empty)
case object nothing extends RecipeAppender[T] {
def apply(append: DocRecipe[T]) = append
}
case class doc(doc: Doc) extends RecipeAppender[T] {
def apply(append: DocRecipe[T]) = AddDoc(doc) +: append
}
case object break extends RecipeAppender[T] {
def apply(append: DocRecipe[T]) = AddBreak +: append
}
case class breakBefore(doc: RecipeAppender[T], break: RecipeAppender[T] = break) extends RecipeAppender[T] {
def apply(append: DocRecipe[T]) =
doc.test(DocRecipe.IsEmpty)(recipe => quote(recipe))(recipe => break :: doc)(append)
}
val silentBreak: RecipeAppender[T] = breakWith("")
case class breakWith(text: String) extends RecipeAppender[T] {
def apply(append: DocRecipe[T]) = AddBreak(Some(text)) +: append
}
case object noBreak extends RecipeAppender[T] {
def apply(append: DocRecipe[T]) = AddNoBreak +: append
}
case class group(ops: RecipeAppender[T]) extends RecipeAppender[T] {
def apply(append: DocRecipe[T]) = PushGroupFrame +: ops(PopFrame +: append)
}
case class nest(ops: RecipeAppender[T]) extends RecipeAppender[T] {
def apply(append: DocRecipe[T]) = PushNestFrame +: ops(PopFrame +: append)
}
case class nestWith(indent: Int, ops: RecipeAppender[T]) extends RecipeAppender[T] {
def apply(append: DocRecipe[T]) = PushNestFrame(Some(indent)) +: ops(PopFrame +: append)
}
case class page(ops: RecipeAppender[T]) extends RecipeAppender[T] {
def apply(append: DocRecipe[T]) = PushPageFrame +: ops(PopFrame +: append)
}
def quote(recipe: DocRecipe[T]) = RecipeAppender(recipe)
class listAppender(recipes: TraversableOnce[RecipeAppender[T]]) extends RecipeAppender[T] {
def apply(append: DocRecipe[T]) =
recipes.foldRight(append) {
_.apply(_)
}
}
case object list {
def apply(recipes: TraversableOnce[RecipeAppender[T]]) = new listAppender(recipes)
}
case class breakList(recipes: TraversableOnce[RecipeAppender[T]], break: RecipeAppender[T] = break)
extends RecipeAppender[T] {
def apply(append: DocRecipe[T]) = recipes.foldRight(append) {
case (hd, acc) if acc == append => hd(acc)
case (hd, acc) => hd(break(acc))
}
}
case class sepList(recipes: TraversableOnce[RecipeAppender[T]],
sep: RecipeAppender[T] = text(","),
break: RecipeAppender[T] = break)
extends RecipeAppender[T] {
def apply(append: DocRecipe[T]) = recipes.foldRight(append) {
case (hd, acc) if acc == append => hd(acc)
case (hd, acc) => hd(sep(break(acc)))
}
}
case class groupedSepList(recipes: TraversableOnce[RecipeAppender[T]],
sep: RecipeAppender[T] = text(","),
break: RecipeAppender[T] = break)
extends RecipeAppender[T] {
def apply(append: DocRecipe[T]) = recipes.foldRight(append) {
case (hd: RecipeAppender[T], acc: DocRecipe[T]) =>
if (acc == append)
hd(acc)
else
(group(hd :: sep) :: break)(acc)
}
}
def block(name: RecipeAppender[T], open: RecipeAppender[T] = "(", close: RecipeAppender[T] = ")")(innerDoc: RecipeAppender[T]): RecipeAppender[T] =
group(name :: surrounded(open, close, silentBreak, silentBreak)(innerDoc))
def brackets(innerDoc: RecipeAppender[T], break: RecipeAppender[T] = silentBreak) =
surrounded(open = "[", close = "]", break, break)(innerDoc)
def braces(innerDoc: RecipeAppender[T], break: RecipeAppender[T] = silentBreak) =
surrounded(open = "{", close = "}", break, break)(innerDoc)
def parens(innerDoc: RecipeAppender[T], break: RecipeAppender[T] = silentBreak) =
surrounded(open = "(", close = ")", break, break)(innerDoc)
def comment(innerDoc: RecipeAppender[T], break: RecipeAppender[T] = break) =
surrounded(open = "/*", close = "*/", break, break)(innerDoc)
case class surrounded(open: RecipeAppender[T],
close: RecipeAppender[T],
openBreak: RecipeAppender[T] = break,
closeBreak: RecipeAppender[T] = break)(innerDoc: RecipeAppender[T])
extends RecipeAppender[T] {
def apply(append: DocRecipe[T]) = {
val begin: RecipeAppender[T] = open
val middle: RecipeAppender[T] = breakBefore(innerDoc, openBreak)
val end: RecipeAppender[T] = breakBefore(close, closeBreak)
group(begin :: nest(group(middle)) :: end)(append)
}
}
case class section(start: RecipeAppender[T])(inner: RecipeAppender[T], innerBreak: RecipeAppender[T] = break)
extends RecipeAppender[T] {
def apply(append: DocRecipe[T]): DocRecipe[T] = {
val innerDoc = inner(Seq.empty)
if (innerDoc.isEmpty)
innerDoc
else
group(start :: nest(innerBreak :: inner))(append)
}
}
def prettyOption[S <: T : TypeTag](content: Option[S]) = content.map(pretty[S]).getOrElse(nothing)
// Pretty-prints whichever side of the Either is present.
def prettyEither[L <: T : TypeTag, R <: T : TypeTag](content: Either[L, R]) = content match {
  case Left(left) => pretty(left)
  case Right(right) => pretty(right)
}
// Abstract "content" that still needs to be rendered into PrintableDocSteps.
//
// The actual value is taken as a by-name closure (via TypedVal) to be able to
// perform error handling when dealing with buggy / partially unimplemented
// classes.
class prettyAppender[+S <: T : TypeTag](content: TypedVal[S]) extends RecipeAppender[T] {
  // Prepends a deferred pretty-printing step for `content` to the recipe.
  def apply(append: DocRecipe[T]) = new AddPretty(content) +: append
}
// Eagerly captures `value` (StrictVal) for deferred pretty-printing.
def pretty[S <: T : TypeTag](value: S) =
  new prettyAppender(StrictVal(value))
// Captures `value` by name (LazyVal) so evaluation — and any failure — is
// deferred until the document is actually rendered.
def prettyLazy[S <: T : TypeTag](value: => S) =
  new prettyAppender(LazyVal(value))
// A converter that may fail to produce an appender; `asPretty` propagates the
// absence as None.
trait PartialConverter {
  def unquote: Option[RecipeAppender[T]]
  def asPretty: Option[DocRecipe[T]] = unquote.map(apply)
}
// A converter that always produces an appender; `asPretty` renders it into a
// document recipe.
trait Converter {
  def unquote: RecipeAppender[T]
  def asPretty: DocRecipe[T] = apply(unquote)
}
}
// Lower-priority implicits so these conversions do not shadow more specific
// ones defined in Pretty[T] itself.
protected class LowPriorityPrettyImplicits[T : TypeTag] {
  // Lets a plain String be used wherever a RecipeAppender[T] is expected;
  // it simply prepends an AddText step.
  implicit class textAppender(text: String) extends RecipeAppender[T] {
    def apply(append: DocRecipe[T]) = AddText(text) +: append
  }
  // Explicit factory for the implicit conversion above.
  case object text {
    def apply(value: String) = new textAppender(value)
  }
  // This allows writing Pretty(...) instead of Some(Pretty(...)) when defining DocGens
  implicit def liftDocRecipe(opts: DocRecipe[T]): Some[DocRecipe[T]] = Some(opts)
}
// Default entry point: pretty-printing over any value type.
object Pretty extends Pretty[Any] {
  // Embeds an already-built Doc verbatim into a recipe (wrapped in DocLiteral).
  case class literal(doc: Doc) extends RecipeAppender[Any] {
    def apply(append: DocRecipe[Any]) = AddPretty(DocLiteral(doc)) +: append
  }
}
| HuangLS/neo4j | community/cypher/frontend-2.3/src/main/scala/org/neo4j/cypher/internal/frontend/v2_3/perty/recipe/Pretty.scala | Scala | apache-2.0 | 9,206 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import scala.collection.mutable.HashMap
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.storage.RDDInfo
/**
* :: DeveloperApi ::
* Stores information about a stage to pass from the scheduler to SparkListeners.
*/
@DeveloperApi
class StageInfo(
    val stageId: Int,
    val attemptId: Int,
    val name: String,
    val numTasks: Int,
    val rddInfos: Seq[RDDInfo],
    val details: String) {
  /** When this stage was submitted from the DAGScheduler to a TaskScheduler. */
  var submissionTime: Option[Long] = None
  /** Time when all tasks in the stage completed or when the stage was cancelled. */
  var completionTime: Option[Long] = None
  /** If the stage failed, the reason why. */
  var failureReason: Option[String] = None
  /** Terminal values of accumulables updated during this stage. */
  val accumulables = HashMap[Long, AccumulableInfo]()

  /** Marks this stage as failed, recording the reason and the completion time. */
  def stageFailed(reason: String) {
    failureReason = Some(reason)
    completionTime = Some(System.currentTimeMillis)
  }
}
private[spark] object StageInfo {
  /**
   * Construct a StageInfo from a Stage.
   *
   * A Stage is bounded by shuffle dependencies, so besides the stage's own RDD
   * we also record every ancestor RDD reachable through narrow dependencies.
   *
   * @param stage    the stage to summarize
   * @param numTasks when given, overrides `stage.numTasks`
   */
  def fromStage(stage: Stage, numTasks: Option[Int] = None): StageInfo = {
    // The stage's own RDD first, then its narrow-dependency ancestors.
    val rddInfos = RDDInfo.fromRdd(stage.rdd) +: stage.rdd.getNarrowAncestors.map(RDDInfo.fromRdd)
    new StageInfo(
      stage.id,
      stage.attemptId,
      stage.name,
      numTasks.getOrElse(stage.numTasks),
      rddInfos,
      stage.details)
  }
}
| hengyicai/OnlineAggregationUCAS | core/src/main/scala/org/apache/spark/scheduler/StageInfo.scala | Scala | apache-2.0 | 2,593 |
package org.greencheek.web.filter.memcached
import com.excilys.ebi.gatling.core.Predef._
import com.excilys.ebi.gatling.http.Predef._
import akka.util.duration._
import bootstrap._
/**
* Created by dominictootell on 01/05/2014.
*/
// Gatling load simulation: boots an embedded Tomcat with the memcached
// publishing filter configured to use the binary memcached protocol, then
// issues GET requests against a simple servlet for 60 seconds per user.
class ExecuteSimpleGetRequestBinaryIT extends Simulation {
  // Filter init params: switch the memcached client to the binary protocol.
  val filterInitParams: java.util.Map[String, String] = new java.util.HashMap[String, String](1, 1.0f)
  filterInitParams.put(PublishToMemcachedFilter.MEMCACHED_USE_BINARY,"true")

  // Embedded Tomcat hosting the filter under context path /filter; the
  // memcached host list comes from -Dmemcached.hosts (default localhost:11211).
  val server = new TomcatServerIT("/filter");
  server.setupServlet3Filter(System.getProperty("memcached.hosts","localhost:11211"), null, filterInitParams)
  var url: String = server.setupServlet("/simple/*", "simple", "org.greencheek.web.filter.memcached.servlets.SayHelloServletIT", false)
  server.startTomcat
  // Rewrite the URL with the port Tomcat actually bound to.
  url = server.replacePort(url)
  System.out.println(url)

  // Ensure Tomcat is stopped when the simulation JVM exits.
  sys.ShutdownHookThread {
    server.shutdownTomcat
  }

  // HTTP protocol defaults shared by every request in this simulation.
  val httpConf = httpConfig
    .baseURL(url)
    .acceptHeader("text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")
    .acceptCharsetHeader("ISO-8859-1,utf-8;q=0.7,*;q=0.3")
    .acceptLanguageHeader("en-US,en;q=0.8,fr;q=0.6")
    .acceptEncodingHeader("gzip,deflate,sdch")
    .userAgentHeader("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11")

  val headers_1 = Map(
    "Connection" -> "Keep-Alive"
  )

  // Each virtual user loops GET /hey for 60 seconds, expecting HTTP 200.
  val scn = scenario("Scenario Name")
    .during(60 seconds) {
      exec(http("request_1")
        .get("/hey")
        .headers(headers_1)
        .header("Content-Type","application/json")
        .check(status.is(200) )
      )
    }

  // 150 users with a ramp of 100 (Gatling 1.x ramp units — seconds, presumably;
  // verify against the Gatling version in use).
  setUp(scn.users(150).ramp(100).protocolConfig(httpConf))
}
| tootedom/tomcat-memcached-response-filter | src/integration-test/scala/org/greencheek/web/filter/memcached/ExecuteSimpleGetRequestBinaryIT.scala | Scala | apache-2.0 | 1,689 |
package db.impl.query
import ore.db.DbRef
import ore.models.project.{Project, Version}
import ore.models.user.User
import com.github.tminglei.slickpg.InetString
import doobie._
import doobie.implicits._
/**
 * Doobie queries for recording and aggregating project view / version download
 * statistics.
 *
 * Individual events are inserted into `*_individual` tables, then periodically
 * aggregated into per-day tables (`processStats`) and finally purged once
 * processed and older than the 30-day retention window.
 */
object StatTrackerQueries extends WebDoobieOreProtocol {

  /** Records a single version download event (timestamped server-side via now()). */
  def addVersionDownload(
      projectId: DbRef[Project],
      versionId: DbRef[Version],
      address: InetString,
      cookie: String,
      userId: Option[DbRef[User]]
  ): Update0 =
    sql"""|INSERT INTO project_versions_downloads_individual (created_at, project_id, version_id, address, cookie, user_id)
          |  VALUES (now(), $projectId, $versionId, $address, $cookie, $userId);""".stripMargin.update

  /** Records a single project view event (timestamped server-side via now()). */
  def addProjectView(
      projectId: DbRef[Project],
      address: InetString,
      cookie: String,
      userId: Option[DbRef[User]]
  ): Update0 =
    sql"""|INSERT INTO project_views_individual (created_at, project_id, address, cookie, user_id)
          |  VALUES (now(), $projectId, $address, $cookie, $userId);""".stripMargin.update

  // Finds an existing stats cookie for this client, matching either by address
  // or (when known) by user id. `table` is an internal constant, never user
  // input, so Fragment.const is safe here.
  private def findStatsCookie(table: String, address: InetString, userId: Option[DbRef[User]]) =
    sql"""|SELECT cookie
          |  FROM ${Fragment.const(table)}
          |  WHERE address = $address
          |     OR (user_id IS NOT NULL AND user_id = $userId) LIMIT 1;""".stripMargin

  /** Looks up an existing download-tracking cookie for this address/user. */
  def findVersionDownloadCookie(
      address: InetString,
      userId: Option[DbRef[User]]
  ): Query0[String] = findStatsCookie("project_versions_downloads_individual", address, userId).query[String]

  /** Looks up an existing view-tracking cookie for this address/user. */
  def findProjectViewCookie(
      address: InetString,
      userId: Option[DbRef[User]]
  ): Query0[String] = findStatsCookie("project_views_individual", address, userId).query[String]

  // Back-fills missing user_ids on unprocessed rows from other rows that share
  // the same cookie and do have a user_id.
  private def fillStatsUserIdsFromOthers(table: String): Update0 =
    sql"""|UPDATE ${Fragment.const(table)} pvdi
          |SET user_id = (SELECT pvdi2.user_id
          |               FROM ${Fragment.const(table)} pvdi2
          |               WHERE pvdi2.user_id IS NOT NULL
          |                 AND pvdi2.cookie = pvdi.cookie
          |               LIMIT 1)
          |  WHERE pvdi.user_id IS NULL
          |    AND pvdi.processed = 0;""".stripMargin.update

  // Core aggregation: marks individual rows as processed (processed + 1) and
  // upserts distinct-client counts per day into the day table. Rows are counted
  // only on their first processing pass (processed array <@ ARRAY[1]).
  private def processStatsMain(
      individualTable: String,
      dayTable: String,
      statColumn: String,
      withUserId: Boolean,
      includeVersionId: Boolean
  ): Update0 = {
    // Identified clients are counted by user_id, anonymous ones by address.
    val withUserIdCond = if (withUserId) fr"user_id IS NOT NULL" else fr"user_id IS NULL"
    val retColumn      = if (withUserId) fr"user_id" else fr"address"
    // Optionally emit a version_id column, qualified by a table alias when given.
    val versionIdColumn: Option[String] => Fragment =
      if (includeVersionId) {
        case None    => fr"version_id,"
        case Some(s) => fr"${Fragment.const(s)}.version_id,"
      }
      else _ => fr""
    val conflictColumn = if (includeVersionId) fr"version_id" else fr"project_id"
    val statColumnFrag = Fragment.const(statColumn)

    sql"""|WITH d AS (
          |  UPDATE ${Fragment.const(individualTable)} SET processed = processed + 1
          |    WHERE $withUserIdCond
          |    RETURNING created_at, project_id, ${versionIdColumn(None)} $retColumn, processed
          |)
          |INSERT
          |  INTO ${Fragment.const(dayTable)} AS pvd (day, project_id, ${versionIdColumn(None)} $statColumnFrag)
          |SELECT sq.day,
          |       sq.project_id,
          |       ${versionIdColumn(Some("sq"))}
          |       count(DISTINCT sq.$retColumn) FILTER ( WHERE sq.processed <@ ARRAY [1] )
          |  FROM (SELECT date_trunc('DAY', d.created_at) AS day,
          |               d.project_id,
          |               ${versionIdColumn(Some("d"))}
          |               $retColumn,
          |               array_agg(d.processed) AS processed
          |          FROM d
          |          GROUP BY date_trunc('DAY', d.created_at), d.project_id, ${versionIdColumn(Some("d"))} $retColumn) sq
          |  GROUP BY sq.day, ${versionIdColumn(Some("sq"))} sq.project_id
          |ON CONFLICT (day, $conflictColumn) DO UPDATE SET $statColumnFrag = pvd.$statColumnFrag + excluded.$statColumnFrag""".stripMargin.update
  }

  // Purges processed individual rows once they fall outside the 30-day
  // retention window.
  // FIX(review): the cutoff was `now() + '30 days'::INTERVAL`, which is 30 days
  // in the FUTURE — every processed row (all created in the past) matched, so
  // rows were deleted immediately after processing instead of being retained
  // for 30 days. The cutoff must be `now() - '30 days'::INTERVAL`.
  private def deleteOldIndividual(individualTable: String) =
    sql"""DELETE FROM ${Fragment.const(individualTable)} WHERE processed != 0 AND created_at < now() - '30 days'::INTERVAL""".update

  // Full processing pipeline for one stat kind, in order: back-fill user ids,
  // aggregate identified clients, aggregate anonymous clients, purge old rows.
  private def processStats(individualTable: String, dayTable: String, statColumn: String, includeVersionId: Boolean) =
    Seq(
      fillStatsUserIdsFromOthers(individualTable),
      processStatsMain(individualTable, dayTable, statColumn, withUserId = true, includeVersionId = includeVersionId),
      processStatsMain(individualTable, dayTable, statColumn, withUserId = false, includeVersionId = includeVersionId),
      deleteOldIndividual(individualTable)
    )

  /** Pipeline aggregating individual version downloads into per-day counts. */
  val processVersionDownloads: Seq[Update0] = processStats(
    "project_versions_downloads_individual",
    "project_versions_downloads",
    "downloads",
    includeVersionId = true
  )

  /** Pipeline aggregating individual project views into per-day counts. */
  val processProjectViews: Seq[Update0] =
    processStats("project_views_individual", "project_views", "views", includeVersionId = false)
}
| SpongePowered/Ore | ore/app/db/impl/query/StatTrackerQueries.scala | Scala | mit | 5,168 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import scala.language.implicitConversions
import org.apache.commons.lang.StringUtils
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
import org.apache.spark.sql.execution.CarbonLateDecodeStrategy
import org.apache.spark.sql.execution.command.{BucketFields, CreateTable, Field}
import org.apache.spark.sql.optimizer.CarbonLateDecodeRule
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types.{DecimalType, StructType}
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.spark.CarbonOption
import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
/**
* Carbon relation provider compliant to data source api.
* Creates carbon relations
*/
class CarbonSource extends CreatableRelationProvider
  with SchemaRelationProvider with DataSourceRegister {

  // Name used in `USING carbondata` / DataFrame format("carbondata").
  override def shortName(): String = "carbondata"

  // called by any write operation like INSERT INTO DDL or DataFrame.write API
  override def createRelation(
      sqlContext: SQLContext,
      mode: SaveMode,
      parameters: Map[String, String],
      data: DataFrame): BaseRelation = {
    CarbonEnv.init(sqlContext.sparkSession)
    // User should not specify path since only one store is supported in carbon currently,
    // after we support multi-store, we can remove this limitation
    require(!parameters.contains("path"), "'path' should not be specified, " +
      "the path to store carbon file is the 'storePath' " +
      "specified when creating CarbonContext")
    val options = new CarbonOption(parameters)
    val storePath = CarbonProperties.getInstance().getProperty(CarbonCommonConstants.STORE_LOCATION)
    val tablePath = new Path(storePath + "/" + options.dbName + "/" + options.tableName)
    // Physical existence of the table directory decides save vs append below.
    val isExists = tablePath.getFileSystem(sqlContext.sparkContext.hadoopConfiguration)
      .exists(tablePath)
    // (doSave, doAppend): whether to (re)create the table data or append to it.
    val (doSave, doAppend) = (mode, isExists) match {
      case (SaveMode.ErrorIfExists, true) =>
        sys.error(s"ErrorIfExists mode, path $storePath already exists.")
      case (SaveMode.Overwrite, true) =>
        // Overwrite of an existing table: drop it first, then save fresh.
        sqlContext.sparkSession
          .sql(s"DROP TABLE IF EXISTS ${ options.dbName }.${ options.tableName }")
        (true, false)
      case (SaveMode.Overwrite, false) | (SaveMode.ErrorIfExists, false) =>
        (true, false)
      case (SaveMode.Append, _) =>
        (false, true)
      case (SaveMode.Ignore, exists) =>
        // Ignore: only write when the table does not already exist.
        (!exists, false)
    }

    if (doSave) {
      // save data when the save mode is Overwrite.
      new CarbonDataFrameWriter(sqlContext, data).saveAsCarbonFile(parameters)
    } else if (doAppend) {
      new CarbonDataFrameWriter(sqlContext, data).appendToCarbonFile(parameters)
    }

    createRelation(sqlContext, parameters, data.schema)
  }

  // called by DDL operation with a USING clause
  override def createRelation(
      sqlContext: SQLContext,
      parameters: Map[String, String],
      dataSchema: StructType): BaseRelation = {
    CarbonEnv.init(sqlContext.sparkSession)
    addLateDecodeOptimization(sqlContext.sparkSession)
    val path = createTableIfNotExists(sqlContext.sparkSession, parameters, dataSchema)
    CarbonDatasourceHadoopRelation(sqlContext.sparkSession, Array(path), parameters,
      Option(dataSchema))
  }

  // Installs Carbon's late-decode strategy and optimizer rule into the session,
  // guarded so it happens only while extraStrategies is still empty.
  private def addLateDecodeOptimization(ss: SparkSession): Unit = {
    if (ss.sessionState.experimentalMethods.extraStrategies.isEmpty) {
      ss.sessionState.experimentalMethods.extraStrategies = Seq(new CarbonLateDecodeStrategy)
      ss.sessionState.experimentalMethods.extraOptimizations = Seq(new CarbonLateDecodeRule)
    }
  }

  // Returns the table's store path, creating the table from the Spark schema
  // when the metastore lookup throws NoSuchTableException.
  private def createTableIfNotExists(sparkSession: SparkSession, parameters: Map[String, String],
      dataSchema: StructType): String = {

    val dbName: String = parameters.getOrElse("dbName", CarbonCommonConstants.DATABASE_DEFAULT_NAME)
    val tableName: String = parameters.getOrElse("tableName", "default_table")
    if (StringUtils.isBlank(tableName)) {
      throw new MalformedCarbonCommandException("The Specified Table Name is Blank")
    }
    if (tableName.contains(" ")) {
      throw new MalformedCarbonCommandException("Table Name Should not have spaces ")
    }
    val options = new CarbonOption(parameters)
    try {
      // Table already known to the metastore: just return its path.
      CarbonEnv.get.carbonMetastore.lookupRelation(Option(dbName), tableName)(sparkSession)
      CarbonEnv.get.carbonMetastore.storePath + s"/$dbName/$tableName"
    } catch {
      case ex: NoSuchTableException =>
        // Table is missing: translate the Spark schema into Carbon fields.
        val fields = dataSchema.map { col =>
          val dataType = Option(col.dataType.toString)
          // This is to parse complex data types
          val f: Field = Field(col.name, dataType, Option(col.name), None, null)
          // the data type of the decimal type will be like decimal(10,0)
          // so checking the start of the string and taking the precision and scale.
          // resetting the data type with decimal
          Option(col.dataType).foreach {
            case d: DecimalType =>
              f.precision = d.precision
              f.scale = d.scale
              f.dataType = Some("decimal")
            case _ => // do nothing
          }
          f
        }
        val map = scala.collection.mutable.Map[String, String]()
        parameters.foreach { parameter => map.put(parameter._1, parameter._2) }
        // Bucketing: reject explicitly signed bucket counts, otherwise build
        // the BucketFields from the configured columns and count.
        val bucketFields = if (options.isBucketingEnabled) {
          if (options.bucketNumber.toString.contains("-") ||
              options.bucketNumber.toString.contains("+") ) {
            throw new MalformedCarbonCommandException("INVALID NUMBER OF BUCKETS SPECIFIED" +
              options.bucketNumber.toString)
          }
          else {
            Some(BucketFields(options.bucketColumns.split(","), options.bucketNumber))
          }
        } else {
          None
        }
        val cm = TableCreator.prepareTableModel(false, Option(dbName),
          tableName, fields, Nil, bucketFields, map)
        CreateTable(cm, false).run(sparkSession)
        CarbonEnv.get.carbonMetastore.storePath + s"/$dbName/$tableName"
      case ex: Exception =>
        throw new Exception("do not have dbname and tablename for carbon table", ex)
    }
  }
}
| mohammadshahidkhan/incubator-carbondata | integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSource.scala | Scala | apache-2.0 | 7,242 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations.calculations
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.ct.computations._
// Unit tests for IncomeFromPropertyCalculator:
//  - netIncomeFromProperty: CP509 = CP507 - CP508 (may be negative)
//  - totalIncomeFromProperty: CP511 = CP509 + CP510
class IncomeFromPropertyCalculatorSpec extends WordSpec with Matchers {

  "Income from Property calculator" should {

    "calculate net income" in new IncomeFromPropertyCalculator {
      netIncomeFromProperty(cp507 = CP507(0), cp508 = CP508(0)) shouldBe CP509(0)
    }

    "calculate negative net income" in new IncomeFromPropertyCalculator {
      netIncomeFromProperty(cp507 = CP507(0), cp508 = CP508(123)) shouldBe CP509(-123)
    }

    "calculate positive net income" in new IncomeFromPropertyCalculator {
      netIncomeFromProperty(cp507 = CP507(1000), cp508 = CP508(100)) shouldBe CP509(900)
    }

    "calculate total income" in new IncomeFromPropertyCalculator {
      totalIncomeFromProperty(CP509(0), CP510(Some(0))) shouldBe CP511(0)
    }

    "calculate negative total income" in new IncomeFromPropertyCalculator {
      totalIncomeFromProperty(CP509(-123), CP510(Some(123))) shouldBe CP511(0)
    }

    "calculate positive total income" in new IncomeFromPropertyCalculator {
      totalIncomeFromProperty(CP509(900), CP510(Some(300))) shouldBe CP511(1200)
    }
  }
}
| hmrc/ct-calculations | src/test/scala/uk/gov/hmrc/ct/computations/calculations/IncomeFromPropertyCalculatorSpec.scala | Scala | apache-2.0 | 1,820 |
/**
* © 2019 Refinitiv. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package logic
import java.util.Properties
import java.util.concurrent.TimeoutException
import akka.NotUsed
import akka.actor.ActorSystem
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.stream.scaladsl.Source
import cmwell.common.{DeleteAttributesCommand, DeletePathCommand, WriteCommand, _}
import cmwell.domain._
import cmwell.driver.Dao
import cmwell.fts._
import cmwell.irw._
import cmwell.stortill.Strotill.{CasInfo, EsExtendedInfo, ZStoreInfo}
import cmwell.stortill.{Operations, ProxyOperations}
import cmwell.util.concurrent.SingleElementLazyAsyncCache
import cmwell.util.{Box, BoxedFailure, EmptyBox, FullBox}
import cmwell.ws.Settings
import cmwell.ws.qp.Encoder
import cmwell.zcache.ZCache
import cmwell.zstore.ZStore
import com.datastax.driver.core.ConsistencyLevel
import com.typesafe.scalalogging.LazyLogging
import javax.inject._
import k.grid.Grid
import ld.cmw.passiveFieldTypesCacheImpl
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.clients.producer._
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.ByteArrayDeserializer
import org.elasticsearch.action.bulk.BulkResponse
import org.joda.time.{DateTime, DateTimeZone}
import scala.concurrent._
import scala.concurrent.duration._
import scala.util.{Failure, Success}
@Singleton
class CRUDServiceFS @Inject()(implicit ec: ExecutionContext, sys: ActorSystem) extends LazyLogging {
import cmwell.ws.Settings._

// Field-types cache backed by this CRUD service.
lazy val passiveFieldTypesCache = new passiveFieldTypesCacheImpl(this, ec, sys)

// Cassandra consistency level used for all reads issued by this service.
val level: ConsistencyLevel = ONE
lazy val defaultParallelism = cmwell.util.os.Props.os.getAvailableProcessors

// Blob store (and cache on top of it) over the second keyspace.
lazy val zStore = ZStore(Dao(irwServiceDaoClusterName, irwServiceDaoKeySpace2, irwServiceDaoHostName, 9042, initCommands = None))
lazy val zCache = new ZCache(zStore)

// Infoton read/write service over Cassandra; read cache toggled by config.
lazy val irwService = IRWService.newIRW(Dao(irwServiceDaoClusterName, irwServiceDaoKeySpace2, irwServiceDaoHostName, 9042, initCommands = None),
  disableReadCache = !Settings.irwReadCacheEnabled)

val ftsService = FTSService(config)

// Kafka producer config: byte-array key/value serializers, lz4 compression.
val producerProperties = new Properties
producerProperties.put("bootstrap.servers", kafkaURL)
producerProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer")
producerProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer")
producerProperties.put(ProducerConfig.COMPRESSION_TYPE_CONFIG,"lz4")

//With CW there is no kafka writes and no kafka configuration thus the producer is created lazily
lazy val kafkaProducer = new KafkaProducer[Array[Byte], Array[Byte]](producerProperties)

val proxyOps: Operations = ProxyOperations(irwService, ftsService)

// Periodically refreshed caches of ES mappings and /meta/ns predicate URIs.
val ESMappingsCache =
  new SingleElementLazyAsyncCache[Set[String]](Settings.fieldsNamesCacheTimeout.toMillis, Set.empty)(
    ftsService.getMappings(withHistory = true)
  )

val metaNsCache =
  new SingleElementLazyAsyncCache[Set[String]](Settings.fieldsNamesCacheTimeout.toMillis, Set.empty)(
    fetchEntireMetaNsAsPredicates
  )
// Pages through everything under /meta/ns (512 infotons at a time) and builds
// the set of full predicate URIs: each field infoton's local name is appended
// to the `url` value of its namespace infoton.
private def fetchEntireMetaNsAsPredicates = {
  val chunkSize = 512

  // Recursively fetch all descendants of /meta/ns, chunkSize at a time; a full
  // chunk means there may be more, so fetch the next offset too.
  def fetchFields(offset: Int = 0): Future[Seq[Infoton]] = {
    val fieldsFut = search(Some(PathFilter("/meta/ns", descendants = true)),
      paginationParams = PaginationParams(offset, chunkSize),
      withData = true)
    fieldsFut.flatMap{ fields =>
      if (fields.length == chunkSize) fetchFields(offset + chunkSize).map(_ ++ fields.infotons)
      else Future.successful(fields.infotons)
    }
  }

  fetchFields().map { f =>
    // Fields live one level deeper than their namespace infotons, so the path
    // slash-count separates them (> 3 slashes => field infoton).
    val (fieldsInfotons,namespacesInfotons) = f.partition(_.systemFields.path.count('/'.==)>3)
    // Map each namespace prefix (infoton name) to its "url" field value.
    val prefixToUrl = (for {
      i <- namespacesInfotons
      f <- i.fields
      u <- f.get("url")
      v <- u.collect{case FString(w,_,_) => w}.headOption
    } yield i.systemFields.name -> v).toMap
    // For each field infoton, resolve its namespace url and append the local name.
    val fieldsSet = fieldsInfotons
      .map { infoton =>
        val lastTwoPathParts = infoton.systemFields.path.split('/').reverseIterator.take(2).toArray.reverse
        val (prefix,localName) = lastTwoPathParts(0) -> lastTwoPathParts(1)
        prefixToUrl.get(prefix).map(_ + localName)
      }
      .view
      .collect {
        case Some(x) => x
      }.to(Set)
    fieldsSet
  }
}
// Delegates to FTS: per-node counts of open search (scroll) contexts.
def countSearchOpenContexts: Array[(String, Long)] =
  ftsService.countSearchOpenContexts()
// Reads a single infoton by path from Cassandra at this service's consistency level.
def getInfotonByPathAsync(path: String): Future[Box[Infoton]] =
  irwService.readPathAsync(path, level)
// Reads the infoton at `path`; when `length` > 0 also lists its children via
// FTS and returns them wrapped in a CompoundInfoton. Returns None when the
// path does not exist (or the read failed).
def getInfoton(path: String, offset: Option[Int], length: Option[Int]): Future[Option[ContentPortion]] = {

  // FTS children listing, bounded by the graceful-degradation timeout;
  // resolves to None when the listing did not complete in time.
  def listChildrenBoundedTime(path: String,
                              offset: Option[Int],
                              length: Option[Int]): Future[Option[FTSSearchResponse]] = {
    val fut =
      ftsService.listChildren(path, offset.getOrElse(0), length.get)
    val dur = cmwell.ws.Settings.esGracfulDegradationTimeout.seconds
    cmwell.util.concurrent.timeoutOptionFuture(fut, dur)
  }

  val infotonFuture = irwService.readPathAsync(path, level)

  val reply = if (length.getOrElse(0) > 0) {
    val searchResponseFuture = listChildrenBoundedTime(path, offset, length)
    // if children requested
    for {
      infotonBox <- infotonFuture
      searchResponseOpt <- searchResponseFuture
    } yield {
      infotonBox match {
        case FullBox(i) => {
          searchResponseOpt match {
            // FTS timed out: report the infoton with unknown nested content.
            case None => Some(UnknownNestedContent(i))
            case Some(searchResponse) => {
              if (searchResponse.infotons.nonEmpty) {
                // Wrap the infoton together with its children page.
                Some(
                  Everything(
                    CompoundInfoton(SystemFields(
                      i.systemFields.path,
                      i.systemFields.lastModified,
                      i.systemFields.lastModifiedBy,
                      i.systemFields.dc,
                      i.systemFields.indexTime,
                      "",
                      "http"),
                      i.fields,
                      searchResponse.infotons,
                      searchResponse.offset,
                      searchResponse.length,
                      searchResponse.total)
                  )
                )
              } else Some(Everything(i))
            }
          }
        }
        case EmptyBox => Option.empty[ContentPortion]
        case BoxedFailure(e) =>
          logger.error(s"boxed failure for readPathAsync [$path]",e)
          Option.empty[ContentPortion]
      }
    }
  } else {
    // no children requested, just return infoton from IRW service
    infotonFuture.map{
      case BoxedFailure(e) =>
        logger.error(s"boxed failure for readPathAsync [$path]",e)
        None
      case box => box.toOption.map(Everything.apply)
    }
  }
  reply
}
// Returns up to `limit` historical versions of `path`, sorted by timestamp.
// NOTE(review): `irwService.history` (unlike historyAsync below) appears to be
// a synchronous call — confirm it does not block a dispatcher thread.
def getInfotonHistory(path: String, limit: Int): Future[InfotonHistoryVersions] = {
  val (_, uuidVec) = irwService.history(path,limit).sortBy(_._1).unzip
  if (uuidVec.isEmpty) Future.successful(InfotonHistoryVersions(Vector.empty[Infoton]))
  else
    // Missing/failed uuid reads are silently dropped (only FullBox is kept).
    irwService.readUUIDSAsync(uuidVec, level).map(seq => InfotonHistoryVersions(seq.collect { case FullBox(i) => i }))
}
// Raw (timestamp, uuid) history entries for `path`, up to `limit` of them.
def getRawPathHistory(path: String, limit: Int): Future[Vector[(Long, String)]] =
  irwService.historyAsync(path, limit)
// Streams historical versions of `path`, resolving each uuid to its infoton.
// Read failures are logged and the corresponding element is dropped from the
// stream (only FullBox results pass the final collect).
def getInfotonHistoryReactive(path: String): Source[Infoton,NotUsed] = {
  getRawPathHistoryReactive(path)
    .mapAsync(defaultParallelism) {
      case (_, uuid) =>
        irwService.readUUIDAsync(uuid).andThen {
          case Failure(fail) => logger.error(s"uuid [$uuid] could not be fetched from cassandra", fail)
          case Success(EmptyBox) => logger.error(s"uuid [$uuid] could not be fetched from cassandra: got EmptyBox")
          case Success(BoxedFailure(e)) =>
            logger.error(s"uuid [$uuid] could not be fetched from cassandra: got BoxedFailure", e)
        }
    }
    .collect { case FullBox(i) => i }
}
/**
 * Streams raw (timestamp, uuid) history entries for `path`.
 *
 * WARNING!!!
 * if used against old IRW, results are unbounded, but NOT(!!!) streamable.
 * all the versions are returned as a single in-memory vector, and thus,
 * may result in OOM error in the case of heavily updated paths.
 */
def getRawPathHistoryReactive(path: String): Source[(Long,String),NotUsed] =
  irwService.historyReactive(path)
// Bulk-reads `paths`; missing or failed reads are silently dropped from the bag.
def getInfotons(paths: Seq[String]): Future[BagOfInfotons] =
  irwService.readPathsAsync(paths, level).map{ infopts =>
    BagOfInfotons(infopts.collect{
      case FullBox(infoton) => infoton
    })
  }
// Bulk-reads infotons by path and/or uuid; when both are given the two result
// sets are merged and deduplicated. Missing/failed reads are dropped.
def getInfotonsByPathOrUuid(paths: Vector[String] = Vector.empty[String],
                            uuids: Vector[String] = Vector.empty[String]): Future[BagOfInfotons] = {
  val futureInfotonsList: Future[List[Infoton]] = (paths, uuids) match {
    case (ps, us) if ps.isEmpty && us.isEmpty => Future.successful(Nil)
    case (ps, us) if us.isEmpty => irwService.readPathsAsync(ps, level).map(_.collect { case FullBox(i) => i }.toList)
    case (ps, us) if ps.isEmpty => irwService.readUUIDSAsync(us, level).map(_.collect { case FullBox(i) => i }.toList)
    case (ps, us) => {
      // Both kinds requested: read in parallel, then merge and deduplicate.
      val f1 = irwService.readPathsAsync(ps, level).map(_.collect { case FullBox(i) => i })
      val f2 = irwService.readUUIDSAsync(us, level).map(_.collect { case FullBox(i) => i })
      Future.sequence(List(f1, f2)).map(_.flatten.distinct)
    }
  }
  futureInfotonsList.map(BagOfInfotons.apply)
}
// Reads a single infoton version by its uuid.
def getInfotonByUuidAsync(uuid: String): Future[Box[Infoton]] = {
  irwService.readUUIDAsync(uuid, level)
}
// Bulk-reads infotons by uuid; missing/failed reads are silently dropped.
def getInfotonsByUuidAsync(uuidVec: Seq[String]): Future[Seq[Infoton]] = {
  irwService
    .readUUIDSAsync(uuidVec, level)
    .map(_.collect {
      case FullBox(i) => i
    })
}
// Enqueues a WriteCommand for `infoton` to Kafka and resolves to true once
// sent. Large FileInfoton bodies (>= thresholdToUseZStore) are written to
// zStore first and only the data pointer travels through Kafka. An epoch (0L)
// lastModified is replaced with "now" before sending. Fails when the infoton
// carries more than 10000 field values in total.
def putInfoton(infoton: Infoton, isPriorityWrite: Boolean = false): Future[Boolean] = {
  require(infoton.kind != "DeletedInfoton",
    "Writing a DeletedInfoton does not make sense. use proper delete API instead.")

  // build a command with infoton
  val cmdWrite = WriteCommand(infoton)
  // convert the command to Array[Byte] payload
  lazy val payload: Array[Byte] = CommandSerializer.encode(cmdWrite)

  if (infoton.fields.map(_.map(_._2.size).sum).getOrElse(0) > 10000) {
    Future.failed(new IllegalArgumentException("too many fields"))
  } else {
    // Tuple of (encoded command as-is, encoded command with lastModified fixed);
    // only the second element is actually sent below.
    val payloadForIndirectLargeInfoton: Future[(Array[Byte],Array[Byte])] = infoton match {
      case i @ FileInfoton(_, _, Some(FileContent(Some(data), _, _, _)))
        if data.length >= thresholdToUseZStore => {
        // Strip the body and persist it in zStore under its data pointer key.
        val fi = i.withoutData
        zStore.put(fi.content.flatMap(_.dataPointer).get, data).map { _ =>
          val withPossibleEpoch = CommandSerializer.encode(WriteCommand(fi))
          if (i.systemFields.lastModified.getMillis == 0L)
            withPossibleEpoch -> CommandSerializer.encode(WriteCommand(fi.copy(fi.systemFields.copy(lastModified = new DateTime()))))
          else withPossibleEpoch -> withPossibleEpoch
        }
      }
      case i => {
        val t =
          if (i.systemFields.lastModified.getMillis == 0L)
            payload -> CommandSerializer.encode(WriteCommand(i.copyInfoton(i.systemFields.copy(lastModified = new DateTime()))))
          else payload -> payload
        Future.successful(t)
      }
    }
    payloadForIndirectLargeInfoton.flatMap { payload =>
      sendToKafka(infoton.systemFields.path, payload._2, isPriorityWrite).map(_ => true)
    }
  }
}
// Sends an OverwriteCommand per infoton to Kafka; resolves to true when all sent.
def putOverwrites(infotons: Vector[Infoton]): Future[Boolean] = {
  val cmds = infotons.map(OverwriteCommand(_))
  Future.traverse(cmds)(sendToKafka(_)).map { _ =>
    true
  }
}
// Sends a WriteCommand per infoton to Kafka. /meta/nn and /meta/ns infotons
// are written first (on the priority topic) so that format/search metadata
// exists by the time the regular infotons are read back. Epoch (0L)
// lastModified values are replaced with "now"; `atomicUpdates` supplies the
// expected previous uuid per path for conditional updates.
def putInfotons(infotons: Vector[Infoton],
                tid: Option[String] = None,
                atomicUpdates: Map[String, String] = Map.empty,
                isPriorityWrite: Boolean = false) = {
  require(
    infotons.forall(_.kind != "DeletedInfoton"),
    s"Writing a DeletedInfoton does not make sense. use proper delete API instead. malformed paths: ${infotons
      .collect {
        case DeletedInfoton(systemFields) => systemFields.path
      }
      .mkString("[", ",", "]")}"
  )

  // Sends each infoton as a WriteCommand, normalizing epoch lastModified.
  def kafkaWritesRes(infos: Vector[Infoton], isPriorityWriteInner: Boolean): Future[Unit] = {
    Future
      .traverse(infos) {
        case infoton if infoton.systemFields.lastModified.getMillis == 0L =>
          sendToKafka(
            WriteCommand(infoton.copyInfoton(infoton.systemFields.copy(lastModified = DateTime.now(DateTimeZone.UTC))),
              validTid(infoton.systemFields.path,tid),
              prevUUID = atomicUpdates.get(infoton.systemFields.path)),
            isPriorityWriteInner
          )
        case infoton =>
          sendToKafka(WriteCommand(infoton, validTid(infoton.systemFields.path, tid), atomicUpdates.get(infoton.systemFields.path)),
            isPriorityWriteInner)
      }
      .map(_ => ())
  }

  // writing meta to priority before regular infotons "ensures" (on pe,
  // in distributed env it's only an optimization), that when infotons are read,
  // the meta data to format & search by already exist.
  // So if you read infoton right after an ingest, prefixes will come out properly.
  val (meta, regs) = infotons.partition(_.systemFields.path.matches("/meta/n(n|s)/.+"))

  val metaWrites = {
    if (meta.isEmpty) Future.successful(())
    else kafkaWritesRes(meta, isPriorityWriteInner = true)
  }

  metaWrites
    .flatMap { _ =>
      if (regs.isEmpty) Future.successful(())
      else kafkaWritesRes(regs, isPriorityWrite)
    }
    .map { _ =>
      true
    }
}
// Enqueues a delete per path: DeleteAttributesCommand when specific fields are
// given, otherwise DeletePathCommand for the whole path. All commands share one
// timestamp; `atomicUpdates` supplies expected previous uuids per path.
def deleteInfotons(deletes: List[(String, Option[Map[String, Set[FieldValue]]])],
                   modifier: String,
                   tidOpt: Option[String] = None,
                   atomicUpdates: Map[String, String] = Map.empty,
                   isPriorityWrite: Boolean = false) = {
  val dt = new DateTime()
  val commands: List[SingleCommand] = deletes.map {
    case (path, Some(fields)) =>
      DeleteAttributesCommand(path, fields, dt, modifier, validTid(path, tidOpt), atomicUpdates.get(path))
    case (path, None) => DeletePathCommand(path, dt, modifier, validTid(path, tidOpt), atomicUpdates.get(path))
  }
  Future.traverse(commands)(sendToKafka(_,isPriorityWrite)).map(_ => true)
}
/**
  * Enqueues a delete for a single path: an attribute-level delete when `data` carries fields,
  * a full path delete otherwise. Completes with `true` once the command was sent to kafka.
  */
def deleteInfoton(path: String, modifier: String, data: Option[Map[String, Set[FieldValue]]], isPriorityWrite: Boolean = false) = {
  val delCommand: SingleCommand =
    data.fold[SingleCommand](DeletePathCommand(path, new DateTime(), modifier)) { fields =>
      DeleteAttributesCommand(path, fields, new DateTime(), modifier)
    }
  val payload = CommandSerializer.encode(delCommand)
  sendToKafka(delCommand.path, payload, isPriorityWrite).map(_ => true)
}
/**
  * upsert == update & insert
  * will delete ALL (!!!) values for a given field!
  * to backup values to preserve, you must add it to the inserts vector!
  */
def upsertInfotons(inserts: List[Infoton],
                   deletes: Map[String, Map[String, Option[Set[FieldValue]]]],
                   deletesModifier: String,
                   tid: Option[String] = None,
                   atomicUpdates: Map[String, String] = Map.empty,
                   isPriorityWrite: Boolean = false): Future[Boolean] = {
  // deleted infotons must never be written through this API; fail fast with the offending paths
  require(
    inserts.forall(_.kind != "DeletedInfoton"),
    s"Writing a DeletedInfoton does not make sense. use proper delete API instead. malformed paths: ${inserts
      .collect {
        case DeletedInfoton(systemFields) => systemFields.path
      }
      .mkString("[", ",", "]")}"
  )
  //require(!inserts.isEmpty,"if you only have DELETEs, use delete. not upsert!")
  // every inserted path must also appear in `deletes`: an upsert always replaces field values
  require(
    inserts.forall(i => deletes.keySet(i.systemFields.path)),
    "you can't use upsert for entirely new infotons! split your request into upsertInfotons and putInfotons!\\n" +
      s"deletes: ${deletes}\\ninserts: ${inserts}"
  )
  type FMap = Map[String, Map[String, Option[Set[FieldValue]]]]
  val eSet = Set.empty[FieldValue]
  val eMap = Map.empty[String, Set[FieldValue]]
  if (inserts.isEmpty && deletes.isEmpty) Future.successful(true)
  else {
    val dt = new DateTime()
    // "mixed" deletes target paths that are also inserted; "pure" deletes are delete-only paths
    val (mixedDeletes, pureDeletes): Tuple2[FMap, FMap] = deletes.partition {
      case (k, _) => inserts.exists(_.systemFields.path == k)
    }
    // delete-only paths: a None value means "delete all values of the field" (encoded as an empty set)
    val dels = pureDeletes.map {
      case (path, fieldSet) => {
        val m = fieldSet.map {
          case (f, None) => f -> eSet
          case (f, Some(s)) => f -> s
        }
        UpdatePathCommand(path, m, eMap, dt, deletesModifier, validTid(path,tid), atomicUpdates.get(path), "http")
      }
    }.toList
    // inserted paths: combine the per-path deletions with the new field values into one update command
    val ups = inserts.map { i =>
      {
        val del = mixedDeletes(i.systemFields.path).map {
          case (f, None) => f -> eSet
          case (f, Some(s)) => f -> s
        }
        val ins = i.fields match {
          case Some(fields) => fields
          case None => eMap //TODO: should we block this option? regular DELETE could have been used instead...
        }
        UpdatePathCommand(i.systemFields.path, del, ins, i.systemFields.lastModified, i.systemFields.lastModifiedBy, validTid(i.systemFields.path, tid),
          atomicUpdates.get(i.systemFields.path), i.systemFields.protocol)
      }
    }
    val commands:List[SingleCommand] = dels ::: ups
    // writing meta to priority before regular infotons "ensures" (on pe,
    // in distributed env it's only an optimization), that when infotons are read,
    // the meta data to format & search by already exist.
    // So if you read infoton right after an ingest, prefixes will come out properly.
    val (meta, regs) = commands.partition(_.path.matches("/meta/n(n|s)/.+"))
    val metaWrites = {
      if (meta.isEmpty) Future.successful(())
      else Future.traverse(meta)(sendToKafka(_, isPriorityWrite = true))
    }
    metaWrites
      .flatMap { _ =>
        if (regs.isEmpty) Future.successful(())
        else
          Future.traverse(regs) {
            // a zero lastModified is a "not set" marker; stamp it with the current UTC time
            case cmd @ UpdatePathCommand(_, _, _, lastModified, _, _, _, _) if lastModified.getMillis == 0L =>
              sendToKafka(cmd.copy(lastModified = DateTime.now(DateTimeZone.UTC)), isPriorityWrite)
            case cmd =>
              sendToKafka(cmd, isPriorityWrite)
          }
      }
      .map { _ =>
        true
      }
  }
}
/**
  * Streams raw record values from one kafka topic-partition, starting at `offset`.
  * When `maxLengthOpt` is given, exactly that many records are taken; otherwise the stream
  * completes once the partition has been idle for 5 seconds (presumed drained).
  */
def consumeKafka(topic: String, partition: Int, offset: Long, maxLengthOpt: Option[Long]): Source[Array[Byte], Consumer.Control] = {
  val byteArrayDeserializer = new ByteArrayDeserializer()
  val subscription = Subscriptions.assignmentWithOffset(new TopicPartition(topic, partition) -> offset)
  val consumerSettings = ConsumerSettings(Grid.system, byteArrayDeserializer, byteArrayDeserializer)
    .withBootstrapServers(kafkaURL)
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
  val source = Consumer.plainSource[Array[Byte], Array[Byte]](consumerSettings, subscription).
    map(_.value())
  maxLengthOpt.fold{
    source
      .idleTimeout(5.seconds)
      // NOTE(review): matching the idle-timeout by its message text is brittle across akka versions — confirm
      .recoverWithRetries(1, {
        //only for the case that the stream ended gracefully complete it. otherwise, pass the exception.
        case ex: TimeoutException if ex.getMessage.startsWith("No elements passed in the last") => Source.empty
      })
  }(source.take)
}
// todo move this logic to InputHandler!
/**
  * Returns the transaction id as-is, except for writes under /meta/nn/ or /meta/ns/
  * which never carry a tid (returns None for them, and when no tid was given).
  */
private def validTid(path: String, tid: Option[String]): Option[String] =
  // idiomatic replacement for the former fold-with-unused-binding: drop the tid for meta-ns paths
  tid.filterNot(_ => path.matches("/meta/n(n|s)/.+"))
// Serializes one command and enqueues it on the persist topic (priority or regular), keyed by its path.
private def sendToKafka(command: SingleCommand, isPriorityWrite: Boolean = false): Future[Unit] =
  sendToKafka(command.path, CommandSerializer.encode(command), isPriorityWrite)
// Publishes a serialized command to the persist topic (or its ".priority" sibling), keyed by path.
// Payloads larger than thresholdToUseZStore are first stored in zstore (md5 key, TTL 7 days) and
// only a small CommandRef pointing at them is sent through kafka.
private def sendToKafka(path: String, payload: Array[Byte], isPriorityWrite: Boolean): Future[Unit] = {
  val payloadForKafkaFut = if (payload.length > thresholdToUseZStore) {
    val key = cmwell.util.string.Hash.md5(payload)
    zStore
      .put(key, payload, secondsToLive = 7.days.toSeconds.toInt, false)
      .map(_ => CommandSerializer.encode(CommandRef(key)))
  } else Future.successful(payload)
  val topicName = if(isPriorityWrite) s"$persistTopicName.priority" else persistTopicName
  payloadForKafkaFut.flatMap { payloadForKafka =>
    val pRecord = new ProducerRecord[Array[Byte], Array[Byte]](topicName, path.getBytes("UTF-8"), payloadForKafka)
    injectFuture(kafkaProducer.send(pRecord, _)).map { recMD =>
      // priority writes are rare enough to log each one with its partition/offset
      if (isPriorityWrite) {
        logger.info(s"sendToKafka priority for path [$path] and record [${recMD.offset()},${recMD.partition()}]")
      }
    }
  }
}
//TODO: add with-deleted to aggregations
/**
  * Runs an ES aggregation query over the infotons matching the given filters.
  * NOTE(review): `withHistory` is accepted but never forwarded to ftsService.aggregate —
  * confirm whether history should participate or the parameter should be removed.
  */
def aggregate(pathFilter: Option[PathFilter] = None,
              fieldsFilters: Option[FieldFilter] = None,
              datesFilter: Option[DatesFilter] = None,
              paginationParams: PaginationParams = DefaultPaginationParams,
              withHistory: Boolean = false,
              aggregationFilters: Seq[AggregationFilter],
              debugInfo: Boolean = false): Future[AggregationsResponse] = {
  ftsService.aggregate(pathFilter,
    fieldsFilters,
    datesFilter,
    paginationParams,
    aggregationFilters,
    debugInfo = debugInfo)
}
/**
  * Like search, but returns only the "thin" per-hit data kept in ES (path, uuid, lastModified,
  * lastModifiedBy, indexTime, score) without reading full infotons from cassandra.
  */
def thinSearch(
  pathFilter: Option[PathFilter] = None,
  fieldFilters: Option[FieldFilter] = None,
  datesFilter: Option[DatesFilter] = None,
  paginationParams: PaginationParams = DefaultPaginationParams,
  withHistory: Boolean = false,
  fieldSortParams: SortParam = SortParam.empty,
  debugInfo: Boolean = false,
  withDeleted: Boolean = false
)(implicit searchTimeout: Option[Duration] = None): Future[SearchThinResults] = {
  val searchResultsFuture = {
    ftsService.thinSearch(pathFilter,
      fieldFilters,
      datesFilter,
      paginationParams,
      fieldSortParams,
      withHistory,
      withDeleted = withDeleted,
      debugInfo = debugInfo,
      timeout = searchTimeout)
  }
  searchResultsFuture.map { ftr =>
    SearchThinResults(
      ftr.total,
      ftr.offset,
      ftr.length,
      // map through a lazy view, materializing into a Vector only once
      ftr.thinInfotons.view.map { ti =>
        SearchThinResult(ti.path, ti.uuid, ti.lastModified, ti.lastModifiedBy ,ti.indexTime, ti.score)
      }.to(Vector),
      debugInfo = ftr.searchQueryStr
    )
  }
}
/**
  * Low-level search that hands the raw elasticsearch SearchResponse to the caller-supplied
  * `render` function and returns only the rendered value. Runs on the given ExecutionContext.
  */
def fullSearch[T](pathFilter: Option[PathFilter] = None,
                  fieldFilters: Option[FieldFilter] = None,
                  datesFilter: Option[DatesFilter] = None,
                  paginationParams: PaginationParams = DefaultPaginationParams,
                  withHistory: Boolean = false,
                  fieldSortParams: SortParam = SortParam.empty,
                  debugInfoFlag: Boolean = false,
                  withDeleted: Boolean = false,
                  searchTimeout: Option[Duration] = None,
                  storedFields: Seq[String],
                  fieldsFromSource: Array[String])(
  render: (org.elasticsearch.action.search.SearchResponse, Boolean) => T
)(implicit ec: ExecutionContext): Future[T] = {
  ftsService
    .fullSearch(
      pathFilter,
      fieldFilters,
      datesFilter,
      paginationParams,
      withHistory,
      fieldSortParams,
      withDeleted,
      debugInfo = debugInfoFlag,
      timeout = searchTimeout,
      storedFields = storedFields,
      fieldsFromSource = fieldsFromSource
    )(render)(ec)
    // the underlying call yields a tuple; only the rendered second element is exposed
    .map(_._2)(ec)
}
// object SearchCacheHelpers {
//
// private val nquadsFormatter = FormatterManager.getFormatter(RdfType(NquadsFlavor))
//
// case class SearchRequest(pathFilter: Option[PathFilter] = None,
// fieldFilters: Option[FieldFilter] = None,
// datesFilter: Option[DatesFilter] = None,
// paginationParams: PaginationParams = DefaultPaginationParams,
// withHistory: Boolean = false,
// withData: Boolean = false,
// fieldSortParams: SortParam = SortParam.empty,
// debugInfo: Boolean = false,
// withDeleted: Boolean = false) {
// def getDigest = cmwell.util.string.Hash.md5(this.toString)
// }
//
//
// def wSearch(searchRequest: SearchRequest): Future[SearchResults] = {
// search(searchRequest.pathFilter, searchRequest.fieldFilters, searchRequest.datesFilter,
// searchRequest.paginationParams, searchRequest.withHistory, searchRequest.withData, searchRequest.fieldSortParams,
// searchRequest.debugInfo, searchRequest.withDeleted)
// }
//
// def serializer(searchResults: SearchResults): Array[Byte] =
// nquadsFormatter.render(searchResults).getBytes("UTF-8")
//
// def deserializer(payload: Array[Byte]): SearchResults = {
// ???
// }
//
// def searchViaCache() = cmwell.zcache.l1l2[SearchRequest,SearchResults](wSearch(_))(_.getDigest, deserializer, serializer)()(zCache)
// }
/**
  * Full search: queries ES, and when `withData` is set also reads each matching infoton from
  * cassandra, logging any uuids present in ES but missing from cassandra.
  */
def search(pathFilter: Option[PathFilter] = None,
           fieldFilters: Option[FieldFilter] = None,
           datesFilter: Option[DatesFilter] = None,
           paginationParams: PaginationParams = DefaultPaginationParams,
           withHistory: Boolean = false,
           withData: Boolean = false,
           fieldSortParams: SortParam = SortParam.empty,
           debugInfo: Boolean = false,
           withDeleted: Boolean = false)(implicit searchTimeout: Option[Duration] = None): Future[SearchResults] = {
  val searchResultsFuture = {
    ftsService.search(pathFilter,
      fieldFilters,
      datesFilter,
      paginationParams,
      fieldSortParams,
      withHistory,
      debugInfo = debugInfo,
      timeout = searchTimeout,
      withDeleted = withDeleted)
  }
  // derives the [from, to] lastModified range of a result set ((None, None) when empty)
  def ftsResults2Dates(ftsResults: FTSSearchResponse): (Option[DateTime], Option[DateTime]) = {
    if (ftsResults.length > 0) {
      val to = ftsResults.infotons.maxBy(_.systemFields.lastModified.getMillis)
      val from = ftsResults.infotons.minBy(_.systemFields.lastModified.getMillis)
      Some(from.systemFields.lastModified) -> Some(to.systemFields.lastModified)
    } else (None, None)
  }
  val results = withData match {
    case true =>
      searchResultsFuture.flatMap { ftsResults =>
        val (fromDate, toDate) = ftsResults2Dates(ftsResults)
        // read every hit from cassandra, keeping the ES-side fields alongside each result
        val xs = cmwell.util.concurrent.travector(ftsResults.infotons) { i =>
          irwService.readUUIDAsync(i.uuid,level).map(_ -> i.fields)
        }
        xs
        /*
        irwService.readUUIDSAsync(ftsResults.infotons.map { i =>
          i.uuid
        }.toVector, level) */ .map { infotonsSeq =>
          // an empty box means the uuid exists in ES but not in cassandra — an inconsistency worth logging
          if(infotonsSeq.exists(_._1.isEmpty)) {
            val esUuidsSet: Set[String] =
              ftsResults.infotons.view.map(_.uuid).to(Set)
            val casUuidsSet: Set[String] = infotonsSeq.view.collect { case (FullBox(i), _) => i.uuid }.to(Set)
            logger.error(
              "some uuids retrieved from ES, could not be retrieved from cassandra: " + esUuidsSet
                .diff(casUuidsSet)
                .mkString("[", ",", "]")
            )
          }
          val infotons = {
            // the ES-side fields are merged back in (as "extras") only when a sort was requested
            if (fieldSortParams eq NullSortParam) infotonsSeq.collect { case (FullBox(i), _) => i } else
              infotonsSeq.collect { case (FullBox(i), e) => addExtras(i, e) }
          }
          SearchResults(fromDate,
            toDate,
            ftsResults.total,
            ftsResults.offset,
            ftsResults.length,
            infotons,
            ftsResults.searchQueryStr)
        }
      }
    case false =>
      searchResultsFuture.map { ftsResults =>
        val (fromDate, toDate) = ftsResults2Dates(ftsResults)
        SearchResults(fromDate,
          toDate,
          ftsResults.total,
          ftsResults.offset,
          ftsResults.length,
          ftsResults.infotons,
          ftsResults.searchQueryStr)
      }
  }
  results
}
//FIXME: extra should not contain same keys as fields (all keys should start with '$'), so another ugly hack...
// Merges the "extra" field values into the infoton's own fields, rebuilding the infoton while
// preserving its original uuid and kind via anonymous subclasses.
// NOTE(review): the 6th SystemFields argument is blanked ("") in the rebuilt copies — presumably
// the index name is intentionally dropped here; confirm against SystemFields' definition.
private def addExtras(infoton: Infoton, extra: Option[Map[String,Set[FieldValue]]]): Infoton = infoton match {
  case i: ObjectInfoton =>
    new ObjectInfoton(SystemFields(
      i.systemFields.path,
      i.systemFields.lastModified,
      i.systemFields.lastModifiedBy,
      i.systemFields.dc,
      i.systemFields.indexTime,
      "",
      i.systemFields.protocol),
      // union of existing fields and extras; whichever side is absent, the other wins
      i.fields.fold(extra)(f => extra.fold(i.fields)(e => Some(f ++ e)))) {
      override def uuid = i.uuid
      override def kind = i.kind
    }
  case i: FileInfoton =>
    new FileInfoton(SystemFields(
      i.systemFields.path,
      i.systemFields.lastModified,
      i.systemFields.lastModifiedBy,
      i.systemFields.dc,
      i.systemFields.indexTime,
      "",
      i.systemFields.protocol),
      i.fields.fold(extra)(f => extra.fold(i.fields)(e => Some(f ++ e))),
      i.content) {
      override def uuid = i.uuid
      override def kind = i.kind
    }
  case i: LinkInfoton =>
    new LinkInfoton(SystemFields(
      i.systemFields.path,
      i.systemFields.lastModified,
      i.systemFields.lastModifiedBy,
      i.systemFields.dc,
      i.systemFields.indexTime,
      "",
      i.systemFields.protocol),
      i.fields.fold(extra)(f => extra.fold(i.fields)(e => Some(f ++ e))),
      i.linkTo,
      i.linkType) {
      override def uuid = i.uuid
      override def kind = i.kind
    }
  // other kinds (e.g. deleted infotons) pass through unchanged
  case _ => infoton
}
// Copies indexTime from the ES versions onto cassandra versions that lack it (matched by uuid);
// infotons of any other kind, or already carrying an indexTime, pass through unchanged.
private def addIndexTime(fromCassandra: Seq[Infoton], fromES: Seq[Infoton]): Seq[Infoton] = {
  // uuid -> indexTime for every ES infoton that has one
  val m = fromES.collect { case i if i.systemFields.indexTime.isDefined => i.uuid -> i.systemFields.indexTime.get }.toMap
  fromCassandra.map {
    case i: ObjectInfoton if m.isDefinedAt(i.uuid) && i.systemFields.indexTime.isEmpty => i.copy(i.systemFields.copy(indexTime = m.get(i.uuid)))
    case i: FileInfoton if m.isDefinedAt(i.uuid) && i.systemFields.indexTime.isEmpty => i.copy(i.systemFields.copy(indexTime = m.get(i.uuid)))
    case i: LinkInfoton if m.isDefinedAt(i.uuid) && i.systemFields.indexTime.isEmpty => i.copy(i.systemFields.copy(indexTime = m.get(i.uuid)))
    case i: DeletedInfoton if m.isDefinedAt(i.uuid) && i.systemFields.indexTime.isEmpty => i.copy(i.systemFields.copy(indexTime = m.get(i.uuid)))
    case i => i
  }
}
/**
  * Lists the known data centers: the local one first, followed by every dc
  * registered as a child of /meta/sys/dc (up to 20).
  */
def getListOfDC: Future[Seq[String]] = {
  val prefix = "/meta/sys/dc/"
  ftsService.listChildren("/meta/sys/dc", 0, 20).map { searchResults =>
    val remoteDcNames = searchResults.infotons.map(infoton => infoton.systemFields.path.drop(prefix.length))
    Settings.dataCenter +: remoteDcNames
  }
}
/**
  * Builds a virtual /proc/dc/<dc> infoton reporting the last index time of the given data
  * center (0 when none was found), echoing back the query parameters that produced it.
  */
def getLastIndexTimeFor(dc: String = Settings.dataCenter,
                        withHistory: Boolean,
                        fieldFilters: Option[FieldFilter]): Future[Option[VirtualInfoton]] = {
  def mkVirtualInfoton(indexTime: Long): VirtualInfoton = {
    val fields = Map("lastIdxT" -> Set[FieldValue](FLong(indexTime)), "dc" -> Set[FieldValue](FString(dc)))
    // the field filter (if any) is echoed back, encoded, under "qp"
    val fieldsWithFilter =
      fieldFilters.fold(fields)(ff => fields + ("qp" -> Set[FieldValue](FString(Encoder.encodeFieldFilter(ff)))))
    val fieldsWithFilterAndWh = fieldsWithFilter + ("with-history" -> Set[FieldValue](FBoolean(withHistory)))
    VirtualInfoton(ObjectInfoton(SystemFields(s"/proc/dc/$dc", new DateTime(DateTimeZone.UTC), "VirtualInfoton", Settings.dataCenter,
      None, "", "http"), fieldsWithFilterAndWh))
  }
  ftsService
    .getLastIndexTimeFor(dc, withHistory = withHistory, fieldFilters = fieldFilters)
    .map(lOpt => Some(mkVirtualInfoton(lOpt.getOrElse(0L))))
}
/**
  * Builds the virtual /proc/fields infoton, listing the known ES field mappings under
  * "fields" and the known meta-ns predicates under "predicates".
  * The predicates lookup is started only after the mappings lookup completes (sequential).
  */
def getESFieldsVInfoton: Future[VirtualInfoton] =
  for {
    fields     <- ESMappingsCache.getAndUpdateIfNeeded.map(toFieldValues)
    predicates <- metaNsCache.getAndUpdateIfNeeded.map(toFieldValues)
  } yield
    VirtualInfoton(
      ObjectInfoton(
        SystemFields(s"/proc/fields", new DateTime(DateTimeZone.UTC), "VirtualInfoton", Settings.dataCenter, None, "", "http"),
        Map("fields" -> fields, "predicates" -> predicates)
      )
    )
// Lifts a set of raw strings into the FieldValue domain (each becomes an FString).
private def toFieldValues(ss: Set[String]): Set[FieldValue] = ss.map(FString.apply)
/**
  * Opens a new ES scroll ("iterator") and returns its first chunk.
  * When `withData` is set, the chunk's infotons are enriched with their full data from cassandra.
  */
def startScroll(pathFilter: Option[PathFilter] = None,
                fieldsFilters: Option[FieldFilter] = None,
                datesFilter: Option[DatesFilter] = None,
                paginationParams: PaginationParams = DefaultPaginationParams,
                scrollTTL: Long,
                withHistory: Boolean = false,
                withDeleted: Boolean = false,
                debugInfo: Boolean = false,
                withData: Boolean = false): Future[IterationResults] = {
  logger.info("Creating iterator in ES (first get-chunk request) with: "+
    s"pathFilter=$pathFilter fieldsFilters=$fieldsFilters datesFilter=$datesFilter paginationParams=$paginationParams" +
    s"scrollTTL=$scrollTTL withHistory=$withHistory withDeleted=$withDeleted debugInfo=$debugInfo withData=$withData" )
  val searchResultFuture = ftsService
    .startScroll(pathFilter,
      fieldsFilters,
      datesFilter,
      paginationParams,
      scrollTTL,
      withHistory,
      withDeleted,
      debugInfo = debugInfo)
  val results = withData match {
    case false =>
      searchResultFuture.map { ftsResults =>
        val response = ftsResults.response
        IterationResults(response.scrollId, response.total, Some(response.infotons), debugInfo = ftsResults.searchQueryStr)
      }
    case true =>
      searchResultFuture.flatMap { ftsResults =>
        // fetch the full infotons from cassandra before answering
        val infotons = enrichInfotonsData(ftsResults.response.infotons)
        val response = ftsResults.response
        infotons.map{infotonSeq => IterationResults(response.scrollId, response.total, Some(infotonSeq), debugInfo = ftsResults.searchQueryStr)}
      }
  }
  results
}
/**
  * Starts a "super scroll": delegates to ftsService which returns one thunk per underlying
  * scroll; invoking a thunk fetches that scroll's first chunk as IterationResults.
  */
def startSuperScroll(pathFilter: Option[PathFilter] = None,
                     fieldFilters: Option[FieldFilter] = None,
                     datesFilter: Option[DatesFilter] = None,
                     paginationParams: PaginationParams = DefaultPaginationParams,
                     scrollTTL: Long,
                     withHistory: Boolean = false,
                     withDeleted: Boolean = false): Seq[() => Future[IterationResults]] = {
  ftsService
    .startSuperScroll(pathFilter, fieldFilters, datesFilter, paginationParams, scrollTTL, withHistory, withDeleted)
    .map { fun =>
      // wrap each thunk so its raw FTS response is adapted lazily, on invocation
      () =>
        fun().map { ftsResults =>
          val response = ftsResults.response
          IterationResults(response.scrollId, response.total, Some(response.infotons), debugInfo = ftsResults.searchQueryStr)
        }
    }
}
// def startSuperMultiScroll(pathFilter: Option[PathFilter] = None,
// fieldFilters: Option[FieldFilter] = None,
// datesFilter: Option[DatesFilter] = None,
// paginationParams: PaginationParams = DefaultPaginationParams,
// scrollTTL: Long,
// withHistory: Boolean = false,
// withDeleted: Boolean = false): Seq[Future[IterationResults]] = {
// ftsService.startSuperMultiScroll(pathFilter, fieldFilters, datesFilter, paginationParams, scrollTTL, withHistory, withDeleted).map(_.map { ftsResults =>
// IterationResults(ftsResults.scrollId, ftsResults.total)
// })
// }
/**
  * Starts multiple ES scrolls at once (one per underlying partition of the data — see
  * ftsService.startMultiScroll), returning the first chunk of each as a Future.
  */
def startMultiScroll(pathFilter: Option[PathFilter] = None,
                     fieldFilters: Option[FieldFilter] = None,
                     datesFilter: Option[DatesFilter] = None,
                     paginationParams: PaginationParams = DefaultPaginationParams,
                     scrollTTL: Long,
                     withHistory: Boolean = false,
                     withDeleted: Boolean = false): Seq[Future[IterationResults]] = {
  ftsService
    .startMultiScroll(pathFilter, fieldFilters, datesFilter, paginationParams, scrollTTL, withHistory, withDeleted)
    .map(_.map { ftsResults =>
      val response = ftsResults.response
      IterationResults(response.scrollId, response.total, Some(response.infotons), debugInfo = ftsResults.searchQueryStr)
    })
}
/**
  * Fetches the next chunk of an open ES scroll.
  * When `withData` is set, the chunk's infotons are enriched with their full data from cassandra.
  */
def scroll(scrollId: String, scrollTTL: Long, withData: Boolean, debugInfo:Boolean): Future[IterationResults] = {
  logger.debug(s"Getting next chunk request received with: scrollId=$scrollId scrollTTL=$scrollTTL withData=$withData debugInfo=$debugInfo")
  val searchResultFuture = ftsService.scroll(scrollId, scrollTTL, debugInfo = debugInfo)
  // idiomatic if/else instead of the former non-idiomatic `match` on a Boolean
  if (withData) {
    searchResultFuture.flatMap { ftsResults =>
      // fetch the full infotons from cassandra before answering
      enrichInfotonsData(ftsResults.infotons).map { infotonSeq =>
        IterationResults(ftsResults.scrollId, ftsResults.total, Some(infotonSeq), debugInfo = ftsResults.searchQueryStr)
      }
    }
  } else {
    searchResultFuture.map { ftsResults =>
      IterationResults(ftsResults.scrollId, ftsResults.total, Some(ftsResults.infotons), debugInfo = ftsResults.searchQueryStr)
    }
  }
}
/**
  * Replaces ES-originated infotons with their full versions read from cassandra (one bulk read),
  * logging any uuids that exist in ES but are missing from cassandra, and copying the ES
  * indexTime onto cassandra versions that lack it.
  */
def enrichInfotonsData (infotons: Seq[Infoton]): Future[Seq[Infoton]] = {
  irwService
    .readUUIDSAsync(infotons.map {
      _.uuid
    }.toVector, level)
    .map { infotonsSeq =>
      // an empty box means the uuid exists in ES but not in cassandra — an inconsistency worth logging
      if(infotonsSeq.exists(_.isEmpty)) {
        val esUuidsSet: Set[String] =
          infotons.view.map(_.uuid).to(Set)
        val casUuidsSet: Set[String] = infotonsSeq.view.collect { case FullBox(i) => i.uuid }.to(Set)
        logger.error(
          "some uuids retrieved from ES, could not be retrieved from cassandra: " + esUuidsSet
            .diff(casUuidsSet)
            .mkString("[", ",", "]")
        )
      }
      addIndexTime(infotonsSeq.collect{case FullBox(i) => i}, infotons)
    }
}
// Delegates consistency verification of a path (considering up to `limit` versions) to proxyOps.
def verify(path: String, limit: Int): Future[Boolean] = proxyOps.verify(path, limit)
// Attempts to repair inconsistencies for a path (x-fix), bounded by the configured retry count.
def fix(path: String, limit: Int): Future[(Boolean, String)] = {
  logger.debug(s"x-fix invoked for path $path")
  proxyOps.fix(path, cmwell.ws.Settings.xFixNumRetries, limit)
}
// Reactive variant of fix: streams (success, message) results with the given parallelism.
def rFix(path: String, parallelism: Int = 1): Future[Source[(Boolean, String), NotUsed]] = {
  logger.debug(s"x-fix&reactive invoked for path $path")
  proxyOps.rFix(path, cmwell.ws.Settings.xFixNumRetries, parallelism)
}
// Gathers per-path storage diagnostics from cassandra, ES and zstore via proxyOps.
def info(path: String, limit: Int): Future[(CasInfo, EsExtendedInfo, ZStoreInfo)] = proxyOps.info(path, limit)
// Returns the raw cassandra row for a uuid, paired with its CSV content type.
def getRawCassandra(uuid: String): Future[(String, String)] =
  irwService.getRawRow(uuid).map(_ -> "text/csv;charset=UTF-8")
// Streams the raw cassandra row for a uuid, read at QUORUM consistency.
def reactiveRawCassandra(uuid: String): Source[String,NotUsed] = irwService.getReactiveRawRow(uuid,QUORUM)
// assuming not the only version of the infoton!
/**
  * Purges one historical version: removes it from cassandra first (QUORUM), and only
  * afterwards drops it from the ES index.
  */
def purgeUuid(infoton: Infoton): Future[Unit] =
  for {
    _ <- irwService.purgeHistorical(infoton, isOnlyVersion = false, QUORUM)
    _ <- ftsService.purge(infoton.uuid)
  } yield ()
// Purges a single uuid from one specific ES index only (cassandra is left untouched).
def purgeUuidFromIndex(uuid: String, index: String): Future[Unit] = {
  ftsService.purgeByUuidsAndIndexes(Vector(uuid -> index)).map(_ => ()) //TODO also purge from ftsServiceNew
}
/**
  * Purges versions of a path from both ES and cassandra.
  * Collects the union of uuids known to ES and to cassandra's history row; `includeLast`
  * decides whether the latest version is purged as well. The whole ES-then-cassandra
  * sequence is retried up to 3 times.
  */
def purgePath(path: String, includeLast: Boolean, limit: Int): Future[Unit] = {
  import scala.language.postfixOps
  val casHistory = irwService.history(path, limit)
  // the latest version is the one with the greatest lastModified (first tuple element)
  val lastOpt = if (casHistory.nonEmpty) Some(casHistory.maxBy(_._1)) else None
  // union uuids from es and cas (and keeping indexes, if known):
  val allPossibleUuidsFut = ftsService.info(path, DefaultPaginationParams, withHistory = true).map { esInfo =>
    val allUuids = casHistory.map(_._2).toSet ++ esInfo.map(_._1).toSet
    if (includeLast || lastOpt.isEmpty)
      esInfo -> allUuids
    else
      // keep the latest version: exclude its uuid from both collections
      esInfo.filterNot(_._1 == lastOpt.get._2) -> (allUuids - lastOpt.get._2)
  }
  cmwell.util.concurrent
    .retry(3, 1.seconds) {
      allPossibleUuidsFut.flatMap {
        case allUuids =>
          val (uuidsWithIndexes, justUuids) = allUuids
          // uuids whose index is unknown are purged from all indexes
          val purgeJustByUuids = {
            if (justUuids.nonEmpty)
              ftsService.purgeByUuidsFromAllIndexes(justUuids.toVector)
            else
              Future.successful(new BulkResponse(Array(), 0))
          }
          purgeJustByUuids.flatMap { bulkResponse =>
            if (bulkResponse.hasFailures) {
              // throwing makes the enclosing retry re-run the whole sequence
              throw new Exception(
                "purge from es by uuids from all Indexes failed: " + bulkResponse.buildFailureMessage()
              )
            } else {
              // uuids with a known index can be purged more cheaply, index by index
              if (uuidsWithIndexes.nonEmpty)
                ftsService.purgeByUuidsAndIndexes(uuidsWithIndexes.toVector)
              else
                Future.successful(new BulkResponse(Array(), 0))
            }.flatMap { bulkResponse =>
              if (bulkResponse.hasFailures) {
                throw new Exception(
                  "purge from es by uuids on specific Indexes failed: " + bulkResponse.buildFailureMessage()
                )
              } else {
                val purgeHistoryFut =
                  if (includeLast || lastOpt.isEmpty) {
                    // no need to delete from Paths one by one, will delete entire row when purgeHistorical below will be invoked with isOnlyVersion=true
                    Future.traverse(casHistory.map(_._2))(irwService.purgeFromInfotonsOnly(_))
                  } else {
                    // purge every historical version except the latest, one by one
                    Future.traverse(casHistory.filter(lastOpt.get !=)) { h =>
                      irwService.purgeHistorical(path,
                        h._2,
                        h._1,
                        isOnlyVersion = false,
                        level = ConsistencyLevel.QUORUM)
                    }
                  }
                val purgeHistoryForDanglingInfotonsFut = {
                  // uuids known to ES but absent from cassandra's history row
                  val danglingUuids = justUuids -- casHistory.map(_._2).toSet
                  Future.traverse(danglingUuids)(irwService.purgeFromInfotonsOnly(_))
                }
                if (includeLast && lastOpt.isDefined)
                  // purging the last version with isOnlyVersion=true also drops the whole Paths row
                  purgeHistoryFut
                    .flatMap(_ => purgeHistoryForDanglingInfotonsFut)
                    .flatMap(
                      _ =>
                        irwService.purgeHistorical(path,
                          lastOpt.get._2,
                          lastOpt.get._1,
                          isOnlyVersion = true,
                          level = ConsistencyLevel.QUORUM)
                    )
                else purgeHistoryFut.flatMap(_ => purgeHistoryForDanglingInfotonsFut).map(_ => ())
              }
            }
          }
      }
    }
    .map(_ => ())
}
/**
* Rollback an Infoton means purging last version of it, and, if there exists one or more history versions, make the
* one with largest lastModified the current version.
*/
/*
def rollback(path: String, limit: Int): Future[Unit] = {
case class Version(lastModified: Long, uuid: String)
irwService
.historyAsync(path, limit)
.map { casHistory =>
if (casHistory.isEmpty) Future.successful(())
else {
val sortedCasHistory = casHistory.sortBy(_._1).map { case (lm, uuid) => Version(lm, uuid) }
val last = sortedCasHistory.last
val prev = sortedCasHistory.init.lastOption
def purgeLast(isTherePrev: Boolean) = cmwell.util.concurrent.retry(3, 1.seconds) {
irwService
.purgeHistorical(path,
last.uuid,
last.lastModified,
isOnlyVersion = !isTherePrev,
ConsistencyLevel.QUORUM)
.flatMap { _ =>
ftsService.purgeByUuidsFromAllIndexes(Vector(last.uuid))
}
}
def setPrevAsLast(pv: Version) = cmwell.util.concurrent.retry(3, 1.seconds) {
irwService
.setPathLast(path, new java.util.Date(pv.lastModified), pv.uuid, ConsistencyLevel.QUORUM)
.flatMap { _ =>
irwService.readUUIDAsync(pv.uuid).flatMap { infpot =>
val prevInfoton = infpot.getOrElse(
throw new RuntimeException(s"Previous infoton for path $path was not found under uuid ${pv.uuid}")
)
ftsService.purgeByUuidsFromAllIndexes(Vector(pv.uuid), partition = "blahblah").flatMap { _ =>
ftsService.index(prevInfoton, None)
}
}
}
}
purgeLast(prev.isDefined).flatMap { _ =>
prev.map(setPrevAsLast).getOrElse(Future.successful(()))
}
}
}
.map(_ => ())
}
*/
/**
  * Simpler full purge of a path: removes all versions from ES (all indexes), then from the
  * infotons table, and finally drops the path row itself. The whole sequence is retried up
  * to 3 times, and the returned future completes only when the purge actually finished.
  *
  * Fix: the former `.map` value-discarded the inner `retry(...)` future (the method's declared
  * `Future[Unit]` return type made the compiler discard it to `Unit`), so the returned future
  * completed before any purging happened and purge failures were silently dropped. `flatMap`
  * chains the purge into the result as intended.
  */
def purgePath2(path: String, limit: Int): Future[Unit] = {
  import cmwell.util.concurrent.retry
  import scala.language.postfixOps
  irwService.historyAsync(path, limit).flatMap { casHistory =>
    val uuids = casHistory.map(_._2)
    retry(3, 1.seconds) {
      val purgeEsByUuids = ftsService.purgeByUuidsFromAllIndexes(uuids)
      purgeEsByUuids.flatMap { bulkResponse =>
        if (bulkResponse.hasFailures) {
          // throwing makes the enclosing retry re-run the whole sequence
          throw new Exception("purge from es by uuids from all Indexes failed: " + bulkResponse.buildFailureMessage())
        } else {
          val purgeFromInfoton = Future.traverse(uuids)(irwService.purgeFromInfotonsOnly(_))
          purgeFromInfoton.flatMap(_ => irwService.purgePathOnly(path))
        }
      }
    }
  }
}
//var persistTopicOffset = new AtomicLong()
/**
  * Converts Kafka Async call to Scala's Future
  */
private def injectFuture(f: Callback => java.util.concurrent.Future[RecordMetadata],
                         timeout: Duration = FiniteDuration(9, SECONDS)) = {
  // bridge kafka's callback-style API into a Promise: fail on callback exception, succeed otherwise
  val p = Promise[RecordMetadata]()
  f(new Callback {
    override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit = {
      if(exception != null) {
        p.failure(exception)
      } else {
        //persistTopicOffset.set(metadata.offset()) // This is ugly but temporary
        // val topic = metadata.topic
        // val partition = metadata.partition
        // BgStateReporter.report(topic, partition, metadata.offset())
        p.success(metadata)
      }
    }
  })
  // cap the wait: the returned future fails if kafka doesn't answer within `timeout`
  TimeoutFuture.withTimeout(p.future, timeout)
}
}
| dudi3001/CM-Well | server/cmwell-ws/app/logic/CRUDServiceFS.scala | Scala | apache-2.0 | 49,497 |
package models.daos
import com.mohiva.play.silhouette.api.LoginInfo
import models.User
import scala.concurrent.Future
/**
 * Give access to the user object.
 */
trait UserDAO {

  /**
   * Finds a user by its login info.
   *
   * @param loginInfo The login info of the user to find.
   * @return The found user or None if no user for the given login info could be found.
   */
  def find(loginInfo: LoginInfo): Future[Option[User]]

  /**
   * Saves a user.
   *
   * @param user The user to save.
   * @param userType The type of the user being saved — presumably a role/category
   *                 discriminator; confirm against implementations.
   * @return The saved user.
   */
  def save(user: User, userType: String): Future[User]

  /**
   * Checks if a user exists.
   *
   * NOTE(review): unlike `find` and `save`, this returns a plain Boolean (synchronous),
   * not a Future — confirm the inconsistency is intentional.
   *
   * @param loginInfo The login info of the user to check.
   * @return true if a user with the given login info exists, false otherwise.
   */
  def userExists(loginInfo: LoginInfo): Boolean
}
| BBK-SDP-2015-jtomli03/Morphidose2 | app/models/daos/UserDAO.scala | Scala | apache-2.0 | 836 |
//
// DedupNestedPar.scala -- Scala benchmark DedupNestedPar
// Project OrcTests
//
// Copyright (c) 2019 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//
package orc.test.item.scalabenchmarks.dedup
import java.io.{ DataOutputStream, FileInputStream, FileOutputStream }
import java.nio.file.{ Files, Paths }
import java.util.concurrent.{ ArrayBlockingQueue, ConcurrentHashMap }
import scala.annotation.tailrec
import orc.test.item.scalabenchmarks.BenchmarkApplication
import orc.test.item.scalabenchmarks.Util.thread
// FIXME: This is not scaling and it's not clear why. Work just seems to execute pretty sparsely.
// GC overhead is a bit of an issue (7% of the runtime), but it doesn't seem to be affecting scaling in a huge way.
object DedupNestedPar extends BenchmarkApplication[Unit, Unit] {
  import Dedup._

  // Lines: 33 (2)
  /**
    * Deduplicating compressor using nested parallel collections: rough segments of the input
    * are split into fine chunks in parallel, each fine chunk is compressed (memoized per content
    * hash in `dedupMap`), and a single consumer writes the chunks out in their original
    * (roughID, fineID) order.
    */
  def dedup(inFn: String, outFn: String): Unit = {
    // content-hash -> compressed chunk, shared so identical chunks are compressed once
    val dedupMap = new ConcurrentHashMap[ArrayKey, CompressedChunk]()
    // bounded hand-off queue gives back-pressure between parallel producers and the single writer
    val compressedChunks = new ArrayBlockingQueue[(CompressedChunk, Int, Int)](2 * 1024)
    val in = new FileInputStream(inFn)
    val loopThread = thread {
      for {
        (roughChunk, roughID) <- readSegments(largeChunkMin, in).par
        (fineChunk, fineID) <- segment(0, roughChunk).par
      } {
        compressedChunks.put((compress(fineChunk, dedupMap), roughID, fineID))
      }
    }
    val out = new DataOutputStream(new FileOutputStream(outFn))
    // hashes already written, so later duplicates can be emitted as references rather than data
    val alreadyOutput = new ConcurrentHashMap[ArrayKey, Boolean]()
    // chunks that arrived out of order, keyed by (roughID, fineID), parked until their turn
    val outputPool = collection.mutable.HashMap[(Int, Int), CompressedChunk]()

    // Writes chunks in strict (roughID, fineID) order, blocking on the queue when the next
    // expected chunk hasn't arrived yet. A chunk with uncompressedSize == 0 is a sentinel:
    // it ends the current rough chunk (fineID > 0) or the whole stream (fineID == 0).
    @tailrec
    def doOutput(roughID: Int, fineID: Int, id: Int): Unit = {
      outputPool.get((roughID, fineID)) match {
        case Some(cchunk) if cchunk.uncompressedSize == 0 && fineID == 0 => {
          // end-of-stream sentinel: stop recursing
          outputPool -= ((roughID, fineID))
        }
        case Some(cchunk) if cchunk.uncompressedSize == 0 => {
          // end-of-rough-chunk sentinel: advance to the next rough chunk
          outputPool -= ((roughID, fineID))
          doOutput(roughID + 1, 0, id)
        }
        case Some(cchunk) => {
          // first emission of this content gets stamped with its global output id
          if (cchunk.outputChunkID < 0)
            cchunk.outputChunkID = id
          writeChunk(out, cchunk, alreadyOutput.containsKey(cchunk.uncompressedSHA1))
          alreadyOutput.put(cchunk.uncompressedSHA1, true)
          //print(s"$id: ($roughID, $fineID) $roughChunk (${roughChunk.size}), $fineChunk (${fineChunk.size})\\r")
          outputPool -= ((roughID, fineID))
          doOutput(roughID, fineID + 1, id + 1)
        }
        case None => {
          // next expected chunk not pooled yet: take whatever finished next and park it
          val (cchunk, rID, fID) = compressedChunks.take()
          outputPool += (rID, fID) -> cchunk
          doOutput(roughID, fineID, id)
        }
      }
    }
    doOutput(0, 0, 0)
    loopThread.join()
    in.close()
    out.close()
  }

  // One benchmark iteration over the configured local input/output files.
  def benchmark(ctx: Unit): Unit = {
    dedup(DedupData.localInputFile, DedupData.localOutputFile)
  }

  def setup(): Unit = ()

  // Validates the produced output file.
  def check(u: Unit) = DedupData.check()

  val name: String = "Dedup-nestedpar"

  // benchmark "size" is the input file's length in bytes
  lazy val size: Int = Files.size(Paths.get(DedupData.localInputFile)).toInt
}
| orc-lang/orc | OrcTests/src/orc/test/item/scalabenchmarks/dedup/DedupNestedPar.scala | Scala | bsd-3-clause | 3,245 |
package com.gjos.scala.swoc
import com.gjos.scala.swoc.protocol._
import com.gjos.scala.swoc.util.{Stopwatch, JsonConverters}
import com.gjos.scala.swoc.protocol.MoveRequest
import com.gjos.scala.swoc.protocol.ProcessedMove
import scala.Some
/**
  * Drives one game: performs the initiate handshake over `ioManager`, then alternates
  * between sending the bot's moves and feeding processed moves back to the bot until a
  * winner is reported by the server.
  */
class Engine(private val bot: Bot, private val ioManager: IOManager) {
  // our color, learned from the initiate request; None until the handshake completed
  private var botColor: Option[Player] = None

  /** Main loop: handshake, asymmetric first round, then normal rounds until someone wins. */
  def run() {
    doInitiateRequest()
    var winner = doFirstRound()
    while (winner == None) {
      winner = doNormalRound()
    }
  }

  // Reads the initiate request, remembers which color we play, and lets the bot initialize.
  private def doInitiateRequest() {
    val player = JsonConverters.createInitiateRequest(ioManager.readLine())
    botColor = Some(player)
    bot.handleInitiate(player)
  }

  // First round is asymmetric: White answers a single-move turn and then processes up to three
  // moves; any other color just processes one. The orElse chains are lazily evaluated, so
  // processing stops as soon as a winner is reported.
  private def doFirstRound(): Option[Player] = botColor match {
    case Some(Player.White) =>
      handleMoveRequest(singleMoveTurn = true)
      handleProcessedMove() orElse handleProcessedMove() orElse handleProcessedMove()
    case _ => handleProcessedMove()
  }

  // A normal round: two own move+process cycles, then the opponent's two processed moves
  // (again short-circuiting on the first reported winner).
  private def doNormalRound(): Option[Player] = {
    def handleMoveAndProcess() = {
      handleMoveRequest()
      handleProcessedMove()
    }
    handleMoveAndProcess() orElse handleMoveAndProcess() orElse handleProcessedMove() orElse handleProcessedMove()
  }

  // Asks the bot for a move and writes the (JSON-encoded) answer back to the server.
  private def handleMoveRequest(singleMoveTurn: Boolean = false) {
    val moveRequest: MoveRequest = JsonConverters.createMoveRequest(ioManager.readLine())
    val move = bot.handleMove(moveRequest, singleMoveTurn)
    // a negative move encodes "no move chosen"; the reply text is then arbitrary (we lose)
    val out = if (move < 0) {
      System.err.println("Valar morghulis.")
      "If I die, what's the point?"
    } else {
      Stopwatch().tell("I chose move " + Move.toString(move))
      JsonConverters.toJson(move)
    }
    ioManager.writeLine(out)
  }

  // Reads one processed move, forwards it to the bot, and returns the winner if the game ended
  // (a winner value of 0 means the game is still running).
  private def handleProcessedMove(): Option[Player] = {
    val processedMove: ProcessedMove = JsonConverters.createProcessedMove(ioManager.readLine())
    bot.handleProcessedMove(processedMove)
    if (processedMove.winner == 0) None else Some(processedMove.winner)
  }
}
| Oduig/swoc2014 | Greedy/src/main/scala/com/gjos/scala/swoc/Engine.scala | Scala | apache-2.0 | 1,964 |
/**
* Created on February 14, 2011
* Copyright (c) 2011, Wei-ju Wu
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Wei-ju Wu nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY WEI-JU WU ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL WEI-JU WU BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.dmpp.adf.logical
import java.util.Date
import org.dmpp.adf.physical._
import org.dmpp.adf.util._
/**
* All known primary and secondary block types in AmigaDOS.
*/
object BlockType {
  // Primary ("Pt*") block type codes.
  val PtShort = 2
  val PtData = 8
  val PtList = 16
  val PtDirCache = 33
  // Secondary ("St*") block type codes.
  val StRoot = 1
  val StUserDir = 2
  val StSoftLink = 3
  val StLinkDir = 4
  val StFile = -3
  val StLinkFile = -4
}
/**
* Root of the block hierarchy.
*/
trait LogicalBlock {
  /** The physical volume this block belongs to. */
  def physicalVolume: PhysicalVolume
  /**
   * Returns this block's underlying sector.
   * @return the underlying sector
   */
  def sector: Sector
}
| weiju/adf-tools | adf-core/src/main/scala/org/dmpp/adf/logical/LogicalBlock.scala | Scala | bsd-3-clause | 2,181 |
package org.jetbrains.plugins.scala.editor.selectioner
import java.util
import com.intellij.codeInsight.editorActions.ExtendWordSelectionHandlerBase
import com.intellij.lang.ASTNode
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.util.TextRange
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScBlockStatement
/**
* Selects a statement together with its trailing semicolon.
*
* @author yole
*/
class ScalaSemicolonSelectioner extends ExtendWordSelectionHandlerBase {
  /** This selectioner only applies to block statements. */
  def canSelect(e: PsiElement): Boolean = e.isInstanceOf[ScBlockStatement]

  /**
   * If the statement is immediately followed by a semicolon token, offers a
   * single range covering the statement plus that semicolon; otherwise offers
   * no ranges.
   */
  override def select(e: PsiElement, editorText: CharSequence, cursorOffset: Int, editor: Editor): util.ArrayList[TextRange] = {
    val ranges = new util.ArrayList[TextRange]
    val next: ASTNode = e.getNode.getTreeNext
    val followedBySemicolon = next != null && next.getElementType == ScalaTokenTypes.tSEMICOLON
    if (followedBySemicolon) {
      ranges.add(new TextRange(e.getTextRange.getStartOffset, next.getTextRange.getEndOffset))
    }
    ranges
  }
}
} | loskutov/intellij-scala | src/org/jetbrains/plugins/scala/editor/selectioner/ScalaSemicolonSelectioner.scala | Scala | apache-2.0 | 1,126 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.stream.sql
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.api.scala._
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.{DataTypes, TableSchema, Types}
import org.apache.flink.table.planner.runtime.utils.BatchTestBase.row
import org.apache.flink.table.planner.runtime.utils.{StreamingTestBase, TestData, TestingAppendSink}
import org.apache.flink.table.planner.utils.{TestDataTypeTableSource, TestFilterableTableSource, TestInputFormatTableSource, TestNestedProjectableTableSource, TestPartitionableTableSource, TestProjectableTableSource, TestStreamTableSource, TestTableSources}
import org.apache.flink.types.Row
import org.junit.Assert._
import org.junit.Test
import java.lang.{Boolean => JBool, Integer => JInt, Long => JLong}
import scala.collection.mutable
class TableSourceITCase extends StreamingTestBase {
  /** Projecting away both time attributes: only plain fields survive into the result. */
  @Test
  def testProjectWithoutRowtimeProctime(): Unit = {
    val data = Seq(
      Row.of(new JInt(1), "Mary", new JLong(10L), new JLong(1)),
      Row.of(new JInt(2), "Bob", new JLong(20L), new JLong(2)),
      Row.of(new JInt(3), "Mike", new JLong(30L), new JLong(2)),
      Row.of(new JInt(4), "Liz", new JLong(40L), new JLong(2001)))
    val tableSchema = new TableSchema(
      Array("id", "rtime", "val", "ptime", "name"),
      Array(Types.INT, Types.LOCAL_DATE_TIME, Types.LONG, Types.LOCAL_DATE_TIME, Types.STRING))
    val returnType = new RowTypeInfo(
      Array(Types.INT, Types.STRING, Types.LONG, Types.LONG)
        .asInstanceOf[Array[TypeInformation[_]]],
      Array("id", "name", "val", "rtime"))
    tEnv.registerTableSource(
      "T",
      new TestProjectableTableSource(false, tableSchema, returnType, data, "rtime", "ptime"))
    val result = tEnv.sqlQuery("SELECT name, val, id FROM T").toAppendStream[Row]
    val sink = new TestingAppendSink
    result.addSink(sink)
    env.execute()
    val expected = Seq(
      "Mary,10,1",
      "Bob,20,2",
      "Mike,30,3",
      "Liz,40,4")
    assertEquals(expected.sorted, sink.getAppendResults.sorted)
  }
  /** Projection keeping the rowtime attribute but dropping proctime; rtime renders as ISO LocalDateTime. */
  @Test
  def testProjectWithoutProctime(): Unit = {
    val data = Seq(
      Row.of(new JInt(1), "Mary", new JLong(10L), new JLong(1)),
      Row.of(new JInt(2), "Bob", new JLong(20L), new JLong(2)),
      Row.of(new JInt(3), "Mike", new JLong(30L), new JLong(2)),
      Row.of(new JInt(4), "Liz", new JLong(40L), new JLong(2001)))
    val tableSchema = new TableSchema(
      Array("id", "rtime", "val", "ptime", "name"),
      Array(
        Types.INT, Types.LOCAL_DATE_TIME, Types.LONG, Types.LOCAL_DATE_TIME, Types.STRING))
    val returnType = new RowTypeInfo(
      Array(Types.INT, Types.STRING, Types.LONG, Types.LONG)
        .asInstanceOf[Array[TypeInformation[_]]],
      Array("id", "name", "val", "rtime"))
    tEnv.registerTableSource(
      "T",
      new TestProjectableTableSource(false, tableSchema, returnType, data, "rtime", "ptime"))
    val result = tEnv.sqlQuery("SELECT rtime, name, id FROM T").toAppendStream[Row]
    val sink = new TestingAppendSink
    result.addSink(sink)
    env.execute()
    val expected = Seq(
      "1970-01-01T00:00:00.001,Mary,1",
      "1970-01-01T00:00:00.002,Bob,2",
      "1970-01-01T00:00:00.002,Mike,3",
      "1970-01-01T00:00:02.001,Liz,4")
    assertEquals(expected.sorted, sink.getAppendResults.sorted)
  }
  /** Projection dropping the rowtime attribute; only name and id reach the sink. */
  @Test
  def testProjectWithoutRowtime(): Unit = {
    val data = Seq(
      Row.of(new JInt(1), "Mary", new JLong(10L), new JLong(1)),
      Row.of(new JInt(2), "Bob", new JLong(20L), new JLong(2)),
      Row.of(new JInt(3), "Mike", new JLong(30L), new JLong(2)),
      Row.of(new JInt(4), "Liz", new JLong(40L), new JLong(2001)))
    val tableSchema = new TableSchema(
      Array("id", "rtime", "val", "ptime", "name"),
      Array(Types.INT, Types.LOCAL_DATE_TIME, Types.LONG, Types.LOCAL_DATE_TIME, Types.STRING))
    val returnType = new RowTypeInfo(
      Array(Types.INT, Types.STRING, Types.LONG, Types.LONG)
        .asInstanceOf[Array[TypeInformation[_]]],
      Array("id", "name", "val", "rtime"))
    tEnv.registerTableSource(
      "T",
      new TestProjectableTableSource(false, tableSchema, returnType, data, "rtime", "ptime"))
    val sqlQuery = "SELECT name, id FROM T"
    val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row]
    val sink = new TestingAppendSink
    result.addSink(sink)
    env.execute()
    val expected = Seq(
      "Mary,1",
      "Bob,2",
      "Mike,3",
      "Liz,4")
    assertEquals(expected.sorted, sink.getAppendResults.sorted)
  }
def testProjectOnlyProctime(): Unit = {
val data = Seq(
Row.of(new JInt(1), new JLong(1), new JLong(10L), "Mary"),
Row.of(new JInt(2), new JLong(2L), new JLong(20L), "Bob"),
Row.of(new JInt(3), new JLong(2L), new JLong(30L), "Mike"),
Row.of(new JInt(4), new JLong(2001L), new JLong(30L), "Liz"))
val tableSchema = new TableSchema(
Array("id", "rtime", "val", "ptime", "name"),
Array(Types.INT, Types.LOCAL_DATE_TIME, Types.LONG, Types.LOCAL_DATE_TIME, Types.STRING))
val returnType = new RowTypeInfo(
Array(Types.INT, Types.LONG, Types.LONG, Types.STRING)
.asInstanceOf[Array[TypeInformation[_]]],
Array("id", "rtime", "val", "name"))
tEnv.registerTableSource(
"T",
new TestProjectableTableSource(false, tableSchema, returnType, data, "rtime", "ptime"))
val sqlQuery = "SELECT COUNT(1) FROM T WHERE ptime > 0"
val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row]
val sink = new TestingAppendSink
result.addSink(sink)
env.execute()
val expected = Seq("4")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
def testProjectOnlyRowtime(): Unit = {
val data = Seq(
Row.of(new JInt(1), new JLong(1), new JLong(10L), "Mary"),
Row.of(new JInt(2), new JLong(2L), new JLong(20L), "Bob"),
Row.of(new JInt(3), new JLong(2L), new JLong(30L), "Mike"),
Row.of(new JInt(4), new JLong(2001L), new JLong(30L), "Liz"))
val tableSchema = new TableSchema(
Array("id", "rtime", "val", "ptime", "name"),
Array(Types.INT, Types.LOCAL_DATE_TIME, Types.LONG, Types.LOCAL_DATE_TIME, Types.STRING))
val returnType = new RowTypeInfo(
Array(Types.INT, Types.LONG, Types.LONG, Types.STRING)
.asInstanceOf[Array[TypeInformation[_]]],
Array("id", "rtime", "val", "name"))
tEnv.registerTableSource(
"T",
new TestProjectableTableSource(false, tableSchema, returnType, data, "rtime", "ptime"))
val result = tEnv.sqlQuery("SELECT rtime FROM T").toAppendStream[Row]
val sink = new TestingAppendSink
result.addSink(sink)
env.execute()
val expected = Seq(
"1970-01-01 00:00:00.001",
"1970-01-01 00:00:00.002",
"1970-01-01 00:00:00.002",
"1970-01-01 00:00:02.001")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
  /** Projection through a field mapping: physical "p-*" fields are mapped back to logical names. */
  @Test
  def testProjectWithMapping(): Unit = {
    val data = Seq(
      Row.of(new JLong(1), new JInt(1), "Mary", new JLong(10)),
      Row.of(new JLong(2), new JInt(2), "Bob", new JLong(20)),
      Row.of(new JLong(2), new JInt(3), "Mike", new JLong(30)),
      Row.of(new JLong(2001), new JInt(4), "Liz", new JLong(40)))
    val tableSchema = new TableSchema(
      Array("id", "rtime", "val", "ptime", "name"),
      Array(Types.INT, Types.LOCAL_DATE_TIME, Types.LONG, Types.LOCAL_DATE_TIME, Types.STRING))
    val returnType = new RowTypeInfo(
      Array(Types.LONG, Types.INT, Types.STRING, Types.LONG)
        .asInstanceOf[Array[TypeInformation[_]]],
      Array("p-rtime", "p-id", "p-name", "p-val"))
    val mapping = Map("rtime" -> "p-rtime", "id" -> "p-id", "val" -> "p-val", "name" -> "p-name")
    tEnv.registerTableSource(
      "T",
      new TestProjectableTableSource(
        false, tableSchema, returnType, data, "rtime", "ptime", mapping))
    val result = tEnv.sqlQuery("SELECT name, rtime, val FROM T").toAppendStream[Row]
    val sink = new TestingAppendSink
    result.addSink(sink)
    env.execute()
    val expected = Seq(
      "Mary,1970-01-01T00:00:00.001,10",
      "Bob,1970-01-01T00:00:00.002,20",
      "Mike,1970-01-01T00:00:00.002,30",
      "Liz,1970-01-01T00:00:02.001,40")
    assertEquals(expected.sorted, sink.getAppendResults.sorted)
  }
  /** Nested-field projection: only the referenced leaves of deepNested/nested are pushed to the source. */
  @Test
  def testNestedProject(): Unit = {
    val data = Seq(
      Row.of(new JLong(1),
        Row.of(
          Row.of("Sarah", new JInt(100)),
          Row.of(new JInt(1000), new JBool(true))
        ),
        Row.of("Peter", new JInt(10000)),
        "Mary"),
      Row.of(new JLong(2),
        Row.of(
          Row.of("Rob", new JInt(200)),
          Row.of(new JInt(2000), new JBool(false))
        ),
        Row.of("Lucy", new JInt(20000)),
        "Bob"),
      Row.of(new JLong(3),
        Row.of(
          Row.of("Mike", new JInt(300)),
          Row.of(new JInt(3000), new JBool(true))
        ),
        Row.of("Betty", new JInt(30000)),
        "Liz"))
    val nested1 = new RowTypeInfo(
      Array(Types.STRING, Types.INT).asInstanceOf[Array[TypeInformation[_]]],
      Array("name", "value")
    )
    val nested2 = new RowTypeInfo(
      Array(Types.INT, Types.BOOLEAN).asInstanceOf[Array[TypeInformation[_]]],
      Array("num", "flag")
    )
    val deepNested = new RowTypeInfo(
      Array(nested1, nested2).asInstanceOf[Array[TypeInformation[_]]],
      Array("nested1", "nested2")
    )
    val tableSchema = new TableSchema(
      Array("id", "deepNested", "nested", "name"),
      Array(Types.LONG, deepNested, nested1, Types.STRING))
    val returnType = new RowTypeInfo(
      Array(Types.LONG, deepNested, nested1, Types.STRING).asInstanceOf[Array[TypeInformation[_]]],
      Array("id", "deepNested", "nested", "name"))
    tEnv.registerTableSource(
      "T",
      new TestNestedProjectableTableSource(false, tableSchema, returnType, data))
    val sqlQuery =
      """
        |SELECT id,
        |    deepNested.nested1.name AS nestedName,
        |    nested.`value` AS nestedValue,
        |    deepNested.nested2.flag AS nestedFlag,
        |    deepNested.nested2.num AS nestedNum
        |FROM T
      """.stripMargin
    val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row]
    val sink = new TestingAppendSink
    result.addSink(sink)
    env.execute()
    val expected = Seq(
      "1,Sarah,10000,true,1000",
      "2,Rob,20000,false,2000",
      "3,Mike,30000,true,3000")
    assertEquals(expected.sorted, sink.getAppendResults.sorted)
  }
@Test
def testTableSourceWithFilterable(): Unit = {
tEnv.registerTableSource("MyTable", TestFilterableTableSource(false))
val sqlQuery = "SELECT id, name FROM MyTable WHERE amount > 4 AND price < 9"
val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row]
val sink = new TestingAppendSink
result.addSink(sink)
env.execute()
val expected = Seq("5,Record_5", "6,Record_6", "7,Record_7", "8,Record_8")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
@Test
def testTableSourceWithPartitionable(): Unit = {
tEnv.registerTableSource("PartitionableTable", new TestPartitionableTableSource(true))
val sqlQuery = "SELECT * FROM PartitionableTable WHERE part2 > 1 and id > 2 AND part1 = 'A'"
val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row]
val sink = new TestingAppendSink
result.addSink(sink)
env.execute()
val expected = Seq("3,John,A,2", "4,nosharp,A,2")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
  /** Reads from the shared person CSV source and filters on id. */
  @Test
  def testCsvTableSource(): Unit = {
    val csvTable = TestTableSources.getPersonCsvTableSource
    tEnv.registerTableSource("persons", csvTable)
    val sink = new TestingAppendSink()
    tEnv.sqlQuery(
      "SELECT id, `first`, `last`, score FROM persons WHERE id < 4 ")
      .toAppendStream[Row]
      .addSink(sink)
    env.execute()
    val expected = mutable.MutableList(
      "1,Mike,Smith,12.3",
      "2,Bob,Taylor,45.6",
      "3,Sam,Miller,7.89")
    assertEquals(expected.sorted, sink.getAppendResults.sorted)
  }
  /** Lookup (temporal) join of the orders CSV against the rates CSV keyed on currency. */
  @Test
  def testLookupJoinCsvTemporalTable(): Unit = {
    val orders = TestTableSources.getOrdersCsvTableSource
    val rates = TestTableSources.getRatesCsvTableSource
    tEnv.registerTableSource("orders", orders)
    tEnv.registerTableSource("rates", rates)
    val sql =
      """
        |SELECT o.amount, o.currency, r.rate
        |FROM (SELECT *, PROCTIME() as proc FROM orders) AS o
        |JOIN rates FOR SYSTEM_TIME AS OF o.proc AS r
        |ON o.currency = r.currency
      """.stripMargin
    val sink = new TestingAppendSink()
    tEnv.sqlQuery(sql).toAppendStream[Row].addSink(sink)
    env.execute()
    val expected = Seq(
      "2,Euro,119",
      "1,US Dollar,102",
      "50,Yen,1",
      "3,Euro,119",
      "5,US Dollar,102"
    )
    assertEquals(expected.sorted, sink.getAppendResults.sorted)
  }
  /** Reads TestData.smallData3 through an InputFormat-backed table source. */
  @Test
  def testInputFormatSource(): Unit = {
    val tableSchema = TableSchema.builder().fields(
      Array("a", "b", "c"),
      Array(DataTypes.INT(), DataTypes.BIGINT(), DataTypes.STRING())).build()
    val tableSource = new TestInputFormatTableSource(
      tableSchema, tableSchema.toRowType, TestData.smallData3)
    tEnv.registerTableSource("MyInputFormatTable", tableSource)
    val sink = new TestingAppendSink()
    tEnv.sqlQuery("SELECT a, c FROM MyInputFormatTable").toAppendStream[Row].addSink(sink)
    env.execute()
    val expected = Seq(
      "1,Hi",
      "2,Hello",
      "3,Hello world"
    )
    assertEquals(expected.sorted, sink.getAppendResults.sorted)
  }
  /**
   * DECIMAL(5,2) / VARCHAR / CHAR columns keep their declared precision when
   * read through a DataType-aware table source.
   * (Table name "MyInputFormatTable" is reused from the previous test.)
   */
  @Test
  def testDecimalSource(): Unit = {
    val tableSchema = TableSchema.builder().fields(
      Array("a", "b", "c", "d"),
      Array(
        DataTypes.INT(),
        DataTypes.DECIMAL(5, 2),
        DataTypes.VARCHAR(5),
        DataTypes.CHAR(5))).build()
    val tableSource = new TestDataTypeTableSource(
      tableSchema,
      Seq(
        row(1, new java.math.BigDecimal(5.1), "1", "1"),
        row(2, new java.math.BigDecimal(6.1), "12", "12"),
        row(3, new java.math.BigDecimal(7.1), "123", "123")
      ))
    tEnv.registerTableSource("MyInputFormatTable", tableSource)
    val sink = new TestingAppendSink()
    tEnv.sqlQuery("SELECT a, b, c, d FROM MyInputFormatTable").toAppendStream[Row].addSink(sink)
    env.execute()
    val expected = Seq(
      "1,5.10,1,1",
      "2,6.10,12,12",
      "3,7.10,123,123"
    )
    assertEquals(expected.sorted, sink.getAppendResults.sorted)
  }
  /**
   * StreamTableSource must use type info in DataStream, so it will lose precision.
   * Just support default precision decimal.
   */
  @Test
  def testLegacyDecimalSourceUsingStreamTableSource(): Unit = {
    val tableSchema = new TableSchema(
      Array("a", "b", "c"),
      Array(
        Types.INT(),
        Types.DECIMAL(),
        Types.STRING()
      ))
    val tableSource = new TestStreamTableSource(
      tableSchema,
      Seq(
        row(1, new java.math.BigDecimal(5.1), "1"),
        row(2, new java.math.BigDecimal(6.1), "12"),
        row(3, new java.math.BigDecimal(7.1), "123")
      ))
    tEnv.registerTableSource("MyInputFormatTable", tableSource)
    val sink = new TestingAppendSink()
    tEnv.sqlQuery("SELECT a, b, c FROM MyInputFormatTable").toAppendStream[Row].addSink(sink)
    env.execute()
    // The expected values reflect the double->BigDecimal conversion noise of
    // `new java.math.BigDecimal(5.1)` at default precision.
    val expected = Seq(
      "1,5.099999999999999645,1",
      "2,6.099999999999999645,12",
      "3,7.099999999999999645,123"
    )
    assertEquals(expected.sorted, sink.getAppendResults.sorted)
  }
}
| fhueske/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/runtime/stream/sql/TableSourceITCase.scala | Scala | apache-2.0 | 16,437 |
package org.taylorbrown.randomforest
import scala.util.Random
/**
* Created by taylor on 2/23/14.
*/
class Forest( exs:Seq[Example], nTrees:Int = 500, treeLearner:TreeLearner = new RandomGiniLearner){
  val rand = new Random()
  val trees = fit(exs)

  /**
   * Bootstrap sample: draws `seq.length` examples uniformly at random, with
   * replacement.
   *
   * Fixes two off-by-one bugs in the original: `0 to seq.length` drew
   * `length + 1` examples, and `nextInt(seq.length - 1)` could never select
   * the last element and threw for single-element input.
   */
  def sample(seq: Seq[Example]) = for (_ <- 0 until seq.length) yield {
    seq(rand.nextInt(seq.length))
  }

  /** Fits exactly `nTrees` trees in parallel (was nTrees + 1), each on its own bootstrap sample. */
  def fit(exs: Seq[Example]) = {
    (0 until nTrees).par.map { _ =>
      treeLearner.fit(sample(exs))
    }
  }

  /** For each example: a map from predicted label to the number of trees voting for it. */
  def probs(exs: Seq[Example]) = {
    for (ex <- exs) yield {
      val labels = for (t <- trees) yield {
        treeLearner.classify(ex, t)
      }
      labels.groupBy(identity).map { case (k, v) => k -> v.length }
    }
  }

  /** Majority-vote prediction for each example. */
  def classify(exs: Seq[Example]) = probs(exs).map(lmap => lmap.maxBy(_._2)._1)
}
| taylor-brown/random-forest-scala | src/main/scala/org/taylorbrown/randomforest/Forest.scala | Scala | apache-2.0 | 782 |
/**Copyright 2012 University of Helsinki, Daria Antonova, Herkko Virolainen, Panu Klemola
*
*Licensed under the Apache License, Version 2.0 (the "License");
*you may not use this file except in compliance with the License.
*You may obtain a copy of the License at
*
*http://www.apache.org/licenses/LICENSE-2.0
*
*Unless required by applicable law or agreed to in writing, software
*distributed under the License is distributed on an "AS IS" BASIS,
*WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*See the License for the specific language governing permissions and
*limitations under the License.*/
package test.controllers.elements
import org.specs2.mutable._
import play.api.test._
import play.api.test.Helpers._
import anorm.{ NotAssigned, Id }
import java.util.Date
import play.api.libs.json.Json.toJson
import play.api.libs.json._
// Integration spec for the Activity controller: exercises the JSON endpoints
// against an in-memory database via Play's FakeApplication.
class ActivitySpec extends Specification {
  import models.{ Model, Process, ModelProcess, ProcessElement }
  import format.ProcessElementFormat._
  // Seeds the in-memory DB with one model linked to one process, so that
  // activities (ProcessElements) can be attached to process id 1.
  def createModel(): Unit = {
    Model(NotAssigned, "ModelName1", new Date()).create
    Process(NotAssigned, "ProcessName1", new Date()).create
    ModelProcess(NotAssigned, 1L, 1L).create
  }
  "The Activity Controller" should {
    "respond in format application/json at path json/activity" in {
      running(FakeApplication(additionalConfiguration = inMemoryDatabase())) {
        val Some(result) = routeAndCall(FakeRequest(GET, "/activity"))
        status(result) must equalTo(OK)
        contentType(result) must beSome("application/json")
        charset(result) must beSome("utf-8")
      }
    }
  }
  // NOTE(review): "activites" below is a typo in the example names; left as-is
  // since spec descriptions are runtime strings.
  "User should be able to create activities" >> {
    "create activites when no activities exist" in {
      running(FakeApplication(additionalConfiguration = inMemoryDatabase())) {
        createModel()
        routeAndCall(FakeRequest(POST, "/activity").withJsonBody(Json.toJson(ProcessElement(NotAssigned, 1L, 4, "New Activity", 0, 99, 99))))
        val Some(result) = routeAndCall(FakeRequest(GET, "/activity/1"))
        contentAsString(result) must be equalTo (Json.stringify(Json.toJson(
          ProcessElement(Id(1), 1L, 4, "New Activity", 0, 99, 99
        ))))
      }
    }
    "create activites when one other activity exists" in {
      running(FakeApplication(additionalConfiguration = inMemoryDatabase())) {
        createModel()
        ProcessElement(NotAssigned, 1L, 4, "Old Activity", 0, 199, 199).create
        routeAndCall(FakeRequest(POST, "/activity").withJsonBody(Json.toJson(ProcessElement(NotAssigned, 1L, 4, "New Activity", 0, 99, 99))))
        val Some(result) = routeAndCall(FakeRequest(GET, "/activity"))
        contentAsString(result) must be equalTo (Json.stringify(Json.toJson(List(
          ProcessElement(Id(1), 1L, 4, "Old Activity", 0, 199, 199),
          ProcessElement(Id(2), 1L, 4, "New Activity", 0, 99, 99)
        ))))
      }
    }
  }
  "User should be able to list activities" >> {
    "return an empty list when there is no elements of this type" in {
      running(FakeApplication(additionalConfiguration = inMemoryDatabase())) {
        Model(NotAssigned, "Model", new Date()).create()
        val Some(result) = routeAndCall(FakeRequest(GET, "/activity"))
        contentAsString(result) must be equalTo ("[]")
      }
    }
    "return a list of one element when there is one activity element" in {
      running(FakeApplication(additionalConfiguration = inMemoryDatabase())) {
        Model(NotAssigned, "ModelName1", new Date()).create
        Process(NotAssigned, "ProcessName1", new Date()).create
        ModelProcess(Id(1), 1L, 1L).create
        ProcessElement(NotAssigned, 1L, 4, "Activity", 0, 0, 0, 0).create()
        val Some(result) = routeAndCall(FakeRequest(GET, "/activity"))
        contentAsString(result) must be equalTo (Json.stringify(Json.toJson(List(
          ProcessElement(Id(1), 1L, 4, "Activity", 0, 0, 0, 0)))))
      }
    }
    "return a list of three elements when there is three activity elements in two different processes" in {
      running(FakeApplication(additionalConfiguration = inMemoryDatabase())) {
        Model(NotAssigned, "ModelName1", new Date()).create
        Process(NotAssigned, "ProcessName1", new Date()).create
        ModelProcess(Id(1), 1L, 1L).create
        ProcessElement(NotAssigned, 1L, 4, "Activity1", 0, 0, 0).create()
        ProcessElement(NotAssigned, 1L, 4, "Activity2", 0, 0, 0).create()
        ProcessElement(NotAssigned, 1L, 4, "Activity3", 0, 0, 0).create()
        val Some(result) = routeAndCall(FakeRequest(GET, "/activity"))
        contentAsString(result) must be equalTo (Json.stringify(Json.toJson(List(
          ProcessElement(Id(1), 1L, 4, "Activity1", 0, 0, 0),
          ProcessElement(Id(2), 1L, 4, "Activity2", 0, 0, 0),
          ProcessElement(Id(3), 1L, 4, "Activity3", 0, 0, 0)
        ))))
      }
    }
  }
  "User should be able to delete activities" >> {
    "delete the only activity of a model" in {
      running(FakeApplication(additionalConfiguration = inMemoryDatabase())) {
        createModel();
        ProcessElement(NotAssigned, 1L, 4, "Activity", 0, 99, 99).create
        val Some(before) = routeAndCall(FakeRequest(GET, "/activity/1"))
        contentAsString(before) must be equalTo (Json.stringify(Json.toJson(ProcessElement(Id(1), 1L, 4, "Activity", 0, 99, 99))))
        routeAndCall(FakeRequest(DELETE, "/activity/1"))
        // After deletion the controller serialises a missing element as "null".
        val Some(result) = routeAndCall(FakeRequest(GET, "/activity/1"))
        contentAsString(result) must be equalTo "null"
      }
    }
  }
}
package com.sksamuel.elastic4s.mappings
import org.elasticsearch.common.xcontent.{XContentBuilder, XContentFactory}
/**
 * Immutable definition of an Elasticsearch dynamic template. Optional matcher
 * fields are set through the fluent builder methods, which each return an
 * updated copy.
 */
case class DynamicTemplateDefinition(name: String,
                                     mapping: TypedFieldDefinition,
                                     _match: Option[String] = None,
                                     _unmatch: Option[String] = None,
                                     _path_match: Option[String] = None,
                                     _path_unmatch: Option[String] = None,
                                     _match_pattern: Option[String] = None,
                                     _match_mapping_type: Option[String] = None) {

  /** Serialises this template into a fresh JSON builder. */
  def build: XContentBuilder = build(XContentFactory.jsonBuilder())

  /** Serialises this template into the given builder and returns it. */
  def build(builder: XContentBuilder): XContentBuilder = {
    builder.startObject()
    builder.startObject(name)
    // Emit each optional matcher that is set, preserving the canonical field order.
    val optionalFields = Seq(
      "match" -> _match,
      "unmatch" -> _unmatch,
      "path_match" -> _path_match,
      "path_unmatch" -> _path_unmatch,
      "match_pattern" -> _match_pattern,
      "match_mapping_type" -> _match_mapping_type
    )
    for ((field, value) <- optionalFields; v <- value) builder.field(field, v)
    builder.startObject("mapping")
    mapping.build(builder, false)
    builder.endObject()
    builder.endObject()
    builder.endObject()
    builder
  }

  /** Alias for [[matching]], mirroring the Elasticsearch DSL keyword. */
  def `match`(m: String) = matching(m)
  def matching(m: String) = copy(_match = Option(m))
  def matchPattern(m: String) = copy(_match_pattern = Option(m))
  def unmatch(m: String) = copy(_unmatch = Option(m))
  def pathMatch(path: String) = copy(_path_match = Option(path))
  def pathUnmatch(path: String) = copy(_path_unmatch = Option(path))
  def matchMappingType(`type`: String) = copy(_match_mapping_type = Option(`type`))
}
| sjoerdmulder/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/mappings/DynamicTemplateDefinition.scala | Scala | apache-2.0 | 1,808 |
package com.airbnb.scheduler.mesos
import com.airbnb.scheduler.config.SchedulerConfiguration
import org.apache.mesos.{Scheduler, MesosSchedulerDriver}
import org.apache.mesos.Protos.FrameworkInfo
import org.apache.mesos.Protos.Status
import java.util.logging.Logger
/**
* The mesos driver doesn't allow calling the start() method after stop() has been called, thus we need a factory to
* create a new driver once we call stop() - which will be called if the leader abdicates or is no longer a leader.
* @author Florian Leibert (flo@leibert.de)
*/
class MesosDriverFactory(val mesosScheduler: Scheduler, val frameworkInfo: FrameworkInfo, val config: SchedulerConfiguration) {
  private[this] val log = Logger.getLogger(getClass.getName)

  // The currently active driver, if any. Reset to None when closed so that a
  // subsequent get() builds a fresh driver (the Mesos driver cannot be
  // restarted after stop()).
  var mesosDriver: Option[MesosSchedulerDriver] = None

  /** (Re)creates the underlying driver against the configured master. */
  def makeDriver() {
    mesosDriver = Some(new MesosSchedulerDriver(mesosScheduler, frameworkInfo, config.master()))
  }

  /** Returns the current driver, lazily creating one if none exists. */
  def get(): MesosSchedulerDriver = {
    if (mesosDriver.isEmpty) {
      makeDriver()
    }
    mesosDriver.get
  }

  /** Starts the driver; exits the process if Mesos reports it is not running. */
  def start() {
    val status = get().start()
    if (status != Status.DRIVER_RUNNING) {
      log.severe(s"MesosSchedulerDriver start resulted in status:$status. Committing suicide!")
      System.exit(1)
    }
  }

  /** Stops the current driver (with failover) and clears it so a new one can be built. */
  def close() {
    mesosDriver match {
      case Some(driver) =>
        driver.stop(true)
        mesosDriver = None
      case None =>
        // The original combined an `assert(nonEmpty)` with an exit path that was
        // unreachable whenever assertions were enabled; behave deterministically
        // regardless of JVM assertion settings.
        log.severe("Attempted to close a non-initialized driver. Committing suicide!")
        System.exit(1)
    }
  }
}
| doronin/chronos | src/main/scala/com/airbnb/scheduler/mesos/MesosDriverFactory.scala | Scala | apache-2.0 | 1,476 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package base
import com.intellij.psi.{PsiElement, PsiWhiteSpace}
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypeBoundsOwner
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.psi.types.api.{Any, Nothing}
import org.jetbrains.plugins.scala.lang.psi.types.result.{TypeResult, TypingContext}
trait ScTypeBoundsOwnerImpl extends ScTypeBoundsOwner {
  //todo[CYCLIC]
  /** Resolved lower-bound type; defaults to Nothing when no `>:` bound is present. */
  def lowerBound: TypeResult[ScType] = wrapWith(lowerTypeElement, Nothing) flatMap ( _.getType(TypingContext.empty) )

  /** Resolved upper-bound type; defaults to Any when no `<:` bound is present. */
  def upperBound: TypeResult[ScType] = wrapWith(upperTypeElement, Any) flatMap ( _.getType(TypingContext.empty) )

  override def viewBound: Seq[ScType] = viewTypeElement.flatMap(_.getType(TypingContext.empty).toOption)

  override def contextBound: Seq[ScType] = contextBoundTypeElement.flatMap(_.getType(TypingContext.empty).toOption)

  /** The type element of an explicit upper bound (`<: T`), if present. */
  override def upperTypeElement: Option[ScTypeElement] = {
    val tUpper = findLastChildByType[PsiElement](ScalaTokenTypes.tUPPER_BOUND)
    // Option(...) maps a null sibling to None, replacing the manual null match.
    if (tUpper == null) None
    else Option(ScalaPsiUtil.getNextSiblingOfType(tUpper, classOf[ScTypeElement]))
  }

  /** The type element of an explicit lower bound (`>: T`), if present. */
  override def lowerTypeElement: Option[ScTypeElement] = {
    val tLower = findLastChildByType[PsiElement](ScalaTokenTypes.tLOWER_BOUND)
    if (tLower == null) None
    else Option(ScalaPsiUtil.getNextSiblingOfType(tLower, classOf[ScTypeElement]))
  }

  /** All view-bound (`<%`) type elements, in source order. */
  override def viewTypeElement: Seq[ScTypeElement] = {
    for {
      v <- findChildrenByType(ScalaTokenTypes.tVIEW)
      t <- Option(ScalaPsiUtil.getNextSiblingOfType(v, classOf[ScTypeElement]))
    } yield t
  }

  /** All context-bound (`:`) type elements, in source order. */
  override def contextBoundTypeElement: Seq[ScTypeElement] = {
    for {
      v <- findChildrenByType(ScalaTokenTypes.tCOLON)
      t <- Option(ScalaPsiUtil.getNextSiblingOfType(v, classOf[ScTypeElement]))
    } yield t
  }

  /** Removes all implicit bounds (context and view) together with the preceding whitespace. */
  override def removeImplicitBounds() {
    var node = getNode.getFirstChildNode
    // Scan forward to the first `:` or `<%` token; everything from there on is an implicit bound.
    while (node != null && !Set(ScalaTokenTypes.tCOLON, ScalaTokenTypes.tVIEW)(node.getElementType)) {
      node = node.getTreeNext
    }
    if (node == null) return
    node.getPsi.getPrevSibling match {
      case ws: PsiWhiteSpace => ws.delete()
      case _ =>
    }
    node.getTreeParent.removeRange(node, null)
  }
}
/*
* Copyright (c) <2015-2016>, see CONTRIBUTORS
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package ch.usi.inf.l3.sana.brokenj.errors
import ch.usi.inf.l3.sana
import sana.tiny.errors.ErrorCode
import sana.primj
/**
 * Error codes emitted by the `brokenj` compiler phases, extending the base
 * set inherited from `primj`. Each code pairs a stable identifier with a
 * human-readable diagnostic message.
 */
trait ErrorCodes extends primj.errors.ErrorCodes {
  // A label with the same name is already defined in the enclosing scope.
  case object DOUBLE_LABEL_DEF extends ErrorCode {
    val message: String = "Label is already defined"
  }
  // A `break`/`continue` refers to a label that was never defined.
  case object NO_LABEL_DEF extends ErrorCode {
    val message: String = "Label not found"
  }
  // `continue` used outside a loop body.
  case object BAD_CONTINUE_STMT extends ErrorCode {
    val message: String = "Continue can only appear in iterative statements"
  }
  // `break` used outside a loop or switch.
  case object BAD_BREAK_STMT extends ErrorCode {
    val message: String = "Break can only appear in breakable statements"
  }
  // Two case guards in the same switch evaluate to the same value.
  case object NOT_DISTINCT_GUARD extends ErrorCode {
    val message: String = "Case guard is not distinct"
  }
  // A case guard must be a compile-time constant expression.
  case object CASE_GUARD_NOT_CONSTANT_EXPRESSION extends ErrorCode {
    val message: String = "Case guard is not constant expression"
  }
}
/** Default module instance; import this to access the codes without mixing in the trait. */
object ErrorCodes extends ErrorCodes
| amanjpro/languages-a-la-carte | brokenj/src/main/scala/errors/errorcodes.scala | Scala | bsd-3-clause | 2,505 |
/**
* Copyright (c) 2013-2015 Patrick Nicolas - Scala for Machine Learning - All rights reserved
*
* The source code in this file is provided by the author for the sole purpose of illustrating the
* concepts and algorithms presented in "Scala for Machine Learning". It should not be used to
* build commercial applications.
* ISBN: 978-1-783355-874-2 Packt Publishing.
* Unless required by applicable law or agreed to in writing, software is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* Version 0.98
*/
package org.scalaml.supervised.nnet
import scala.util.Random
import org.scalaml.core.Types.ScalaMl.DblVector
import org.scalaml.core.Design.Model
import org.scalaml.util.FormatUtils
import scala.collection.mutable.ListBuffer
import MLP._
/**
* <p>:Class that defines a MLP layer. A MLP layer is built using the
* input vector and add an extra element (or neuron) to account for the intercept
* weight w0. The MLP layer is fully defined by its rank in the Neuron Network with
* input layer having id = 0 and the output layer having id = number of layers -1.</p>
* @constructor Create a layer for a multi-layer perceptron.
* @throws IllegalArgumentException if the class parameters are incorrect
* @param id Identifier or rank of the MLP layer in the network.
* @param len Number of elements or neuron in the MLP layer.
*
* @author Patrick Nicolas
* @since May 6, 2014
* @note Scala for Machine Learning Chapter 9 Artificial Neural Network / Multilayer perceptron
* / Model definition
*/
final protected class MLPLayer(val id: Int, val len: Int) {
  import MLPLayer._
  // Validate constructor arguments eagerly (id >= 0, len > 0).
  check(id, len)
  /**
   * Values of the output vector (Output layer). It is used in
   * forward propagation. Element 0 is reserved for the bias neuron.
   */
  val output = new DblVector(len)
  /**
   * Difference for the propagated error on the source or upstream
   * layer
   */
  val delta = new DblVector(len) // used for back propagation
  // Bias neuron: the first element of the output is pinned to 1.0.
  output.update(0, 1.0)
  /**
   * <p>Initialize the value of the input for this MLP layer.</p>
   * Copies the input values into `output` starting at index 1,
   * leaving the bias element at index 0 untouched.
   * @param _x input vector for this layer.
   * @throws IllegalArgumentException if the input vector is undefined
   */
  def set(_x: DblVector): Unit = {
    require( !_x.isEmpty,
      s"MLPLayer.set Cannot initialize this MLP layer $id with undefined data")
    // NOTE(review): assumes _x.size <= len - 1; copyToArray silently truncates
    // otherwise — TODO confirm callers always pass a correctly sized vector.
    _x.copyToArray(output, 1)
  }
  /**
   * <p>Compute the sum of squared error of the neurons/elements of this MLP layer.
   * The SSE value is divided by 2 in the normalized C-formulation.</p>
   * Side effect: also updates `delta` with the per-neuron error gradient
   * term out*(1-out)*err (sigmoid derivative) used by back propagation.
   * @param labels target output value
   * @return sum of squared of errors/2
   * @throws IllegalArgumentException if the size of the output vector is not equals to the
   * size of the input vector + 1
   */
  final def sse(labels: DblVector): Double = {
    require( !labels.isEmpty,
      "MLPLayer.sse Cannot compute the sum of squared errors with undefined labels")
    require(output.size == labels.size+1,
      s"MLPLayer.sse The size of the output ${output.size} != to size of target ${labels.size+1}")
    // Create a indexed vector of the output minus the first
    // element (bias element +1). Then compute the sum of squared
    // errors
    var _sse = 0.0
    output.drop(1).zipWithIndex.foreach(on => {
      val err = labels(on._2) - on._1
      // delta index is shifted by 1 to skip the bias element.
      delta.update(on._2+1, on._1* (1.0- on._1)*err)
      _sse += err*err
    })
    _sse*0.5 // Note that the normalized version of sse is divided by 2
  }
  /**
   * <p>Test if this neural network layer is the output layer (last layer in the network).</p>
   * @param lastId id of the output layer in this neural network
   * @return true if this layer is the output layer, false, otherwise
   */
  @inline
  final def isOutput(lastId: Int): Boolean = id == lastId
  /**
   * Textual and formatted description of a layer in the Multi-layer perceptron.
   * NOTE(review): the final substring drops the last appended character —
   * presumably a trailing separator emitted by FormatUtils.format; verify.
   */
  override def toString: String = {
    val buf = new StringBuilder
    buf.append(s"\\nLayer: $id output: ")
    output.foreach(x => buf.append(s"${FormatUtils.format(x,"", FormatUtils.ShortFormat)}"))
    buf.toString.substring(0, buf.length-1)
  }
}
/**
* Companion object for the MLP layer used to define a default constructor
* and validate its input parameters
* @author Patrick Nicolas
* @note Scala for Machine Learning Chapter 9 Artificial Neural Network / Multilayer perceptron
* / Model definition
*/
object MLPLayer {
  /**
   * Default constructor for MLPLayer
   * @param id Identifier or rank of the MLP layer in the network.
   * @param len Number of elements or neuron in the MLP layer.
   * @return a validated MLPLayer instance
   * @throws IllegalArgumentException if id is negative or len is not positive
   */
  def apply(id: Int, len: Int): MLPLayer = new MLPLayer(id, len)
  // Precondition check invoked from the MLPLayer constructor.
  private def check(id: Int, len: Int): Unit = {
    require(id >= 0, s"MLPLayer Create a MLP layer with incorrect id: $id")
    require(len > 0, s"MLPLayer Create a MLP layer with incorrect length $len")
  }
}
// ------------------------------------- EOF ------------------------------------------------ | batermj/algorithm-challenger | books/cs/machine-learning/scala-for-machine-learning/1rst-edition/original-src-from-the-book/src/main/scala/org/scalaml/supervised/nnet/MLPLayer.scala | Scala | apache-2.0 | 4,952 |
package cmwell.analytics.data
import org.apache.spark.sql._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.DataTypes
import scala.collection.immutable.ListMap
// TODO: This needs to be reworked to use the separate ES downloader
/**
 * Joins the infoton table (system fields only) with the index data on uuid,
 * and defines column-level consistency constraints between the two sides.
 * Joined columns are disambiguated with "infoton_" / "index_" prefixes.
 */
object InfotonAndIndexWithSystemFields extends EstimateDatasetSize {

  // We expect the join to be 1:1, so simply adding the two estimates makes sense.
  override def estimateDatasetSize(implicit spark: SparkSession): Long =
    InfotonWithSystemFields.estimateDatasetSize + IndexWithSystemFields.estimateDatasetSize

  // We have the same uuid from both tables. Should be a no-op since that is the join key.
  def isUuidConsistent(dataset: DataFrame): Column =
    dataset("infoton_uuid") === dataset("index_uuid")

  // Both indexTime fields are Longs, but the representation from infoton is as a string
  def isIndexTimeConsistent(dataset: DataFrame): Column =
    dataset("infoton_indexTime").cast(DataTypes.LongType) === dataset("index_indexTime")

  // The Infoton type uses a different coding scheme between tables, so we need to convert before comparing.
  // The infoton side stores a single lowercase letter; the index side stores a full kind name.
  def isTypeConsistent(dataset: DataFrame): Column =
    dataset("infoton_type") === lower(substring(dataset("index_kind"), 0, 1))

  // In the infoton table, lastModified is an ISO 8601 timestamp with millisecond precision.
  // In the index, lastModified is in the form: "Wed Jan 17 16:53:04 EST 2018" (and millisecond precision is lost).
  def isLastModifiedConsistent(dataset: DataFrame): Column =
    Constraints.areTimestampsConsistent(
      iso8601Column = dataset("infoton_lastModified"),
      millisColumn = dataset("index_lastModified"))

  // Paths must agree between the two tables.
  def isPathConsistent(dataset: DataFrame): Column =
    dataset("infoton_path") === dataset("index_path")

  // Data-center identifiers must agree between the two tables.
  def isDcConsistent(dataset: DataFrame): Column =
    dataset("infoton_dc") === dataset("index_dc")

  // A joined row is well-formed if both of its sides are individually well-formed.
  def isWellFormed(dataset: DataFrame): Column =
    InfotonWithSystemFields.isWellFormed(dataset, "infoton_") &&
      IndexWithSystemFields.isWellFormed(dataset, "index_")

  // This only makes sense to be applied with well-formedness constraints, or on data that is known to be well-formed.
  def isConsistent(dataset: DataFrame): Column =
    InfotonWithSystemFields.isConsistent(dataset, "infoton_") && IndexWithSystemFields.isConsistent(dataset, "index_") &&
      isUuidConsistent(dataset) &&
      isIndexTimeConsistent(dataset) &&
      isTypeConsistent(dataset) &&
      isLastModifiedConsistent(dataset) &&
      isPathConsistent(dataset) &&
      isDcConsistent(dataset)

  /** Ordered map of named constraint columns: joined-level first, then per-side constraints. */
  def constraints(dataset: DataFrame): ListMap[String, Column] = {
    val joinedConstraints = ListMap(
      "isWellFormed" -> isWellFormed(dataset),
      "isConsistent" -> isConsistent(dataset),
      "isUuidConsistent" -> isUuidConsistent(dataset),
      "isIndexTimeConsistent" -> isIndexTimeConsistent(dataset),
      "isTypeConsistent" -> isTypeConsistent(dataset),
      "isLastModifiedConsistent" -> isLastModifiedConsistent(dataset),
      "isPathConsistent" -> isPathConsistent(dataset),
      "isDcConsistent" -> isDcConsistent(dataset)
    )
    val infotonConstraints = InfotonWithSystemFields.constraints(dataset, "infoton_")
    val indexConstraints = IndexWithSystemFields.constraints(dataset, "index_")
    joinedConstraints ++ infotonConstraints ++ indexConstraints
  }

  /**
   * Create a dataset that joins the infoton table (system fields only) with the path table.
   * @param esExtractPath optional path to a parquet extract of the index data; when absent,
   *                      falls back to reading via the ES Spark connector.
   */
  def apply(esExtractPath: Option[String] = None)(implicit spark: SparkSession): DataFrame = {

    // Assume: We are reading the entire dataset here (need better estimation for subsets).
    // To ensure that we maintain stability (i.e, no OOM) and to join efficiently, calculate a partition size
    // where the size of data in each partition is approximately the ideal partition size.
    val numPartitions = Spark.idealPartitioning(estimateDatasetSize)

    // Get the two Datasets and repartition them along the uuid join key so that the target dataset
    // will be (approximately) the ideal partition size for Spark.
    // This join was originally done using joinWith, but that seems to cause spurious (wrong) shuffles to be added.
    // We rename the columns with a prefix for each table to preserve the identity of the columns.
    def prefixColumns[T](dataset: Dataset[T], prefix: String): DataFrame =
      dataset.select(dataset.columns.map(columnName => dataset(columnName).as(prefix + columnName)): _*)

    val infotonDataset = prefixColumns(InfotonWithSystemFields(), "infoton_").as("infoton")
    val repartitionedInfotonDataset = infotonDataset.repartition(numPartitions, infotonDataset("infoton_uuid"))

    // If a dataset was not provided (e.g., saved to parquet using the extract-index-from-es tool),
    // then use the (unreliable) ES Spark connector to get the data.
    val indexWithSystemFieldsRaw: Dataset[Row] =
      if (esExtractPath.isEmpty)
        IndexWithSystemFields().toDF()
      else
        spark.read.parquet(esExtractPath.get)

    val indexDataset = prefixColumns(indexWithSystemFieldsRaw, "index_").as("index")
    val repartitionedIndexDataset = indexDataset.repartition(numPartitions, indexDataset("index_uuid"))

    // We do a full join so that we can find pairs where one or the other size is missing.
    repartitionedInfotonDataset.join(repartitionedIndexDataset,
      joinExprs = repartitionedInfotonDataset("infoton_uuid") === repartitionedIndexDataset("index_uuid"),
      joinType = "full_outer")
  }
}
| bryaakov/CM-Well | tools/dataConsistencyTool/cmwell-spark-analysis/src/main/scala/cmwell/analytics/data/InfotonAndIndexWithSystemFields.scala | Scala | apache-2.0 | 5,539 |
package tyckiting
/**
 * Contract for a game AI: identifies the team and decides the actions
 * for each bot on every round.
 */
trait Ai {
  /** Name of the team this AI plays for. */
  def teamName: String

  /**
   * Decide this round's actions.
   * @param roundId current round number
   * @param events  events that occurred since the previous round
   * @param bots    this team's bots
   * @param config  static game configuration
   * @return one action per bot for this round
   */
  def makeDecisions(
    roundId: Int,
    events: List[Event],
    bots: List[Bot],
    config: GameConfig): List[Action]

  /** Convenience builders so a bot can issue actions as `bot.move(x, y)` etc. */
  implicit class BotMethods(bot: Bot) {
    def move(x: Int, y: Int) = MoveAction(bot.botId, Position(x, y))
    def radar(x: Int, y: Int) = RadarAction(bot.botId, Position(x, y))
    def cannon(x: Int, y: Int) = CannonAction(bot.botId, Position(x, y))
  }
}
| vvmann/tyckiting-bot | clients/scala/src/main/scala/tyckiting/Ai.scala | Scala | mit | 438 |
package com.github.jefersonm.sandbox.scala.learnscala
/**
 * Contract for objects that can be compared for "similarity".
 * `isNotSimilar` is provided as the negation of `isSimilar`.
 */
trait Similarity {
  /** Returns true when `x` is considered similar to this object. */
  def isSimilar(x: Any): Boolean
  /** Negation of [[isSimilar]]. */
  def isNotSimilar(x: Any): Boolean = !isSimilar(x)
}

/**
 * A mutable 2D point. Two points are "similar" when their x coordinates
 * are equal (the y coordinate is deliberately ignored, as in the original).
 *
 * @param xc initial x coordinate
 * @param yc initial y coordinate
 */
class Point(xc: Int, yc: Int) extends Similarity {
  var x: Int = xc
  var y: Int = yc
  // Idiomatic typed pattern match replaces isInstanceOf/asInstanceOf:
  // non-Point arguments fall through to false.
  def isSimilar(obj: Any) =
    obj match {
      case p: Point => p.x == x
      case _ => false
    }
}
//Extend Application then it doesn't need to implement a main method
object TraitTest extends App {
val p1 = new Point(2,3)
val p2 = new Point(2,4)
val p3 = new Point(3,4)
println(p1.isSimilar(p2))
println(p1.isSimilar(p3))
println(p1.isNotSimilar(2))
} | jefersonm/sandbox | languages/scala/LearnScala/src/com/github/jefersonm/sandbox/scala/learnscala/TraitExample2.scala | Scala | mit | 627 |
package vggames.shared.task.status;
import vggames.shared.task.JudgedTask
/** Judgement result representing a successful task: always ok, with a fixed reason. */
case class Ok() extends JudgedTask {
  def ok = true
  def reason = "Ok!"
}
| vidageek/games | games/game/src/main/scala/vggames/shared/task/status/Ok.scala | Scala | gpl-3.0 | 155 |
package org.jetbrains.plugins.scala.findUsages.function
import com.intellij.psi._
import com.intellij.psi.search.searches.ReferencesSearch
import com.intellij.psi.search.{PsiSearchHelper, TextOccurenceProcessor, UsageSearchContext}
import com.intellij.util.{Processor, QueryExecutor}
import org.jetbrains.plugins.scala.extensions.inReadAction
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
import org.jetbrains.plugins.scala.lang.psi.light.{ScFunctionWrapper, StaticPsiMethodWrapper}
import scala.collection.mutable
/**
* @author Alefas
* @since 28.02.12
*/
/**
 * Finds Java-side usages of Scala functions (and non-static Java methods)
 * by scanning text occurrences of the method name and resolving each
 * candidate reference against the searched method's wrapper forms.
 */
class JavaFunctionUsagesSearcher extends QueryExecutor[PsiReference, ReferencesSearch.SearchParameters] {
  def execute(queryParameters: ReferencesSearch.SearchParameters, consumer: Processor[_ >: PsiReference]): Boolean = {
    // Scope and target must be read inside a read action (PSI access rule).
    val scope = inReadAction(queryParameters.getEffectiveSearchScope)
    val element = queryParameters.getElementToSearch
    element match {
      case scalaOrNonStatic(method, name) =>
        // Guards against reporting the same reference twice across occurrences.
        val collectedReferences: mutable.HashSet[PsiReference] = new mutable.HashSet[PsiReference]
        val processor = new TextOccurenceProcessor {
          // Returning false aborts the whole word search early.
          def execute(element: PsiElement, offsetInElement: Int): Boolean = {
            val references = inReadAction(element.getReferences)
            for (ref <- references if ref.getRangeInElement.contains(offsetInElement) && !collectedReferences.contains(ref)) {
              inReadAction {
                ref match {
                  case refElement: PsiReferenceExpression =>
                    // A match counts if the reference resolves to a wrapper of the
                    // searched method; stop as soon as the consumer rejects one.
                    // NOTE(review): `return false` here is a nonlocal return out of
                    // the inReadAction by-name block — intentional early abort.
                    refElement.resolve match {
                      case ScFunctionWrapper(delegate) if delegate == method && !consumer.process(refElement) => return false
                      case t: StaticPsiMethodWrapper if t.getNavigationElement == method && !consumer.process(refElement) => return false
                      case _ =>
                    }
                  case _ =>
                }
              }
            }
            true
          }
        }
        val helper: PsiSearchHelper = PsiSearchHelper.getInstance(queryParameters.getProject)
        // Anonymous/empty names cannot be searched by word occurrence.
        if (name == "") return true
        helper.processElementsWithWord(processor, scope, name, UsageSearchContext.IN_CODE, true)
      case _ =>
    }
    true
  }

  /** Extractor matching a valid Scala function, or any non-static Java method, with its name. */
  private object scalaOrNonStatic {
    def unapply(method: PsiMethod): Option[(PsiMethod, String)] = {
      inReadAction {
        if (!method.isValid) return None
        method match {
          case f: ScFunction => Some((f, f.getName))
          case m: PsiMethod if !m.hasModifierProperty(PsiModifier.STATIC) => Some((m, m.getName))
          case _ => None
        }
      }
    }
  }
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/findUsages/function/JavaFunctionUsagesSearcher.scala | Scala | apache-2.0 | 2,679 |
package org.ensime.protocol
import java.io._
import scala.actors._
import org.ensime.model._
import org.ensime.util._
import org.ensime.config.{ProjectConfig, DebugConfig, ReplConfig}
import org.ensime.debug.{DebugUnit, DebugSourceLinePairs}
import org.ensime.server._
/** Actor message wrapping a payload received from the client socket. */
case class IncomingMessageEvent(obj:Any)
/** Actor message wrapping a payload to be written to the client socket. */
case class OutgoingMessageEvent(obj:Any)
trait Protocol extends ProtocolConversions{

  /**
   * Read a message from the socket.
   *
   * @param reader The reader from which to read the message.
   * @return The message, in the intermediate format.
   */
  def readMessage(reader:Reader):WireFormat

  /**
   * Write a message to the socket.
   *
   * @param value The message to write.
   * @param writer The writer to which to write the message.
   * @return Void
   */
  def writeMessage(value:WireFormat, writer:Writer)

  /**
   * Send a message in wire format to the client. Message
   * will be sent to the outputPeer, and then written to the
   * output socket.
   *
   * @param o The message to send.
   * @return Void
   */
  def sendMessage(o:WireFormat){
    // Delegated to the output actor; actual socket write happens there.
    peer ! OutgoingMessageEvent(o)
  }

  /**
   * Handle a message from the client. Generally
   * messages encode RPC calls, and will be delegated
   * to the rpcTarget.
   *
   * @param msg The message we've received.
   * @return Void
   */
  def handleIncomingMessage(msg:Any)

  /**
   * Send a string to the client editor, to be displayed
   * to the user. This is to be used for non-critical messaging
   * that the user may choose to ignore.
   *
   * @param msg The message to write.
   * @return Void
   */
  def sendBackgroundMessage(msg:String)

  /**
   * Designate an actor that should receive outgoing
   * messages.
   * TODO: Perhaps a channel would be more efficient?
   *
   * @param peer The Actor.
   * @return Void
   */
  def setOutputActor(peer:Actor)
  protected def peer:Actor

  /**
   * Designate the target to which RPC handling
   * should be delegated.
   *
   * @param target The RPCTarget instance.
   * @return Void
   */
  def setRPCTarget(target:RPCTarget)

  /**
   * Send a simple RPC Return with a 'true' value.
   * Serves to acknowledge the RPC call when no
   * other return value is required.
   *
   * @param callId The id of the RPC call.
   * @return Void
   */
  def sendRPCAckOK(callId:Int)

  /**
   * Send an RPC Return with the given value.
   *
   * @param value The value to return.
   * @param callId The id of the RPC call.
   * @return Void
   */
  def sendRPCReturn(value:WireFormat, callId:Int)

  /**
   * Notify the client that the RPC call could not
   * be handled.
   *
   * @param msg A message describing the error.
   * @param callId The id of the failed RPC call.
   * @return Void
   */
  def sendRPCError(msg:String, callId:Int)

  /**
   * Notify the client that a message was received
   * that does not conform to the protocol.
   *
   * @param packet The message that failed.
   * @param condition A string describing the problem.
   * @return Void
   */
  def sendProtocolError(packet:String, condition:String)

  /**
   * Send a structure describing the connection, protocol and
   * server. Probably not necessary in all clients.
   *
   * @param callId The id of the RPC call.
   * @return Void
   */
  def sendConnectionInfo(callId:Int)

  /**
   * Send a notification that the interactive compiler is ready
   * to process queries. Editor should not allow commands until
   * this notification has been received.
   *
   * @return Void
   */
  def sendCompilerReady()

  /**
   * Send notes describing errors, warnings that the compiler
   * generates. These results are generated asynchronously,
   * and not in response to any single RPC call.
   *
   * @param notes The notes
   * @return Void
   */
  def sendTypeCheckResult(notes:NoteList)

}
/**
 * Conversions from internal model objects to the protocol's wire format.
 * Each overload serializes one model type; implementations define the
 * concrete encoding (e.g. S-expressions).
 */
trait ProtocolConversions{
  def toWF(config:ReplConfig):WireFormat
  def toWF(config:DebugConfig):WireFormat
  def toWF(unit:DebugUnit):WireFormat
  def toWF(value:Boolean):WireFormat
  def toWF(value:DebugSourceLinePairs):WireFormat
  def toWF(value:Note):WireFormat
  // Serializes an already-converted sequence, e.g. as a wire-format list.
  def toWF(values:Iterable[WireFormat]):WireFormat
  def toWF(value:SymbolInfoLight):WireFormat
  def toWF(value:PackageMemberInfoLight):WireFormat
  def toWF(value:SymbolInfo):WireFormat
  def toWF(value:NamedTypeMemberInfoLight):WireFormat
  def toWF(value:NamedTypeMemberInfo):WireFormat
  def toWF(value:EntityInfo):WireFormat
  def toWF(value:TypeInfo):WireFormat
  def toWF(value:PackageInfo):WireFormat
  def toWF(value:CallCompletionInfo):WireFormat
  def toWF(value:InterfaceInfo):WireFormat
  def toWF(value:TypeInspectInfo):WireFormat
  def toWF(value:RefactorFailure):WireFormat
  def toWF(value:RefactorEffect):WireFormat
  def toWF(value:RefactorResult):WireFormat
  def toWF(value: Null): WireFormat
}
| bbatsov/ensime | src/main/scala/org/ensime/protocol/Protocol.scala | Scala | gpl-3.0 | 4,870 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.net.{URI, URISyntaxException}
import java.text.{BreakIterator, DecimalFormat, DecimalFormatSymbols}
import java.util.{HashMap, Locale, Map => JMap}
import java.util.regex.Pattern
import scala.collection.mutable.ArrayBuffer
import org.apache.commons.codec.binary.{Base64 => CommonsBase64}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.{FunctionRegistry, TypeCheckResult}
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.expressions.codegen.Block._
import org.apache.spark.sql.catalyst.util.{ArrayData, GenericArrayData, TypeUtils}
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.UTF8StringBuilder
import org.apache.spark.unsafe.types.{ByteArray, UTF8String}
////////////////////////////////////////////////////////////////////////////////////////////////////
// This file defines expressions for string operations.
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* An expression that concatenates multiple input strings or array of strings into a single string,
* using a given separator (the first child).
*
* Returns null if the separator is null. Otherwise, concat_ws skips all null values.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(sep, [str | array(str)]+) - Returns the concatenation of the strings separated by `sep`.",
examples = """
Examples:
> SELECT _FUNC_(' ', 'Spark', 'SQL');
Spark SQL
""",
since = "1.5.0")
// scalastyle:on line.size.limit
case class ConcatWs(children: Seq[Expression])
  extends Expression with ImplicitCastInputTypes {

  require(children.nonEmpty, s"$prettyName requires at least one argument.")

  override def prettyName: String = "concat_ws"

  /** The 1st child (separator) is str, and rest are either str or array of str. */
  override def inputTypes: Seq[AbstractDataType] = {
    val arrayOrStr = TypeCollection(ArrayType(StringType), StringType)
    StringType +: Seq.fill(children.size - 1)(arrayOrStr)
  }

  override def dataType: DataType = StringType

  // Result is null only if the separator (first child) is null.
  override def nullable: Boolean = children.head.nullable
  override def foldable: Boolean = children.forall(_.foldable)

  // Interpreted path: flatten array children, keep nulls (concatWs skips them).
  override def eval(input: InternalRow): Any = {
    val flatInputs = children.flatMap { child =>
      child.eval(input) match {
        case s: UTF8String => Iterator(s)
        case arr: ArrayData => arr.toArray[UTF8String](StringType)
        case null => Iterator(null.asInstanceOf[UTF8String])
      }
    }
    UTF8String.concatWs(flatInputs.head, flatInputs.tail : _*)
  }

  override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    if (children.forall(_.dataType == StringType)) {
      // All children are strings. In that case we can construct a fixed size array.
      val evals = children.map(_.genCode(ctx))
      val separator = evals.head
      val strings = evals.tail
      val numArgs = strings.length
      val args = ctx.freshName("args")

      val inputs = strings.zipWithIndex.map { case (eval, index) =>
        if (eval.isNull != TrueLiteral) {
          s"""
             ${eval.code}
             if (!${eval.isNull}) {
               $args[$index] = ${eval.value};
             }
           """
        } else {
          ""
        }
      }
      // Split per-argument assignments into helper methods to stay under
      // the JVM's 64KB method bytecode limit for wide expressions.
      val codes = ctx.splitExpressionsWithCurrentInputs(
          expressions = inputs,
          funcName = "valueConcatWs",
          extraArguments = ("UTF8String[]", args) :: Nil)
      ev.copy(code"""
        UTF8String[] $args = new UTF8String[$numArgs];
        ${separator.code}
        $codes
        UTF8String ${ev.value} = UTF8String.concatWs(${separator.value}, $args);
        boolean ${ev.isNull} = ${ev.value} == null;
      """)
    } else {
      // Mixed string/array children: total vararg count is only known at
      // runtime, so generate a counting pass, then a filling pass.
      val array = ctx.freshName("array")
      val varargNum = ctx.freshName("varargNum")
      val idxVararg = ctx.freshName("idxInVararg")

      val evals = children.map(_.genCode(ctx))
      val (varargCount, varargBuild) = children.tail.zip(evals.tail).map { case (child, eval) =>
        child.dataType match {
          case StringType =>
            ("", // we count all the StringType arguments num at once below.
              if (eval.isNull == TrueLiteral) {
                ""
              } else {
                s"$array[$idxVararg ++] = ${eval.isNull} ? (UTF8String) null : ${eval.value};"
              })
          case _: ArrayType =>
            val size = ctx.freshName("n")
            if (eval.isNull == TrueLiteral) {
              ("", "")
            } else {
              (s"""
                if (!${eval.isNull}) {
                  $varargNum += ${eval.value}.numElements();
                }
                """,
                s"""
                if (!${eval.isNull}) {
                  final int $size = ${eval.value}.numElements();
                  for (int j = 0; j < $size; j ++) {
                    $array[$idxVararg ++] = ${CodeGenerator.getValue(eval.value, StringType, "j")};
                  }
                }
                """)
            }
        }
      }.unzip

      val codes = ctx.splitExpressionsWithCurrentInputs(evals.map(_.code.toString))

      val varargCounts = ctx.splitExpressionsWithCurrentInputs(
        expressions = varargCount,
        funcName = "varargCountsConcatWs",
        returnType = "int",
        makeSplitFunction = body =>
          s"""
             |int $varargNum = 0;
             |$body
             |return $varargNum;
           """.stripMargin,
        foldFunctions = _.map(funcCall => s"$varargNum += $funcCall;").mkString("\\n"))

      val varargBuilds = ctx.splitExpressionsWithCurrentInputs(
        expressions = varargBuild,
        funcName = "varargBuildsConcatWs",
        extraArguments = ("UTF8String []", array) :: ("int", idxVararg) :: Nil,
        returnType = "int",
        makeSplitFunction = body =>
          s"""
             |$body
             |return $idxVararg;
           """.stripMargin,
        foldFunctions = _.map(funcCall => s"$idxVararg = $funcCall;").mkString("\\n"))

      ev.copy(
        code"""
        $codes
        int $varargNum = ${children.count(_.dataType == StringType) - 1};
        int $idxVararg = 0;
        $varargCounts
        UTF8String[] $array = new UTF8String[$varargNum];
        $varargBuilds
        UTF8String ${ev.value} = UTF8String.concatWs(${evals.head.value}, $array);
        boolean ${ev.isNull} = ${ev.value} == null;
      """)
    }
  }
}
/**
* An expression that returns the `n`-th input in given inputs.
* If all inputs are binary, `elt` returns an output as binary. Otherwise, it returns as string.
* If any input is null, `elt` returns null.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(n, input1, input2, ...) - Returns the `n`-th input, e.g., returns `input2` when `n` is 2.",
examples = """
Examples:
> SELECT _FUNC_(1, 'scala', 'java');
scala
""",
since = "2.0.0")
// scalastyle:on line.size.limit
case class Elt(children: Seq[Expression]) extends Expression {

  // First child is the 1-based index; the rest are the selectable inputs.
  private lazy val indexExpr = children.head
  private lazy val inputExprs = children.tail.toArray

  /** This expression is always nullable because it returns null if index is out of range. */
  override def nullable: Boolean = true

  override def dataType: DataType = inputExprs.map(_.dataType).headOption.getOrElse(StringType)

  // Requires an integer index and homogeneous string/binary inputs.
  override def checkInputDataTypes(): TypeCheckResult = {
    if (children.size < 2) {
      TypeCheckResult.TypeCheckFailure("elt function requires at least two arguments")
    } else {
      val (indexType, inputTypes) = (indexExpr.dataType, inputExprs.map(_.dataType))
      if (indexType != IntegerType) {
        return TypeCheckResult.TypeCheckFailure(s"first input to function $prettyName should " +
          s"have ${IntegerType.catalogString}, but it's ${indexType.catalogString}")
      }
      if (inputTypes.exists(tpe => !Seq(StringType, BinaryType).contains(tpe))) {
        return TypeCheckResult.TypeCheckFailure(
          s"input to function $prettyName should have ${StringType.catalogString} or " +
            s"${BinaryType.catalogString}, but it's " +
            inputTypes.map(_.catalogString).mkString("[", ", ", "]"))
      }
      TypeUtils.checkForSameTypeInputExpr(inputTypes, s"function $prettyName")
    }
  }

  // Interpreted path: null index or out-of-range (index <= 0 or > #inputs) yields null.
  override def eval(input: InternalRow): Any = {
    val indexObj = indexExpr.eval(input)
    if (indexObj == null) {
      null
    } else {
      val index = indexObj.asInstanceOf[Int]
      if (index <= 0 || index > inputExprs.length) {
        null
      } else {
        inputExprs(index - 1).eval(input)
      }
    }
  }

  override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val index = indexExpr.genCode(ctx)
    val inputs = inputExprs.map(_.genCode(ctx))
    val indexVal = ctx.freshName("index")
    val indexMatched = ctx.freshName("eltIndexMatched")

    val inputVal = ctx.addMutableState(CodeGenerator.javaType(dataType), "inputVal")

    // One `if` per candidate input; `continue` exits the do/while wrapper
    // as soon as the matching index is found.
    val assignInputValue = inputs.zipWithIndex.map { case (eval, index) =>
      s"""
         |if ($indexVal == ${index + 1}) {
         |  ${eval.code}
         |  $inputVal = ${eval.isNull} ? null : ${eval.value};
         |  $indexMatched = true;
         |  continue;
         |}
      """.stripMargin
    }

    // Split across helper methods (64KB bytecode limit); each helper reports
    // whether it matched so the fold can short-circuit.
    val codes = ctx.splitExpressionsWithCurrentInputs(
      expressions = assignInputValue,
      funcName = "eltFunc",
      extraArguments = ("int", indexVal) :: Nil,
      returnType = CodeGenerator.JAVA_BOOLEAN,
      makeSplitFunction = body =>
        s"""
           |${CodeGenerator.JAVA_BOOLEAN} $indexMatched = false;
           |do {
           |  $body
           |} while (false);
           |return $indexMatched;
         """.stripMargin,
      foldFunctions = _.map { funcCall =>
        s"""
           |$indexMatched = $funcCall;
           |if ($indexMatched) {
           |  continue;
           |}
         """.stripMargin
      }.mkString)

    ev.copy(
      code"""
         |${index.code}
         |final int $indexVal = ${index.value};
         |${CodeGenerator.JAVA_BOOLEAN} $indexMatched = false;
         |$inputVal = null;
         |do {
         |  $codes
         |} while (false);
         |final ${CodeGenerator.javaType(dataType)} ${ev.value} = $inputVal;
         |final boolean ${ev.isNull} = ${ev.value} == null;
       """.stripMargin)
  }
}
/** Base trait for unary string-to-string expressions; subclasses supply `convert`. */
trait String2StringExpression extends ImplicitCastInputTypes {
  self: UnaryExpression =>

  def convert(v: UTF8String): UTF8String

  override def dataType: DataType = StringType
  override def inputTypes: Seq[DataType] = Seq(StringType)

  // Null input has already been handled by UnaryExpression's null check.
  protected override def nullSafeEval(input: Any): Any =
    convert(input.asInstanceOf[UTF8String])
}
/**
* A function that converts the characters of a string to uppercase.
*/
@ExpressionDescription(
usage = "_FUNC_(str) - Returns `str` with all characters changed to uppercase.",
examples = """
Examples:
> SELECT _FUNC_('SparkSql');
SPARKSQL
""",
since = "1.0.1")
case class Upper(child: Expression)
  extends UnaryExpression with String2StringExpression {

  // scalastyle:off caselocale
  // Uses UTF8String's locale-independent uppercasing.
  override def convert(v: UTF8String): UTF8String = v.toUpperCase
  // scalastyle:on caselocale

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    defineCodeGen(ctx, ev, c => s"($c).toUpperCase()")
  }
}
/**
* A function that converts the characters of a string to lowercase.
*/
@ExpressionDescription(
usage = "_FUNC_(str) - Returns `str` with all characters changed to lowercase.",
examples = """
Examples:
> SELECT _FUNC_('SparkSql');
sparksql
""",
since = "1.0.1")
case class Lower(child: Expression) extends UnaryExpression with String2StringExpression {

  // scalastyle:off caselocale
  // Uses UTF8String's locale-independent lowercasing.
  override def convert(v: UTF8String): UTF8String = v.toLowerCase
  // scalastyle:on caselocale

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    defineCodeGen(ctx, ev, c => s"($c).toLowerCase()")
  }
}
/** A base trait for functions that compare two strings, returning a boolean. */
/** A base trait for functions that compare two strings, returning a boolean. */
abstract class StringPredicate extends BinaryExpression
  with Predicate with ImplicitCastInputTypes with NullIntolerant {

  // Subclasses implement the actual string comparison.
  def compare(l: UTF8String, r: UTF8String): Boolean

  override def inputTypes: Seq[DataType] = Seq(StringType, StringType)

  // Null handling is done by BinaryExpression (NullIntolerant): null in, null out.
  protected override def nullSafeEval(input1: Any, input2: Any): Any =
    compare(input1.asInstanceOf[UTF8String], input2.asInstanceOf[UTF8String])

  override def toString: String = s"$nodeName($left, $right)"
}
/**
* A function that returns true if the string `left` contains the string `right`.
*/
/**
 * A function that returns true if the string `left` contains the string `right`.
 */
case class Contains(left: Expression, right: Expression) extends StringPredicate {
  override def compare(l: UTF8String, r: UTF8String): Boolean = l.contains(r)
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    defineCodeGen(ctx, ev, (c1, c2) => s"($c1).contains($c2)")
  }
}
/**
* A function that returns true if the string `left` starts with the string `right`.
*/
/**
 * A function that returns true if the string `left` starts with the string `right`.
 */
case class StartsWith(left: Expression, right: Expression) extends StringPredicate {
  override def compare(l: UTF8String, r: UTF8String): Boolean = l.startsWith(r)
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    defineCodeGen(ctx, ev, (c1, c2) => s"($c1).startsWith($c2)")
  }
}
/**
* A function that returns true if the string `left` ends with the string `right`.
*/
/**
 * A function that returns true if the string `left` ends with the string `right`.
 */
case class EndsWith(left: Expression, right: Expression) extends StringPredicate {
  override def compare(l: UTF8String, r: UTF8String): Boolean = l.endsWith(r)
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    defineCodeGen(ctx, ev, (c1, c2) => s"($c1).endsWith($c2)")
  }
}
/**
 * Replaces every occurrence of a search string inside a source string.
 */
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(str, search[, replace]) - Replaces all occurrences of `search` with `replace`.",
arguments = """
Arguments:
* str - a string expression
* search - a string expression. If `search` is not found in `str`, `str` is returned unchanged.
* replace - a string expression. If `replace` is not specified or is an empty string, nothing replaces
the string that is removed from `str`.
""",
examples = """
Examples:
> SELECT _FUNC_('ABCabc', 'abc', 'DEF');
ABCDEF
""",
since = "2.3.0")
// scalastyle:on line.size.limit
case class StringReplace(srcExpr: Expression, searchExpr: Expression, replaceExpr: Expression)
  extends TernaryExpression with ImplicitCastInputTypes {

  // Two-argument form: an unspecified replacement deletes every match.
  def this(srcExpr: Expression, searchExpr: Expression) = {
    this(srcExpr, searchExpr, Literal(""))
  }

  override def dataType: DataType = StringType

  override def inputTypes: Seq[DataType] = Seq(StringType, StringType, StringType)

  override def children: Seq[Expression] = srcExpr :: searchExpr :: replaceExpr :: Nil

  override def prettyName: String = "replace"

  // Inputs are non-null here; delegates straight to UTF8String.replace.
  override def nullSafeEval(srcEval: Any, searchEval: Any, replaceEval: Any): Any = {
    srcEval.asInstanceOf[UTF8String].replace(
      searchEval.asInstanceOf[UTF8String], replaceEval.asInstanceOf[UTF8String])
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    nullSafeCodeGen(ctx, ev, (source, searched, replacement) => {
      s"""${ev.value} = $source.replace($searched, $replacement);"""
    })
  }
}
object Overlay {

  /** Core of SQL OVERLAY for strings: splice `replace` into `input` at 1-based `pos`. */
  def calculate(input: UTF8String, replace: UTF8String, pos: Int, len: Int): UTF8String = {
    // A negative len means "length not specified"; the default is the length of the
    // replacement string. Zero and positive values are honored as-is.
    val removed = if (len < 0) replace.numChars else len
    val builder = new UTF8StringBuilder
    builder.append(input.substringSQL(1, pos - 1))
    builder.append(replace)
    builder.append(input.substringSQL(pos + removed, Int.MaxValue))
    builder.build()
  }

  /** Binary variant of the overlay computation, operating on raw bytes. */
  def calculate(input: Array[Byte], replace: Array[Byte], pos: Int, len: Int): Array[Byte] = {
    // Same rule as the string variant: negative len defaults to the replacement's length.
    val removed = if (len < 0) replace.length else len
    ByteArray.concat(
      ByteArray.subStringSQL(input, 1, pos - 1),
      replace,
      ByteArray.subStringSQL(input, pos + removed, Int.MaxValue))
  }
}
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(input, replace, pos[, len]) - Replace `input` with `replace` that starts at `pos` and is of length `len`.",
examples = """
Examples:
> SELECT _FUNC_('Spark SQL' PLACING '_' FROM 6);
Spark_SQL
> SELECT _FUNC_('Spark SQL' PLACING 'CORE' FROM 7);
Spark CORE
> SELECT _FUNC_('Spark SQL' PLACING 'ANSI ' FROM 7 FOR 0);
Spark ANSI SQL
> SELECT _FUNC_('Spark SQL' PLACING 'tructured' FROM 2 FOR 4);
Structured SQL
> SELECT _FUNC_(encode('Spark SQL', 'utf-8') PLACING encode('_', 'utf-8') FROM 6);
Spark_SQL
> SELECT _FUNC_(encode('Spark SQL', 'utf-8') PLACING encode('CORE', 'utf-8') FROM 7);
Spark CORE
> SELECT _FUNC_(encode('Spark SQL', 'utf-8') PLACING encode('ANSI ', 'utf-8') FROM 7 FOR 0);
Spark ANSI SQL
> SELECT _FUNC_(encode('Spark SQL', 'utf-8') PLACING encode('tructured', 'utf-8') FROM 2 FOR 4);
Structured SQL
""")
// scalastyle:on line.size.limit
case class Overlay(input: Expression, replace: Expression, pos: Expression, len: Expression)
  extends QuaternaryExpression with ImplicitCastInputTypes with NullIntolerant {

  /** Three-argument form: len = -1 means "use the length of `replace`". */
  def this(str: Expression, replace: Expression, pos: Expression) = {
    this(str, replace, pos, Literal.create(-1, IntegerType))
  }

  // The result type follows the input (StringType or BinaryType).
  override def dataType: DataType = input.dataType

  override def inputTypes: Seq[AbstractDataType] = Seq(TypeCollection(StringType, BinaryType),
    TypeCollection(StringType, BinaryType), IntegerType, IntegerType)

  override def children: Seq[Expression] = input :: replace :: pos :: len :: Nil

  // In addition to the generic check, `input` and `replace` must share a type
  // (both strings or both binary).
  override def checkInputDataTypes(): TypeCheckResult = {
    val inputTypeCheck = super.checkInputDataTypes()
    if (inputTypeCheck.isSuccess) {
      TypeUtils.checkForSameTypeInputExpr(
        input.dataType :: replace.dataType :: Nil, s"function $prettyName")
    } else {
      inputTypeCheck
    }
  }

  // Bind the string or binary variant of Overlay.calculate once, based on the input type,
  // so per-row evaluation does not re-dispatch on the data type.
  private lazy val replaceFunc = input.dataType match {
    case StringType =>
      (inputEval: Any, replaceEval: Any, posEval: Int, lenEval: Int) => {
        Overlay.calculate(
          inputEval.asInstanceOf[UTF8String],
          replaceEval.asInstanceOf[UTF8String],
          posEval, lenEval)
      }
    case BinaryType =>
      (inputEval: Any, replaceEval: Any, posEval: Int, lenEval: Int) => {
        Overlay.calculate(
          inputEval.asInstanceOf[Array[Byte]],
          replaceEval.asInstanceOf[Array[Byte]],
          posEval, lenEval)
      }
  }

  override def nullSafeEval(inputEval: Any, replaceEval: Any, posEval: Any, lenEval: Any): Any = {
    replaceFunc(inputEval, replaceEval, posEval.asInstanceOf[Int], lenEval.asInstanceOf[Int])
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    // defineCodeGen terminates the generated assignment with ';' itself, so the expression
    // must not carry a trailing semicolon (it previously emitted a stray ";;" in the
    // generated Java, unlike every other defineCodeGen call in this file).
    defineCodeGen(ctx, ev, (input, replace, pos, len) =>
      "org.apache.spark.sql.catalyst.expressions.Overlay" +
        s".calculate($input, $replace, $pos, $len)")
  }
}
object StringTranslate {

  /**
   * Builds the char -> char translation dictionary from the matching and replace strings.
   * A character of `matchingString` with no counterpart at the same index in
   * `replaceString` is mapped to the 0-char (NOTE(review): translate appears to treat
   * that mapping as character removal — confirm against UTF8String.translate).
   */
  def buildDict(matchingString: UTF8String, replaceString: UTF8String)
    : JMap[Character, Character] = {
    val matching = matchingString.toString()
    val replace = replaceString.toString()
    val dict = new HashMap[Character, Character]()
    var i = 0
    while (i < matching.length()) {
      val rep = if (i < replace.length()) replace.charAt(i) else '\\u0000'
      // Only the first occurrence of a character in `matching` wins; later duplicates
      // are ignored, matching the behavior of other SQL engines' translate().
      if (null == dict.get(matching.charAt(i))) {
        dict.put(matching.charAt(i), rep)
      }
      i += 1
    }
    dict
  }
}
/**
 * A function that translates any character in the `srcExpr` by a character in `replaceExpr`.
 * The characters in `replaceExpr` correspond positionally to the characters in `matchingExpr`.
 * The translation happens for every character of the input that matches a character
 * in the `matchingExpr`.
 */
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(input, from, to) - Translates the `input` string by replacing the characters present in the `from` string with the corresponding characters in the `to` string.",
examples = """
Examples:
> SELECT _FUNC_('AaBbCc', 'abc', '123');
A1B2C3
""",
since = "1.5.0")
// scalastyle:on line.size.limit
case class StringTranslate(srcExpr: Expression, matchingExpr: Expression, replaceExpr: Expression)
  extends TernaryExpression with ImplicitCastInputTypes {

  // Per-partition cache of the last-seen matching/replace operands and the dictionary
  // built from them, so the dict is only rebuilt when those operands change between rows.
  @transient private var lastMatching: UTF8String = _
  @transient private var lastReplace: UTF8String = _
  @transient private var dict: JMap[Character, Character] = _

  override def nullSafeEval(srcEval: Any, matchingEval: Any, replaceEval: Any): Any = {
    if (matchingEval != lastMatching || replaceEval != lastReplace) {
      // Clone the operands before caching: incoming UTF8Strings may point into reused buffers.
      lastMatching = matchingEval.asInstanceOf[UTF8String].clone()
      lastReplace = replaceEval.asInstanceOf[UTF8String].clone()
      dict = StringTranslate.buildDict(lastMatching, lastReplace)
    }
    srcEval.asInstanceOf[UTF8String].translate(dict)
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val classNameDict = classOf[JMap[Character, Character]].getCanonicalName
    // Mirror the interpreted path's caching using mutable codegen state.
    val termLastMatching = ctx.addMutableState("UTF8String", "lastMatching")
    val termLastReplace = ctx.addMutableState("UTF8String", "lastReplace")
    val termDict = ctx.addMutableState(classNameDict, "dict")
    nullSafeCodeGen(ctx, ev, (src, matching, replace) => {
      // When both operands are foldable the dict can be built exactly once (null check);
      // otherwise compare the current operands against the cached ones.
      val check = if (matchingExpr.foldable && replaceExpr.foldable) {
        s"$termDict == null"
      } else {
        s"!$matching.equals($termLastMatching) || !$replace.equals($termLastReplace)"
      }
      s"""if ($check) {
// Not all of them is literal or matching or replace value changed
$termLastMatching = $matching.clone();
$termLastReplace = $replace.clone();
$termDict = org.apache.spark.sql.catalyst.expressions.StringTranslate
.buildDict($termLastMatching, $termLastReplace);
}
${ev.value} = $src.translate($termDict);
"""
    })
  }

  override def dataType: DataType = StringType
  override def inputTypes: Seq[DataType] = Seq(StringType, StringType, StringType)
  override def children: Seq[Expression] = srcExpr :: matchingExpr :: replaceExpr :: Nil
  override def prettyName: String = "translate"
}
/**
 * A function that returns the index (1-based) of the given string (left) in the comma-
 * delimited list (right). Returns 0, if the string wasn't found or if the given
 * string (left) contains a comma.
 */
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = """
_FUNC_(str, str_array) - Returns the index (1-based) of the given string (`str`) in the comma-delimited list (`str_array`).
Returns 0, if the string was not found or if the given string (`str`) contains a comma.
""",
examples = """
Examples:
> SELECT _FUNC_('ab','abc,b,ab,c,def');
3
""",
since = "1.5.0")
// scalastyle:on line.size.limit
case class FindInSet(left: Expression, right: Expression) extends BinaryExpression
  with ImplicitCastInputTypes {

  override def inputTypes: Seq[AbstractDataType] = Seq(StringType, StringType)

  override def dataType: DataType = IntegerType

  override def prettyName: String = "find_in_set"

  // Delegates to UTF8String.findInSet, which handles the "0 when absent" contract.
  override protected def nullSafeEval(word: Any, set: Any): Any =
    set.asInstanceOf[UTF8String].findInSet(word.asInstanceOf[UTF8String])

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode =
    nullSafeCodeGen(ctx, ev, (word, set) => s"${ev.value} = $set.findInSet($word);")
}
/** Common shape for the trim family (TRIM / LTRIM / RTRIM): string in, string out. */
trait String2TrimExpression extends Expression with ImplicitCastInputTypes {

  override def dataType: DataType = StringType

  // Every argument (source string and optional trim string) is a string.
  override def inputTypes: Seq[AbstractDataType] = Seq.fill(children.size)(StringType)

  // Nullable when any child is nullable; foldable only when all children are.
  override def nullable: Boolean = children.exists(child => child.nullable)
  override def foldable: Boolean = children.forall(child => child.foldable)
}
object StringTrim {
  /** Two-argument variant: trim the characters of `trimStr` from both ends of `str`. */
  def apply(str: Expression, trimStr: Expression): StringTrim = StringTrim(str, Some(trimStr))

  /** One-argument variant: trim spaces from both ends of `str`. */
  def apply(str: Expression): StringTrim = StringTrim(str, None)
}
/**
 * A function that takes a character string, removes the leading and trailing characters matching
 * with any character in the trim string, returns the new string.
 * If BOTH and trimStr keywords are not specified, it defaults to remove space character from both
 * ends. The trim function will have one argument, which contains the source string.
 * If BOTH and trimStr keywords are specified, it trims the characters from both ends, and the trim
 * function will have two arguments, the first argument contains trimStr, the second argument
 * contains the source string.
 * trimStr: A character string to be trimmed from the source string, if it has multiple characters,
 * the function searches for each character in the source string, removes the characters from the
 * source string until it encounters the first non-match character.
 * BOTH: removes any character from both ends of the source string that matches characters in the
 * trim string.
 */
@ExpressionDescription(
usage = """
_FUNC_(str) - Removes the leading and trailing space characters from `str`.
_FUNC_(BOTH FROM str) - Removes the leading and trailing space characters from `str`.
_FUNC_(LEADING FROM str) - Removes the leading space characters from `str`.
_FUNC_(TRAILING FROM str) - Removes the trailing space characters from `str`.
_FUNC_(trimStr FROM str) - Remove the leading and trailing `trimStr` characters from `str`.
_FUNC_(BOTH trimStr FROM str) - Remove the leading and trailing `trimStr` characters from `str`.
_FUNC_(LEADING trimStr FROM str) - Remove the leading `trimStr` characters from `str`.
_FUNC_(TRAILING trimStr FROM str) - Remove the trailing `trimStr` characters from `str`.
""",
arguments = """
Arguments:
* str - a string expression
* trimStr - the trim string characters to trim, the default value is a single space
* BOTH, FROM - these are keywords to specify trimming string characters from both ends of
the string
* LEADING, FROM - these are keywords to specify trimming string characters from the left
end of the string
* TRAILING, FROM - these are keywords to specify trimming string characters from the right
end of the string
""",
examples = """
Examples:
> SELECT _FUNC_('    SparkSQL   ');
SparkSQL
> SELECT _FUNC_(BOTH FROM '    SparkSQL   ');
SparkSQL
> SELECT _FUNC_(LEADING FROM '    SparkSQL   ');
SparkSQL
> SELECT _FUNC_(TRAILING FROM '    SparkSQL   ');
SparkSQL
> SELECT _FUNC_('SL' FROM 'SSparkSQLS');
parkSQ
> SELECT _FUNC_(BOTH 'SL' FROM 'SSparkSQLS');
parkSQ
> SELECT _FUNC_(LEADING 'SL' FROM 'SSparkSQLS');
parkSQLS
> SELECT _FUNC_(TRAILING 'SL' FROM 'SSparkSQLS');
SSparkSQ
""",
since = "1.5.0")
case class StringTrim(
  srcStr: Expression,
  trimStr: Option[Expression] = None)
  extends String2TrimExpression {

  // Parser-facing constructor: TRIM(trimStr FROM srcStr) supplies the trim string first.
  def this(trimStr: Expression, srcStr: Expression) = this(srcStr, Option(trimStr))

  def this(srcStr: Expression) = this(srcStr, None)

  override def prettyName: String = "trim"

  // The optional trim string becomes a second child when present.
  override def children: Seq[Expression] = if (trimStr.isDefined) {
    srcStr :: trimStr.get :: Nil
  } else {
    srcStr :: Nil
  }

  override def eval(input: InternalRow): Any = {
    val srcString = srcStr.eval(input).asInstanceOf[UTF8String]
    if (srcString == null) {
      null
    } else {
      if (trimStr.isDefined) {
        // Explicit trim string: trim its characters from both ends. Note that a null
        // trim string makes trim(null) return null via UTF8String semantics.
        srcString.trim(trimStr.get.eval(input).asInstanceOf[UTF8String])
      } else {
        // Default: trim space characters from both ends.
        srcString.trim()
      }
    }
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val evals = children.map(_.genCode(ctx))
    val srcString = evals(0)
    if (evals.length == 1) {
      // No explicit trim string: generate a plain trim() call guarded by a null check.
      ev.copy(evals.map(_.code) :+ code"""
boolean ${ev.isNull} = false;
UTF8String ${ev.value} = null;
if (${srcString.isNull}) {
${ev.isNull} = true;
} else {
${ev.value} = ${srcString.value}.trim();
}""")
    } else {
      // Explicit trim string: the result is null when either operand is null.
      val trimString = evals(1)
      val getTrimFunction =
        s"""
if (${trimString.isNull}) {
${ev.isNull} = true;
} else {
${ev.value} = ${srcString.value}.trim(${trimString.value});
}"""
      ev.copy(evals.map(_.code) :+ code"""
boolean ${ev.isNull} = false;
UTF8String ${ev.value} = null;
if (${srcString.isNull}) {
${ev.isNull} = true;
} else {
$getTrimFunction
}""")
    }
  }
}
object StringTrimLeft {
  /** Two-argument variant: trim the characters of `trimStr` from the left end of `str`. */
  def apply(str: Expression, trimStr: Expression): StringTrimLeft =
    StringTrimLeft(str, Some(trimStr))

  /** One-argument variant: trim spaces from the left end of `str`. */
  def apply(str: Expression): StringTrimLeft = StringTrimLeft(str, None)
}
/**
 * A function that trims the characters from left end for a given string.
 * If LEADING and trimStr keywords are not specified, it defaults to remove space character from
 * the left end. The ltrim function will have one argument, which contains the source string.
 * If LEADING and trimStr keywords are specified, it trims the characters from left end. The
 * ltrim function will have two arguments, the first argument contains trimStr, the second argument
 * contains the source string.
 * trimStr: the function removes any character from the left end of the source string which matches
 * with the characters from trimStr, it stops at the first non-match character.
 * LEADING: removes any character from the left end of the source string that matches characters in
 * the trim string.
 */
@ExpressionDescription(
usage = """
_FUNC_(str) - Removes the leading space characters from `str`.
""",
arguments = """
Arguments:
* str - a string expression
* trimStr - the trim string characters to trim, the default value is a single space
""",
examples = """
Examples:
> SELECT _FUNC_('    SparkSQL   ');
SparkSQL
""",
since = "1.5.0")
case class StringTrimLeft(
  srcStr: Expression,
  trimStr: Option[Expression] = None)
  extends String2TrimExpression {

  // Parser-facing constructor: TRIM(LEADING trimStr FROM srcStr) supplies trimStr first.
  def this(trimStr: Expression, srcStr: Expression) = this(srcStr, Option(trimStr))

  def this(srcStr: Expression) = this(srcStr, None)

  override def prettyName: String = "ltrim"

  // The optional trim string becomes a second child when present.
  override def children: Seq[Expression] = if (trimStr.isDefined) {
    srcStr :: trimStr.get :: Nil
  } else {
    srcStr :: Nil
  }

  override def eval(input: InternalRow): Any = {
    val srcString = srcStr.eval(input).asInstanceOf[UTF8String]
    if (srcString == null) {
      null
    } else {
      if (trimStr.isDefined) {
        // Explicit trim string: trim its characters from the left end only.
        srcString.trimLeft(trimStr.get.eval(input).asInstanceOf[UTF8String])
      } else {
        // Default: trim space characters from the left end.
        srcString.trimLeft()
      }
    }
  }

  override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val evals = children.map(_.genCode(ctx))
    val srcString = evals(0)
    if (evals.length == 1) {
      // No explicit trim string: generate a plain trimLeft() call guarded by a null check.
      ev.copy(evals.map(_.code) :+ code"""
boolean ${ev.isNull} = false;
UTF8String ${ev.value} = null;
if (${srcString.isNull}) {
${ev.isNull} = true;
} else {
${ev.value} = ${srcString.value}.trimLeft();
}""")
    } else {
      // Explicit trim string: the result is null when either operand is null.
      val trimString = evals(1)
      val getTrimLeftFunction =
        s"""
if (${trimString.isNull}) {
${ev.isNull} = true;
} else {
${ev.value} = ${srcString.value}.trimLeft(${trimString.value});
}"""
      ev.copy(evals.map(_.code) :+ code"""
boolean ${ev.isNull} = false;
UTF8String ${ev.value} = null;
if (${srcString.isNull}) {
${ev.isNull} = true;
} else {
$getTrimLeftFunction
}""")
    }
  }
}
object StringTrimRight {
  /** Two-argument variant: trim the characters of `trimStr` from the right end of `str`. */
  def apply(str: Expression, trimStr: Expression): StringTrimRight =
    StringTrimRight(str, Some(trimStr))

  /** One-argument variant: trim spaces from the right end of `str`. */
  def apply(str: Expression): StringTrimRight = StringTrimRight(str, None)
}
/**
 * A function that trims the characters from right end for a given string.
 * If TRAILING and trimStr keywords are not specified, it defaults to remove space character
 * from the right end. The rtrim function will have one argument, which contains the source string.
 * If TRAILING and trimStr keywords are specified, it trims the characters from right end. The
 * rtrim function will have two arguments, the first argument contains trimStr, the second argument
 * contains the source string.
 * trimStr: the function removes any character from the right end of source string which matches
 * with the characters from trimStr, it stops at the first non-match character.
 * TRAILING: removes any character from the right end of the source string that matches characters
 * in the trim string.
 */
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = """
_FUNC_(str) - Removes the trailing space characters from `str`.
""",
arguments = """
Arguments:
* str - a string expression
* trimStr - the trim string characters to trim, the default value is a single space
""",
examples = """
Examples:
> SELECT _FUNC_('    SparkSQL   ');
SparkSQL
""",
since = "1.5.0")
// scalastyle:on line.size.limit
case class StringTrimRight(
  srcStr: Expression,
  trimStr: Option[Expression] = None)
  extends String2TrimExpression {

  // Parser-facing constructor: TRIM(TRAILING trimStr FROM srcStr) supplies trimStr first.
  def this(trimStr: Expression, srcStr: Expression) = this(srcStr, Option(trimStr))

  def this(srcStr: Expression) = this(srcStr, None)

  override def prettyName: String = "rtrim"

  // The optional trim string becomes a second child when present.
  override def children: Seq[Expression] = if (trimStr.isDefined) {
    srcStr :: trimStr.get :: Nil
  } else {
    srcStr :: Nil
  }

  override def eval(input: InternalRow): Any = {
    val srcString = srcStr.eval(input).asInstanceOf[UTF8String]
    if (srcString == null) {
      null
    } else {
      if (trimStr.isDefined) {
        // Explicit trim string: trim its characters from the right end only.
        srcString.trimRight(trimStr.get.eval(input).asInstanceOf[UTF8String])
      } else {
        // Default: trim space characters from the right end.
        srcString.trimRight()
      }
    }
  }

  override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val evals = children.map(_.genCode(ctx))
    val srcString = evals(0)
    if (evals.length == 1) {
      // No explicit trim string: generate a plain trimRight() call guarded by a null check.
      ev.copy(evals.map(_.code) :+ code"""
boolean ${ev.isNull} = false;
UTF8String ${ev.value} = null;
if (${srcString.isNull}) {
${ev.isNull} = true;
} else {
${ev.value} = ${srcString.value}.trimRight();
}""")
    } else {
      // Explicit trim string: the result is null when either operand is null.
      val trimString = evals(1)
      val getTrimRightFunction =
        s"""
if (${trimString.isNull}) {
${ev.isNull} = true;
} else {
${ev.value} = ${srcString.value}.trimRight(${trimString.value});
}"""
      ev.copy(evals.map(_.code) :+ code"""
boolean ${ev.isNull} = false;
UTF8String ${ev.value} = null;
if (${srcString.isNull}) {
${ev.isNull} = true;
} else {
$getTrimRightFunction
}""")
    }
  }
}
/**
 * A function that returns the position of the first occurrence of substr in the given string.
 * Returns null if either of the arguments are null and
 * returns 0 if substr could not be found in str.
 *
 * NOTE: that this is not zero based, but 1-based index. The first character in str has index 1.
 */
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(str, substr) - Returns the (1-based) index of the first occurrence of `substr` in `str`.",
examples = """
Examples:
> SELECT _FUNC_('SparkSQL', 'SQL');
6
""",
since = "1.5.0")
// scalastyle:on line.size.limit
case class StringInstr(str: Expression, substr: Expression)
  extends BinaryExpression with ImplicitCastInputTypes {

  override def left: Expression = str
  override def right: Expression = substr

  override def dataType: DataType = IntegerType
  override def inputTypes: Seq[DataType] = Seq(StringType, StringType)

  override def prettyName: String = "instr"

  // UTF8String.indexOf is 0-based and returns -1 on no match; SQL instr is 1-based with
  // 0 meaning "not found", hence the uniform + 1.
  override def nullSafeEval(string: Any, sub: Any): Any =
    string.asInstanceOf[UTF8String].indexOf(sub.asInstanceOf[UTF8String], 0) + 1

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode =
    defineCodeGen(ctx, ev, (l, r) => s"($l).indexOf($r, 0) + 1")
}
/**
 * Returns the substring from string str before count occurrences of the delimiter delim.
 * If count is positive, everything to the left of the final delimiter (counting from left) is
 * returned. If count is negative, everything to the right of the final delimiter (counting from
 * the right) is returned. substring_index performs a case-sensitive match when searching for
 * delim.
 */
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = """
_FUNC_(str, delim, count) - Returns the substring from `str` before `count` occurrences of the delimiter `delim`.
If `count` is positive, everything to the left of the final delimiter (counting from the
left) is returned. If `count` is negative, everything to the right of the final delimiter
(counting from the right) is returned. The function substring_index performs a case-sensitive match
when searching for `delim`.
""",
examples = """
Examples:
> SELECT _FUNC_('www.apache.org', '.', 2);
www.apache
""",
since = "1.5.0")
// scalastyle:on line.size.limit
case class SubstringIndex(strExpr: Expression, delimExpr: Expression, countExpr: Expression)
  extends TernaryExpression with ImplicitCastInputTypes {

  override def dataType: DataType = StringType

  override def inputTypes: Seq[DataType] = Seq(StringType, StringType, IntegerType)

  override def children: Seq[Expression] = Seq(strExpr, delimExpr, countExpr)

  override def prettyName: String = "substring_index"

  // Delegates to UTF8String.subStringIndex; the sign of `count` picks the direction.
  override def nullSafeEval(str: Any, delim: Any, count: Any): Any =
    str.asInstanceOf[UTF8String]
      .subStringIndex(delim.asInstanceOf[UTF8String], count.asInstanceOf[Int])

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode =
    defineCodeGen(ctx, ev, (str, delim, count) => s"$str.subStringIndex($delim, $count)")
}
/**
 * A function that returns the position of the first occurrence of substr
 * in given string after position pos.
 */
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = """
_FUNC_(substr, str[, pos]) - Returns the position of the first occurrence of `substr` in `str` after position `pos`.
The given `pos` and return value are 1-based.
""",
examples = """
Examples:
> SELECT _FUNC_('bar', 'foobarbar');
4
> SELECT _FUNC_('bar', 'foobarbar', 5);
7
> SELECT POSITION('bar' IN 'foobarbar');
4
""",
since = "1.5.0")
// scalastyle:on line.size.limit
case class StringLocate(substr: Expression, str: Expression, start: Expression)
  extends TernaryExpression with ImplicitCastInputTypes {

  // Two-argument form starts searching at position 1 (the beginning of the string).
  def this(substr: Expression, str: Expression) = {
    this(substr, str, Literal(1))
  }

  override def children: Seq[Expression] = substr :: str :: start :: Nil

  // A null `start` yields 0 (not null) — see eval — so nullability only depends on
  // the first two operands.
  override def nullable: Boolean = substr.nullable || str.nullable

  override def dataType: DataType = IntegerType

  override def inputTypes: Seq[DataType] = Seq(StringType, StringType, IntegerType)

  // Custom eval (rather than nullSafeEval) because null handling is non-uniform:
  // null start -> 0, null substr or str -> null, start < 1 -> 0.
  override def eval(input: InternalRow): Any = {
    val s = start.eval(input)
    if (s == null) {
      // if the start position is null, we need to return 0, (conform to Hive)
      0
    } else {
      val r = substr.eval(input)
      if (r == null) {
        null
      } else {
        val l = str.eval(input)
        if (l == null) {
          null
        } else {
          val sVal = s.asInstanceOf[Int]
          if (sVal < 1) {
            0
          } else {
            // indexOf is 0-based; SQL positions are 1-based, hence the -1 / +1 shifts.
            l.asInstanceOf[UTF8String].indexOf(
              r.asInstanceOf[UTF8String],
              s.asInstanceOf[Int] - 1) + 1
          }
        }
      }
    }
  }

  // Generated code mirrors eval's null/short-circuit structure exactly.
  override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val substrGen = substr.genCode(ctx)
    val strGen = str.genCode(ctx)
    val startGen = start.genCode(ctx)
    ev.copy(code = code"""
int ${ev.value} = 0;
boolean ${ev.isNull} = false;
${startGen.code}
if (!${startGen.isNull}) {
${substrGen.code}
if (!${substrGen.isNull}) {
${strGen.code}
if (!${strGen.isNull}) {
if (${startGen.value} > 0) {
${ev.value} = ${strGen.value}.indexOf(${substrGen.value},
${startGen.value} - 1) + 1;
}
} else {
${ev.isNull} = true;
}
} else {
${ev.isNull} = true;
}
}
""")
  }

  override def prettyName: String = "locate"
}
/**
 * Returns str, left-padded with pad to a length of len.
 */
@ExpressionDescription(
usage = """
_FUNC_(str, len[, pad]) - Returns `str`, left-padded with `pad` to a length of `len`.
If `str` is longer than `len`, the return value is shortened to `len` characters.
If `pad` is not specified, `str` will be padded to the left with space characters.
""",
examples = """
Examples:
> SELECT _FUNC_('hi', 5, '??');
???hi
> SELECT _FUNC_('hi', 1, '??');
h
> SELECT _FUNC_('hi', 5);
hi
""",
since = "1.5.0")
case class StringLPad(str: Expression, len: Expression, pad: Expression = Literal(" "))
  extends TernaryExpression with ImplicitCastInputTypes {

  // Two-argument form pads with a single space.
  def this(str: Expression, len: Expression) = {
    this(str, len, Literal(" "))
  }

  override def children: Seq[Expression] = str :: len :: pad :: Nil

  override def dataType: DataType = StringType

  override def inputTypes: Seq[DataType] = Seq(StringType, IntegerType, StringType)

  override def prettyName: String = "lpad"

  // UTF8String.lpad also truncates the input to `len` when it is already longer.
  override def nullSafeEval(string: Any, length: Any, padding: Any): Any =
    string.asInstanceOf[UTF8String]
      .lpad(length.asInstanceOf[Int], padding.asInstanceOf[UTF8String])

  override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode =
    defineCodeGen(ctx, ev, (str, len, pad) => s"$str.lpad($len, $pad)")
}
/**
 * Returns str, right-padded with pad to a length of len.
 */
@ExpressionDescription(
usage = """
_FUNC_(str, len[, pad]) - Returns `str`, right-padded with `pad` to a length of `len`.
If `str` is longer than `len`, the return value is shortened to `len` characters.
If `pad` is not specified, `str` will be padded to the right with space characters.
""",
examples = """
Examples:
> SELECT _FUNC_('hi', 5, '??');
hi???
> SELECT _FUNC_('hi', 1, '??');
h
> SELECT _FUNC_('hi', 5);
hi
""",
since = "1.5.0")
case class StringRPad(str: Expression, len: Expression, pad: Expression = Literal(" "))
  extends TernaryExpression with ImplicitCastInputTypes {

  // Two-argument form pads with a single space.
  def this(str: Expression, len: Expression) = {
    this(str, len, Literal(" "))
  }

  override def children: Seq[Expression] = str :: len :: pad :: Nil

  override def dataType: DataType = StringType

  override def inputTypes: Seq[DataType] = Seq(StringType, IntegerType, StringType)

  override def prettyName: String = "rpad"

  // UTF8String.rpad also truncates the input to `len` when it is already longer.
  override def nullSafeEval(string: Any, length: Any, padding: Any): Any =
    string.asInstanceOf[UTF8String]
      .rpad(length.asInstanceOf[Int], padding.asInstanceOf[UTF8String])

  override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode =
    defineCodeGen(ctx, ev, (str, len, pad) => s"$str.rpad($len, $pad)")
}
object ParseUrl {
  // Recognized values of the `partToExtract` argument (compared as UTF8Strings to avoid
  // per-row conversions).
  private val HOST = UTF8String.fromString("HOST")
  private val PATH = UTF8String.fromString("PATH")
  private val QUERY = UTF8String.fromString("QUERY")
  private val REF = UTF8String.fromString("REF")
  private val PROTOCOL = UTF8String.fromString("PROTOCOL")
  private val FILE = UTF8String.fromString("FILE")
  private val AUTHORITY = UTF8String.fromString("AUTHORITY")
  private val USERINFO = UTF8String.fromString("USERINFO")
  // Regex pieces wrapped around a query-string key to capture its value: "(&|^)key=([^&]*)".
  private val REGEXPREFIX = "(&|^)"
  private val REGEXSUBFIX = "=([^&]*)"
}
/**
 * Extracts a part from a URL
 */
@ExpressionDescription(
usage = "_FUNC_(url, partToExtract[, key]) - Extracts a part from a URL.",
examples = """
Examples:
> SELECT _FUNC_('http://spark.apache.org/path?query=1', 'HOST');
spark.apache.org
> SELECT _FUNC_('http://spark.apache.org/path?query=1', 'QUERY');
query=1
> SELECT _FUNC_('http://spark.apache.org/path?query=1', 'QUERY', 'query');
1
""",
since = "2.0.0")
case class ParseUrl(children: Seq[Expression])
  extends Expression with ExpectsInputTypes with CodegenFallback {

  // Any null argument or unparsable URL produces a null result.
  override def nullable: Boolean = true
  override def inputTypes: Seq[DataType] = Seq.fill(children.size)(StringType)
  override def dataType: DataType = StringType
  override def prettyName: String = "parse_url"

  // If the url is a constant, cache the URL object so that we don't need to convert url
  // from UTF8String to String to URL for every row.
  @transient private lazy val cachedUrl = children(0) match {
    case Literal(url: UTF8String, _) if url ne null => getUrl(url)
    case _ => null
  }

  // If the key is a constant, cache the Pattern object so that we don't need to convert key
  // from UTF8String to String to StringBuilder to String to Pattern for every row.
  @transient private lazy val cachedPattern = children(2) match {
    case Literal(key: UTF8String, _) if key ne null => getPattern(key)
    case _ => null
  }

  // If the partToExtract is a constant, cache the Extract part function so that we don't need
  // to check the partToExtract for every row.
  @transient private lazy val cachedExtractPartFunc = children(1) match {
    case Literal(part: UTF8String, _) => getExtractPartFunc(part)
    case _ => null
  }

  import ParseUrl._

  // Arity is validated here because `children` is a variable-length Seq.
  override def checkInputDataTypes(): TypeCheckResult = {
    if (children.size > 3 || children.size < 2) {
      TypeCheckResult.TypeCheckFailure(s"$prettyName function requires two or three arguments")
    } else {
      super[ExpectsInputTypes].checkInputDataTypes()
    }
  }

  // Builds the "(&|^)key=([^&]*)" pattern used to pull `key`'s value from a query string.
  // NOTE(review): `key` is interpolated unescaped, so regex metacharacters in the key are
  // interpreted as regex — presumably intentional (matches Hive) — confirm.
  private def getPattern(key: UTF8String): Pattern = {
    Pattern.compile(REGEXPREFIX + key.toString + REGEXSUBFIX)
  }

  // Returns null (instead of throwing) for syntactically invalid URLs.
  private def getUrl(url: UTF8String): URI = {
    try {
      new URI(url.toString)
    } catch {
      case e: URISyntaxException => null
    }
  }

  // Maps the requested part name to the corresponding java.net.URI accessor.
  // Unknown part names map to a constant-null function.
  private def getExtractPartFunc(partToExtract: UTF8String): URI => String = {

    // partToExtract match {
    //   case HOST => _.toURL().getHost
    //   case PATH => _.toURL().getPath
    //   case QUERY => _.toURL().getQuery
    //   case REF => _.toURL().getRef
    //   case PROTOCOL => _.toURL().getProtocol
    //   case FILE => _.toURL().getFile
    //   case AUTHORITY => _.toURL().getAuthority
    //   case USERINFO => _.toURL().getUserInfo
    //   case _ => (url: URI) => null
    // }

    partToExtract match {
      case HOST => _.getHost
      case PATH => _.getRawPath
      case QUERY => _.getRawQuery
      case REF => _.getRawFragment
      case PROTOCOL => _.getScheme
      case FILE =>
        // FILE is path plus the query string (when present), mirroring URL.getFile.
        (url: URI) =>
          if (url.getRawQuery ne null) {
            url.getRawPath + "?" + url.getRawQuery
          } else {
            url.getRawPath
          }
      case AUTHORITY => _.getRawAuthority
      case USERINFO => _.getRawUserInfo
      case _ => (url: URI) => null
    }
  }

  // Applies the key pattern to a query string; group(2) is the captured value.
  private def extractValueFromQuery(query: UTF8String, pattern: Pattern): UTF8String = {
    val m = pattern.matcher(query.toString)
    if (m.find()) {
      UTF8String.fromString(m.group(2))
    } else {
      null
    }
  }

  // Uses the cached per-part extractor when partToExtract is a literal.
  private def extractFromUrl(url: URI, partToExtract: UTF8String): UTF8String = {
    if (cachedExtractPartFunc ne null) {
      UTF8String.fromString(cachedExtractPartFunc.apply(url))
    } else {
      UTF8String.fromString(getExtractPartFunc(partToExtract).apply(url))
    }
  }

  // Two-argument path: parse (or reuse the cached) URI and extract the requested part.
  private def parseUrlWithoutKey(url: UTF8String, partToExtract: UTF8String): UTF8String = {
    if (cachedUrl ne null) {
      extractFromUrl(cachedUrl, partToExtract)
    } else {
      val currentUrl = getUrl(url)
      if (currentUrl ne null) {
        extractFromUrl(currentUrl, partToExtract)
      } else {
        null
      }
    }
  }

  override def eval(input: InternalRow): Any = {
    // Any null argument makes the whole expression null.
    val evaluated = children.map{e => e.eval(input).asInstanceOf[UTF8String]}
    if (evaluated.contains(null)) return null
    if (evaluated.size == 2) {
      parseUrlWithoutKey(evaluated(0), evaluated(1))
    } else {
      // 3-arg, i.e. QUERY with key
      assert(evaluated.size == 3)
      // The key form is only meaningful for the QUERY part.
      if (evaluated(1) != QUERY) {
        return null
      }

      val query = parseUrlWithoutKey(evaluated(0), evaluated(1))
      if (query eq null) {
        return null
      }

      if (cachedPattern ne null) {
        extractValueFromQuery(query, cachedPattern)
      } else {
        extractValueFromQuery(query, getPattern(evaluated(2)))
      }
    }
  }
}
/**
* Returns the input formatted according do printf-style format strings
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(strfmt, obj, ...) - Returns a formatted string from printf-style format strings.",
examples = """
Examples:
> SELECT _FUNC_("Hello World %d %s", 100, "days");
Hello World 100 days
""",
since = "1.5.0")
// scalastyle:on line.size.limit
case class FormatString(children: Expression*) extends Expression with ImplicitCastInputTypes {
  require(children.nonEmpty, s"$prettyName() should take at least 1 argument")
  // Foldable only when every argument (format string included) is foldable.
  override def foldable: Boolean = children.forall(_.foldable)
  // The result is null only when the format string itself evaluates to null;
  // null *arguments* are rendered by java.util.Formatter as the text "null".
  override def nullable: Boolean = children(0).nullable
  override def dataType: DataType = StringType
  // First input is the format string; remaining arguments may be of any type.
  override def inputTypes: Seq[AbstractDataType] =
    StringType :: List.fill(children.size - 1)(AnyDataType)
  // Interpreted path: formats with java.util.Formatter under Locale.US so the
  // output does not depend on the JVM's default locale.
  override def eval(input: InternalRow): Any = {
    val pattern = children(0).eval(input)
    if (pattern == null) {
      null
    } else {
      val sb = new StringBuffer()
      val formatter = new java.util.Formatter(sb, Locale.US)
      val arglist = children.tail.map(_.eval(input).asInstanceOf[AnyRef])
      formatter.format(pattern.asInstanceOf[UTF8String].toString, arglist: _*)
      UTF8String.fromString(sb.toString)
    }
  }
  // Codegen path: mirrors eval(). Each argument is evaluated into an Object[]
  // slot (primitives are boxed so per-argument nulls can be represented), then
  // the array is passed to Formatter.format via varargs.
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    val pattern = children.head.genCode(ctx)
    val argListGen = children.tail.map(x => (x.dataType, x.genCode(ctx)))
    val argList = ctx.freshName("argLists")
    val numArgLists = argListGen.length
    val argListCode = argListGen.zipWithIndex.map { case(v, index) =>
      val value =
        if (CodeGenerator.boxedType(v._1) != CodeGenerator.javaType(v._1)) {
          // Java primitives get boxed in order to allow null values.
          s"(${v._2.isNull}) ? (${CodeGenerator.boxedType(v._1)}) null : " +
          s"new ${CodeGenerator.boxedType(v._1)}(${v._2.value})"
        } else {
          s"(${v._2.isNull}) ? null : ${v._2.value}"
        }
      s"""
        ${v._2.code}
        $argList[$index] = $value;
      """
    }
    // Split the per-argument assignments into helper methods if they exceed
    // the JVM method-size limits; the Object[] is passed through explicitly.
    val argListCodes = ctx.splitExpressionsWithCurrentInputs(
      expressions = argListCode,
      funcName = "valueFormatString",
      extraArguments = ("Object[]", argList) :: Nil)
    val form = ctx.freshName("formatter")
    val formatter = classOf[java.util.Formatter].getName
    val sb = ctx.freshName("sb")
    val stringBuffer = classOf[StringBuffer].getName
    ev.copy(code = code"""
      ${pattern.code}
      boolean ${ev.isNull} = ${pattern.isNull};
      ${CodeGenerator.javaType(dataType)} ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
      if (!${ev.isNull}) {
        $stringBuffer $sb = new $stringBuffer();
        $formatter $form = new $formatter($sb, ${classOf[Locale].getName}.US);
        Object[] $argList = new Object[$numArgLists];
        $argListCodes
        $form.format(${pattern.value}.toString(), $argList);
        ${ev.value} = UTF8String.fromString($sb.toString());
      }""")
  }
  // Reports whichever registered alias the user invoked (e.g. printf),
  // falling back to format_string.
  override def prettyName: String = getTagValue(
    FunctionRegistry.FUNC_ALIAS).getOrElse("format_string")
}
/**
* Returns string, with the first letter of each word in uppercase, all other letters in lowercase.
* Words are delimited by whitespace.
*/
@ExpressionDescription(
usage = """
_FUNC_(str) - Returns `str` with the first letter of each word in uppercase.
All other letters are in lowercase. Words are delimited by white space.
""",
examples = """
Examples:
> SELECT _FUNC_('sPark sql');
Spark Sql
""",
since = "1.5.0")
case class InitCap(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
  override def dataType: DataType = StringType
  override def inputTypes: Seq[DataType] = Seq(StringType)

  /**
   * Lower-cases the entire input, then title-cases the first letter of each
   * whitespace-delimited word via UTF8String.toTitleCase.
   */
  override def nullSafeEval(string: Any): Any = {
    // scalastyle:off caselocale
    string.asInstanceOf[UTF8String].toLowerCase.toTitleCase
    // scalastyle:on caselocale
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode =
    defineCodeGen(ctx, ev, input => s"$input.toLowerCase().toTitleCase()")
}
/**
* Returns the string which repeat the given string value n times.
*/
@ExpressionDescription(
usage = "_FUNC_(str, n) - Returns the string which repeats the given string value n times.",
examples = """
Examples:
> SELECT _FUNC_('123', 2);
123123
""",
since = "1.5.0")
case class StringRepeat(str: Expression, times: Expression)
  extends BinaryExpression with ImplicitCastInputTypes {

  override def left: Expression = str
  override def right: Expression = times
  override def dataType: DataType = StringType
  override def inputTypes: Seq[DataType] = Seq(StringType, IntegerType)
  override def prettyName: String = "repeat"

  /** Concatenates the input string with itself `n` times. */
  override def nullSafeEval(string: Any, n: Any): Any =
    string.asInstanceOf[UTF8String].repeat(n.asInstanceOf[Integer])

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode =
    defineCodeGen(ctx, ev, (input, count) => s"($input).repeat($count)")
}
/**
* Returns a string consisting of n spaces.
*/
@ExpressionDescription(
usage = "_FUNC_(n) - Returns a string consisting of `n` spaces.",
examples = """
Examples:
> SELECT concat(_FUNC_(2), '1');
1
""",
since = "1.5.0")
case class StringSpace(child: Expression)
  extends UnaryExpression with ImplicitCastInputTypes {

  override def dataType: DataType = StringType
  override def inputTypes: Seq[DataType] = Seq(IntegerType)
  override def prettyName: String = "space"

  /** Produces a string of `n` spaces; negative counts are clamped to zero. */
  override def nullSafeEval(s: Any): Any = {
    val n = s.asInstanceOf[Int]
    UTF8String.blankString(math.max(n, 0))
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode =
    nullSafeCodeGen(ctx, ev, len =>
      s"""${ev.value} = UTF8String.blankString(($len < 0) ? 0 : $len);""")
}
/**
* A function that takes a substring of its first argument starting at a given position.
* Defined for String and Binary types.
*
* NOTE: that this is not zero based, but 1-based index. The first character in str has index 1.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(str, pos[, len]) - Returns the substring of `str` that starts at `pos` and is of length `len`, or the slice of byte array that starts at `pos` and is of length `len`.",
examples = """
Examples:
> SELECT _FUNC_('Spark SQL', 5);
k SQL
> SELECT _FUNC_('Spark SQL', -3);
SQL
> SELECT _FUNC_('Spark SQL', 5, 1);
k
""",
since = "1.5.0")
// scalastyle:on line.size.limit
case class Substring(str: Expression, pos: Expression, len: Expression)
  extends TernaryExpression with ImplicitCastInputTypes with NullIntolerant {

  // Two-argument form: take everything from `pos` to the end.
  def this(str: Expression, pos: Expression) = this(str, pos, Literal(Integer.MAX_VALUE))

  // Result type follows the input: STRING in, STRING out; BINARY in, BINARY out.
  override def dataType: DataType = str.dataType
  override def inputTypes: Seq[AbstractDataType] =
    Seq(TypeCollection(StringType, BinaryType), IntegerType, IntegerType)
  override def children: Seq[Expression] = str :: pos :: len :: Nil

  /** SQL substring semantics (1-based, negative pos counts from the end). */
  override def nullSafeEval(string: Any, pos: Any, len: Any): Any = str.dataType match {
    case StringType =>
      string.asInstanceOf[UTF8String].substringSQL(pos.asInstanceOf[Int], len.asInstanceOf[Int])
    case BinaryType =>
      ByteArray.subStringSQL(
        string.asInstanceOf[Array[Byte]], pos.asInstanceOf[Int], len.asInstanceOf[Int])
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode =
    defineCodeGen(ctx, ev, (string, pos, len) => str.dataType match {
      case StringType => s"$string.substringSQL($pos, $len)"
      case BinaryType => s"${classOf[ByteArray].getName}.subStringSQL($string, $pos, $len)"
    })
}
/**
* Returns the rightmost n characters from the string.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(str, len) - Returns the rightmost `len`(`len` can be string type) characters from the string `str`,if `len` is less or equal than 0 the result is an empty string.",
examples = """
Examples:
> SELECT _FUNC_('Spark SQL', 3);
SQL
""",
since = "2.3.0")
// scalastyle:on line.size.limit
case class Right(str: Expression, len: Expression, child: Expression) extends RuntimeReplaceable {
  // Rewrites right(str, len) as:
  //   if str is null   -> null
  //   else if len <= 0 -> ''
  //   else             -> substring(str, -len)  (negative start counts from the end)
  def this(str: Expression, len: Expression) = {
    this(str, len, If(IsNull(str), Literal(null, StringType), If(LessThanOrEqual(len, Literal(0)),
      Literal(UTF8String.EMPTY_UTF8, StringType), new Substring(str, UnaryMinus(len)))))
  }
  // NOTE(review): presumably excludes the replacement tree (child) from
  // argument-based comparison — confirm against RuntimeReplaceable's contract.
  override def flatArguments: Iterator[Any] = Iterator(str, len)
  // SQL text shows only the user-supplied arguments, not the rewrite.
  override def sql: String = s"$prettyName(${str.sql}, ${len.sql})"
}
/**
* Returns the leftmost n characters from the string.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(str, len) - Returns the leftmost `len`(`len` can be string type) characters from the string `str`,if `len` is less or equal than 0 the result is an empty string.",
examples = """
Examples:
> SELECT _FUNC_('Spark SQL', 3);
Spa
""",
since = "2.3.0")
// scalastyle:on line.size.limit
case class Left(str: Expression, len: Expression, child: Expression) extends RuntimeReplaceable {
  // Rewrites left(str, len) as substring(str, 1, len) (1-based start).
  def this(str: Expression, len: Expression) = {
    this(str, len, Substring(str, Literal(1), len))
  }
  // NOTE(review): presumably excludes the replacement tree (child) from
  // argument-based comparison — confirm against RuntimeReplaceable's contract.
  override def flatArguments: Iterator[Any] = Iterator(str, len)
  // SQL text shows only the user-supplied arguments, not the rewrite.
  override def sql: String = s"$prettyName(${str.sql}, ${len.sql})"
}
/**
* A function that returns the char length of the given string expression or
* number of bytes of the given binary expression.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(expr) - Returns the character length of string data or number of bytes of binary data. The length of string data includes the trailing spaces. The length of binary data includes binary zeros.",
examples = """
Examples:
> SELECT _FUNC_('Spark SQL ');
10
> SELECT CHAR_LENGTH('Spark SQL ');
10
> SELECT CHARACTER_LENGTH('Spark SQL ');
10
""",
since = "1.5.0")
// scalastyle:on line.size.limit
case class Length(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
  override def dataType: DataType = IntegerType
  override def inputTypes: Seq[AbstractDataType] = Seq(TypeCollection(StringType, BinaryType))

  // Character count for strings, byte count for binary values.
  protected override def nullSafeEval(value: Any): Any = child.dataType match {
    case StringType => value.asInstanceOf[UTF8String].numChars
    case BinaryType => value.asInstanceOf[Array[Byte]].length
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = child.dataType match {
    case StringType => defineCodeGen(ctx, ev, operand => s"($operand).numChars()")
    case BinaryType => defineCodeGen(ctx, ev, operand => s"($operand).length")
  }
}
/**
* A function that returns the bit length of the given string or binary expression.
*/
@ExpressionDescription(
usage = "_FUNC_(expr) - Returns the bit length of string data or number of bits of binary data.",
examples = """
Examples:
> SELECT _FUNC_('Spark SQL');
72
""",
since = "2.3.0")
case class BitLength(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
  override def dataType: DataType = IntegerType
  override def inputTypes: Seq[AbstractDataType] = Seq(TypeCollection(StringType, BinaryType))
  override def prettyName: String = "bit_length"

  // Byte length (of the UTF-8 encoding for strings) times eight.
  protected override def nullSafeEval(value: Any): Any = child.dataType match {
    case StringType => value.asInstanceOf[UTF8String].numBytes * 8
    case BinaryType => value.asInstanceOf[Array[Byte]].length * 8
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = child.dataType match {
    case StringType => defineCodeGen(ctx, ev, operand => s"($operand).numBytes() * 8")
    case BinaryType => defineCodeGen(ctx, ev, operand => s"($operand).length * 8")
  }
}
/**
* A function that returns the byte length of the given string or binary expression.
*/
@ExpressionDescription(
usage = "_FUNC_(expr) - Returns the byte length of string data or number of bytes of binary " +
"data.",
examples = """
Examples:
> SELECT _FUNC_('Spark SQL');
9
""",
since = "2.3.0")
case class OctetLength(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
  override def dataType: DataType = IntegerType
  override def inputTypes: Seq[AbstractDataType] = Seq(TypeCollection(StringType, BinaryType))
  override def prettyName: String = "octet_length"

  // Byte length of the UTF-8 encoding for strings, array length for binary.
  protected override def nullSafeEval(value: Any): Any = child.dataType match {
    case StringType => value.asInstanceOf[UTF8String].numBytes
    case BinaryType => value.asInstanceOf[Array[Byte]].length
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = child.dataType match {
    case StringType => defineCodeGen(ctx, ev, operand => s"($operand).numBytes()")
    case BinaryType => defineCodeGen(ctx, ev, operand => s"($operand).length")
  }
}
/**
* A function that return the Levenshtein distance between the two given strings.
*/
@ExpressionDescription(
usage = "_FUNC_(str1, str2) - Returns the Levenshtein distance between the two given strings.",
examples = """
Examples:
> SELECT _FUNC_('kitten', 'sitting');
3
""",
since = "1.5.0")
case class Levenshtein(left: Expression, right: Expression) extends BinaryExpression
  with ImplicitCastInputTypes {

  override def inputTypes: Seq[AbstractDataType] = Seq(StringType, StringType)
  override def dataType: DataType = IntegerType

  /** Edit distance in characters, delegated to UTF8String.levenshteinDistance. */
  protected override def nullSafeEval(leftValue: Any, rightValue: Any): Any =
    leftValue.asInstanceOf[UTF8String].levenshteinDistance(rightValue.asInstanceOf[UTF8String])

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode =
    nullSafeCodeGen(ctx, ev, (lhs, rhs) =>
      s"${ev.value} = $lhs.levenshteinDistance($rhs);")
}
/**
* A function that return Soundex code of the given string expression.
*/
@ExpressionDescription(
usage = "_FUNC_(str) - Returns Soundex code of the string.",
examples = """
Examples:
> SELECT _FUNC_('Miller');
M460
""",
since = "1.5.0")
case class SoundEx(child: Expression) extends UnaryExpression with ExpectsInputTypes {
  override def dataType: DataType = StringType
  override def inputTypes: Seq[DataType] = Seq(StringType)

  /** Delegates the Soundex encoding to UTF8String.soundex(). */
  override def nullSafeEval(input: Any): Any = input.asInstanceOf[UTF8String].soundex()

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode =
    defineCodeGen(ctx, ev, operand => s"$operand.soundex()")
}
/**
* Returns the numeric value of the first character of str.
*/
@ExpressionDescription(
usage = "_FUNC_(str) - Returns the numeric value of the first character of `str`.",
examples = """
Examples:
> SELECT _FUNC_('222');
50
> SELECT _FUNC_(2);
50
""",
since = "1.5.0")
case class Ascii(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
  override def dataType: DataType = IntegerType
  override def inputTypes: Seq[DataType] = Seq(StringType)

  /** Numeric value of the first byte of the string; 0 for the empty string. */
  protected override def nullSafeEval(string: Any): Any = {
    val bytes = string.asInstanceOf[UTF8String].getBytes
    if (bytes.isEmpty) 0 else bytes(0).toInt
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    nullSafeCodeGen(ctx, ev, input => {
      val bytes = ctx.freshName("bytes")
      s"""
        byte[] $bytes = $input.getBytes();
        if ($bytes.length > 0) {
          ${ev.value} = (int) $bytes[0];
        } else {
          ${ev.value} = 0;
        }
      """})
  }
}
/**
* Returns the ASCII character having the binary equivalent to n.
* If n is larger than 256 the result is equivalent to chr(n % 256)
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(expr) - Returns the ASCII character having the binary equivalent to `expr`. If n is larger than 256 the result is equivalent to chr(n % 256)",
examples = """
Examples:
> SELECT _FUNC_(65);
A
""",
since = "2.3.0")
// scalastyle:on line.size.limit
case class Chr(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
  override def dataType: DataType = StringType
  override def inputTypes: Seq[DataType] = Seq(LongType)

  /**
   * Only the low byte of the input matters: a negative input maps to the empty
   * string, a low byte of 0 maps to the NUL character ('\u0000'), anything
   * else maps to the corresponding single character.
   */
  protected override def nullSafeEval(lon: Any): Any = {
    val n = lon.asInstanceOf[Long]
    if (n < 0) {
      UTF8String.EMPTY_UTF8
    } else {
      val lowByte = n & 0xFF
      if (lowByte == 0) {
        UTF8String.fromString(Character.MIN_VALUE.toString)
      } else {
        UTF8String.fromString(lowByte.toChar.toString)
      }
    }
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode =
    nullSafeCodeGen(ctx, ev, lon =>
      s"""
        if ($lon < 0) {
          ${ev.value} = UTF8String.EMPTY_UTF8;
        } else if (($lon & 0xFF) == 0) {
          ${ev.value} = UTF8String.fromString(String.valueOf(Character.MIN_VALUE));
        } else {
          char c = (char)($lon & 0xFF);
          ${ev.value} = UTF8String.fromString(String.valueOf(c));
        }
      """)
}
/**
* Converts the argument from binary to a base 64 string.
*/
@ExpressionDescription(
usage = "_FUNC_(bin) - Converts the argument from a binary `bin` to a base 64 string.",
examples = """
Examples:
> SELECT _FUNC_('Spark SQL');
U3BhcmsgU1FM
""",
since = "1.5.0")
case class Base64(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
  override def dataType: DataType = StringType
  override def inputTypes: Seq[DataType] = Seq(BinaryType)

  /** Base64-encodes the binary input via Commons Codec. */
  protected override def nullSafeEval(bytes: Any): Any =
    UTF8String.fromBytes(CommonsBase64.encodeBase64(bytes.asInstanceOf[Array[Byte]]))

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode =
    nullSafeCodeGen(ctx, ev, input =>
      s"""${ev.value} = UTF8String.fromBytes(
            ${classOf[CommonsBase64].getName}.encodeBase64($input));
       """)
}
/**
* Converts the argument from a base 64 string to BINARY.
*/
@ExpressionDescription(
usage = "_FUNC_(str) - Converts the argument from a base 64 string `str` to a binary.",
examples = """
Examples:
> SELECT _FUNC_('U3BhcmsgU1FM');
Spark SQL
""",
since = "1.5.0")
case class UnBase64(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
  override def dataType: DataType = BinaryType
  override def inputTypes: Seq[DataType] = Seq(StringType)

  /** Base64-decodes the string input via Commons Codec. */
  protected override def nullSafeEval(string: Any): Any =
    CommonsBase64.decodeBase64(string.asInstanceOf[UTF8String].toString)

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode =
    nullSafeCodeGen(ctx, ev, input =>
      s"""
        ${ev.value} = ${classOf[CommonsBase64].getName}.decodeBase64($input.toString());
      """)
}
/**
* Decodes the first argument into a String using the provided character set
* (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
* If either argument is null, the result will also be null.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(bin, charset) - Decodes the first argument using the second argument character set.",
examples = """
Examples:
> SELECT _FUNC_(encode('abc', 'utf-8'), 'utf-8');
abc
""",
since = "1.5.0")
// scalastyle:on line.size.limit
case class Decode(bin: Expression, charset: Expression)
  extends BinaryExpression with ImplicitCastInputTypes {

  override def left: Expression = bin
  override def right: Expression = charset
  override def dataType: DataType = StringType
  override def inputTypes: Seq[DataType] = Seq(BinaryType, StringType)

  /** Decodes the byte array using the charset named by the second argument. */
  protected override def nullSafeEval(input1: Any, input2: Any): Any = {
    val charsetName = input2.asInstanceOf[UTF8String].toString
    UTF8String.fromString(new String(input1.asInstanceOf[Array[Byte]], charsetName))
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode =
    nullSafeCodeGen(ctx, ev, (bytes, charset) =>
      s"""
        try {
          ${ev.value} = UTF8String.fromString(new String($bytes, $charset.toString()));
        } catch (java.io.UnsupportedEncodingException e) {
          org.apache.spark.unsafe.Platform.throwException(e);
        }
      """)
}
/**
* Encodes the first argument into a BINARY using the provided character set
* (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
* If either argument is null, the result will also be null.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(str, charset) - Encodes the first argument using the second argument character set.",
examples = """
Examples:
> SELECT _FUNC_('abc', 'utf-8');
abc
""",
since = "1.5.0")
// scalastyle:on line.size.limit
case class Encode(value: Expression, charset: Expression)
  extends BinaryExpression with ImplicitCastInputTypes {

  override def left: Expression = value
  override def right: Expression = charset
  override def dataType: DataType = BinaryType
  override def inputTypes: Seq[DataType] = Seq(StringType, StringType)

  /** Encodes the string using the charset named by the second argument. */
  protected override def nullSafeEval(input1: Any, input2: Any): Any = {
    val charsetName = input2.asInstanceOf[UTF8String].toString
    input1.asInstanceOf[UTF8String].toString.getBytes(charsetName)
  }

  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode =
    nullSafeCodeGen(ctx, ev, (string, charset) =>
      s"""
        try {
          ${ev.value} = $string.toString().getBytes($charset.toString());
        } catch (java.io.UnsupportedEncodingException e) {
          org.apache.spark.unsafe.Platform.throwException(e);
        }""")
}
/**
* Formats the number X to a format like '#,###,###.##', rounded to D decimal places,
* and returns the result as a string. If D is 0, the result has no decimal point or
* fractional part.
*/
@ExpressionDescription(
usage = """
_FUNC_(expr1, expr2) - Formats the number `expr1` like '#,###,###.##', rounded to `expr2`
decimal places. If `expr2` is 0, the result has no decimal point or fractional part.
`expr2` also accept a user specified format.
This is supposed to function like MySQL's FORMAT.
""",
examples = """
Examples:
> SELECT _FUNC_(12332.123456, 4);
12,332.1235
> SELECT _FUNC_(12332.123456, '##################.###');
12332.123
""",
since = "1.5.0")
case class FormatNumber(x: Expression, d: Expression)
  extends BinaryExpression with ExpectsInputTypes {
  override def left: Expression = x
  override def right: Expression = d
  override def dataType: DataType = StringType
  override def nullable: Boolean = true
  // `d` is either a decimal-place count (Int) or an explicit DecimalFormat
  // pattern (String).
  override def inputTypes: Seq[AbstractDataType] =
    Seq(NumericType, TypeCollection(IntegerType, StringType))
  private val defaultFormat = "#,###,###,###,###,###,##0"
  // Associated with the pattern, for the last d value, and we will update the
  // pattern (DecimalFormat) once the new coming d value differ with the last one.
  // This is an Option to distinguish between 0 (numberFormat is valid) and uninitialized after
  // serialization (numberFormat has not been updated for dValue = 0).
  @transient
  private var lastDIntValue: Option[Int] = None
  @transient
  private var lastDStringValue: Option[String] = None
  // A cached DecimalFormat, for performance concern, we will change it
  // only if the d value changed.
  @transient
  private lazy val pattern: StringBuffer = new StringBuffer()
  // SPARK-13515: US Locale configures the DecimalFormat object to use a dot ('.')
  // as a decimal separator.
  @transient
  private lazy val numberFormat = new DecimalFormat("", new DecimalFormatSymbols(Locale.US))
  // Interpreted path: rebuild/reapply the DecimalFormat pattern only when `d`
  // changed since the previous row, then format the numeric operand.
  override protected def nullSafeEval(xObject: Any, dObject: Any): Any = {
    right.dataType match {
      case IntegerType =>
        val dValue = dObject.asInstanceOf[Int]
        // A negative decimal-place count yields null.
        if (dValue < 0) {
          return null
        }
        lastDIntValue match {
          case Some(last) if last == dValue =>
            // use the current pattern
          case _ =>
            // construct a new DecimalFormat only if a new dValue
            pattern.delete(0, pattern.length)
            pattern.append(defaultFormat)
            // decimal place
            if (dValue > 0) {
              pattern.append(".")
              var i = 0
              while (i < dValue) {
                i += 1
                pattern.append("0")
              }
            }
            lastDIntValue = Some(dValue)
            numberFormat.applyLocalizedPattern(pattern.toString)
        }
      case StringType =>
        // Explicit pattern string; the empty string means the default format.
        val dValue = dObject.asInstanceOf[UTF8String].toString
        lastDStringValue match {
          case Some(last) if last == dValue =>
          case _ =>
            pattern.delete(0, pattern.length)
            lastDStringValue = Some(dValue)
            if (dValue.isEmpty) {
              numberFormat.applyLocalizedPattern(defaultFormat)
            } else {
              numberFormat.applyLocalizedPattern(dValue)
            }
        }
    }
    // Format the numeric operand with the (cached) DecimalFormat.
    x.dataType match {
      case ByteType => UTF8String.fromString(numberFormat.format(xObject.asInstanceOf[Byte]))
      case ShortType => UTF8String.fromString(numberFormat.format(xObject.asInstanceOf[Short]))
      case FloatType => UTF8String.fromString(numberFormat.format(xObject.asInstanceOf[Float]))
      case IntegerType => UTF8String.fromString(numberFormat.format(xObject.asInstanceOf[Int]))
      case LongType => UTF8String.fromString(numberFormat.format(xObject.asInstanceOf[Long]))
      case DoubleType => UTF8String.fromString(numberFormat.format(xObject.asInstanceOf[Double]))
      case _: DecimalType =>
        UTF8String.fromString(numberFormat.format(xObject.asInstanceOf[Decimal].toJavaBigDecimal))
    }
  }
  // Codegen path: same caching strategy, using generated mutable state for the
  // formatter, the pattern buffer and the last seen `d` value.
  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    nullSafeCodeGen(ctx, ev, (num, d) => {
      // Decimals must be unwrapped to java.math.BigDecimal before formatting.
      def typeHelper(p: String): String = {
        x.dataType match {
          case _ : DecimalType => s"""$p.toJavaBigDecimal()"""
          case _ => s"$p"
        }
      }
      val sb = classOf[StringBuffer].getName
      val df = classOf[DecimalFormat].getName
      val dfs = classOf[DecimalFormatSymbols].getName
      val l = classOf[Locale].getName
      // SPARK-13515: US Locale configures the DecimalFormat object to use a dot ('.')
      // as a decimal separator.
      val usLocale = "US"
      val numberFormat = ctx.addMutableState(df, "numberFormat",
        v => s"""$v = new $df("", new $dfs($l.$usLocale));""")
      right.dataType match {
        case IntegerType =>
          val pattern = ctx.addMutableState(sb, "pattern", v => s"$v = new $sb();")
          val i = ctx.freshName("i")
          val lastDValue =
            ctx.addMutableState(CodeGenerator.JAVA_INT, "lastDValue", v => s"$v = -100;")
          // NOTE(review): the delete() below runs on every row even when $d is
          // unchanged; harmless (the applied pattern lives in $numberFormat)
          // but redundant work.
          s"""
            if ($d >= 0) {
              $pattern.delete(0, $pattern.length());
              if ($d != $lastDValue) {
                $pattern.append("$defaultFormat");
                if ($d > 0) {
                  $pattern.append(".");
                  for (int $i = 0; $i < $d; $i++) {
                    $pattern.append("0");
                  }
                }
                $lastDValue = $d;
                $numberFormat.applyLocalizedPattern($pattern.toString());
              }
              ${ev.value} = UTF8String.fromString($numberFormat.format(${typeHelper(num)}));
            } else {
              ${ev.value} = null;
              ${ev.isNull} = true;
            }
          """
        case StringType =>
          val lastDValue = ctx.addMutableState("String", "lastDValue", v => s"""$v = null;""")
          val dValue = ctx.freshName("dValue")
          s"""
            String $dValue = $d.toString();
            if (!$dValue.equals($lastDValue)) {
              $lastDValue = $dValue;
              if ($dValue.isEmpty()) {
                $numberFormat.applyLocalizedPattern("$defaultFormat");
              } else {
                $numberFormat.applyLocalizedPattern($dValue);
              }
            }
            ${ev.value} = UTF8String.fromString($numberFormat.format(${typeHelper(num)}));
          """
      }
    })
  }
  override def prettyName: String = "format_number"
}
/**
* Splits a string into arrays of sentences, where each sentence is an array of words.
* The 'lang' and 'country' arguments are optional, and if omitted, the default locale is used.
*/
@ExpressionDescription(
usage = "_FUNC_(str[, lang, country]) - Splits `str` into an array of array of words.",
examples = """
Examples:
> SELECT _FUNC_('Hi there! Good morning.');
[["Hi","there"],["Good","morning"]]
""",
since = "2.0.0")
case class Sentences(
    str: Expression,
    language: Expression = Literal(""),
    country: Expression = Literal(""))
  extends Expression with ImplicitCastInputTypes with CodegenFallback {
  def this(str: Expression) = this(str, Literal(""), Literal(""))
  def this(str: Expression, language: Expression) = this(str, language, Literal(""))
  override def nullable: Boolean = true
  override def dataType: DataType =
    ArrayType(ArrayType(StringType, containsNull = false), containsNull = false)
  override def inputTypes: Seq[AbstractDataType] = Seq(StringType, StringType, StringType)
  override def children: Seq[Expression] = str :: language :: country :: Nil
  // Null text yields null. NOTE(review): a null language or country falls back
  // to Locale.US here, not the JVM default locale as the scaladoc above claims.
  override def eval(input: InternalRow): Any = {
    val string = str.eval(input)
    if (string == null) {
      null
    } else {
      val languageStr = language.eval(input).asInstanceOf[UTF8String]
      val countryStr = country.eval(input).asInstanceOf[UTF8String]
      val locale = if (languageStr != null && countryStr != null) {
        new Locale(languageStr.toString, countryStr.toString)
      } else {
        Locale.US
      }
      getSentences(string.asInstanceOf[UTF8String].toString, locale)
    }
  }
  // Splits the text into sentences with a locale-aware BreakIterator, then
  // splits each sentence into words; tokens whose first character is not a
  // letter or digit (i.e. punctuation/whitespace) are dropped.
  private def getSentences(sentences: String, locale: Locale) = {
    val bi = BreakIterator.getSentenceInstance(locale)
    bi.setText(sentences)
    var idx = 0
    val result = new ArrayBuffer[GenericArrayData]
    while (bi.next != BreakIterator.DONE) {
      // [idx, bi.current) is the current sentence.
      val sentence = sentences.substring(idx, bi.current)
      idx = bi.current
      val wi = BreakIterator.getWordInstance(locale)
      var widx = 0
      wi.setText(sentence)
      val words = new ArrayBuffer[UTF8String]
      while (wi.next != BreakIterator.DONE) {
        // [widx, wi.current) is the current word token.
        val word = sentence.substring(widx, wi.current)
        widx = wi.current
        if (Character.isLetterOrDigit(word.charAt(0))) words += UTF8String.fromString(word)
      }
      result += new GenericArrayData(words)
    }
    new GenericArrayData(result)
  }
}
| matthewfranglen/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/stringExpressions.scala | Scala | mit | 81,426 |
/*
* Multithreaded relay in Scala
*
* @author Giovanni Ruggiero
* @email giovanni.ruggiero@gmail.com
*/
import org.zeromq.ZMQ
object mtRelay {
  def main(args : Array[String]) {
    val context = ZMQ.context(1)
    // Bind to inproc: endpoint, then start upstream thread
    val receiver = context.socket(ZMQ.PAIR)
    receiver.bind("inproc://step3")
    // Step 2 relays the signal to step 3
    // NOTE(review): `new Thread { ... }` here puts all of this code in the
    // anonymous subclass *constructor*, and start() is never called — so
    // despite the "multithreaded" title everything below runs sequentially on
    // the main thread. Confirm whether real threads (override run() + start())
    // were intended, as in the original zguide example.
    new Thread {
      // val context1 = ZMQ.context(1)
      try {
        // Bind to inproc: endpoint, then start upstream thread
        // println(context1)
      } catch {
        case e => e.printStackTrace()
      }
      // val receiver = context.socket(ZMQ.PAIR)
      // NOTE(review): the outer `receiver` socket ends up bound to BOTH
      // inproc://step3 and inproc://step2 (the commented-out per-step socket
      // above suggests an unfinished refactor).
      receiver.bind("inproc://step2")
      // NOTE(review): same issue — never started; runs inline.
      new Thread{
        // Signal downstream to step 2
        val sender = context.socket(ZMQ.PAIR)
        sender.connect("inproc://step2")
        sender.send("".getBytes(),0)
      }
      // Wait for signal
      val message=receiver.recv(0)
      Thread.sleep (1000)
      // Signal downstream to step 3
      val sender = context.socket(ZMQ.PAIR)
      sender.connect("inproc://step3")
      sender.send(message,0)
    }
    // Wait for signal
    val message = receiver.recv(0)
    println ("Test successful!")
  }
}
| soscpd/bee | root/tests/zguide/examples/Scala/MultiThreadedRelay.scala | Scala | mit | 1,155 |
package io.sqooba.oss.timeseries.immutable
import io.sqooba.oss.timeseries.TimeSeries
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpec
class LooseDomainSpec extends AnyFlatSpec with Matchers with BeforeAndAfterEach with BeforeAndAfterAll {
  // Asserts that `looseDomain` is exactly the half-open interval [start, until):
  // both inner bounds are contained and both outer neighbours are excluded.
  private def assertDomain(looseDomain: TimeDomain, start: Long, until: Long) = {
    // Check internal bounds
    assert(looseDomain contains start)
    assert(looseDomain.contains(until - 1))
    // Check external bounds
    assert(!looseDomain.contains(start - 1))
    assert(!looseDomain.contains(until))
  }
  private val (start, end, validity) = (0, 10, 3)
  // Entries at t = 0, 2, 9 and 10; the last entry is valid for `validity`, so
  // the expected loose domain is [start, end + validity) = [0, 13).
  private val niceAndLongTimeSeries: TimeSeries[None.type] = {
    val smallestEntry = TSEntry(start, None, validity)
    val biggestEntry = TSEntry(end, None, validity)
    val builder = TimeSeries.newBuilder[None.type]()
    builder += smallestEntry
    builder += TSEntry(2, None, 1)
    builder += TSEntry(9, None, 2)
    builder += biggestEntry
    builder.result()
  }
  // One series per TimeSeries implementation: multi-entry, empty, single entry.
  private val timeseriesSeq: Seq[TimeSeries[None.type]] = List[TimeSeries[None.type]](
    niceAndLongTimeSeries,
    EmptyTimeSeries,
    TSEntry(3, None, 2)
  )
  "The loose domain" should "be empty for empty TimeSeries" in {
    EmptyTimeSeries.looseDomain should equal(EmptyTimeDomain)
  }
  it should "be trivial for single entry" in {
    val entry = TSEntry[Any](10, None, 2)
    val looseDomain = entry.looseDomain
    val untilDomain = entry.timestamp + entry.validity
    assertDomain(looseDomain, entry.timestamp, untilDomain)
  }
  it should "contains the bounds of a multi-value TimeSeries" in {
    val ts = niceAndLongTimeSeries
    val looseDomain = ts.looseDomain
    val untilDomain = end + validity
    assertDomain(looseDomain, start, untilDomain)
  }
  "The union of loose domains" should "work with any subtype of TimeSeries" in {
    val looseDomain = TimeSeries.unionLooseDomains(timeseriesSeq)
    assertDomain(looseDomain, start, end + validity)
  }
  it should "be empty if an empty Seq is given" in {
    TimeSeries.unionLooseDomains(Seq.empty) should equal(EmptyTimeDomain)
  }
  it should "be empty if a Seq of empty is given" in {
    TimeSeries.unionLooseDomains(List(EmptyTimeSeries, EmptyTimeSeries)) should equal(EmptyTimeDomain)
  }
  "The intersection of loose domains" should "be empty if there is at least one EmptyTimeSeries" in {
    val looseDomainOpt = TimeSeries.intersectLooseDomains(timeseriesSeq)
    looseDomainOpt should equal(EmptyTimeDomain)
  }
  it should "consider the smaller case if there is no empty time series" in {
    // Intersection of [0, 13) and [3, 5) is [3, 5).
    val tss = timeseriesSeq filter (_.nonEmpty)
    val looseDomain = TimeSeries.intersectLooseDomains(tss)
    assertDomain(looseDomain, 3, 5)
  }
  it should "be empty if the Seq is empty" in {
    TimeSeries.intersectLooseDomains(Seq.empty) should equal(EmptyTimeDomain)
  }
  it should "be empty if the two loose domains are not overlapping" in {
    val xs = List(
      TSEntry(0, None, 3),
      TSEntry(10, None, 1)
    )
    TimeSeries.intersectLooseDomains(xs) should equal(EmptyTimeDomain)
  }
}
| Shastick/scala-timeseries-lib | src/test/scala/io/sqooba/oss/timeseries/immutable/LooseDomainSpec.scala | Scala | apache-2.0 | 3,191 |
/*
* Copyright 2015-2016 IBM Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.utils
import scala.concurrent.duration.Duration
import scala.concurrent.duration.DurationInt
import scala.util.Failure
import scala.util.Success
import scala.util.Try
import scala.language.postfixOps
object retry {

  /**
   * Retry a method which returns a value or throws an exception on failure, up to N times,
   * and optionally sleeping up to specified duration between retries.
   *
   * @param fn the method to retry, fn is expected to throw an exception if it fails, else should return a value of type T
   * @param N the maximum number of times to apply fn, must be >= 1
   * @param waitBeforeRetry an option specifying duration to wait before retrying method, will not wait if none given
   * @return the result of fn iff it is successful
   * @throws exception from fn (or an illegal argument exception if N is < 1)
   */
  def apply[T](fn: => T, N: Int = 3, waitBeforeRetry: Option[Duration] = Some(1 millisecond)): T = {
    // Fix: message now matches the actual bound (N >= 1, i.e. "at least 1").
    require(N >= 1, "maximum number of fn applications must be at least 1")
    Try { fn } match {
      case Success(r) => r
      case Failure(_) if N > 1 =>
        // Fix: sleep exactly once between attempts. Previously the function also
        // slept before the first attempt and twice between retries (once here and
        // once at the top of the recursive call), contradicting the scaladoc.
        waitBeforeRetry foreach { t => Thread.sleep(t.toMillis) }
        retry(fn, N - 1, waitBeforeRetry)
      case Failure(t) => throw t
    }
  }
}
| CrowdFlower/incubator-openwhisk | common/scala/src/main/scala/whisk/utils/Retry.scala | Scala | apache-2.0 | 2,010 |
package com.softwaremill.react.kafka.commit.native
import akka.actor.ActorSystem
import akka.testkit.TestKit
import com.softwaremill.react.kafka.KafkaTest
import com.softwaremill.react.kafka.commit.OffsetMap
import kafka.api._
import kafka.common.{ErrorMapping, OffsetMetadataAndError, TopicAndPartition}
import kafka.consumer.KafkaConsumer
import kafka.network.BlockingChannel
import org.mockito.BDDMockito._
import org.mockito.Matchers.{any, eq => meq}
import org.scalatest.mock.MockitoSugar
import org.scalatest.{BeforeAndAfterEach, Matchers, fixture}
import scala.language.existentials
import scala.util.{Failure, Success, Try}
/**
 * Tests for `NativeCommitter`: verifies retry behaviour of commit and fetch
 * requests, re-resolution of the offset-manager channel, and error propagation.
 * Each test gets a fresh [[CommitterFixture]] whose `sendCommit`/`sendFetch`
 * functions are stubbed to simulate broker responses.
 */
class NativeCommitterSpec extends TestKit(ActorSystem("NativeCommitterSpec"))
  with fixture.FlatSpecLike with Matchers with KafkaTest with MockitoSugar with BeforeAndAfterEach {

  behavior of "Native committer"

  val offsetMap = OffsetMap(Map(TopicAndPartition("topic", 0) -> 1L))

  // sendCommit/sendFetch are vars so individual tests can stub broker responses.
  case class CommitterFixture(
    consumer: KafkaConsumer[_],
    offsetManagerResolver: OffsetManagerResolver,
    initialChannel: BlockingChannel,
    var sendCommit: (BlockingChannel, OffsetCommitRequest) => Try[OffsetCommitResponse] = KafkaSendCommit,
    var sendFetch: (BlockingChannel, OffsetFetchRequest) => Try[OffsetFetchResponse] = KafkaSendFetch
  )

  type FixtureParam = CommitterFixture

  def withFixture(test: OneArgTest) = {
    val kafka = newKafka()
    val properties = consumerProperties(FixtureParam("topic", "groupId", kafka))
    val consumer = new KafkaConsumer(properties)
    val managerResolver = mock[OffsetManagerResolver]
    val initialChannel = mock[BlockingChannel]
    val theFixture = CommitterFixture(consumer, managerResolver, initialChannel)
    withFixture(test.toNoArgTest(theFixture))
  }

  // Builds the committer under test from explicit collaborators.
  private def newCommitter(
    kafkaConsumer: KafkaConsumer[_],
    offsetManagerResolver: OffsetManagerResolver,
    channel: BlockingChannel,
    sendCommit: (BlockingChannel, OffsetCommitRequest) => Try[OffsetCommitResponse],
    sendFetch: (BlockingChannel, OffsetFetchRequest) => Try[OffsetFetchResponse]
  ): NativeCommitter = {
    new NativeCommitter(kafkaConsumer, offsetManagerResolver, channel, sendCommit, sendFetch)
  }

  // Convenience overload reading all collaborators from the fixture.
  private def newCommitter(f: CommitterFixture): NativeCommitter =
    newCommitter(f.consumer, f.offsetManagerResolver, f.initialChannel, f.sendCommit, f.sendFetch)

  it should "Fail on exception when setting offsets" in { f =>
    // given
    val exception = new IllegalStateException("fatal!")
    given(f.initialChannel.send(any[RequestOrResponse])).willThrow(exception)
    val committer = newCommitter(f)

    // when
    val result: Try[OffsetMap] = committer.commit(OffsetMap())

    // then
    result.isFailure should equal(true)
    result.failed.get.getCause should equal(exception)
  }

  it should "Fail when trying to switch to new manager on commit" in { implicit f =>
    // given
    val channelResolvingException = new IllegalStateException("fatal!")
    givenResponseWithErrorCode(ErrorMapping.NotCoordinatorForConsumerCode)
    given(f.offsetManagerResolver.resolve(meq(f.consumer), any[Int], any[Option[Throwable]]))
      .willReturn(Failure(channelResolvingException))
    val committer = newCommitter(f)

    // when
    val result: Try[OffsetMap] = committer.commit(OffsetMap())

    // then
    result.isFailure should equal(true)
    result.failed.get.getCause should equal(channelResolvingException)
  }

  it should "Eventually succeed when a new manager gets found" in { implicit f =>
    // given
    val channelResolvingException = new IllegalStateException("fatal!")
    givenResponseWithErrorCode(ErrorMapping.NotCoordinatorForConsumerCode, howManyTimes = 1)
    given(f.offsetManagerResolver.resolve(meq(f.consumer), any[Int], any[Option[Throwable]]))
      .willReturn(Failure(channelResolvingException))
    givenFetchSuccess()
    val committer = newCommitter(f)

    // when
    val result: Try[OffsetMap] = committer.commit(OffsetMap())

    // then
    result.isFailure should equal(false)
  }

  it should "Eventually succeed when a failing commit finally passes" in { implicit f =>
    // given
    givenResponseWithErrorCode(ErrorMapping.MessageSizeTooLargeCode, howManyTimes = 3)
    givenFetchSuccess()
    val committer = newCommitter(f)

    // when
    val result: Try[OffsetMap] = committer.commit(OffsetMap())

    // then
    result.isFailure should equal(false)
  }

  it should "Fail when a commit fails with too many retries" in { implicit f =>
    // given
    givenResponseWithErrorCode(ErrorMapping.MessageSizeTooLargeCode, howManyTimes = 6)
    val committer = newCommitter(f)

    // when
    val result: Try[OffsetMap] = committer.commit(OffsetMap())

    // then
    result.isFailure should equal(true)
    val expectedError = "Received statuses: Map([topic,0] -> 10)"
    val cause: Throwable = result.failed.get.getCause
    cause.getClass should equal(classOf[KafkaErrorException])
    cause.getMessage should equal(expectedError)
  }

  it should "Fail when fetch fails with too many retries" in { implicit f =>
    // given
    givenSuccessfulCommit()
    givenFetchResponse(ErrorMapping.InvalidTopicCode, howManyTimes = 10)
    val committer = newCommitter(f)

    // when
    val result: Try[OffsetMap] = committer.commit(OffsetMap())

    // then
    result.isFailure should equal(true)
    val expectedError = "Received statuses: Map([topic,0] -> 17)"
    val cause: Throwable = result.failed.get.getCause
    cause.getClass should equal(classOf[KafkaErrorException])
    cause.getMessage should equal(expectedError)
  }

  it should "Fail when fetch fails on resolving new coordinator" in { implicit f =>
    // given
    givenSuccessfulCommit()
    // Fix: message previously read "Cannot the coordinator".
    val expectedError = new IllegalStateException("Cannot find the coordinator")
    givenFetchResponse(ErrorMapping.NotCoordinatorForConsumerCode, howManyTimes = 10)
    given(f.offsetManagerResolver.resolve(meq(f.consumer), any[Int], any[Option[Throwable]]))
      .willReturn(Failure(expectedError))
    val committer = newCommitter(f)

    // when
    val result: Try[OffsetMap] = committer.commit(OffsetMap())

    // then
    result.isFailure should equal(true)
    result.failed.get.getCause should equal(expectedError)
  }

  it should "Fail when offset load is continously in progress" in { implicit f =>
    // given
    givenSuccessfulCommit()
    givenFetchResponse(ErrorMapping.OffsetsLoadInProgressCode, howManyTimes = 10)
    val committer = newCommitter(f)

    // when
    val result: Try[OffsetMap] = committer.commit(OffsetMap())

    // then
    result.isFailure should equal(true)
    result.failed.get.getCause should equal(OffssetLoadInProgressException)
  }

  it should "Succeed when offset load in progress a few times but then fine" in { implicit f =>
    // given
    givenSuccessfulCommit()
    givenFetchResponse(ErrorMapping.OffsetsLoadInProgressCode, howManyTimes = 3)
    val committer = newCommitter(f)

    // when
    val result: Try[OffsetMap] = committer.commit(OffsetMap())

    // then
    result.isFailure should equal(false)
  }

  it should "Succeed when finds new offset manager" in { implicit f =>
    // given
    givenSuccessfulCommit()
    val newChannel = mock[BlockingChannel]
    given(f.offsetManagerResolver.resolve(meq(f.consumer), any[Int], any[Option[Throwable]]))
      .willReturn(Success(newChannel))
    givenFetchResponse(ErrorMapping.NotCoordinatorForConsumerCode, howManyTimes = 1, okForChannel = newChannel)
    val committer = newCommitter(f)

    // when
    val result: Try[OffsetMap] = committer.commit(OffsetMap())

    // then
    result.isFailure should equal(false)
  }

  // Stubs a single successful commit response. (Renamed from the misspelled
  // givenSuccessfullCommit.)
  private def givenSuccessfulCommit()(implicit f: FixtureParam): Unit =
    givenResponseWithErrorCode(ErrorMapping.NoError, 1)

  // Stubs the commit path to answer with `code` for the first `howManyTimes`+1
  // calls, then NoError afterwards.
  private def givenResponseWithErrorCode(code: Short, howManyTimes: Int = 10)(implicit f: FixtureParam): Unit = {
    var retries = 0
    f.sendCommit = (channel, req) => {
      val finalCode = if (retries <= howManyTimes)
        code
      else
        ErrorMapping.NoError
      retries = retries + 1
      Success(OffsetCommitResponse(Map(TopicAndPartition("topic", 0) -> finalCode)))
    }
  }

  private def givenFetchSuccess()(implicit f: FixtureParam) =
    givenFetchResponse(ErrorMapping.NoError, 0)

  // Stubs the fetch path to answer with `code` until either the retry budget is
  // spent or the request arrives on `okForChannel` (the "new coordinator").
  private def givenFetchResponse(code: Short, howManyTimes: Int = 10,
    okForChannel: BlockingChannel = mock[BlockingChannel])(implicit f: FixtureParam): Unit = {
    def resp(code: Short) =
      Success(OffsetFetchResponse(Map(TopicAndPartition("topic", 0) -> OffsetMetadataAndError(0, error = code))))

    var retries = 0
    f.sendFetch = (channel, req) => {
      val finalCode = if (retries <= howManyTimes && channel != okForChannel)
        code
      else
        ErrorMapping.NoError
      retries = retries + 1
      resp(finalCode)
    }
  }
}
| anand-singh/reactive-kafka | core/src/test/scala/com/softwaremill/react/kafka/commit/native/NativeCommitterSpec.scala | Scala | apache-2.0 | 8,859 |
package io.reactors
import org.apache.commons.lang3.StringEscapeUtils
import scalajson.ast._
package object json {

  /**
   * Escapes a raw string so it is safe inside a double-quoted JSON literal
   * (quotes, backslashes and control characters, per RFC 8259 section 7).
   * Fix: previously strings were emitted unescaped, so any value containing
   * a quote or backslash produced invalid JSON.
   */
  private def escapeJson(s: String): String = s.flatMap {
    case '"' => "\\\""
    case '\\' => "\\\\"
    case '\b' => "\\b"
    case '\f' => "\\f"
    case '\n' => "\\n"
    case '\r' => "\\r"
    case '\t' => "\\t"
    case c if c < ' ' => "\\u%04x".format(c.toInt)
    case c => c.toString
  }

  /** Enables the `json"..."` interpolator, which renders arguments as JSON and parses the result. */
  implicit class StringJsonInterpolationOps(val ctx: StringContext) {
    def json(args: Any*): JValue = {
      var content = ""
      for ((part, arg) <- ctx.parts.zip(args)) {
        content += part
        // Renders one interpolated argument as JSON text, recursing into
        // arrays, maps, collections and options.
        def appendValue(x: Any) {
          x match {
            case b: Boolean =>
              content += b
            case n: Number =>
              content += n
            case xs: Array[t] =>
              content += "["
              if (xs.nonEmpty) {
                for (x <- xs.init) {
                  appendValue(x)
                  content += ","
                }
                appendValue(xs.last)
              }
              content += "]"
            case xs: Map[k, v] =>
              content += "{"
              if (xs.nonEmpty) {
                for ((key, value) <- xs.init) {
                  content += s""""${escapeJson(key.toString)}": """
                  appendValue(value)
                  content += ","
                }
                val (lastKey, lastValue) = xs.last
                content += s""""${escapeJson(lastKey.toString)}": """
                appendValue(lastValue)
              }
              content += "}"
            case xs: Traversable[t] =>
              content += "["
              if (xs.nonEmpty) {
                for (x <- xs.init) {
                  appendValue(x)
                  content += ","
                }
                appendValue(xs.last)
              }
              content += "]"
            case Some(v) =>
              appendValue(v)
            case null =>
              content += "null"
            case None =>
              content += "null"
            case jv: JValue =>
              content += jv.jsonString
            case _ =>
              // Fallback: render as a quoted, escaped JSON string.
              content += s""""${escapeJson(x.toString)}""""
          }
        }
        appendValue(arg)
      }
      content += ctx.parts.last
      JsonParser.parse(content)
    }
  }

  /** Adds rendering and extraction helpers to [[JValue]]. */
  implicit class JsonOps(val jv: JValue) {
    /** Serializes this JSON value back to JSON text. */
    def jsonString: String = jv match {
      case JNull => "null"
      case JString(s) => s""""${escapeJson(s)}""""
      case JNumber(num) => num
      case JTrue => "true"
      case JFalse => "false"
      case JArray(xs) => s"[${xs.map(_.jsonString).mkString(", ")}]"
      case JObject(xs) => {
        val strings = for ((k, v) <- xs) yield s""""${escapeJson(k)}": ${v.jsonString}"""
        s"{ ${strings.mkString(", ")} }"
      }
    }
    /** Downcasts to JObject; fails fast on any other node type. */
    def asJObject: JObject = jv match {
      case x @ JObject(_) => x
      case _ => sys.error("Not an instance of JObject.")
    }
    def asString: String = jv match {
      case JString(s) => s
      case _ => sys.error("Not an instance of JString.")
    }
    def asLong: Long = jv match {
      case JNumber(num) => java.lang.Long.parseLong(num)
      case _ => sys.error("Not an instance of JNumber.")
    }
    def asList[T](convert: JValue => T): List[T] = jv match {
      case JArray(xs) => xs.map(convert).toList
      case _ => sys.error("Not an instance of JArray.")
    }
    /** Merges the fields of `that` into this object; `that` wins on key clashes. */
    def ++(that: JObject): JObject = {
      val self = asJObject
      JObject(self.value ++ that.value)
    }
  }

  /** Parses JSON text into a [[JValue]] by delegating to json4s/Jackson. */
  object JsonParser {
    def parse(s: String): JValue = {
      val jackson = org.json4s.jackson.JsonMethods.parse(s)
      // Translates the json4s AST into the scalajson AST node by node.
      def convert(jv: org.json4s.JValue): JValue = jv match {
        case org.json4s.JNull => JNull
        case org.json4s.JString(s) => JString(s)
        case org.json4s.JDouble(num) => JNumber(num)
        case org.json4s.JDecimal(num) => JNumber(num)
        case org.json4s.JInt(num) => JNumber(num)
        case org.json4s.JLong(num) => JNumber(num)
        case org.json4s.JBool(true) => JTrue
        case org.json4s.JBool(false) => JFalse
        case org.json4s.JArray(xs) =>
          val values = for (x <- xs) yield convert(x)
          JArray(values.toVector)
        case org.json4s.JObject(xs) =>
          val fields = for ((k, v) <- xs) yield (k, convert(v))
          JObject(fields.toMap)
      }
      convert(jackson)
    }
  }
}
| reactors-io/reactors | reactors-http/src/main/scala/io/reactors/json/package.scala | Scala | bsd-3-clause | 4,049 |
/*
* Copyright 2010 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version
* 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.fusesource.fabric.webui
import system.Principal
import java.util.Date
import java.text.SimpleDateFormat
import scala.concurrent.ops._
import javax.servlet.{ServletContext, ServletContextEvent, ServletContextListener}
import org.osgi.framework.{FrameworkUtil, Bundle}
/**
* <p>
* </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
/**
 * Servlet lifecycle hook that logs the FMC web context configuration on
 * startup and a marker message on shutdown.
 */
class ContextListener extends ServletContextListener {

  override def contextInitialized(event: ServletContextEvent): Unit = {
    Services.LOG.info("-- FMC Context Initialized --")
    Services.LOG.info("Patch upload directory at {}", Services.patch_dir)
    Services.LOG.info("Profile name is {}", Services.profile_name)
    Services.LOG.info("Create managed container : {}", Services.managed)
    Services.LOG.info("JAAS realm: {}", Services.realm)
    Services.LOG.info("JAAS authorized role: {}", Services.role)
  }

  override def contextDestroyed(event: ServletContextEvent): Unit =
    Services.LOG.info("-- FMC Context Destroyed --")
}
| janstey/fuse | fmc/fmc-rest/src/main/scala/org/fusesource/fabric/webui/ContextListener.scala | Scala | apache-2.0 | 1,626 |
package uk.gov.gds.ier.validation.constraints
import uk.gov.gds.ier.validation.{FormKeys, ErrorMessages, NinoValidator}
import play.api.data.validation.{Valid, Invalid, Constraint}
import uk.gov.gds.ier.model.{Nino}
import uk.gov.gds.ier.transaction.overseas.InprogressOverseas
/**
 * Form-validation constraints for National Insurance numbers (NINOs) on the
 * overseas registration flow.
 */
trait NinoConstraints {
  self: ErrorMessages
    with FormKeys =>

  /** Requires that a NINO was supplied at all. */
  lazy val overseasNinoOrNoNinoReasonDefined = Constraint[InprogressOverseas](keys.nino.key) {
    application =>
      if (application.nino.isEmpty) {
        Invalid("Please enter your National Insurance number", keys.nino.nino)
      } else {
        Valid
      }
  }

  /** Validates the NINO format, but only when one was actually provided. */
  lazy val ninoIsValidIfProvided = Constraint[Nino](keys.nino.nino.key) {
    case Nino(Some(value), _) if !NinoValidator.isValid(value) =>
      Invalid("Your National Insurance number is not correct", keys.nino.nino)
    case _ =>
      Valid
  }
}
| michaeldfallen/ier-frontend | app/uk/gov/gds/ier/validation/constraints/NinoConstraints.scala | Scala | mit | 1,008 |
package com.github.ponkin.bloom
import org.scalacheck.{ Gen, Arbitrary }
package object server {

  /** Generates maps whose keys and values come from the same alphabetic strings. */
  val mapGen: Gen[Map[String, String]] =
    Gen.listOf(Gen.alphaStr).map(entries => (entries zip entries).toMap)

  /** Generates an alphabetic string of exactly the requested length. */
  val strOfLen: Int => Gen[String] =
    length => Gen.listOfN(length, Gen.alphaChar).map(_.mkString)

  /** Generates arbitrary (but structurally valid) filter descriptors. */
  val filterDescGen: Gen[FilterDescriptor] = for {
    id <- strOfLen(10)
    kind <- Gen.oneOf(BloomType.Standart, BloomType.Stable, BloomType.Cuckoo)
    capacity <- Gen.posNum[Long]
    errorRate <- Gen.posNum[Double]
    path <- Gen.option(Gen.alphaStr)
    opts <- mapGen
  } yield FilterDescriptor(id, kind, capacity, errorRate, path, opts)

  /** A fixed-size batch of generated descriptors. */
  val listOfDescriptors: Gen[List[FilterDescriptor]] =
    Gen.listOfN(10, filterDescGen)
}
| ponkin/bloom | server/src/test/scala/com/github/ponkin/bloom/server/package.scala | Scala | apache-2.0 | 774 |
/*
* Copyright 2014 Treode, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.treode.store.catalog
import com.treode.async.{Async, Callback, Fiber, Scheduler}
import com.treode.async.implicits._
import com.treode.buffer.ArrayBuffer
import com.treode.cluster.{Cluster, MessageDescriptor, Peer}
import com.treode.disk.{Disk, DiskLaunch, ObjectId, PageDescriptor, Position}
import com.treode.store.{Bytes, CatalogDescriptor, CatalogId}
import com.treode.pickle.PicklerRegistry
import Async.guard
import Callback.ignore
import Handler.pager
// Gossip-based catalog broker: keeps a map of catalog handlers, applies
// incoming updates, and periodically exchanges versions with a random peer.
// All mutable state (`catalogs`) is confined to the single `fiber`.
private class Broker (
  private var catalogs: Map [CatalogId, Handler]
) (implicit
  scheduler: Scheduler,
  disk: Disk
) {

  private val fiber = new Fiber
  // Listener registry keyed by catalog id; unknown ids unpickle to unit.
  private val ports = PicklerRegistry [Any] {id: Long => ()}

  // Returns the handler for `id`, creating and caching one on first use.
  // Must only be called on the fiber.
  private def _get (id: CatalogId): Handler = {
    catalogs get (id) match {
      case Some (cat) =>
        cat
      case None =>
        val cat = Handler (id)
        catalogs += id -> cat
        cat
    }}

  // Delivers the catalog's current bytes to the registered listener,
  // off the fiber via the scheduler.
  private def deliver (id: CatalogId, cat: Handler): Unit =
    scheduler.execute {
      ports.unpickle (id.id, cat.bytes.bytes)
    }

  // Registers a listener for a catalog; immediately replays the current
  // value if one exists (murmur32 == 0 is treated as "no value yet").
  def listen [C] (desc: CatalogDescriptor [C]) (f: C => Any): Unit =
    fiber.execute {
      ports.register (desc.id.id, desc.pcat) (f)
      catalogs get (desc.id) match {
        case Some (cat) if cat.bytes.murmur32 != 0 => deliver (desc.id, cat)
        case _ => ()
      }}

  def get (cat: CatalogId): Async [Handler] =
    fiber.supply {
      _get (cat)
    }

  // Computes the patch that would bring the catalog from `version` to the
  // serialized form of `cat`.
  def diff [C] (desc: CatalogDescriptor [C]) (version: Int, cat: C): Async [Patch] =
    guard {
      val bytes = Bytes (desc.pcat, cat)
      fiber.supply {
        _get (desc.id) diff (version, bytes)
      }}

  // Applies an update; notifies the listener only if the handler reports
  // that the update actually changed the catalog.
  def patch (id: CatalogId, update: Update): Async [Unit] =
    fiber.supply {
      val cat = _get (id)
      if (cat.patch (update))
        deliver (id, cat)
    }

  // Snapshot of (catalog id, version) pairs; fiber-only.
  private def _status: Ping =
    for ((id, cat) <- catalogs.toSeq)
      yield (id, cat.version)

  def status: Async [Ping] =
    fiber.supply (_status)

  // Given a peer's versions, computes the non-empty updates it is missing.
  // Unknown catalogs default to version 0 (send everything).
  def ping (values: Ping): Async [Sync] =
    fiber.supply {
      val _values = values.toMap.withDefaultValue (0)
      for {
        (id, cat) <- catalogs.toSeq
        update = cat.diff (_values (id))
        if !update.isEmpty
      } yield (id -> update)
    }

  // Sends our versions to a peer, which will answer with a sync message.
  def ping (peer: Peer): Unit =
    fiber.execute {
      Broker.ping (_status) (peer)
    }

  // Applies a batch of updates received from a peer.
  def sync (updates: Sync): Unit =
    fiber.execute {
      for ((id, update) <- updates) {
        val cat = _get (id)
        if (cat.patch (update))
          deliver (id, cat)
      }}

  // Gossip loop: every 200 ms ping a random peer, if any, then reschedule.
  def gab () (implicit cluster: Cluster) {
    scheduler.delay (200) {
      cluster.rpeer match {
        case Some (peer) => ping (peer)
        case None => ()
      }
      gab()
    }}

  // Compacts the handler's pages for the given generations.
  def compact (obj: ObjectId, gens: Set [Long]): Async [Unit] =
    for {
      cat <- get (obj.id)
      _ <- cat.compact (gens)
    } yield ()

  // Checkpoints every known catalog handler.
  def checkpoint(): Async [Unit] =
    for {
      cats <- fiber.supply (catalogs.values)
      _ <- cats.latch (_.checkpoint())
    } yield ()

  // Wires the gossip message handlers into the cluster and starts the loop.
  // Failures of the ping-reply task are deliberately ignored.
  def attach () (implicit launch: DiskLaunch, cluster: Cluster) {

    Broker.ping.listen { (values, from) =>
      val task = for {
        updates <- ping (values)
      } yield {
        if (!updates.isEmpty)
          Broker.sync (updates) (from)
      }
      task run (ignore)
    }

    Broker.sync.listen { (updates, from) =>
      sync (updates)
    }

    gab()
  }}
private object Broker {
  import CatalogPicklers._

  /** Gossip request: each known catalog id paired with its current version. */
  val ping: MessageDescriptor [Ping] =
    MessageDescriptor (
      0xFF8D38A840A7E6BCL,
      seq (tuple (catId, uint)))

  /** Gossip reply: the updates a peer needs for its stale catalogs. */
  val sync: MessageDescriptor [Sync] =
    MessageDescriptor (
      0xFF632A972A814B35L,
      seq (tuple (catId, update)))
}
| Treode/store | store/src/com/treode/store/catalog/Broker.scala | Scala | apache-2.0 | 4,325 |
/**
* Copyright (c) 2015 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.trustedanalytics.sparktk.frame.internal.ops.statistics.covariance
import org.trustedanalytics.sparktk.frame.{ Column, FrameSchema, DataTypes }
import org.trustedanalytics.sparktk.frame.internal.rdd.FrameRdd
import org.apache.spark.sql.catalyst.expressions.GenericRow
import org.scalatest.Matchers
import org.apache.spark.sql.Row
import org.trustedanalytics.sparktk.testutils.TestingSparkContextWordSpec
// Verifies CovarianceFunctions.covarianceMatrix over plain float64 columns,
// vector columns, and a mix of both; all three layouts must yield the same
// 3x3 covariance matrix for the shared 5x3 input.
class CovarianceMatrixTest extends TestingSparkContextWordSpec with Matchers {

  // Five observations of three variables.
  val inputArray: Array[Array[Double]] = Array(Array(90.0, 60.0, 90.0), Array(90.0, 90.0, 30.0),
    Array(60.0, 60.0, 60.0), Array(60.0, 60.0, 90.0), Array(30.0, 30.0, 30.0))

  "CovarianceFunctions matrix calculations" should {

    "return the correct values" in {
      // Each input row becomes a generic Row of three float64 values.
      val arrGenericRow: Array[Row] = inputArray.map(row => {
        val temp: Array[Any] = row.map(x => x)
        new GenericRow(temp)
      })

      val rdd = sparkContext.parallelize(arrGenericRow)
      val columnsList = List("col_0", "col_1", "col_2")
      val inputDataColumnNamesAndTypes: Vector[Column] = columnsList.map({ name => Column(name, DataTypes.float64) }).toVector
      val schema = FrameSchema(inputDataColumnNamesAndTypes)
      val frameRdd = new FrameRdd(schema, rdd)
      val result = CovarianceFunctions.covarianceMatrix(frameRdd, columnsList).collect()
      result.size shouldBe 3
      // Expected covariance matrix rows for the input above.
      result(0) shouldBe Row(630.0, 450.0, 225.0)
      result(1) shouldBe Row(450.0, 450.0, 0.0)
      result(2) shouldBe Row(225.0, 0.0, 900.0)
    }

    "return the correct values for vector data types" in {
      // Same data packed into a single vector(3) column.
      val arrGenericRow: Array[Row] = inputArray.map(row => {
        val temp: Array[Any] = Array(DataTypes.toVector(3)(row))
        new GenericRow(temp)
      })

      val rdd = sparkContext.parallelize(arrGenericRow)
      val schema = FrameSchema(Vector(Column("col_0", DataTypes.vector(3))))
      val frameRdd = new FrameRdd(schema, rdd)
      val result = CovarianceFunctions.covarianceMatrix(frameRdd, List("col_0"), outputVectorLength = Some(3)).collect()
      result.size shouldBe 3
      result(0)(0) shouldBe Vector(630.0, 450.0, 225.0)
      result(1)(0) shouldBe Vector(450.0, 450.0, 0.0)
      result(2)(0) shouldBe Vector(225.0, 0.0, 900.0)
    }

    "return the correct values for mixed vector and numeric data types" in {
      // First two variables as a vector(2) column, third as a scalar column.
      val arrGenericRow: Array[Row] = inputArray.map(row => {
        val temp: Array[Any] = Array(DataTypes.toVector(2)(row.slice(0, 2)), row(2))
        new GenericRow(temp)
      })

      val rdd = sparkContext.parallelize(arrGenericRow)
      val schema = FrameSchema(Vector(Column("col_0", DataTypes.vector(2)), Column("col_1", DataTypes.float64)))
      val frameRdd = new FrameRdd(schema, rdd)
      val result = CovarianceFunctions.covarianceMatrix(frameRdd, List("col_0", "col_1")).collect()
      result.size shouldBe 3
      result(0) shouldBe Row(630.0, 450.0, 225.0)
      result(1) shouldBe Row(450.0, 450.0, 0.0)
      result(2) shouldBe Row(225.0, 0.0, 900.0)
    }
  }
}
| shibanis1/spark-tk | core/src/test/scala/org/trustedanalytics/sparktk/frame/internal/ops/statistics/covariance/CovarianceMatrixTest.scala | Scala | apache-2.0 | 3,721 |
package speedtools
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import scala.concurrent.duration._
// Gatling load test comparing the non-Akka and Akka REST implementations.
// After a single warm-up user, 5000 users ramp onto the Akka endpoint over
// 30 seconds; the run asserts zero failed requests.
class LoadTestRestApi extends Simulation {

  // Shared HTTP protocol: local JSON API, redirects not followed.
  val httpConf = http.
    baseURL("http://localhost:8080").
    acceptCharsetHeader("utf-8").
    acceptHeader("application/json").
    acceptLanguageHeader("en-US").
    disableFollowRedirect

  val headers = Map(
    "Accept" -> "application/json")

  // Hits both endpoints once so JIT/connection warm-up does not skew results.
  val scenarioWarmUp = scenario("Warm-Up").
    exec(http("first-thread").get("/example/1/person").headers(headers)).
    exec(http("first-future").get("/example/2/person").headers(headers))

  // NOTE(review): defined but not injected below — kept for ad-hoc comparison runs.
  val scenarioNonAkkaImplementation = scenario("NonAkkaImplementation").
    exec(http("NonAkkaImplementation").get("/example/1/person").headers(headers))

  val scenarioAkkaImplementation = scenario("AkkaImplementation").
    exec(http("AkkaImplementation").get("/example/2/person").headers(headers))

  setUp(
    scenarioWarmUp.inject(atOnceUsers(1)),
    scenarioAkkaImplementation.inject(nothingFor(10 seconds), rampUsers(5000) over (30 seconds))).
    //scenarioAkkaImplementation.inject(nothingFor(180 seconds), rampUsers(5000) over (30 seconds))).
    protocols(httpConf).
    assertions(global.successfulRequests.percent.is(100))
}
| tomtom-international/speedtools-examples | gatling-charts-highcharts-bundle-2.3.0/user-files/simulations/speedtools/LoadTestRestApi.scala | Scala | apache-2.0 | 1,250 |
package fasqlitate
import java.sql.PreparedStatement
object Impl {

  /** A setter candidate: yields a bound setter when it can handle `arg`'s type. */
  trait OptTypeSetter {
    def apply(arg: Any, pos: Int): Option[PreparedStatement => Unit]
  }

  /** Wraps a setter function so its toString reports what it will do. */
  def describedFun(f: PreparedStatement => Unit, desc: String) = new Function1[PreparedStatement, Unit] {
    def apply(ps: PreparedStatement): Unit = f(ps)
    override def toString(): String = desc
  }

  /** Handles Int arguments via PreparedStatement.setInt. */
  object OptIntSetter extends OptTypeSetter {
    def apply(arg: Any, pos: Int): Option[(PreparedStatement) => Unit] =
      PartialFunction.condOpt(arg) {
        case value: Int => describedFun(_.setInt(pos, value), s"_.setInt($pos, $value)")
      }
  }

  /** Handles String arguments via PreparedStatement.setString. */
  object OptStringSetter extends OptTypeSetter {
    def apply(arg: Any, pos: Int): Option[(PreparedStatement) => Unit] =
      PartialFunction.condOpt(arg) {
        case value: String => describedFun(_.setString(pos, value), s"_.setString($pos, $value)")
      }
  }

  /** Tries each candidate setter in order and uses the first that accepts the argument. */
  class TypeSetterImpl(optTypeSetters: List[OptTypeSetter]) extends TypeSetter {
    def apply(arg: Any, pos: Int): PreparedStatement => Unit =
      optTypeSetters.iterator
        .map(candidate => candidate(arg, pos))
        .collectFirst { case Some(setter) => setter }
        .getOrElse(throw new Exception(s"No OptTypeSetter found for type ${arg.getClass.getName}"))
  }
}
| davidpeklak/fasqlitate | src/main/scala/fasqlitate/Impl.scala | Scala | mit | 1,286 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.actormonitor
import akka.actor._
import org.squbs.actormonitor.ActorMonitorBean._
import org.squbs.lifecycle.GracefulStopHelper
import org.squbs.unicomplex.JMX._
import scala.collection.JavaConverters._
private[actormonitor] case class ActorMonitorConfig(maxActorCount: Int, maxChildrenDisplay: Int)
private[actormonitor] class ActorMonitor(_monitorConfig: ActorMonitorConfig) extends Actor with GracefulStopHelper {
val configBean = "org.squbs.unicomplex:type=ActorMonitor"
val monitorConfig = _monitorConfig
register(new ActorMonitorConfigBean(monitorConfig, self, context), prefix + configBean )
context.actorSelection("/*") ! Identify(monitorConfig)
override def postStop(): Unit = {
unregister(prefix + configBean)
totalBeans.asScala.foreach(unregister)
}
def receive = {
case "refresh" =>
totalBeans.asScala.foreach(unregister)
context.actorSelection("/*") ! Identify(monitorConfig)
case ActorIdentity(monitorConfig: ActorMonitorConfig , Some(actor))=>
implicit val config = monitorConfig
process(actor)
case Terminated(actor) =>
unregisterBean(actor)
}
def process(actor: ActorRef) (implicit monitorConfig: ActorMonitorConfig , context: ActorContext) : Unit= {
context.watch(actor)
registerBean(actor)
getDescendant(actor) foreach process
}
}
| anilgursel/squbs | squbs-actormonitor/src/main/scala/org/squbs/actormonitor/ActorMonitor.scala | Scala | apache-2.0 | 1,972 |
package com.taxis99.amazon.sns
import akka.Done
import akka.stream.QueueOfferResult
import com.taxis99.amazon.serializers.{ISerializer, PlayJson}
import play.api.libs.json.{Json, Writes}
import scala.concurrent.{ExecutionContext, Future, Promise}
/**
 * Mixin for publishing messages of type `T` to an SNS topic through an
 * Akka-streams queue.
 */
trait SnsPublisher[T] {
  implicit def ec: ExecutionContext
  implicit def sns: SnsClient

  // Topic metadata is looked up lazily, once, from the configured name.
  protected lazy val topicConfig: Future[SnsTopic] = sns.getTopic(topic)
  private lazy val publisher = sns.publisher(topicConfig)

  /**
   * Defines the serialization method to produce messages.
   * @return The serialization object
   */
  implicit def serializer: ISerializer = PlayJson

  /** The topic name in the configuration file. */
  def topic: String

  /**
   * Publishes a new message to the topic. The message must be serializable to Json.
   * @param message The message to be sent
   * @return A future completed when the message was sent
   */
  def publish(message: T)(implicit tjs: Writes[T]): Future[Done] = {
    val acked = Promise[Done]
    for {
      queue <- publisher
      offerResult <- queue.offer(Json.toJson(message) -> acked)
      done <- offerResult match {
        case QueueOfferResult.Enqueued => acked.future
        case other: QueueOfferResult => Future.failed(new Exception(s"Could not enqueue $other"))
      }
    } yield done
  }
}
| 99Taxis/common-sqs | src/main/scala/com/taxis99/amazon/sns/SnsPublisher.scala | Scala | apache-2.0 | 1,273 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.testkit
import org.scalatest.Tag
// ScalaTest tag for long-running tests, so they can be included/excluded at run time.
object SlowTest extends Tag("org.squbs.testkit.tags.SlowTest")
// ScalaTest tag for tests that require a database.
object DbTest extends Tag("org.squbs.testkit.tags.DbTest")
| akara/squbs | squbs-testkit/src/main/scala/org/squbs/testkit/Tags.scala | Scala | apache-2.0 | 772 |
package adapter
import org.scalatest.FunSuite
/**
 * Manual smoke test against the bitFlyer API; ignored by default because it
 * needs live credentials and performs network I/O.
 */
class BitFlyerTest extends FunSuite {

  import BitFlyer._

  // Placeholder credentials: replace with real values to run the test manually.
  val key = "###"
  val secret = "###"

  ignore("") {
    val executions: Seq[MyExecution] = myExecutions(key, secret)
    // Fix: removed the unused `price` local (signed notional) that was
    // computed per execution but never referenced.
    executions.reverse.foreach(e => {
      println(s"[${e.side}] ${e.exec_date} price ${e.price} size ${e.size}")
    })
  }
}
| rysh/scalatrader | scalatrader/test/adapter/BitFlyerTest.scala | Scala | mit | 446 |
package api.handler
import akka.actor.ActorSystem
import com.github.vonnagy.service.container.http.routing.Rejection.{DuplicateRejection, NotFoundRejection}
import db.WidgetPersistence
import model.Widget
import spray.http.StatusCodes._
import scala.util.{Failure, Success}
import spray.httpx.marshalling.{Marshaller, ToResponseMarshaller}
import spray.routing._
import java.util.UUID
import java.io.IOException
import scala.concurrent.{ExecutionContextExecutor, Future}
import java.util.concurrent.ConcurrentHashMap
import spray.client.pipelining._
import spray.util._
import com.github.vonnagy.service.container.http.DefaultMarshallers
// Spray routing handlers for orders and users. Orders are stored locally but
// their user part is created/fetched through HTTP calls to the user endpoint.
trait WidgetHandler { self:DefaultMarshallers =>
  implicit val system: ActorSystem
  import system.dispatcher

  implicit val unmarshaller = jsonUnmarshaller[OrderRequest]
  implicit val unmarshaller2 = jsonUnmarshaller[UserRequest]
  implicit val unmarshaller3 = jsonUnmarshaller[User]
  implicit val marshaller = jsonMarshaller

  // In-memory stores keyed by uuid; shared across requests.
  lazy val orders = new ConcurrentHashMap[String, OrderInner]
  lazy val users = new ConcurrentHashMap[String, User]

  // HTTP client pipelines for the user service.
  val pipeline = sendReceive ~> unmarshal[User]
  val pipeline2 = sendReceive ~> unmarshal[String]

  // Creates the user remotely, then records the order under the returned uuid.
  def createOrder[T](order: OrderRequest)(implicit marshaller: Marshaller[T]): Route = ctx => {
    implicit def userRequestMarshaller = marshaller.asInstanceOf[Marshaller[UserRequest]]
    val future = pipeline2 {
      Post("http://localhost:9000/user/", order.user)
    }
    // NOTE(review): the put below runs in a separate callback from the
    // response completion; an immediate fetch of the uuid may race with it.
    future.foreach(x => orders.put(x, OrderInner(order, x)))
    future onSuccess {
      case uuid:String => ctx.complete(uuid)
    }
    future onFailure {
      case _ => ctx.reject(NotFoundRejection(s"bad order create request"))
    }
  }

  // Null-safe lookup in the local order store.
  def getOrder(uuid:String):Option[OrderInner] = Option(orders.get(uuid))

  // Joins the locally stored order with its user fetched over HTTP.
  def fetchOrder[T](uuid: String)(implicit marshaller: Marshaller[T]): Route = ctx => {
    val order = getOrder(uuid).map { orderInner =>
      val future:Future[User] = pipeline {
        Get(s"http://localhost:9000/user/$uuid")
      }
      future.map(x => Order(orderInner, x))
    }
    order match {
      case Some(future) =>
        future onSuccess {
          case orderResult =>
            implicit def orderMarshaller = marshaller.asInstanceOf[Marshaller[Order]]
            ctx.complete(orderResult)
        }
        future onFailure { case x =>
          ctx.reject(NotFoundRejection(s"user not found $uuid"))
        }
      case _ => ctx.reject(NotFoundRejection(s"user not found $uuid"))
    }
  }

  // Serves a user straight from the local store.
  def fetchUser[T](uuid: String)(implicit marshaller: Marshaller[T]): Route = ctx => {
    val user = users.get(uuid)
    if (user != null) {
      implicit def x = marshaller.asInstanceOf[Marshaller[User]]
      ctx.complete(user)
    } else {
      ctx.reject(NotFoundRejection(s"user not found $uuid"))
    }
  }

  // Stores a new user under a fresh uuid and returns that uuid.
  def createUser[T](user: UserRequest)(implicit marshaller: Marshaller[T]): Route = ctx => {
    val uuid = UUID.randomUUID().toString
    users.put(uuid, User(user, uuid))
    ctx.complete(uuid)
  }
}
/** A fully-resolved order with its owning user embedded. */
case class Order(user: User, product: String, deliveryAddress: String, price: Double)
/** A persisted user; `uuid` is assigned at creation time. */
case class User(firstName: String, lastName: String, uuid: String)
/** Inbound payload for creating an order (user not yet persisted). */
case class OrderRequest(user: UserRequest, product: String, deliveryAddress: String, price: Double)
/** Inbound payload for creating a user. */
case class UserRequest(firstName: String, lastName: String)
/** Internally stored order: references its user only by `uuid`. */
case class OrderInner(product: String, deliveryAddress: String, price: Double, uuid: String)
object Order {
  /** Joins a stored order with its resolved user into a full Order. */
  def apply(order: OrderInner, user: User): Order = Order(user, order.product, order.deliveryAddress, order.price)
}
object OrderInner {
  /** Builds the stored form of an order request under the given user uuid. */
  def apply(order: OrderRequest, uuid: String): OrderInner = OrderInner(order.product, order.deliveryAddress, order.price, uuid)
}
object User {
  /** Materializes a User from a request plus a freshly generated uuid. */
  def apply(user: UserRequest, uuid: String): User = User(user.firstName, user.lastName, uuid)
}
| Alekseylv/spray-microservice | src/main/scala/api/handler/WidgetHandler.scala | Scala | apache-2.0 | 3,846 |
package com.github.neysofu.tyche
/** Defines a probability mass function and the associated cumulative density
* function for discrete random variables.
*/
trait MassFunction[A] {

  type MassMap[A] = Map[A, Double]

  /** The probability mass function. It contains all the possible outcomes
   * and their respective weights. The weights are supposed to:
   *  1. be nonnegative, and
   *  2. sum up to 1.
   *
   *  @example {{{
   *  scala> val coinToss = new MassFunction[Boolean] {
   *       |   val mass = Map(true -> 0.5, false -> 0.5)
   *       | }
   *  coinToss: com.github.neysofu.tyche.MassFunction[Boolean] = \\$...
   *
   *  }}}
   */
  val mass: MassMap[A]

  // All possible outcomes, in the (arbitrary but stable) iteration order of `mass`.
  protected lazy val outcomes = mass.unzip._1.toList

  // Cumulative sums of the weights, aligned with `outcomes`; the final element
  // is ~1.0 whenever the weights are properly normalized.
  protected lazy val cdf = mass.unzip._2.scanLeft(0.0)(_+_).tail.toList
}
| neysofu/tyche | src/main/scala/com/github/neysofu/tyche/MassFunction.scala | Scala | mit | 817 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.util
import java.text.SimpleDateFormat
import java.time.{LocalDate, ZoneId}
import java.util.{Date, Locale}
import org.apache.commons.lang3.time.FastDateFormat
import org.apache.spark.sql.catalyst.util.DateTimeUtils._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.LegacyBehaviorPolicy._
/** Converts dates (as days-since-epoch, `java.util.Date`, or `LocalDate`) to and from strings. */
sealed trait DateFormatter extends Serializable {
  /** Parses the given string into the number of days since the epoch (1970-01-01). */
  def parse(s: String): Int // returns days since epoch
  def format(days: Int): String
  def format(date: Date): String
  def format(localDate: LocalDate): String
  /** Fails fast when the pattern is invalid, instead of at first use. */
  def validatePatternString(): Unit
}
/**
 * java.time-based formatter. On parse/format discrepancies it cross-checks
 * against the legacy formatter (via the check* handlers from
 * DateTimeFormatterHelper) so behavior changes vs. Spark 2.4 are surfaced.
 */
class Iso8601DateFormatter(
    pattern: String,
    zoneId: ZoneId,
    locale: Locale,
    legacyFormat: LegacyDateFormats.LegacyDateFormat,
    isParsing: Boolean)
  extends DateFormatter with DateTimeFormatterHelper {

  @transient
  private lazy val formatter = getOrCreateFormatter(pattern, locale, isParsing)

  // Legacy counterpart used only for diff-checking and pattern fallbacks.
  @transient
  private lazy val legacyFormatter = DateFormatter.getLegacyFormatter(
    pattern, zoneId, locale, legacyFormat)

  override def parse(s: String): Int = {
    // Special values (e.g. "epoch", "today") are resolved before regular parsing.
    val specialDate = convertSpecialDate(s.trim, zoneId)
    specialDate.getOrElse {
      try {
        val localDate = toLocalDate(formatter.parse(s))
        localDateToDays(localDate)
      } catch checkParsedDiff(s, legacyFormatter.parse)
    }
  }

  override def format(localDate: LocalDate): String = {
    try {
      localDate.format(formatter)
    } catch checkFormattedDiff(toJavaDate(localDateToDays(localDate)),
      (d: Date) => format(d))
  }

  override def format(days: Int): String = {
    format(LocalDate.ofEpochDay(days))
  }

  override def format(date: Date): String = {
    // java.util.Date values always go through the legacy path.
    legacyFormatter.format(date)
  }

  override def validatePatternString(): Unit = {
    try {
      // Forcing the lazy val materializes the formatter and throws on bad input.
      formatter
    } catch checkLegacyFormatter(pattern, legacyFormatter.validatePatternString)
  }
}
/**
 * Adapter from a `java.util.Date`-producing parser to the [[DateFormatter]]
 * contract. Implementors only supply [[parseToDate]] and `format(Date)`;
 * the remaining conversions are derived here.
 */
trait LegacyDateFormatter extends DateFormatter {
  /** Parses the string into a `java.util.Date` (legacy semantics). */
  def parseToDate(s: String): Date

  override def parse(s: String): Int = {
    val parsed = parseToDate(s)
    val asSqlDate = new java.sql.Date(parsed.getTime)
    fromJavaDate(asSqlDate)
  }

  override def format(days: Int): String = format(DateTimeUtils.toJavaDate(days))

  override def format(localDate: LocalDate): String = format(localDateToDays(localDate))
}
/**
* The legacy formatter is based on Apache Commons FastDateFormat. The formatter uses the default
* JVM time zone intentionally for compatibility with Spark 2.4 and earlier versions.
*
* Note: Using of the default JVM time zone makes the formatter compatible with the legacy
* `DateTimeUtils` methods `toJavaDate` and `fromJavaDate` that are based on the default
* JVM time zone too.
*
* @param pattern `java.text.SimpleDateFormat` compatible pattern.
* @param locale The locale overrides the system locale and is used in parsing/formatting.
*/
class LegacyFastDateFormatter(pattern: String, locale: Locale) extends LegacyDateFormatter {
  @transient
  private lazy val fdf = FastDateFormat.getInstance(pattern, locale)
  override def parseToDate(s: String): Date = fdf.parse(s)
  override def format(d: Date): String = fdf.format(d)
  // Forcing the lazy val materializes the formatter, which throws on a bad pattern.
  override def validatePatternString(): Unit = fdf
}
// scalastyle:off line.size.limit
/**
* The legacy formatter is based on `java.text.SimpleDateFormat`. The formatter uses the default
* JVM time zone intentionally for compatibility with Spark 2.4 and earlier versions.
*
* Note: Using of the default JVM time zone makes the formatter compatible with the legacy
* `DateTimeUtils` methods `toJavaDate` and `fromJavaDate` that are based on the default
* JVM time zone too.
*
* @param pattern The pattern describing the date and time format.
* See <a href="https://docs.oracle.com/javase/7/docs/api/java/text/SimpleDateFormat.html">
* Date and Time Patterns</a>
* @param locale The locale whose date format symbols should be used. It overrides the system
* locale in parsing/formatting.
*/
// scalastyle:on line.size.limit
class LegacySimpleDateFormatter(pattern: String, locale: Locale) extends LegacyDateFormatter {
  @transient
  private lazy val sdf = new SimpleDateFormat(pattern, locale)
  override def parseToDate(s: String): Date = sdf.parse(s)
  override def format(d: Date): String = sdf.format(d)
  // Forcing the lazy val constructs the SimpleDateFormat, which throws on a bad pattern.
  override def validatePatternString(): Unit = sdf
}
/** Factory for [[DateFormatter]] instances, honoring the legacy-parser SQL config. */
object DateFormatter {
  import LegacyDateFormats._

  val defaultLocale: Locale = Locale.US

  val defaultPattern: String = "yyyy-MM-dd"

  // Chooses between legacy and java.time formatters per spark.sql.legacy.timeParserPolicy.
  private def getFormatter(
      format: Option[String],
      zoneId: ZoneId,
      locale: Locale = defaultLocale,
      legacyFormat: LegacyDateFormat = LENIENT_SIMPLE_DATE_FORMAT,
      isParsing: Boolean): DateFormatter = {
    val pattern = format.getOrElse(defaultPattern)
    if (SQLConf.get.legacyTimeParserPolicy == LEGACY) {
      getLegacyFormatter(pattern, zoneId, locale, legacyFormat)
    } else {
      val df = new Iso8601DateFormatter(pattern, zoneId, locale, legacyFormat, isParsing)
      // Fail fast on an invalid pattern rather than lazily at first use.
      df.validatePatternString()
      df
    }
  }

  /** Builds the pre-3.0-compatible formatter variant requested by `legacyFormat`. */
  def getLegacyFormatter(
      pattern: String,
      zoneId: ZoneId,
      locale: Locale,
      legacyFormat: LegacyDateFormat): DateFormatter = {
    legacyFormat match {
      case FAST_DATE_FORMAT =>
        new LegacyFastDateFormatter(pattern, locale)
      case SIMPLE_DATE_FORMAT | LENIENT_SIMPLE_DATE_FORMAT =>
        new LegacySimpleDateFormatter(pattern, locale)
    }
  }

  def apply(
      format: String,
      zoneId: ZoneId,
      locale: Locale,
      legacyFormat: LegacyDateFormat,
      isParsing: Boolean): DateFormatter = {
    getFormatter(Some(format), zoneId, locale, legacyFormat, isParsing)
  }

  def apply(format: String, zoneId: ZoneId, isParsing: Boolean = false): DateFormatter = {
    getFormatter(Some(format), zoneId, isParsing = isParsing)
  }

  // No explicit pattern: uses the default "yyyy-MM-dd".
  def apply(zoneId: ZoneId): DateFormatter = {
    getFormatter(None, zoneId, isParsing = false)
  }
}
| witgo/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/DateFormatter.scala | Scala | apache-2.0 | 6,776 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.zk
import kafka.consumer.ConsumerConfig
import kafka.utils.ZkUtils
import kafka.utils.TestUtils
import org.junit.{Test, Assert}
/** Verifies that ephemeral znodes disappear once their creating session closes. */
class ZKEphemeralTest extends ZooKeeperTestHarness {
  // Short session timeout so the ephemeral node expires promptly after close.
  var zkSessionTimeoutMs = 1000

  @Test
  def testEphemeralNodeCleanup = {
    val config = new ConsumerConfig(TestUtils.createConsumerProperties(zkConnect, "test", "1"))
    var zkClient = ZkUtils.createZkClient(zkConnect, zkSessionTimeoutMs, config.zkConnectionTimeoutMs)

    try {
      ZkUtils.createEphemeralPathExpectConflict(zkClient, "/tmp/zktest", "node created")
    } catch {
      // Best-effort create: a conflict left over from a previous run is fine,
      // since the read below still proves the node exists for this session.
      case e: Exception =>
    }

    // Fix: was a needless `var testData: String = null` followed by reassignment.
    val testData = ZkUtils.readData(zkClient, "/tmp/zktest")._1
    Assert.assertNotNull(testData)
    zkClient.close

    // A brand-new session must no longer observe the ephemeral node.
    zkClient = ZkUtils.createZkClient(zkConnect, zkSessionTimeoutMs, config.zkConnectionTimeoutMs)
    val nodeExists = ZkUtils.pathExists(zkClient, "/tmp/zktest")
    Assert.assertFalse(nodeExists)
  }
}
| bmistry13/kafka | core/src/test/scala/unit/kafka/zk/ZKEphemeralTest.scala | Scala | apache-2.0 | 1,797 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.sst
import quasar.contrib.matryoshka.arbitrary._
import quasar.ejson.{EJson, Common, Extension, CommonEJson, ExtEJson, Meta, Type => EType, SizedType => ESizedType, EJsonArbitrary, Null => ENull}
import quasar.ejson.implicits._
import quasar.fp._
import quasar.pkg.tests._
import matryoshka._
import matryoshka.implicits._
import scalaz.scalacheck.ScalaCheckBinding._
import scalaz._, Scalaz._
/** EJson that contains `_ejson.type` metadata. */
/** Wrapper marking an EJson tree as carrying `_ejson.type` metadata. */
final case class TypedEJson[T[_[_]]](ejson: T[EJson])

object TypedEJson extends TypedEJsonInstances {
  // EJson extended with an extra TypeMetadata layer in the pattern functor.
  type TEJson[A] = Coproduct[TypeMetadata, EJson, A]

  /** Folds the TypeMetadata layer back into plain EJson `Meta` nodes. */
  def absorbMetadata[J](implicit J: Birecursive.Aux[J, EJson]): Transform[J, TEJson, EJson] = {
    case TM(TypeMetadata.Type(tag, j)) => ExtEJson(Meta(j, EType(tag)))
    case TM(TypeMetadata.SizedType(tag, size, j)) => ExtEJson(Meta(j, ESizedType(tag, size)))
    case TM(TypeMetadata.Absent(j)) => J.project(j)
    case TM(TypeMetadata.Null()) => CommonEJson(ENull())
    case CJ(cj) => CommonEJson(cj)
    case EJ(ej) => ExtEJson(ej)
  }

  ////

  // Coproduct injections, used as extractors in absorbMetadata above.
  private val TM = Inject[TypeMetadata, TEJson]
  private val CJ = Inject[Common, TEJson]
  private val EJ = Inject[Extension, TEJson]
}
/** Primary type-class instances (Arbitrary/Order/Show) for [[TypedEJson]]. */
sealed abstract class TypedEJsonInstances extends TypedEJsonInstances0 {
  import EJsonArbitrary._

  // Generates an arbitrary TEJson tree, then folds its metadata into plain EJson.
  implicit def arbitrary[T[_[_]]: BirecursiveT]: Arbitrary[TypedEJson[T]] =
    corecursiveArbitrary[T[TypedEJson.TEJson], TypedEJson.TEJson] map { v =>
      TypedEJson(v.transCata[T[EJson]](TypedEJson.absorbMetadata[T[EJson]]))
    }

  implicit def order[T[_[_]]: BirecursiveT]: Order[TypedEJson[T]] =
    Order[T[EJson]].contramap(_.ejson)

  implicit def show[T[_[_]]: ShowT]: Show[TypedEJson[T]] =
    Show[T[EJson]].contramap(_.ejson)
}
/** Lower-priority matryoshka (Co)Recursive instances, delegating to the wrapped tree. */
sealed abstract class TypedEJsonInstances0 {
  implicit def corecursive[T[_[_]]: CorecursiveT]: Corecursive.Aux[TypedEJson[T], EJson] =
    new Corecursive[TypedEJson[T]] {
      type Base[B] = EJson[B]
      def embed(bt: EJson[TypedEJson[T]])(implicit BF: Functor[EJson]) =
        TypedEJson(bt.map(_.ejson).embed)
    }

  implicit def recursive[T[_[_]]: RecursiveT]: Recursive.Aux[TypedEJson[T], EJson] =
    new Recursive[TypedEJson[T]] {
      type Base[B] = EJson[B]
      def project(bt: TypedEJson[T])(implicit BF: Functor[EJson]) =
        bt.ejson.project map (TypedEJson(_))
    }
}
| drostron/quasar | frontend/src/test/scala/quasar/sst/TypedEJson.scala | Scala | apache-2.0 | 3,067 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala
import org.apache.flink.annotation.Internal
import org.apache.flink.api.common.functions.ReduceFunction
import org.apache.flink.api.java.typeutils.TupleTypeInfoBase
/**
 * SelectByMinFunction to work with Scala tuples.
 *
 * Reduces two tuples to whichever is smaller on the configured key fields,
 * compared lexicographically in `fields` order; ties on all fields keep value1.
 */
@Internal
class SelectByMinFunction[T](t : TupleTypeInfoBase[T], fields : Array[Int])
  extends ReduceFunction[T] {
  // Validate key positions eagerly so misconfiguration fails at construction time.
  for(f <- fields) {
    if (f < 0 || f >= t.getArity()) {
      throw new IndexOutOfBoundsException(
        "SelectByMinFunction field position " + f + " is out of range.")
    }

    // Check whether type is comparable
    if (!t.getTypeAt(f).isKeyType()) {
      throw new IllegalArgumentException(
        "SelectByMinFunction supports only key(Comparable) types.")
    }
  }

  override def reduce(value1: T, value2: T): T = {
    // Fix: the original used `return` inside `for (f <- fields)`. In Scala that
    // is a non-local return from a lambda, implemented by throwing
    // NonLocalReturnControl on every decided comparison. A while loop keeps the
    // `return` local to the method and exception-free on the hot reduce path.
    var i = 0
    while (i < fields.length) {
      val f = fields(i)
      val element1 = value1.asInstanceOf[Product].productElement(f).asInstanceOf[Comparable[Any]]
      val element2 = value2.asInstanceOf[Product].productElement(f).asInstanceOf[Comparable[Any]]
      val comp = element1.compareTo(element2)
      // If comp is bigger than 0, element1 is bigger: return the smaller value.
      if (comp < 0) {
        return value1
      } else if (comp > 0) {
        return value2
      }
      i += 1
    }
    // All configured fields compared equal.
    value1
  }
}
| apache/flink | flink-scala/src/main/scala/org/apache/flink/api/scala/SelectByMinFunction.scala | Scala | apache-2.0 | 2,124 |
package com.datastax.sparkstress.WriteTaskTests
import java.util.concurrent.TimeoutException
import com.datastax.spark.connector._
import com.datastax.spark.connector.cql.CassandraConnector
import org.scalatest._
import org.scalatest.junit.JUnitRunner
import org.junit.runner.RunWith
import com.datastax.sparkstress._
import org.apache.spark.{ExposeJobListener, SparkConf}
import com.datastax.bdp.fs.client.{DseFsClient, DseFsClientConf}
import com.datastax.bdp.fs.model.HostAndPort
import com.datastax.bdp.fs.model.FilePath
import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.{Duration, SECONDS}
@RunWith(classOf[JUnitRunner])
/**
 * Integration tests for the spark-cassandra-stress write workloads.
 *
 * Requires a live local Cassandra/DSE node (and DSEFS on localhost) — the
 * constructor body below wipes and recreates keyspaces test1..test7 plus the
 * DSEFS output directories before any test runs.
 */
class WriteTaskTests extends FlatSpec
  with BeforeAndAfterAll
  with Matchers{

  val ss = ConnectHelper.getSparkSession()

  def clearCache(): Unit = CassandraConnector.evictCache()

  // Allow us to rerun tests with a clean slate
  val conn = CassandraConnector(ss.sparkContext.getConf)
  conn.withSessionDo { session =>
    session.execute(s"""DROP KEYSPACE IF EXISTS test1 """)
    session.execute(s"""DROP KEYSPACE IF EXISTS test2 """)
    session.execute(s"""DROP KEYSPACE IF EXISTS test3 """)
    session.execute(s"""DROP KEYSPACE IF EXISTS test4 """)
    session.execute(s"""DROP KEYSPACE IF EXISTS test5 """)
    session.execute(s"""DROP KEYSPACE IF EXISTS test6 """)
    session.execute(s"""DROP KEYSPACE IF EXISTS test7 """)
  }

  // Give the cluster a moment to settle schema changes before re-creating.
  Thread.sleep(5000)

  conn.withSessionDo { session =>
    session.execute(s"""CREATE KEYSPACE test1 WITH REPLICATION = { 'class': 'SimpleStrategy', 'replication_factor': 1 } """)
    session.execute(s"""CREATE KEYSPACE test2 WITH REPLICATION = { 'class': 'SimpleStrategy', 'replication_factor': 1 } """)
    session.execute(s"""CREATE KEYSPACE test3 WITH REPLICATION = { 'class': 'SimpleStrategy', 'replication_factor': 1 } """)
    session.execute(s"""CREATE KEYSPACE test4 WITH REPLICATION = { 'class': 'SimpleStrategy', 'replication_factor': 1 } """)
    session.execute(s"""CREATE KEYSPACE test5 WITH REPLICATION = { 'class': 'SimpleStrategy', 'replication_factor': 1 } """)
    session.execute(s"""CREATE KEYSPACE test6 WITH REPLICATION = { 'class': 'SimpleStrategy', 'replication_factor': 1 } """)
    session.execute(s"""CREATE KEYSPACE test7 WITH REPLICATION = { 'class': 'SimpleStrategy', 'replication_factor': 1 } """)
  }

  // Wipe content from DSEFS too
  val dseFsClient = new DseFsClient(new DseFsClientConf(Seq(HostAndPort.defaultPublicDseFsEndpoint("localhost"))))
  val parquetTableName = "parquet_test"
  val textTableName = "text_test"
  val jsonTableName = "json_test"
  val csvTableName = "csv_test"
  val datasetTestKS = "test2"
  // Best effort: each delete is bounded to 60s; a timeout only logs a warning.
  try {
    Await.result(Future {dseFsClient.deleteRecursive(FilePath(s"/$datasetTestKS.$parquetTableName"))}, Duration(60, SECONDS))
    Await.result(Future {dseFsClient.deleteRecursive(FilePath(s"/$datasetTestKS.$textTableName"))}, Duration(60, SECONDS))
    Await.result(Future {dseFsClient.deleteRecursive(FilePath(s"/$datasetTestKS.$jsonTableName"))}, Duration(60, SECONDS))
    Await.result(Future {dseFsClient.deleteRecursive(FilePath(s"/$datasetTestKS.$csvTableName"))}, Duration(60, SECONDS))
  } catch {
    case ex: TimeoutException => {
      println(s"We timed out waiting to clear DSEFS before testing.")
    }
  }

  "The RDD" should "have the correct configurations" in {
    val config = new Config(keyspace = "test1", numPartitions = 1, totalOps = 20, numTotalKeys = 1)
    val writer = new WriteShortRow(config, ss)
    val rdd = writer.getRDD
    rdd.partitions.length should be (config.numPartitions)
    rdd.count should be (config.totalOps)
  }

  "WriteShortRow" should "save correctly" in {
    val config = new Config(keyspace = "test3", numPartitions = 1, totalOps = 6, numTotalKeys = 1)
    val writer = new WriteShortRow(config, ss)
    writer.setupCQL
    writer.run
    ss.sparkContext.cassandraTable(config.keyspace,config.table).count should be (6)
  }

  "WriteWideRow" should "save correctly" in {
    val config = new Config(
      testName = "WriteWideRow",
      keyspace = "test4",
      numPartitions = 1,
      totalOps = 8,
      numTotalKeys = 1)
    val writer = new WriteWideRow(config, ss)
    writer.setupCQL
    writer.run
    ss.sparkContext.cassandraTable(config.keyspace,config.table).count should be (8)
  }

  "WriteRandomWideRow" should "save correctly" in {
    val config = new Config(
      testName = "WriteRandomWideRow",
      keyspace = "test5",
      numPartitions = 10,
      totalOps = 20,
      numTotalKeys = 1)
    val writer = new WriteRandomWideRow(config, ss)
    writer.setupCQL
    writer.run
    ss.sparkContext.cassandraTable(config.keyspace,config.table).count should be (20)
  }

  "WriteWideRowByPartition" should "save correctly" in {
    val config = new Config(
      testName = "WriteWideRowByPartition",
      keyspace = "test6",
      numPartitions = 1,
      totalOps = 40,
      numTotalKeys = 5)
    val writer = new WriteWideRowByPartition(config, ss)
    writer.setupCQL
    writer.run
    ss.sparkContext.cassandraTable(config.keyspace,config.table).count should be (40)
  }

  // WritePerfRow: data-shape checks first (no C* writes), then persistence checks.
  "WritePerfRow" should " generate the correct number of pks" in {
    val config = new Config(
      testName = "WritePerfRow",
      keyspace = "test7",
      numPartitions = 5,
      totalOps = 1000,
      numTotalKeys = 200)
    val writer = new WritePerfRow(config, ss)
    val results = writer.getRDD.map(_.store).countByValue()
    results should have size (200)
  }

  it should "generate the correct number of cks per pk" in {
    val config = new Config(
      testName = "WritePerfRow",
      keyspace = "test7",
      numPartitions = 2,
      totalOps = 40,
      numTotalKeys = 4)
    val writer = new WritePerfRow(config, ss)
    val rowLengths = writer.getRDD.groupBy( u=> u.store).map(row => row._2).collect
    for (row <- rowLengths)
      row should have size 10
  }

  it should " write to C*" in {
    val config = new Config(
      testName = "WritePerfRow",
      keyspace = "test7",
      numPartitions = 10,
      totalOps = 1000,
      numTotalKeys = 200)
    val writer = new WritePerfRow(config, ss)
    writer.setupCQL
    writer.run
    ss.sparkContext.cassandraTable(config.keyspace, config.table).count should be (1000)
  }

  it should " save to C* using Dataset API" in {
    val config = new Config(
      testName = "WritePerfRow_DS_Cass",
      keyspace = datasetTestKS,
      numPartitions = 10,
      totalOps = 1000,
      numTotalKeys = 200,
      distributedDataType = DistributedDataType.DataFrame,
      saveMethod = SaveMethod.Driver)
    val writer = new WritePerfRow(config, ss)
    writer.setupCQL
    writer.run
    ss.sparkContext.cassandraTable(config.keyspace, config.table).count should be (1000)
  }

  it should " copy a table using Dataset API" in {
    val config = new Config(
      testName = "CopyTable",
      table = "copyds",
      keyspace = datasetTestKS,
      numPartitions = 10,
      totalOps = 1000,
      numTotalKeys = 200,
      distributedDataType = DistributedDataType.DataFrame,
      saveMethod = SaveMethod.Driver)
    val writer = new WritePerfRow(config, ss)
    writer.setupCQL
    writer.run
    val copier = new CopyTable(config, ss)
    copier.setupCQL
    copier.run
    ss.sparkContext.cassandraTable(config.keyspace, s"${config.table}_copy").count should be (1000)
  }

  it should " copy a table using RDD API" in {
    val config = new Config(
      testName = "CopyTableRDD",
      table = "copyrdd",
      keyspace = datasetTestKS,
      numPartitions = 10,
      totalOps = 1000,
      numTotalKeys = 200,
      distributedDataType = DistributedDataType.RDD,
      saveMethod = SaveMethod.Driver)
    val writer = new WritePerfRow(config, ss)
    writer.setupCQL
    writer.run
    val copier = new CopyTable(config, ss)
    copier.setupCQL
    copier.run
    ss.sparkContext.cassandraTable(config.keyspace, s"${config.table}_copy").count should be (1000)
  }

  // DSEFS round-trips: write via each save method, read back with the matching reader.
  it should " save to DSEFS using parquet format" in {
    val config = new Config(
      testName = "WritePerfRow_Parquet",
      keyspace = datasetTestKS,
      table = parquetTableName,
      numPartitions = 10,
      totalOps = 1000,
      numTotalKeys = 200,
      distributedDataType = DistributedDataType.DataFrame,
      saveMethod = SaveMethod.Parquet)
    val writer = new WritePerfRow(config, ss)
    writer.run
    ss.read.parquet(s"dsefs:///${config.keyspace}.${config.table}").count should be (1000)
  }

  it should " save to DSEFS using text format" in {
    val config = new Config(
      testName = "WritePerfRow_Text",
      keyspace = datasetTestKS,
      table = textTableName,
      numPartitions = 10,
      totalOps = 1000,
      numTotalKeys = 200,
      distributedDataType = DistributedDataType.DataFrame,
      saveMethod = SaveMethod.Text)
    val writer = new WritePerfRow(config, ss)
    writer.run
    ss.read.text(s"dsefs:///${config.keyspace}.${config.table}").count should be (1000)
  }

  it should " save to DSEFS using json format" in {
    val config = new Config(
      testName = "WritePerfRow_JSON",
      keyspace = datasetTestKS,
      table = jsonTableName,
      numPartitions = 10,
      totalOps = 1000,
      numTotalKeys = 200,
      distributedDataType = DistributedDataType.DataFrame,
      saveMethod = SaveMethod.Json)
    val writer = new WritePerfRow(config, ss)
    writer.run
    ss.read.json(s"dsefs:///${config.keyspace}.${config.table}").count should be (1000)
  }

  it should " save to DSEFS using csv format" in {
    val config = new Config(
      testName = "WritePerfRow_CSV",
      keyspace = datasetTestKS,
      table = csvTableName,
      numPartitions = 10,
      totalOps = 1000,
      numTotalKeys = 200,
      distributedDataType = DistributedDataType.DataFrame,
      saveMethod = SaveMethod.Csv)
    val writer = new WritePerfRow(config, ss)
    writer.run
    ss.read.csv(s"dsefs:///${config.keyspace}.${config.table}").count should be (1000)
  }
} | datastax/spark-cassandra-stress | src/test/scala/com/datastax/sparkstress/WriteTaskTests/WriteTaskTests.scala | Scala | apache-2.0 | 10,065 |
package com.bradbrok.filmomatic
import akka.actor.Actor
object JobIdActor {

  import com.bradbrok.filmomatic.Message._

  // Monotonically increasing job identifier.
  type JobId = Long

  /** Marker for requests that want a fresh, unique job id assigned. */
  trait IncrementAndGet extends Request
  /** Reply carrying the newly allocated id plus the originating request. */
  case class UniqueJobId(id: JobId, request: IncrementAndGet) extends Response
}
/**
 * Hands out strictly increasing job ids, one per IncrementAndGet request.
 * Single-threaded actor state makes the counter safe without locks.
 */
class JobIdActor extends Actor {

  import JobIdActor._

  // Last id handed out; the first reply therefore carries 1.
  var id: JobId = 0

  override def receive: Receive = {
    case request: IncrementAndGet =>
      id += 1
      sender ! UniqueJobId(id, request)
  }
}
| bradbrok/Film-O-Matic | core/src/main/scala/com/bradbrok/filmomatic/JobIdActor.scala | Scala | mit | 479 |
/*
* Copyright (C) 2017 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.tikitakka.core
import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import com.stratio.tikitakka.common.message._
import com.stratio.tikitakka.common.model.discovery.DiscoveryAppInfo
import com.stratio.tikitakka.common.state.ApplicationState
import scalaz.Reader
/**
 * Maintains an in-memory registry of application states, keyed by application id.
 * Handles register/unregister/query messages; logging doubles as the audit trail.
 */
class OrchestratorActor(upAndDownActorRef: ActorRef) extends Actor with ActorLogging {

  // Immutable map replaced wholesale on every change; safe since only this actor mutates it.
  var applications: Map[String, ApplicationState] = Map.empty[String, ApplicationState]

  // Adds or overwrites the state stored under the application's id.
  def addApplicationState(applicationState: DiscoveryAppInfo): Unit =
    applications = applications + (applicationState.id -> ApplicationState(applicationState))

  // Removes the entry and returns whatever state was present, if any.
  def removeApplicationState(applicationId: String): Option[ApplicationState] = {
    val applicationState = applications.get(applicationId)
    applications = applications - applicationId
    applicationState
  }

  // TODO manage the errors!!
  def receive: Receive = {
    case GetApplicationInfo(appId) =>
      val app = applications.get(appId)
      app.fold(
        log.info(s"Get application info: $appId, Failure"))(_ =>
        log.info(s"Get application info: $appId, Success"))
      sender ! ResponseApplicationState(app)

    case UnregisterApplication(appId) =>
      val app = removeApplicationState(appId)
      app.fold(
        log.info(s"Unregister Application: $appId, Failure"))(_ =>
        log.info(s"Unregister Application: $appId, Success"))

    case RegisterApplication(app) =>
      applications.get(app.id).fold(
        log.info(s"Register new Application: ${app.id}"))(_ =>
        log.info(s"Update an Application: ${app.id}"))
      addApplicationState(app)
  }
}
object OrchestratorActor {
  /** Props builder wired through the Reader-based dependency injection. */
  def props = Reader {
    (dependencies: Dependencies) =>
      Props(classOf[OrchestratorActor], dependencies.upAndDownActorRef)
  }
}
| compae/tiki-takka | core/src/main/scala/com/stratio/tikitakka/core/OrchestratorActor.scala | Scala | apache-2.0 | 2,415 |
package com.esri
/** Result of snapping a point onto a line. */
trait Snap extends Serializable {
  // Coordinates of the snapped point.
  def x: Double

  def y: Double

  // Distance measured along the line to the snap point.
  def distanceOnLine: Double

  // Perpendicular distance from the original point to the line.
  def distanceToLine: Double

  def rho: Double // 1-d/D

  def side: Byte // TODO - use const 99:NoSnap, 0:OnLine, 1:Right, -1:Left

  // A snap compares "better" when its rho is larger.
  def >(that: Snap) = this.rho > that.rho

  // 99 is the NoSnap sentinel (see `side` above).
  def snapped() = 99 != side
}
| mraad/spark-snap-points | src/main/scala/com/esri/Snap.scala | Scala | apache-2.0 | 324 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.unicomplex.dummysvcactor
import akka.actor.{ActorRef, ActorLogging, Actor}
import akka.http.scaladsl.model.Uri.Path
import akka.http.scaladsl.model._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import org.squbs.unicomplex.WebContext
case object RegisterTimeoutHandler
case object GetWebContext
/**
 * Test service actor: answers /ping with "pong" and /chunks with a summary of
 * the streamed request entity; /timeout and /chunktimeout deliberately never reply.
 */
class DummySvcActor extends Actor with WebContext with ActorLogging {

  var timeoutListeners = Seq.empty[ActorRef]

  implicit val am = ActorMaterializer()
  import context.dispatcher

  def receive: Receive = {
    case req@HttpRequest(_, Uri(_, _, Path("/dummysvcactor/ping"), _, _), _, _, _) =>
      log.debug("Received request " + req.uri)
      sender() ! HttpResponse(StatusCodes.OK, entity = "pong")

    case req @ HttpRequest(_, Uri(_, _, Path("/dummysvcactor/chunks"), _, _), _, _, _) =>
      // Tally non-empty chunks and total bytes as the entity streams through.
      var chunkCount = 0L
      var byteCount = 0L

      val future = req.entity.dataBytes.filter(_.length > 0).runForeach{ b =>
        chunkCount += 1
        byteCount += b.length
      }

      // Capture the sender before the async callback runs on another thread.
      val origSender = sender()
      future onSuccess {
        // Fix: the original pattern `case byteCount =>` bound the Future's
        // result (a Done) to a fresh `byteCount`, shadowing the tally above and
        // reporting "... and Done bytes". A wildcard lets the counters be read.
        case _ => origSender ! HttpResponse(StatusCodes.OK, entity = s"Received $chunkCount chunks and $byteCount bytes.")
      }

    case req @ HttpRequest(_, Uri(_, _, Path("/dummysvcactor/timeout"), _, _), _, _, _) =>

    case req @ HttpRequest(_, Uri(_, _, Path("/dummysvcactor/chunktimeout"), _, _), _, _, _) =>
      // Drain the entity but never respond, to exercise client-side timeouts.
      req.entity.dataBytes.runWith(Sink.ignore)

    // TODO Missing feature? How does this actor get notified if an akka-http request-timeout happens?
    //    case t: Timedout =>
    //      timeoutListeners foreach { _ ! t }
    //
    //    case RegisterTimeoutHandler =>
    //      timeoutListeners = timeoutListeners :+ sender()

    case GetWebContext => sender() ! webContext
  }
}
| SarathChandran/squbs | squbs-unicomplex/src/test/scala/org/squbs/unicomplex/dummysvcactor/DummySvcActor.scala | Scala | apache-2.0 | 2,387 |
package fpscala.chapter3
/**
* Created by sajit on 5/14/15.
*/
// Algebraic data type for a hand-rolled singly linked list (FP-in-Scala exercises).
sealed trait BList[+A]
// The empty list.
case object BNil extends BList[Nothing]
// Non-empty list: a head element plus the remaining tail.
case class BCons[+A](head:A,tail:BList[A]) extends BList[A]
object BList{
def sum(ints:BList[Int]):Int = ints match {
case BNil => 0
case BCons(x,xs) => x + sum(xs)
}
def product(ints:BList[Int]):Int = ints match {
case BNil => 1
case BCons(x,xs) => x * product(xs)
}
def tail[A](l: BList[A]): BList[A] =
l match {
case BNil => sys.error("tail of empty list")
case BCons(_,t) => t
}
def setHead[A](l: BList[A],newHead:A): BList[A] =
l match {
case BNil => BCons(newHead,BNil)
case BCons(_,t) => BCons(newHead,t)
}
def drop[A](l: BList[A],n:Int): BList[A] =
l match {
case BNil => sys.error("drop on empty list")
case BCons(h,t) => n match {
case x if x<0 => sys.error("bad argument")
case 0 => BCons(h,t)
case x if x>0 => drop(t,n-1)
}
}
def dropWhile[A](l: BList[A],predicate: A => Boolean): BList[A] =
l match {
case BNil => BNil
case BCons(h,t) => {
if(predicate(h)){
BCons(h,dropWhile(t,predicate))
}
else{
dropWhile(t,predicate)
}
}
}
def dropWhile2[A](l: BList[A],predicate: A => Boolean): BList[A] =
l match {
case BNil => BNil
case BCons(h,t) => predicate(h) match {
case true => BCons(h,dropWhile2(t,predicate))
case false => dropWhile2(t,predicate)
}
}
def append[A](a1:BList[A],a2:BList[A]):BList[A] = (a1,a2) match {
case (BNil,a2) => a2
case (BCons(h,t),a2) => BCons(h,append(t,a2))
}
def init[A](al:BList[A]):BList[A] = al match {
case BNil => sys.error("impossible")
case BCons(h,BNil) => BNil
case BCons(h,t) => BCons(h,init(t))
}
def last[A](al:BList[A]):A = al match {
case BNil => sys.error("nooo")
case BCons(x,BNil) => x
case BCons(h,t) => last(t)
}
def foldRight[A,B] (as:BList[A],z:B)(f:(A,B) => B):B = as match {
case BNil => z
case BCons(x,xs) => f(x,foldRight(xs,z)(f))
}
def length[A](as:BList[A]):Int = foldRight(as,0)((_,acc) => acc + 1)
def foldLeft[A,B](as:BList[A],z:B)(f:(B,A) => B):B = as match {
case BNil => z
case BCons(x,xs) => foldLeft(xs,f(z,x))(f)
}
def reverse[A](aList:BList[A]):BList[A] = aList match {
case BNil => BNil
case BCons(x,xs) => append(reverse(xs),BCons(x,BNil))
}
//from solutions
def revV2[A](a:BList[A]):BList[A] = foldLeft(a, BList[A]())((acc,h) => BCons(h,acc))
def appendv2[A](a1:BList[A],a2:BList[A]):BList[A] = (a1,a2) match {
case (BNil,a2) => a2
case (BCons(h,t),a2) => BCons(h,foldRight(t,a2)((curr,acc) => appendv2(BCons(curr,BNil),acc)))
}
@annotation.tailrec
def mfl[A,B](as:BList[A],z:B)(f:(B,A) => B): B = as match{
case BNil => z
case _ => {
mfl(init(as),f(z,last(as)))(f)
}
}
def sumIsh(as:BList[Int]):Int = mfl(as,0)((a,b) => a + b)
def productIs(as:BList[Int]) = mfl(as,1)((a,b) => a * b)
def map[A,B] (as:BList[A])(f: A => B):BList[B] = as match {
case BNil => BNil
case BCons(h,t) => BCons(f(h),map(t)(f))
}
def filter[A](as:BList[A],f:A => Boolean):BList[A] = as match {
case BNil => BNil
case BCons(h,t) => f(h) match {
case true => BCons(h,filter(t,f))
case false => filter(t,f)
}
}
def myFlatMap[A](ll:BList[BList[A]]):BList[A] = ll match {
case BNil => BNil
case BCons(h,t) => append(h,myFlatMap(t))
}
def transform(a:BList[Int]):BList[Int] = a match {
case BNil => BNil
case BCons(h,t) => BCons((h+1),transform(t))
}
def d2s(d:BList[Double]):BList[String] = d match {
case BNil => BNil
case BCons(h,t) => BCons(d.toString, d2s(t))
}
def concat[A](l: BList[BList[A]]): BList[A] =
foldRight(l, BNil:BList[A])(append)
def flatMap[A,B](as:BList[A])(f:A => BList[B]):BList[B] = as match {
case BNil => BNil
case BCons(h,t) => append(f(h),flatMap(t)(f))
}
def addup(a1:BList[Int],a2:BList[Int]):BList[Int] = (a1,a2) match {
case (BNil,BNil) => BNil
case (BNil,_) => sys.error("wont fly")
case (_,BNil) => sys.error("wont walk")
case (BCons(h1,t1),BCons(h2,t2)) => BCons(h1+h2,addup(t1,t2))
}
def zipWith[A](a1:BList[A],a2:BList[A],f:(A,A) => A):BList[A] = (a1,a2) match {
case (BNil,BNil) => BNil
case (_,BNil) => sys.error("wont talk")
case (BNil,_) => sys.error("dont bull")
case (BCons(h1,t1),BCons(h2,t2)) => BCons(f(h1,h2),zipWith(t1,t2,f))
}
def hasSubsequence[A](sup:BList[A],sub:BList[A]):Boolean = (sup,sub) match {
case (_,BNil) => true
case (BNil,_) => false
case (BCons(h1,t1),BCons(h2,t2)) => (h1 == h2) match {
case true => hasSubsequence(t1,t2)
case false => hasSubsequence(t1,sub)
}
}
/**
* Apply is a variadic function. A variadic function accepts zero or more arguments of that type.
* In this example the type of argument A.
* @param as
* @tparam A
* @return
*/
def apply[A](as:A*):BList[A]= if(as.isEmpty) BNil else BCons(as.head,apply(as.tail: _*))
}
| sajit/learnyou | scala/minimal-scala/src/main/scala/fpscala/chapter3/Samples.scala | Scala | mit | 5,176 |
package pep_079
/** Scratch / work-in-progress exploration for Project Euler problem 79
  * (deriving a passcode from keylogger attempts). */
object Wip {

  object Attempt1 {

    /** Prints character-frequency statistics for a list of attempts to stdout:
      * overall counts, then counts of first, second, and last characters.
      * The headings record hand-derived candidate digit orderings. */
    def printStats(l: List[List[Char]]): Unit = {
      println(s"----- total number of lines: ${l.size} ------------------------------")
      l.flatten.groupBy(identity).map { case (a, b) => a -> b.size }.toList.sorted foreach println
      println("--------------------------------------------------------------")
      println("First digit – 7, 3, 6, 1, 2, 8 – (not 0,4,5,9)")
      l.map(_.head).groupBy(identity).map { case (a, b) => a -> b.size }.toList.sorted foreach println
      println("--------------------------------------------------------------")
      println("Second digit – 1, 2, 6, 8, 9, 3 – (leftmost should be centermost)")
      l.map(_(1)).groupBy(identity).map { case (a, b) => a -> b.size }.toList.sorted foreach println
      println("--------------------------------------------------------------")
      println("Third digit – 1, 6, 2, 8, 0, 9 – (not 3,4,5,7)")
      l.map(_.last).groupBy(identity).map { case (a, b) => a -> b.size }.toList.sorted foreach println
      println("--------------------------------------------------------------")
    }

    // Returns 0 when the characters a, b, c occur in that order within l
    // (successive dropWhile/drop(1) scans locate each in turn); 1 otherwise.
    def verifySolution(a: Char, b: Char, c: Char, l: List[Char]): Int =
      if (l.dropWhile(_ != a).drop(1).dropWhile(_ != b).drop(1).dropWhile(_ != c).nonEmpty)
        0
      else
        1

    // Sums the error count of verifySolution over every 3-character attempt;
    // 0 means `l` satisfies all attempts. Assumes each attempt has exactly
    // three characters (the partial-function match would fail otherwise).
    def verifySolutions(llc: List[List[Char]], l: List[Char]): Int =
      llc.foldLeft(0) { case (err, List(a, b, c)) => err + verifySolution(a, b, c, l) }

    /** Generates 100 random order-preserving character pairs from "1234" and
      * prints their statistics (an experiment, not a solution). */
    def solveS4(): Unit = {
      val s = "1234".toList
      // Picks two characters from cs preserving their relative order: a.last sits
      // just before a random split point, b(r2) is drawn at or after it.
      def r(cs: List[Char]): List[Char] = {
        val r1 = 1 + scala.util.Random.nextInt(cs.size - 1)
        val (a, b) = cs.splitAt(r1)
        val r2 = scala.util.Random.nextInt(b.size)
        List(a.last, b(r2))
      }
      val llc = List.fill(100)(r(s))
      printStats(llc)
    }

    // Candidate digit orderings noted by hand while exploring the problem.
    // 7, 3, 6, 1, 2, 8
    // 3, 9, 8, 6, 2, 1
    // 1, 2, 6, 8, 9, 3
    // 1, 6, 2, 8, 0, 9
    // 7, 3, 1, 6, 2, 8, 9, 0
  }
}
| filippovitale/pe | pe-solution/src/main/scala/pep_079/Wip.scala | Scala | mit | 1,998 |
// Copyright (c) 2011-2017 ScalaMock Contributors (https://github.com/paulbutcher/ScalaMock/graphs/contributors)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package org.scalamock.util
/**
 * Thin adapter over the scala-reflect blackbox macro API, wrapping the
 * `internal.*` entry points so the rest of the macro code does not depend on
 * them directly.
 */
trait MacroAdapter {
  /** The blackbox macro context supplied by the concrete macro implementation. */
  protected val ctx: MacroAdapter.Context
  import ctx.universe._
  /** Returns a fresh, compilation-unique term name starting with `prefix`. */
  def freshTerm(prefix: String): TermName = ctx.freshName(TermName(prefix))
  /** Builds a type reference for `sym` with prefix `pre` applied to `args`. */
  def internalTypeRef(pre: Type, sym: Symbol, args: List[Type]) = internal.typeRef(pre, sym, args)
  /** Builds a super-type with the given `this`-type and supertype. */
  def internalSuperType(thistpe: Type, supertpe: Type): Type = internal.superType(thistpe, supertpe)
  /** Builds the `this`-type of the given class symbol. */
  def internalThisType(thistpe: Symbol) = internal.thisType(thistpe)
  /** Builds a `TypeDef` tree from a type symbol. */
  def internalTypeDef(p: Symbol): TypeDef = internal.typeDef(p)
}
object MacroAdapter {
  /** The blackbox macro context type this adapter is written against. */
  type Context = scala.reflect.macros.blackbox.Context
}
| paulbutcher/ScalaMock | shared/src/main/scala/org/scalamock/util/MacroAdapter.scala | Scala | mit | 1,797 |
//
// Scaled - a scalable editor extensible via JVM languages
// http://github.com/scaled/scaled/blob/master/LICENSE
package scaled.major
import reactual.Promise
import scaled._
@Major(name="mini-readopt", tags=Array("mini"), desc="""
A minibuffer mode that queries the user for a single key press from a large(ish) selection. In
addition to the supplied options, C-h will be bound to a fn that displays the help text in the
minibuffer completion area.
""")
class MiniReadOptMode (
  env :Env,
  miniui :MiniUI,
  promise :Promise[String],
  prompt :String,
  /** A map from key trigger (e.g. `y`, `C-r`, `!`, etc.) to a help string for the option. */
  opts :Seq[(String,String)]
) extends MinibufferMode(env, promise) {

  // Fast trigger -> help-text lookup derived from the ordered option list.
  val optMap = opts.toMap
  // Prompt listing every trigger plus the always-available C-h help binding.
  def optprompt = prompt + opts.map(_._1).mkString(" (", ", ", ", C-h)")
  // Displayed immediately, as part of mode construction.
  miniui.setPrompt(optprompt)

  override def keymap = Seq(
    "C-g" -> "abort",
    "C-h" -> "show-help"
  )

  // disable our default fn; route everything to unknownCommand (our missedFn)
  override def defaultFn = None

  // A known trigger completes the promise with itself; anything else points the
  // user at the help display.
  override def unknownCommand (trigger :String) = {
    if (optMap.contains(trigger)) promise.succeed(trigger)
    else editor.popStatus("Type C-h for help.")
  }

  @Fn("Displays the option descriptions in the minibuffer completion area.")
  def showHelp () {
    // Pad the trigger column to the widest trigger so descriptions line up.
    // NOTE(review): assumes opts is non-empty, otherwise `max` throws — confirm callers.
    val maxWidth = opts.map(_._1).map(_.length).max
    miniui.showCompletions(opts map { case (k, v) =>
      String.format(s"%-${maxWidth}s - %s", k, v)
    })
  }
}
| swhgoon/scaled | api/src/main/scala/scaled/major/MiniReadOptMode.scala | Scala | bsd-3-clause | 1,504 |
package uk.gov.gds.ier.transaction.crown.address
import uk.gov.gds.ier.test._
import uk.gov.gds.ier.model._
import uk.gov.gds.ier.validation.ErrorTransformForm
import uk.gov.gds.ier.transaction.crown.InprogressCrown
/**
 * Mustache-model tests for the crown manual-address page. The three pre-filled
 * scenarios share identical form data and assertions except for the
 * `hasAddress` answer and the expected question title, so that common body is
 * factored into private helpers; test names and assertions are unchanged.
 */
class AddressManualMustacheTests
  extends MustacheTestSuite
  with AddressForms
  with WithMockCrownControllers
  with AddressManualMustache {

  when(mockAddressManualStep.routing).thenReturn(routes("/register-to-vote/crown/address/manual"))
  when(mockAddressStep.routing).thenReturn(routes("/register-to-vote/crown/address"))

  /** Renders the manual-address mustache model for the given application form. */
  private def manualModel(form: ErrorTransformForm[InprogressCrown]): ManualModel =
    mustache.data(
      form,
      Call("POST", "/register-to-vote/crown/address/manual"),
      InprogressCrown()
    ).asInstanceOf[ManualModel]

  /** Builds a form pre-filled with the Elgar Business Centre manual address and
    * the given `hasAddress` answer. */
  private def filledForm(hasAddress: HasAddressOption) =
    addressForm.fill(InprogressCrown(
      address = Some(LastAddress(
        hasAddress = Some(hasAddress),
        address = Some(PartialAddress(
          addressLine = None,
          uprn = None,
          postcode = "WR26NJ",
          manualAddress = Some(PartialManualAddress(
            lineOne = Some("Unit 4, Elgar Business Centre"),
            lineTwo = Some("Moseley Road"),
            lineThree = Some("Hallow"),
            city = Some("Worcester")))
        ))
      )),
      possibleAddresses = None
    ))

  /** Assertions shared by every pre-filled scenario; only the title differs. */
  private def assertFilledModel(addressModel: ManualModel, expectedTitle: String): Unit = {
    addressModel.question.title should be(expectedTitle)
    addressModel.question.postUrl should be("/register-to-vote/crown/address/manual")
    addressModel.lookupUrl should be ("/register-to-vote/crown/address")
    addressModel.postcode.value should be ("WR26NJ")
    addressModel.maLineOne.value should be ("Unit 4, Elgar Business Centre")
    addressModel.maLineTwo.value should be ("Moseley Road")
    addressModel.maLineThree.value should be ("Hallow")
    addressModel.maCity.value should be ("Worcester")
  }

  it should "empty progress form should produce empty Model (manualData)" in {
    val addressModel = manualModel(addressForm)

    addressModel.question.title should be("What was your last UK address?")
    addressModel.question.postUrl should be("/register-to-vote/crown/address/manual")
    addressModel.lookupUrl should be ("/register-to-vote/crown/address")
    addressModel.postcode.value should be ("")
    addressModel.maLineOne.value should be ("")
    addressModel.maLineTwo.value should be ("")
    addressModel.maLineThree.value should be ("")
    addressModel.maCity.value should be ("")
  }

  it should "progress form with valid values should produce Mustache Model with values present "+
    "(manualData) - lastAddress = yes and living there" in {
    assertFilledModel(
      manualModel(filledForm(HasAddressOption.YesAndLivingThere)),
      "What is your UK address?")
  }

  it should "progress form with valid values should produce Mustache Model with values present "+
    "(manualData) - lastAddress = yes and not living there" in {
    assertFilledModel(
      manualModel(filledForm(HasAddressOption.YesAndNotLivingThere)),
      "What is your UK address?")
  }

  it should "progress form with valid values should produce Mustache Model with values present "+
    "(manualData) - lastAddress = false" in {
    assertFilledModel(
      manualModel(filledForm(HasAddressOption.No)),
      "What was your last UK address?")
  }
}
| michaeldfallen/ier-frontend | test/uk/gov/gds/ier/transaction/crown/address/AddressManualMustacheTests.scala | Scala | mit | 5,769 |
package skinny.mailer.feature
import skinny.mailer.SkinnyMailerConfigApi
/**
 * Provides SkinnyMailerConfig
 */
trait ConfigFeature {

  /**
   * Returns all loaded config.
   *
   * Note: a fresh anonymous [[SkinnyMailerConfigApi]] is built on every call;
   * mixers can override this to supply a customized or cached configuration.
   *
   * @return config
   */
  def config: SkinnyMailerConfigApi = new SkinnyMailerConfigApi {}
}
| BlackPrincess/skinny-framework | mailer/src/main/scala/skinny/mailer/feature/ConfigFeature.scala | Scala | mit | 276 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.ml.param.ParamsSuite
import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTest, MLTestingUtils}
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.mllib.feature.{Word2VecModel => OldWord2VecModel}
import org.apache.spark.sql.Row
import org.apache.spark.util.Utils
/**
 * Tests for the ML `Word2Vec` estimator and model.
 *
 * Fix applied in test("window size"): the original queried `model.findSynonyms`
 * for both result sets, leaving `biggerModel` unused and comparing the
 * small-window model against itself; the relative-difference assertion was also
 * missing parentheses around the subtraction.
 */
class Word2VecSuite extends MLTest with DefaultReadWriteTest {

  import testImplicits._

  test("params") {
    ParamsSuite.checkParams(new Word2Vec)
    val model = new Word2VecModel("w2v", new OldWord2VecModel(Map("a" -> Array(0.0f))))
    ParamsSuite.checkParams(model)
  }

  test("Word2Vec") {

    val sentence = "a b " * 100 + "a c " * 10
    val numOfWords = sentence.split(" ").size
    val doc = sc.parallelize(Seq(sentence, sentence)).map(line => line.split(" "))

    val codes = Map(
      "a" -> Array(-0.2811822295188904, -0.6356269121170044, -0.3020961284637451),
      "b" -> Array(1.0309048891067505, -1.29472815990448, 0.22276712954044342),
      "c" -> Array(-0.08456747233867645, 0.5137411952018738, 0.11731560528278351)
    )

    // Each document vector is the average of its word vectors.
    val expected = doc.map { sentence =>
      Vectors.dense(sentence.map(codes.apply).reduce((word1, word2) =>
        word1.zip(word2).map { case (v1, v2) => v1 + v2 }
      ).map(_ / numOfWords))
    }

    val docDF = doc.zip(expected).toDF("text", "expected")

    val w2v = new Word2Vec()
      .setVectorSize(3)
      .setInputCol("text")
      .setOutputCol("result")
      .setSeed(42L)
    val model = w2v.fit(docDF)

    MLTestingUtils.checkCopyAndUids(w2v, model)

    // These expectations are just magic values, characterizing the current
    // behavior. The test needs to be updated to be more general, see SPARK-11502
    val magicExp = Vectors.dense(0.30153007534417237, -0.6833061711354689, 0.5116530778733167)
    testTransformer[(Seq[String], Vector)](docDF, model, "result", "expected") {
      case Row(vector1: Vector, vector2: Vector) =>
        assert(vector1 ~== magicExp absTol 1E-5, "Transformed vector is different with expected.")
    }
  }

  test("getVectors") {

    val sentence = "a b " * 100 + "a c " * 10
    val doc = sc.parallelize(Seq(sentence, sentence)).map(line => line.split(" "))

    val codes = Map(
      "a" -> Array(-0.2811822295188904, -0.6356269121170044, -0.3020961284637451),
      "b" -> Array(1.0309048891067505, -1.29472815990448, 0.22276712954044342),
      "c" -> Array(-0.08456747233867645, 0.5137411952018738, 0.11731560528278351)
    )
    val expectedVectors = codes.toSeq.sortBy(_._1).map { case (w, v) => Vectors.dense(v) }

    val docDF = doc.zip(doc).toDF("text", "alsotext")

    val model = new Word2Vec()
      .setVectorSize(3)
      .setInputCol("text")
      .setOutputCol("result")
      .setSeed(42L)
      .fit(docDF)

    val realVectors = model.getVectors.sort("word").select("vector").rdd.map {
      case Row(v: Vector) => v
    }.collect()
    // These expectations are just magic values, characterizing the current
    // behavior. The test needs to be updated to be more general, see SPARK-11502
    val magicExpected = Seq(
      Vectors.dense(0.3326166272163391, -0.5603077411651611, -0.2309209555387497),
      Vectors.dense(0.32463887333869934, -0.9306551218032837, 1.393115520477295),
      Vectors.dense(-0.27150997519493103, 0.4372006058692932, -0.13465698063373566)
    )

    realVectors.zip(magicExpected).foreach {
      case (real, expected) =>
        assert(real ~== expected absTol 1E-5, "Actual vector is different from expected.")
    }
  }

  test("findSynonyms") {

    val sentence = "a b " * 100 + "a c " * 10
    val doc = sc.parallelize(Seq(sentence, sentence)).map(line => line.split(" "))
    val docDF = doc.zip(doc).toDF("text", "alsotext")

    val model = new Word2Vec()
      .setVectorSize(3)
      .setInputCol("text")
      .setOutputCol("result")
      .setSeed(42L)
      .fit(docDF)

    val expected = Map(("b", 0.2608488929093532), ("c", -0.8271274846926078))
    val findSynonymsResult = model.findSynonyms("a", 2).rdd.map {
      case Row(w: String, sim: Double) => (w, sim)
    }.collectAsMap()

    expected.foreach {
      case (expectedSynonym, expectedSimilarity) =>
        assert(findSynonymsResult.contains(expectedSynonym))
        assert(expectedSimilarity ~== findSynonymsResult(expectedSynonym) absTol 1E-5)
    }

    // The array-returning variant must agree with the DataFrame-returning one.
    val findSynonymsArrayResult = model.findSynonymsArray("a", 2).toMap
    findSynonymsResult.foreach {
      case (expectedSynonym, expectedSimilarity) =>
        assert(findSynonymsArrayResult.contains(expectedSynonym))
        assert(expectedSimilarity ~== findSynonymsArrayResult(expectedSynonym) absTol 1E-5)
    }
  }

  test("window size") {

    val sentence = "a q s t q s t b b b s t m s t m q " * 100 + "a c " * 10
    val doc = sc.parallelize(Seq(sentence, sentence)).map(line => line.split(" "))
    val docDF = doc.zip(doc).toDF("text", "alsotext")

    val model = new Word2Vec()
      .setVectorSize(3)
      .setWindowSize(2)
      .setInputCol("text")
      .setOutputCol("result")
      .setSeed(42L)
      .fit(docDF)

    val (synonyms, similarity) = model.findSynonyms("a", 6).rdd.map {
      case Row(w: String, sim: Double) => (w, sim)
    }.collect().unzip

    // Increase the window size
    val biggerModel = new Word2Vec()
      .setVectorSize(3)
      .setInputCol("text")
      .setOutputCol("result")
      .setSeed(42L)
      .setWindowSize(10)
      .fit(docDF)

    // FIX: query the model trained with the larger window; the original queried
    // `model` again, leaving `biggerModel` unused and comparing a model to itself.
    val (synonymsLarger, similarityLarger) = biggerModel.findSynonyms("a", 6).rdd.map {
      case Row(w: String, sim: Double) => (w, sim)
    }.collect().unzip
    // The similarity score should be very different with the larger window.
    // FIX: parenthesize the difference so this computes the relative deviation;
    // the original divided only similarityLarger(5) before subtracting.
    assert(math.abs((similarity(5) - similarityLarger(5)) / similarity(5)) > 1E-5)
  }

  test("Word2Vec read/write numPartitions calculation") {
    val smallModelNumPartitions = Word2VecModel.Word2VecModelWriter.calculateNumberOfPartitions(
      Utils.byteStringAsBytes("64m"), numWords = 10, vectorSize = 5)
    assert(smallModelNumPartitions === 1)
    val largeModelNumPartitions = Word2VecModel.Word2VecModelWriter.calculateNumberOfPartitions(
      Utils.byteStringAsBytes("64m"), numWords = 1000000, vectorSize = 5000)
    assert(largeModelNumPartitions > 1)
  }

  test("Word2Vec read/write") {
    val t = new Word2Vec()
      .setInputCol("myInputCol")
      .setOutputCol("myOutputCol")
      .setMaxIter(2)
      .setMinCount(8)
      .setNumPartitions(1)
      .setSeed(42L)
      .setStepSize(0.01)
      .setVectorSize(100)
      .setMaxSentenceLength(500)
    testDefaultReadWrite(t)
  }

  test("Word2VecModel read/write") {
    val word2VecMap = Map(
      ("china", Array(0.50f, 0.50f, 0.50f, 0.50f)),
      ("japan", Array(0.40f, 0.50f, 0.50f, 0.50f)),
      ("taiwan", Array(0.60f, 0.50f, 0.50f, 0.50f)),
      ("korea", Array(0.45f, 0.60f, 0.60f, 0.60f))
    )
    val oldModel = new OldWord2VecModel(word2VecMap)
    val instance = new Word2VecModel("myWord2VecModel", oldModel)
    val newInstance = testDefaultReadWrite(instance)
    assert(newInstance.getVectors.collect().sortBy(_.getString(0)) ===
      instance.getVectors.collect().sortBy(_.getString(0)))
  }

  test("Word2Vec works with input that is non-nullable (NGram)") {
    val sentence = "a q s t q s t b b b s t m s t m q "
    val docDF = sc.parallelize(Seq(sentence, sentence)).map(_.split(" ")).toDF("text")

    val ngram = new NGram().setN(2).setInputCol("text").setOutputCol("ngrams")
    val ngramDF = ngram.transform(docDF)

    val model = new Word2Vec()
      .setVectorSize(2)
      .setInputCol("ngrams")
      .setOutputCol("result")
      .fit(ngramDF)

    // Just test that this transformation succeeds
    testTransformerByGlobalCheckFunc[(Seq[String], Seq[String])](ngramDF, model, "result") { _ => }
  }
}
| yanboliang/spark | mllib/src/test/scala/org/apache/spark/ml/feature/Word2VecSuite.scala | Scala | apache-2.0 | 8,653 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io._
import java.net.URI
import java.nio.charset.StandardCharsets
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, FSDataOutputStream, Path}
import org.apache.hadoop.fs.permission.FsPermission
import org.json4s.JsonAST.JValue
import org.json4s.jackson.JsonMethods._
import org.apache.spark.{SPARK_VERSION, SparkConf}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.io.CompressionCodec
import org.apache.spark.util.{JsonProtocol, Utils}
/**
 * A SparkListener that logs events to persistent storage.
 *
 * Event logging is specified by the following configurable parameters:
 *   spark.eventLog.enabled - Whether event logging is enabled.
 *   spark.eventLog.compress - Whether to compress logged events
 *   spark.eventLog.overwrite - Whether to overwrite any existing files.
 *   spark.eventLog.dir - Path to the directory in which events are logged.
 *   spark.eventLog.buffer.kb - Buffer size to use when writing to output streams
 */
private[spark] class EventLoggingListener(
    appId: String,
    appAttemptId : Option[String],
    logBaseDir: URI,
    sparkConf: SparkConf,
    hadoopConf: Configuration)
  extends SparkListener with Logging {

  import EventLoggingListener._

  // Convenience constructor that derives the Hadoop configuration from the Spark one.
  def this(appId: String, appAttemptId : Option[String], logBaseDir: URI, sparkConf: SparkConf) =
    this(appId, appAttemptId, logBaseDir, sparkConf,
      SparkHadoopUtil.get.newConfiguration(sparkConf))

  private val shouldCompress = sparkConf.getBoolean("spark.eventLog.compress", false)
  private val shouldOverwrite = sparkConf.getBoolean("spark.eventLog.overwrite", false)
  private val testing = sparkConf.getBoolean("spark.eventLog.testing", false)
  private val outputBufferSize = sparkConf.getInt("spark.eventLog.buffer.kb", 100) * 1024
  private val fileSystem = Utils.getHadoopFileSystem(logBaseDir, hadoopConf)
  private val compressionCodec =
    if (shouldCompress) {
      Some(CompressionCodec.createCodec(sparkConf))
    } else {
      None
    }
  // Short codec name (e.g. "lzf") that getLogPath embeds in the file name so
  // readers know how to decompress the log without opening it first.
  private val compressionCodecName = compressionCodec.map { c =>
    CompressionCodec.getShortName(c.getClass.getName)
  }

  // Only defined if the file system scheme is not local
  private var hadoopDataStream: Option[FSDataOutputStream] = None
  private var writer: Option[PrintWriter] = None

  // For testing. Keep track of all JSON serialized events that have been logged.
  private[scheduler] val loggedEvents = new ArrayBuffer[JValue]

  // Visible for tests only.
  private[scheduler] val logPath = getLogPath(logBaseDir, appId, appAttemptId, compressionCodecName)

  /**
   * Creates the log file in the configured log directory.
   *
   * The file is written with an ".inprogress" suffix until [[stop]] renames it.
   * @throws IllegalArgumentException if `logBaseDir` is not a directory.
   */
  def start() {
    if (!fileSystem.getFileStatus(new Path(logBaseDir)).isDirectory) {
      throw new IllegalArgumentException(s"Log directory $logBaseDir is not a directory.")
    }

    val workingPath = logPath + IN_PROGRESS
    val uri = new URI(workingPath)
    val path = new Path(workingPath)
    val defaultFs = FileSystem.getDefaultUri(hadoopConf).getScheme
    val isDefaultLocal = defaultFs == null || defaultFs == "file"

    if (shouldOverwrite && fileSystem.delete(path, true)) {
      logWarning(s"Event log $path already exists. Overwriting...")
    }

    /* The Hadoop LocalFileSystem (r1.0.4) has known issues with syncing (HADOOP-7844).
     * Therefore, for local files, use FileOutputStream instead. */
    val dstream =
      if ((isDefaultLocal && uri.getScheme == null) || uri.getScheme == "file") {
        new FileOutputStream(uri.getPath)
      } else {
        hadoopDataStream = Some(fileSystem.create(path))
        hadoopDataStream.get
      }

    try {
      // Wrap as: raw -> optional compression -> buffered; the metadata header is
      // written before the PrintWriter takes over.
      val cstream = compressionCodec.map(_.compressedOutputStream(dstream)).getOrElse(dstream)
      val bstream = new BufferedOutputStream(cstream, outputBufferSize)

      EventLoggingListener.initEventLog(bstream)
      fileSystem.setPermission(path, LOG_FILE_PERMISSIONS)
      writer = Some(new PrintWriter(bstream))
      logInfo("Logging events to %s".format(logPath))
    } catch {
      case e: Exception =>
        dstream.close()
        throw e
    }
  }

  /** Log the event as JSON, one compact record per line.
    * @param flushLogger whether to flush the stream(s) after writing, so the
    *                    event is visible to readers immediately. */
  private def logEvent(event: SparkListenerEvent, flushLogger: Boolean = false) {
    val eventJson = JsonProtocol.sparkEventToJson(event)
    // scalastyle:off println
    writer.foreach(_.println(compact(render(eventJson))))
    // scalastyle:on println
    if (flushLogger) {
      writer.foreach(_.flush())
      hadoopDataStream.foreach(_.hflush())
    }
    if (testing) {
      loggedEvents += eventJson
    }
  }

  // Events that do not trigger a flush
  override def onStageSubmitted(event: SparkListenerStageSubmitted): Unit = logEvent(event)

  override def onTaskStart(event: SparkListenerTaskStart): Unit = logEvent(event)

  override def onTaskGettingResult(event: SparkListenerTaskGettingResult): Unit = logEvent(event)

  override def onTaskEnd(event: SparkListenerTaskEnd): Unit = logEvent(event)

  override def onEnvironmentUpdate(event: SparkListenerEnvironmentUpdate): Unit = logEvent(event)

  // Events that trigger a flush
  override def onStageCompleted(event: SparkListenerStageCompleted): Unit = {
    logEvent(event, flushLogger = true)
  }

  override def onJobStart(event: SparkListenerJobStart): Unit = logEvent(event, flushLogger = true)

  override def onJobEnd(event: SparkListenerJobEnd): Unit = logEvent(event, flushLogger = true)

  override def onBlockManagerAdded(event: SparkListenerBlockManagerAdded): Unit = {
    logEvent(event, flushLogger = true)
  }

  override def onBlockManagerRemoved(event: SparkListenerBlockManagerRemoved): Unit = {
    logEvent(event, flushLogger = true)
  }

  override def onUnpersistRDD(event: SparkListenerUnpersistRDD): Unit = {
    logEvent(event, flushLogger = true)
  }

  override def onApplicationStart(event: SparkListenerApplicationStart): Unit = {
    logEvent(event, flushLogger = true)
  }

  override def onApplicationEnd(event: SparkListenerApplicationEnd): Unit = {
    logEvent(event, flushLogger = true)
  }
  override def onExecutorAdded(event: SparkListenerExecutorAdded): Unit = {
    logEvent(event, flushLogger = true)
  }

  override def onExecutorRemoved(event: SparkListenerExecutorRemoved): Unit = {
    logEvent(event, flushLogger = true)
  }

  // No-op because logging every update would be overkill
  override def onBlockUpdated(event: SparkListenerBlockUpdated): Unit = {}

  // No-op because logging every update would be overkill
  override def onExecutorMetricsUpdate(event: SparkListenerExecutorMetricsUpdate): Unit = { }

  override def onOtherEvent(event: SparkListenerEvent): Unit = {
    if (event.logEvent) {
      logEvent(event, flushLogger = true)
    }
  }

  /**
   * Stop logging events. The event log file will be renamed so that it loses the
   * ".inprogress" suffix.
   * @throws IOException if the target file exists and overwriting is disabled.
   */
  def stop(): Unit = {
    writer.foreach(_.close())

    val target = new Path(logPath)
    if (fileSystem.exists(target)) {
      if (shouldOverwrite) {
        logWarning(s"Event log $target already exists. Overwriting...")
        if (!fileSystem.delete(target, true)) {
          logWarning(s"Error deleting $target")
        }
      } else {
        throw new IOException("Target log file already exists (%s)".format(logPath))
      }
    }
    fileSystem.rename(new Path(logPath + IN_PROGRESS), target)
    // touch file to ensure modtime is current across those filesystems where rename()
    // does not set it, -and which support setTimes(); it's a no-op on most object stores
    try {
      fileSystem.setTimes(target, System.currentTimeMillis(), -1)
    } catch {
      case e: Exception => logDebug(s"failed to set time of $target", e)
    }
  }

}
private[spark] object EventLoggingListener extends Logging {
// Suffix applied to the names of files still being written by applications.
val IN_PROGRESS = ".inprogress"
val DEFAULT_LOG_DIR = "/tmp/spark-events"
private val LOG_FILE_PERMISSIONS = new FsPermission(Integer.parseInt("770", 8).toShort)
// A cache for compression codecs to avoid creating the same codec many times
private val codecMap = new mutable.HashMap[String, CompressionCodec]
/**
 * Write metadata about an event log to the given stream.
 * The metadata is encoded in the first line of the event log as JSON.
 *
 * @param logStream Raw output stream to the event log file.
 */
def initEventLog(logStream: OutputStream): Unit = {
  // Record the writing Spark version first so readers know how to parse the rest.
  val metadata = SparkListenerLogStart(SPARK_VERSION)
  val metadataJson = compact(JsonProtocol.logStartToJson(metadata)) + "\\n"
  logStream.write(metadataJson.getBytes(StandardCharsets.UTF_8))
}
/**
 * Return a file-system-safe path to the log file for the given application.
 *
 * Because a single log file is created per application, everything needed to
 * parse the log (app id, attempt id, compression codec) must be encoded in
 * the file name itself: a compressed file cannot carry its own codec metadata.
 * For example: app_123 for an uncompressed log, app_123.lzf for LZF.
 *
 * @param logBaseDir Directory where the log file will be written.
 * @param appId A unique app ID.
 * @param appAttemptId A unique attempt id of appId. May be the empty string.
 * @param compressionCodecName Name identifying the codec used to compress the
 *                             contents, or None if compression is not enabled.
 * @return A path which consists of file-system-safe characters.
 */
def getLogPath(
    logBaseDir: URI,
    appId: String,
    appAttemptId: Option[String],
    compressionCodecName: Option[String] = None): String = {
  val dir = logBaseDir.toString.stripSuffix("/")
  // Optional suffixes collapse to "" when absent.
  val attemptSuffix = appAttemptId.fold("")(attempt => "_" + sanitize(attempt))
  val codecSuffix = compressionCodecName.fold("")(codec => "." + codec)
  dir + "/" + sanitize(appId) + attemptSuffix + codecSuffix
}
// Make a string safe for use in a file name: spaces, colons and slashes
// become "-"; characters that are special to shells/paths become "_";
// lowercased so the name (and thus the codec lookup) is case-insensitive.
// NOTE: removing "." here is what makes it safe for openEventLog to treat
// everything after the last "." in a log name as the compression codec.
private def sanitize(str: String): String = {
  str.replaceAll("[ :/]", "-").replaceAll("[.${}'\\"]", "_").toLowerCase
}
/**
 * Opens an event log file and returns an input stream that contains the event data.
 *
 * @param log the (possibly in-progress) event log file
 * @param fs the Hadoop filesystem the log lives on
 * @return input stream that holds one JSON record per line.
 */
def openEventLog(log: Path, fs: FileSystem): InputStream = {
  val in = new BufferedInputStream(fs.open(log))
  // Compression codec is encoded as an extension, e.g. app_123.lzf
  // Since we sanitize the app ID to not include periods, it is safe to split on it
  val logName = log.getName.stripSuffix(IN_PROGRESS)
  val codecName: Option[String] = logName.split("\\\\.").tail.lastOption
  // Codec instances are cached in codecMap to avoid re-creating them per file.
  val codec = codecName.map { c =>
    codecMap.getOrElseUpdate(c, CompressionCodec.createCodec(new SparkConf, c))
  }
  try {
    codec.map(_.compressedInputStream(in)).getOrElse(in)
  } catch {
    // Don't leak the raw stream if wrapping it in a decompressor fails.
    case e: Exception =>
      in.close()
      throw e
  }
}
}
| spark0001/spark2.1.1 | core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala | Scala | apache-2.0 | 12,115 |
// Copyright (c) 2013, Johns Hopkins University. All rights reserved.
// This software is released under the 2-clause BSD license.
// See /LICENSE.txt
// Travis Wolfe, twolfe18@gmail.com, 30 July 2013
package edu.jhu.hlt.parma.inference
import edu.jhu.hlt.parma.types._
/**
 * see InferenceEngine
 *
 * this type is supposed to be the parent for any type
 * that is used to cache feature computation
 *
 * an inference engine will produce a feature representation,
 * the computation for which will be front-loaded and cached,
 * and then asked to produce alignments from this representation
 */
trait FeatureRepresentation {

  // The report document this representation was computed for.
  def report: Document

  // The passage document this representation was computed for.
  def passage: Document

  // Convenience pairing of the two documents; builds a fresh Context per call.
  def context = new Context(report, passage)

  /**
   * this is only used for debugging, not inference
   * put in here a feature vector, indices should be
   * look-up-able with controller.featureName(idx)
   *
   * @return None when the implementation does not expose its features
   */
  def inspectFeatures: Option[scala.collection.Map[Alignment, SVec]]

  /**
   * this is only used for debugging, not inference
   * put in a probability or dot product in here
   *
   * @return None when the implementation does not expose its scores
   */
  def inspectScores: Option[scala.collection.Map[Alignment, Double]]

  /**
   * give the InferenceEngine that made and uses this
   * feature representation
   */
  def controller: InferenceEngine[_]
}
| hltcoe/parma | src/main/scala/edu/jhu/hlt/parma/inference/FeatureRepresentation.scala | Scala | bsd-2-clause | 1,261 |
package org.jetbrains.plugins.scala
package editor.importOptimizer
import java.util
import java.util.concurrent.atomic.AtomicInteger
import com.intellij.concurrency.JobLauncher
import com.intellij.lang.{ImportOptimizer, LanguageImportStatements}
import com.intellij.openapi.editor.Document
import com.intellij.openapi.progress.{ProgressIndicator, ProgressManager}
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.EmptyRunnable
import com.intellij.psi._
import com.intellij.psi.impl.source.tree.LeafPsiElement
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.util.Processor
import com.intellij.util.containers.ContainerUtil
import org.jetbrains.plugins.scala.editor.typedHandler.ScalaTypedHandler
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScReferenceElement, ScStableCodeReferenceElement}
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScSimpleTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScExpression, ScForStatement, ScMethodCall}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypedDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.usages.{ImportExprUsed, ImportSelectorUsed, ImportUsed, ImportWildcardSelectorUsed}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.{ScImportExpr, ScImportStmt}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.packaging.ScPackaging
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScObject, ScTypeDefinition}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.{ScImportsHolder, ScalaPsiUtil}
import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult
import org.jetbrains.plugins.scala.lang.scaladoc.psi.api.ScDocComment
import scala.annotation.tailrec
import scala.collection.JavaConversions._
import scala.collection.mutable.ArrayBuffer
import scala.collection.{immutable, mutable}
/**
* User: Alexander Podkhalyuzin
* Date: 16.06.2009
*/
class ScalaImportOptimizer extends ImportOptimizer {
import org.jetbrains.plugins.scala.editor.importOptimizer.ScalaImportOptimizer._
// Convenience overload: run without an externally supplied progress indicator.
def processFile(file: PsiFile): Runnable = processFile(file, null)

// Analyzes import usage in `file` (concurrently, under the given or ambient
// progress indicator) and returns a Runnable that, when executed on the EDT
// with a write action, rewrites the import sections with optimized imports.
// The heavy analysis happens eagerly here; only the PSI mutation is deferred.
def processFile(file: PsiFile, progressIndicator: ProgressIndicator = null): Runnable = {
  // Single traversal collecting both the import-holding scopes and every
  // element that may reference an import.
  def collectImportHoldersAndUsers: (util.ArrayList[ScImportsHolder], util.ArrayList[PsiElement]) = {
    val holders = new util.ArrayList[ScImportsHolder]()
    val users = new util.ArrayList[PsiElement]()
    file.depthFirst.foreach { elem =>
      elem match {
        case holder: ScImportsHolder => holders.add(holder)
        case _ =>
      }
      elem match {
        case ImportUser(e) => users.add(e)
        case _ =>
      }
    }
    (holders, users)
  }
  // Resolve the Scala PSI root; multi-root files (e.g. Play templates) carry
  // a Scala sub-tree. Anything else is a no-op.
  val scalaFile = file match {
    case scFile: ScalaFile => scFile
    case multiRootFile: PsiFile if multiRootFile.getViewProvider.getLanguages contains ScalaFileType.SCALA_LANGUAGE =>
      multiRootFile.getViewProvider.getPsi(ScalaFileType.SCALA_LANGUAGE).asInstanceOf[ScalaFile]
    case _ => return EmptyRunnable.getInstance()
  }
  val project: Project = scalaFile.getProject
  val documentManager = PsiDocumentManager.getInstance(project)
  val document: Document = documentManager.getDocument(scalaFile)
  // Snapshot of the text at analysis time; used later to detect concurrent edits.
  val analyzingDocumentText = document.getText

  // Concurrent sets: filled from multiple worker threads below.
  val usedImports = ContainerUtil.newConcurrentSet[ImportUsed]()
  val usedImportedNames = ContainerUtil.newConcurrentSet[UsedName]()

  val (importHolders, importUsers) = collectImportHoldersAndUsers

  val progressManager: ProgressManager = ProgressManager.getInstance()
  val indicator: ProgressIndicator =
    if (progressIndicator != null) progressIndicator
    else if (progressManager.hasProgressIndicator) progressManager.getProgressIndicator
    else null
  if (indicator != null) indicator.setText2(file.getName + ": analyzing imports usage")

  val size = importHolders.size + importUsers.size //processAllElementsConcurrentlyUnderProgress will be called 2 times
  val counter = new AtomicInteger(0)

  // Runs `action` over `elements` on the shared job pool, updating the
  // progress fraction from the shared counter across both passes.
  def processAllElementsConcurrentlyUnderProgress[T <: PsiElement](elements: util.List[T])(action: T => Unit) = {
    JobLauncher.getInstance().invokeConcurrentlyUnderProgress(elements, indicator, true, true, new Processor[T] {
      override def process(element: T): Boolean = {
        val count: Int = counter.getAndIncrement
        if (count <= size && indicator != null) indicator.setFraction(count.toDouble / size)
        action(element)
        true
      }
    })
  }

  // Pass 1: find which imports are actually used, and which imported names.
  processAllElementsConcurrentlyUnderProgress(importUsers) { element =>
    collectImportsUsed(element, usedImports, usedImportedNames)
  }

  if (indicator != null) indicator.setText2(file.getName + ": collecting additional info")

  // Pass 2: collect the import statement ranges, building ImportInfos via `createInfo`.
  def collectRanges(createInfo: ScImportStmt => Seq[ImportInfo]): Seq[RangeInfo] = {
    val importsInfo = ContainerUtil.newConcurrentSet[RangeInfo]()
    processAllElementsConcurrentlyUnderProgress(importHolders) {
      case holder: ScImportsHolder =>
        importsInfo.addAll(collectImportRanges(holder, createInfo, usedImportedNames.toSet))
      case _ =>
    }
    importsInfo.toSeq.sortBy(_.startOffset)
  }

  val settings = OptimizeImportSettings(project)
  import settings._

  def isImportUsed(importUsed: ImportUsed): Boolean = {
    //todo: collect proper information about language features
    importUsed match {
      // Aliased imports are always kept: the alias may be referenced indirectly.
      case ImportSelectorUsed(sel) if sel.isAliasedImport => true
      case _ => usedImports.contains(importUsed) || isLanguageFeatureImport(importUsed) || importUsed.qualName.exists(isAlwaysUsedImport)
    }
  }

  val rangeInfos = collectRanges(createInfo(_, isImportUsed))
  val optimized = rangeInfos.map(range => (range, optimizedImportInfos(range, settings)))

  new Runnable {
    def run() {
      val documentManager = PsiDocumentManager.getInstance(project)
      val document: Document = documentManager.getDocument(scalaFile)
      documentManager.commitDocument(document)
      // If the document changed since analysis, re-anchor the ranges but keep
      // the already-computed optimized infos.
      val ranges: Seq[(RangeInfo, Seq[ImportInfo])] =
        if (document.getText != analyzingDocumentText) //something was changed...
          sameInfosWithUpdatedRanges()
        else optimized
      // Replace back-to-front so earlier offsets stay valid during mutation.
      for ((range, importInfos) <- ranges.reverseIterator) {
        replaceWithNewImportInfos(range, importInfos, settings, scalaFile)
      }
      documentManager.commitDocument(document)
    }
    def sameInfosWithUpdatedRanges(): Seq[(RangeInfo, Seq[ImportInfo])] = {
      optimized.zip {
        collectRanges(_ => Seq.empty)
      }.map {
        case ((_, infos), range) => (range, infos)
      }
    }
  }
}
// Hook for subclasses (e.g. template-file optimizers) to customize import rendering.
protected def getImportTextCreator: ImportTextCreator = new ImportTextCreator

// Whitespace separates imports by default; overridable for other dialects.
protected def isImportDelimiter(psi: PsiElement) = psi.isInstanceOf[PsiWhiteSpace]

// Scala files only; >= 3 view-provider roots are left to more specific optimizers.
def supports(file: PsiFile): Boolean = file.isInstanceOf[ScalaFile] && file.getViewProvider.getAllFiles.size() < 3
// Replaces the PSI between range.firstPsi and range.lastPsi with freshly
// rendered import statements, preserving indentation and inserting blank-line
// group separators according to the configured import layout.
def replaceWithNewImportInfos(range: RangeInfo, importInfos: Seq[ImportInfo], settings: OptimizeImportSettings, file: PsiFile): Unit = {
  val firstPsi = range.firstPsi.retrieve()
  val lastPsi = range.lastPsi.retrieve()
  // The anchors may have been invalidated by concurrent edits; fail loudly
  // instead of corrupting the file.
  if (Option(firstPsi).exists(!_.isValid) || Option(lastPsi).exists(!_.isValid)) {
    throw new IllegalStateException("Couldn't update imports: import range was invalidated after initial analysis")
  }
  val textCreator = getImportTextCreator
  val fileText = file.getText
  import settings._

  // Walks backwards from `index` collecting the leading spaces/tabs of the line.
  @tailrec
  def indentForOffset(index: Int, res: String = ""): String = {
    if (index <= 0) res
    else {
      val c = fileText.charAt(index - 1)
      if (c == ' ' || c == '\\t') indentForOffset(index - 1, s"$c$res")
      else res
    }
  }
  val newLineWithIndent: String = "\\n" + indentForOffset(range.startOffset)

  var prevGroupIndex = -1
  // Number of blank lines to emit before an import belonging to group
  // `currentGroupIndex`, per the BLANK_LINE entries of the layout.
  def groupSeparatorsBefore(info: ImportInfo, currentGroupIndex: Int) = {
    if (currentGroupIndex <= prevGroupIndex || prevGroupIndex == -1) ""
    else {
      def isBlankLine(i: Int) = importLayout(i) == ScalaCodeStyleSettings.BLANK_LINE
      val blankLineNumber =
        Range(currentGroupIndex - 1, prevGroupIndex, -1).dropWhile(!isBlankLine(_)).takeWhile(isBlankLine).size
      newLineWithIndent * blankLineNumber
    }
  }

  val text = importInfos.map { info =>
    val index: Int = findGroupIndex(info.prefixQualifier, settings)
    val blankLines = groupSeparatorsBefore(info, index)
    prevGroupIndex = index
    blankLines + textCreator.getImportText(info, settings)
  }.mkString(newLineWithIndent).replaceAll("""\\n[ \\t]+\\n""", "\\n\\n")

  //it should cover play template files
  val fileFactory = PsiFileFactory.getInstance(file.getProject)
  val dummyFile = fileFactory.createFileFromText("dummy." + file.getFileType.getDefaultExtension, file.getLanguage, text)

  // Imports parsed outside the top-level of a dummy file produce error
  // elements; strip them before grafting the nodes into the real tree.
  val errorElements = dummyFile.getChildren.filter(_.isInstanceOf[PsiErrorElement]).map(_.getNode)
  errorElements.foreach(dummyFile.getNode.removeChild)

  val parentNode = firstPsi.getParent.getNode
  val firstPsiNode = firstPsi.getNode
  // If all imports were removed, also swallow the preceding whitespace so no
  // blank gap is left behind.
  val firstNodeToRemove =
    if (text.isEmpty) {
      val prevNode = firstPsiNode.getTreePrev
      if (prevNode != null && ScalaTokenTypes.WHITES_SPACES_TOKEN_SET.contains(prevNode.getElementType)) prevNode
      else firstPsiNode
    }
    else firstPsiNode

  val anchor = lastPsi.getNextSibling.getNode
  withDisabledPostprocessFormatting(file.getProject) {
    parentNode.removeRange(firstNodeToRemove, anchor)
    parentNode.addChildren(dummyFile.getNode.getFirstChildNode, null, anchor)
  }
}
// Scans the direct children of `holder` and groups contiguous runs of import
// statements into RangeInfos. A range is broken by doc comments, by comments
// surrounded by blank lines, or by any other non-trivial element.
def collectImportRanges(holder: ScImportsHolder,
                        createInfo: ScImportStmt => Seq[ImportInfo],
                        allUsedImportedNames: Set[UsedName]): Set[RangeInfo] = {
  val result = mutable.HashSet[RangeInfo]()
  // Mutable cursor over the current run of imports.
  var firstPsi: PsiElement = null
  var lastPsi: PsiElement = null
  val isLocalRange = holder match {
    case _: ScalaFile | _: ScPackaging => false
    case _ => true
  }
  val infos = ArrayBuffer[ImportInfo]()
  val sortedUsedNames = allUsedImportedNames.toSeq.sortBy(_.offset)

  // Closes the current run (if any) and records it as a RangeInfo.
  def addRange(): Unit = {
    if (firstPsi != null && lastPsi != null) {
      val rangeStart = firstPsi.getTextRange.getStartOffset
      // Only names used at or after the range start can depend on these imports.
      val usedImportedNames = sortedUsedNames.dropWhile(_.offset < rangeStart).map(_.name).toSet
      val rangeInfo = RangeInfo(PsiAnchor.create(firstPsi), PsiAnchor.create(lastPsi), infos.toVector, usedImportedNames, isLocalRange)
      result += rangeInfo
      firstPsi = null
      lastPsi = null
      infos.clear()
    }
  }

  def initRange(psi: PsiElement) {
    firstPsi = psi
    lastPsi = psi
  }

  for (child <- holder.getNode.getChildren(null)) {
    child.getPsi match {
      case whitespace: PsiWhiteSpace =>
      case d: ScDocComment => addRange()
      case comment: PsiComment =>
        // A comment isolated by blank lines on both sides ends the run; an
        // inline comment between imports does not.
        val next = comment.getNextSibling
        val prev = comment.getPrevSibling
        (next, prev) match {
          case (w1: PsiWhiteSpace, w2: PsiWhiteSpace) if
            w1.getText.contains("\\n") && w2.getText.contains("\\n") => addRange()
          case _ =>
        }
      case s: LeafPsiElement =>
      case a: PsiElement if isImportDelimiter(a) => //do nothing
      case imp: ScImportStmt =>
        if (firstPsi == null) {
          // Start the range at a preceding non-whitespace delimiter if present,
          // so that delimiter is replaced together with the imports.
          imp.getPrevSibling match {
            case a: PsiElement if isImportDelimiter(a) && !a.isInstanceOf[PsiWhiteSpace] =>
              initRange(a)
              lastPsi = imp
            case _ => initRange(imp)
          }
        } else {
          lastPsi = imp
        }
        infos ++= createInfo(imp)
      case _ => addRange()
    }
  }
  addRange()
  result.toSet
}
}
object ScalaImportOptimizer {
// An imported name together with the offset where it was referenced;
// the offset lets ranges filter out usages that occur before them.
private case class UsedName(name: String, offset: Int)

// Extractor matching PSI elements that can consume imports.
private object ImportUser {
  def unapply(e: PsiElement): Option[PsiElement] = e match {
    case elem @ (_: ScReferenceElement | _: ScSimpleTypeElement | _: ScExpression) => Some(elem)
    case _ => None
  }
}

// Shared empty set, avoids allocating a fresh Set for the common "no imports" case.
val NO_IMPORT_USED: Set[ImportUsed] = Set.empty
// Qualifier prefix that pins an import to the root package.
val _root_prefix = "_root_"
/**
 * We can't just select ScalaImportOptimizer because of Play2 templates
 *
 * "Unsafe" because the returned runnable mutates PSI: the caller is
 * responsible for running it in an appropriate write context.
 *
 * @param file Any parallel psi file
 */
def runOptimizerUnsafe(file: ScalaFile) {
  findOptimizerFor(file).foreach(_.processFile(file).run)
}
// Locates the registered ImportOptimizer that handles this file's top-level
// language (important for multi-root files such as Play2 templates, where the
// base language is not Scala). Returns None when no optimizer applies or the
// file has no Scala sub-tree at all.
def findOptimizerFor(file: ScalaFile): Option[ImportOptimizer] = {
  val topLevelFile = file.getViewProvider.getPsi(file.getViewProvider.getBaseLanguage)
  val optimizers = LanguageImportStatements.INSTANCE.forFile(topLevelFile)
  val hasScalaRoot = topLevelFile.getViewProvider.getPsi(ScalaFileType.SCALA_LANGUAGE) != null
  if (optimizers.isEmpty || !hasScalaRoot) None
  else optimizers.find(_ supports topLevelFile) // first registered optimizer wins
}
// True when the import enables a language feature (scala.language._ /
// scala.languageFeature._); such imports have no resolvable usage sites,
// so they must never be optimized away.
def isLanguageFeatureImport(used: ImportUsed): Boolean = {
  val expr = used match {
    case ImportExprUsed(e) => e
    case ImportSelectorUsed(selector) => PsiTreeUtil.getParentOfType(selector, classOf[ScImportExpr])
    case ImportWildcardSelectorUsed(e) => e
  }
  // Both the expression and its qualifier may be absent; treat either as "no".
  Option(expr).flatMap(e => Option(e.qualifier)).exists { qualifier =>
    qualifier.resolve() match {
      case o: ScObject =>
        o.qualifiedName.startsWith("scala.language") || o.qualifiedName.startsWith("scala.languageFeature")
      case _ => false
    }
  }
}
// Renders an ImportInfo back into "import ..." source text, honoring the
// arrow style, brace spacing and lexicographic sorting settings.
class ImportTextCreator {
  def getImportText(importInfo: ImportInfo, isUnicodeArrow: Boolean, spacesInImports: Boolean,
                    sortLexicografically: Boolean): String = {
    import importInfo._

    val groupStrings = new ArrayBuffer[String]

    def addGroup(names: Iterable[String]) = {
      if (sortLexicografically) groupStrings ++= names.toSeq.sorted
      else groupStrings ++= names
    }

    val arrow = if (isUnicodeArrow) ScalaTypedHandler.unicodeCaseArrow else "=>"
    // Selector order inside braces: plain names, renames, hidings, wildcard last.
    addGroup(singleNames)
    addGroup(renames.map(pair => s"${pair._1} $arrow ${pair._2}"))
    addGroup(hiddenNames.map(_ + s" $arrow _"))

    if (hasWildcard) groupStrings += "_"
    val space = if (spacesInImports) " " else ""
    val root = if (rootUsed) s"${_root_prefix}." else ""
    // Braces are required for multiple selectors or any arrow selector;
    // a single plain name is emitted without them.
    val postfix =
      if (groupStrings.length > 1 || renames.nonEmpty || hiddenNames.nonEmpty) groupStrings.mkString(s"{$space", ", ", s"$space}")
      else groupStrings(0)
    s"import $root${relative.getOrElse(prefixQualifier)}.$postfix"
  }

  def getImportText(importInfo: ImportInfo, settings: OptimizeImportSettings): String =
    getImportText(importInfo, settings.isUnicodeArrow, settings.spacesInImports, settings.sortImports)
}
// Produces the final, optimized import list for one range: optionally
// converts to fully-qualified form, sorts, merges or splits per settings,
// collapses to wildcards where allowed, and fixes _root_ prefixes.
def optimizedImportInfos(rangeInfo: RangeInfo, settings: OptimizeImportSettings): Seq[ImportInfo] = {
  import settings._
  val RangeInfo(firstPsi, _, importInfos, usedImportedNames, isLocalRange) = rangeInfo

  val buffer = new ArrayBuffer[ImportInfo]()

  // Relative imports may be kept for local (non top-level) ranges if configured.
  val needReplaceWithFqnImports = addFullQualifiedImports && !(isLocalRange && isLocalImportsCanBeRelative)

  if (needReplaceWithFqnImports)
    buffer ++= importInfos.map(_.withoutRelative)
  else
    buffer ++= importInfos

  if (sortImports) sortImportInfos(buffer, settings)

  val result =
    if (collectImports) mergeImportInfos(buffer)
    else buffer.flatMap(_.split)

  // Both helpers mutate `result` in place.
  updateToWildcardImports(result, firstPsi, usedImportedNames, settings)
  updateRootPrefix(result)

  result.to[immutable.Seq]
}
// Adds a _root_ prefix (in place) to any import whose first qualifier id is
// shadowed by a name introduced by an earlier import in the same range.
def updateRootPrefix(importInfos: ArrayBuffer[ImportInfo]): Unit = {
  // Names brought into scope by imports processed so far.
  val importedNames = new mutable.HashSet[String]()

  for (i <- importInfos.indices) {
    val info = importInfos(i)
    if (info.canAddRoot && importedNames.contains(getFirstId(info.prefixQualifier)))
      importInfos.update(i, info.withRootPrefix)

    importedNames ++= info.allNames
  }
}
// In-place sort of the imports by group and text. Implemented as repeated
// adjacent-swap passes (bubble sort) because swapWithNext may legitimately
// refuse a swap when it would change name resolution; a plain comparator
// sort cannot express that constraint.
def sortImportInfos(buffer: ArrayBuffer[ImportInfo], settings: OptimizeImportSettings): Unit = {
  @tailrec
  def iteration(): Unit = {
    var i = 0
    var changed = false
    while (i + 1 < buffer.length) {
      val (lInfo, rInfo) = (buffer(i), buffer(i + 1))
      // Only count the pass as "changed" if a swap actually happened.
      if (greater(lInfo, rInfo, settings) && swapWithNext(buffer, i)) changed = true
      i = i + 1
    }
    // Keep passing until a full pass makes no swaps (fixed point).
    if (changed) iteration()
  }
  iteration()
}
// Collapses imports with enough single names into wildcard imports (in place),
// unless doing so would change resolution: names also provided by other
// wildcards, or names that already resolve at the range start, are excluded
// or hidden explicitly.
def updateToWildcardImports(infos: ArrayBuffer[ImportInfo],
                            startPsi: PsiAnchor,
                            usedImportedNames: Set[String],
                            settings: OptimizeImportSettings): Unit = {

  val rangeStartPsi = startPsi.retrieve()

  // True when `name` already resolves to a stable entity at the start of the
  // range; importing it via wildcard then would be redundant or ambiguous.
  def resolvesAtRangeStart(name: String): Boolean = {
    if (rangeStartPsi == null) false
    else {
      val ref = ScalaPsiElementFactory.createReferenceFromText(name, rangeStartPsi.getContext, rangeStartPsi)
      ref.bind().exists {
        case ScalaResolveResult(p: PsiPackage, _) =>
          p.getParentPackage != null && p.getParentPackage.getName != null
        case ScalaResolveResult(o: ScObject, _) if o.isPackageObject => o.qualifiedName.contains(".")
        case ScalaResolveResult(o: ScObject, _) =>
          o.getParent match {
            case file: ScalaFile => false
            case _ => true
          }
        case ScalaResolveResult(td: ScTypedDefinition, _) if td.isStable => true
        case ScalaResolveResult(_: ScTypeDefinition, _) => false
        case ScalaResolveResult(c: PsiClass, _) => true
        case ScalaResolveResult(f: PsiField, _) if f.hasFinalModifier => true
        case _ => false
      }
    }
  }

  // Expand each info with the full name set its wildcard would bring in.
  def updateWithWildcardNames(buffer: ArrayBuffer[ImportInfo]) {
    for ((info, idx) <- buffer.zipWithIndex) {
      val withWildcardNames = info.withAllNamesForWildcard(rangeStartPsi)
      if (info != withWildcardNames) {
        buffer.update(idx, withWildcardNames)
      }
    }
  }

  // Returns either the original info or its wildcard replacement, with any
  // clashing names turned into explicit hidings.
  def possiblyWithWildcard(info: ImportInfo): ImportInfo = {
    val needUpdate = info.singleNames.size >= settings.classCountToUseImportOnDemand
    val onlySingleNames = info.hiddenNames.isEmpty && info.renames.isEmpty && !info.hasWildcard

    if (!needUpdate || !onlySingleNames) return info

    val withWildcard = info.toWildcardInfo.withAllNamesForWildcard(rangeStartPsi)

    // A wildcard that drags in unused implicits would change implicit search.
    if (withWildcard.wildcardHasUnusedImplicit) return info

    updateWithWildcardNames(infos)

    val explicitNames = infos.flatMap {
      case `info` => Seq.empty
      case other => other.singleNames
    }.toSet

    val namesFromOtherWildcards = infos.flatMap {
      case `info` => Seq.empty
      case other => other.allNames
    }.toSet -- explicitNames

    val problematicNames = withWildcard.allNamesForWildcard & usedImportedNames
    val clashesWithOtherWildcards = problematicNames & namesFromOtherWildcards

    def notAtRangeStart = problematicNames.forall(name => !resolvesAtRangeStart(name))

    // Only worth collapsing if the hidings are fewer than the names they replace.
    if (clashesWithOtherWildcards.size < info.singleNames.size && notAtRangeStart)
      withWildcard.copy(hiddenNames = clashesWithOtherWildcards)
    else info
  }

  for ((info, i) <- infos.zipWithIndex) {
    val newInfo = possiblyWithWildcard(info)
    if (info != newInfo)
      infos.update(i, newInfo)
  }
}
// Inserts `infosToAdd` into the existing import list `infos`, keeping the
// list sorted and merged per `settings`, and returns the resulting list.
// Handles three complications: existing renames that alias a prefix of the
// new import, wildcard insertion (which may require hiding clashing names),
// and collapsing to on-demand imports once a prefix has enough single names.
def insertImportInfos(infosToAdd: Seq[ImportInfo], infos: Seq[ImportInfo], rangeStart: PsiAnchor, settings: OptimizeImportSettings): Seq[ImportInfo] = {
  import settings._

  // Appends `newInfo` and bubbles it up to its sorted position, respecting
  // swapWithNext's resolution-safety checks.
  def addLastAndMoveUpwards(newInfo: ImportInfo, buffer: ArrayBuffer[ImportInfo]): Unit = {
    var i = buffer.size
    buffer.insert(i, newInfo)
    while(i > 0 && greater(buffer(i - 1), buffer(i), settings) && swapWithNext(buffer, i - 1)) {
      i -= 1
    }
  }

  // Replaces `oldInfos` (wherever they sit) with `newInfos` at the position of
  // the earliest removed entry; falls back to sorted append when none remain.
  def replace(oldInfos: Seq[ImportInfo], newInfos: Seq[ImportInfo], buffer: ArrayBuffer[ImportInfo]) = {
    val oldIndices = oldInfos.map(buffer.indexOf).filter(_ >= 0).sorted(Ordering[Int].reverse)
    if (oldIndices.nonEmpty) {
      val minIndex = oldIndices.last
      // Remove from highest index down so earlier indices stay valid.
      oldIndices.foreach(buffer.remove)
      buffer.insert(minIndex, newInfos: _*)
    }
    else {
      newInfos.foreach(addLastAndMoveUpwards(_, buffer))
    }
  }

  // If an existing import renames a prefix of `info`'s qualifier, rewrite
  // `info` to use the renamed (relative) form instead of the raw qualifier.
  def withAliasedQualifier(info: ImportInfo): ImportInfo = {
    if (addFullQualifiedImports) return info

    for {
      oldInfo <- infos
      renamerPrefix = oldInfo.prefixQualifier
      (name, newName) <- oldInfo.renames
    } {
      val oldPrefix = s"$renamerPrefix.$name"
      if (info.prefixQualifier.startsWith(oldPrefix)) {
        val stripped = info.prefixQualifier.stripPrefix(oldPrefix)
        val newRelative = s"$newName$stripped"
        val newPrefix = s"$renamerPrefix.$newRelative"

        return info.copy(prefixQualifier = newPrefix, relative = Some(newRelative), rootUsed = false)
      }
    }
    info
  }

  val actuallyInserted = infosToAdd.map(withAliasedQualifier)
  val addedPrefixes = actuallyInserted.map(_.prefixQualifier)

  // Per added prefix: will the combined single-name count cross the
  // import-on-demand threshold?
  val tooManySingleNames: Map[String, Boolean] = addedPrefixes.map { prefix =>
    val singleNamesCount = (actuallyInserted ++ infos)
      .filter(_.prefixQualifier == prefix)
      .flatMap(_.singleNames)
      .distinct.size
    prefix -> (singleNamesCount >= classCountToUseImportOnDemand)
  }.toMap

  def insertSimpleInfo(info: ImportInfo, buffer: ArrayBuffer[ImportInfo]): Unit = {
    val samePrefixInfos = buffer.filter(_.prefixQualifier == info.prefixQualifier)
    if (collectImports) {
      val merged = ImportInfo.merge(samePrefixInfos :+ info)
      replace(samePrefixInfos, merged.toSeq, buffer)
    }
    else addLastAndMoveUpwards(info, buffer)
  }

  // Wildcard insertion: split existing same-prefix imports, keep only the
  // single names that must stay explicit (shadowed by other wildcards and
  // used), and hide names the new wildcard would clash on.
  def insertInfoWithWildcard(info: ImportInfo, buffer: ArrayBuffer[ImportInfo], usedNames: Set[String]): Unit = {
    val (samePrefixInfos, otherInfos) = buffer.partition(_.prefixQualifier == info.prefixQualifier)
    val samePrefixWithNewSplitted = samePrefixInfos.flatMap(_.split) ++ info.split

    val (simpleInfos, notSimpleInfos) = samePrefixWithNewSplitted.partition(_.singleNames.nonEmpty)
    val (wildcard, withArrows) = notSimpleInfos.partition(_.hasWildcard)

    val namesFromOtherWildcards = otherInfos.flatMap(_.namesFromWildcard).toSet
    val simpleNamesToRemain = simpleInfos.flatMap(_.singleNames).toSet & namesFromOtherWildcards & usedNames
    val simpleInfosToRemain = simpleInfos.filter(si => simpleNamesToRemain.contains(si.singleNames.head))
    val renames = withArrows.flatMap(_.renames)
    val hiddenNames = withArrows.flatMap(_.hiddenNames)
    val newHiddenNames = {
      val fromInsertedWildcard = info.allNamesForWildcard -- simpleNamesToRemain -- renames.map(_._1) -- hiddenNames
      fromInsertedWildcard & namesFromOtherWildcards & usedNames
    }

    withArrows ++= newHiddenNames.map(info.toHiddenNameInfo)

    val notSimpleMerged = ImportInfo.merge(withArrows ++ wildcard)
    if (collectImports) {
      val simpleMerged = ImportInfo.merge(simpleInfosToRemain ++ notSimpleMerged)
      replace(samePrefixInfos, simpleMerged.toSeq, buffer)
    }
    else {
      replace(samePrefixInfos, simpleInfosToRemain ++ notSimpleMerged, buffer)
    }
  }

  val needAdditionalInfo = infosToAdd.exists(_.hasWildcard) || addedPrefixes.exists(tooManySingleNames)
  val buffer = infos.to[ArrayBuffer]

  if (needAdditionalInfo) {
    // Wildcard handling needs the full used-name picture of the holder,
    // which is expensive to compute, so it is done only on this path.
    val rangeStartPsi = rangeStart.retrieve()
    val holder = PsiTreeUtil.getParentOfType(rangeStartPsi, classOf[ScImportsHolder])
    val usedNames = collectUsedImportedNames(holder)

    for (info <- infosToAdd) {
      if (info.hasWildcard) {
        insertInfoWithWildcard(info, buffer, usedNames)
      }
      else {
        insertSimpleInfo(info, buffer)
      }
    }
    updateToWildcardImports(buffer, rangeStart, usedNames, settings)
  }
  else {
    actuallyInserted.foreach(insertSimpleInfo(_, buffer))
  }

  updateRootPrefix(buffer)
  buffer.toVector
}
// Swaps buffer(i) and buffer(i + 1) and returns true, unless moving either
// import past the other would let it be captured by a name the other one
// introduces (which would change resolution of a relative qualifier);
// _root_-prefixed imports are immune to such capture.
private def swapWithNext(buffer: ArrayBuffer[ImportInfo], i: Int): Boolean = {
  val upper = buffer(i)
  val lower = buffer(i + 1)

  def headIdOf(info: ImportInfo): String =
    getFirstId(info.relative.getOrElse(info.prefixQualifier))

  val upperSafeBelow = upper.rootUsed || !lower.allNames.contains(headIdOf(upper))
  val lowerSafeAbove = lower.rootUsed || !upper.allNames.contains(headIdOf(lower))
  val canSwap = upperSafeBelow && lowerSafeAbove

  if (canSwap) {
    buffer(i) = lower
    buffer(i + 1) = upper
  }
  canSwap
}
// Merges imports sharing the same prefix qualifier into single entries,
// in place. Non-adjacent same-prefix entries are first brought together by a
// chain of resolution-safe swaps; if any swap is refused, the pair is left
// unmerged rather than risking a resolution change.
private def mergeImportInfos(buffer: ArrayBuffer[ImportInfo]): ArrayBuffer[ImportInfo] = {
  // Index of the next entry after `i` with the same prefix, or -1.
  def samePrefixAfter(i: Int): Int = {
    var j = i + 1
    while (j < buffer.length) {
      if (buffer(j).prefixQualifier == buffer(i).prefixQualifier) return j
      j += 1
    }
    -1
  }
  var i = 0
  while (i < buffer.length - 1) {
    val prefixIndex: Int = samePrefixAfter(i)
    if (prefixIndex != -1) {
      if (prefixIndex == i + 1) {
        // Adjacent: merge directly; do not advance i, there may be more matches.
        val merged = buffer(i).merge(buffer(i + 1))
        buffer(i) = merged
        buffer.remove(i + 1)
      } else {
        // Bubble buffer(i) down until it is adjacent to its prefix twin.
        if (swapWithNext(buffer, i)) {
          var j = i + 1
          var break = false
          while (!break && j != prefixIndex - 1) {
            if (!swapWithNext(buffer, j)) break = true
            j += 1
          }
          if (!break) {
            val merged = buffer(j).merge(buffer(j + 1))
            buffer(j) = merged
            buffer.remove(j + 1)
          }
        } else i += 1
      }
    } else i += 1
  }
  buffer
}
// Returns the first identifier of a dotted qualifier: "a.b.c" -> "a".
// A leading backtick-quoted id is returned whole including both backticks
// ("`a.b`.c" -> "`a.b`"); an unterminated backtick or a dot-free string
// yields the input unchanged.
def getFirstId(s: String): String = {
  val cutAt =
    if (s.startsWith("`")) {
      val closing = s.indexOf('`', 1)
      if (closing == -1) -1 else closing + 1 // keep the closing backtick
    } else {
      s.indexOf('.')
    }
  if (cutAt == -1) s else s.substring(0, cutAt)
}
// Returns the index in the configured import layout of the group that
// `info` (a prefix qualifier) belongs to. Among all matching groups the most
// specific (longest) prefix wins; ALL_OTHER_IMPORTS only wins when nothing
// more specific matches.
// NOTE(review): assumes the layout always contains ALL_OTHER_IMPORTS, so
// `suitable` is never empty — suitable.head would throw otherwise.
def findGroupIndex(info: String, settings: OptimizeImportSettings): Int = {
  val groups = settings.importLayout
  val suitable = groups.filter { group =>
    group != ScalaCodeStyleSettings.BLANK_LINE && (group == ScalaCodeStyleSettings.ALL_OTHER_IMPORTS ||
      info.startsWith(group))
  }
  val elem = suitable.tail.foldLeft(suitable.head) { (l, r) =>
    if (l == ScalaCodeStyleSettings.ALL_OTHER_IMPORTS) r
    else if (r == ScalaCodeStyleSettings.ALL_OTHER_IMPORTS) l
    else if (r.startsWith(l)) r
    else l
  }

  groups.indexOf(elem)
}
// Ordering used for import sorting: primarily by layout group index,
// then lexicographically by rendered import text within a group.
def greater(lPrefix: String, rPrefix: String, lText: String, rText: String, settings: OptimizeImportSettings): Boolean = {
  val lIndex = findGroupIndex(lPrefix, settings)
  val rIndex = findGroupIndex(rPrefix, settings)
  if (lIndex != rIndex) lIndex > rIndex
  else lText > rText
}
// ImportInfo overload: renders both imports with default settings-driven
// formatting and delegates to the String comparison above.
def greater(lInfo: ImportInfo, rInfo: ImportInfo, settings: OptimizeImportSettings): Boolean = {
  val textCreator = new ImportTextCreator
  val lPrefix: String = lInfo.prefixQualifier
  val rPrefix: String = rInfo.prefixQualifier
  val lText = textCreator.getImportText(lInfo, settings)
  val rText = textCreator.getImportText(rInfo, settings)
  ScalaImportOptimizer.greater(lPrefix, rPrefix, lText, rText, settings)
}
// Gathers every import an expression depends on: implicit conversions on its
// type, method-call imports, for-comprehension desugaring imports, and
// implicit parameters.
private def importsUsedFor(expr: ScExpression): Set[ImportUsed] = {
  val res = mutable.HashSet[ImportUsed]()

  res ++= expr.getTypeAfterImplicitConversion(expectedOption = expr.smartExpectedType()).importsUsed

  expr match {
    case call: ScMethodCall => res ++= call.getImportsUsed
    case f: ScForStatement => res ++= ScalaPsiUtil.getExprImports(f)
    case _ =>
  }

  expr.findImplicitParameters match {
    case Some(seq) =>
      // Entries may be null when implicit search failed for a slot.
      for (rr <- seq if rr != null) {
        res ++= rr.importsUsed
      }
    case _ =>
  }
  res.toSet
}
// For one PSI element, records into the shared concurrent sets both the
// imports it uses (`imports`) and the imported names it references with
// their offsets (`names`). The name/offset pairs later let each import range
// restrict itself to usages at or after its own start offset.
private def collectImportsUsed(element: PsiElement, imports: util.Set[ImportUsed], names: util.Set[UsedName]): Unit = {
  // True for entities reachable without any import (scala._, java.lang._,
  // and members of the scala / scala.Predef package objects).
  def implicitlyImported(srr: ScalaResolveResult) = {
    srr.element match {
      case c: PsiClass =>
        val qName = c.qualifiedName
        val name = c.name
        qName == s"scala.$name" || qName == s"java.lang.$name"
      case ContainingClass(o: ScObject) =>
        o.isPackageObject && Set("scala", "scala.Predef").contains(o.qualifiedName)
      case _ => false
    }
  }

  def addResult(srr: ScalaResolveResult, fromElem: PsiElement) = {
    val importsUsed = srr.importsUsed
    if (importsUsed.nonEmpty || implicitlyImported(srr)) {
      imports.addAll(importsUsed)
      names.add(UsedName(srr.name, fromElem.getTextRange.getStartOffset))
    }
  }

  // Expressions can pull in imports indirectly: via implicit conversions,
  // desugared method calls, for-comprehension methods, implicit parameters.
  def addFromExpression(expr: ScExpression): Unit = {
    val afterImplicitConversion = expr.getTypeAfterImplicitConversion(expectedOption = expr.smartExpectedType())

    imports.addAll(afterImplicitConversion.importsUsed)

    afterImplicitConversion.implicitFunction.foreach(f => names.add(UsedName(f.name, expr.getTextRange.getStartOffset)))

    expr match {
      case call: ScMethodCall => imports.addAll(call.getImportsUsed)
      case f: ScForStatement => imports.addAll(ScalaPsiUtil.getExprImports(f))
      case _ =>
    }

    expr.findImplicitParameters match {
      case Some(seq) =>
        for (rr <- seq if rr != null) {
          addResult(rr, expr)
        }
      case _ =>
    }
  }

  element match {
    case impQual: ScStableCodeReferenceElement
      if impQual.qualifier.isEmpty && PsiTreeUtil.getParentOfType(impQual, classOf[ScImportStmt]) != null =>
      //don't add as ImportUsed to be able to optimize it away if it is used only in unused imports
      val hasImportUsed = impQual.multiResolve(false).exists {
        case srr: ScalaResolveResult => srr.importsUsed.nonEmpty
        case _ => false
      }
      if (hasImportUsed) {
        names.add(UsedName(impQual.refName, impQual.getTextRange.getStartOffset))
      }
    case ref: ScReferenceElement if PsiTreeUtil.getParentOfType(ref, classOf[ScImportStmt]) == null =>
      // Regular references outside import statements: record every candidate.
      ref.multiResolve(false) foreach {
        case scalaResult: ScalaResolveResult => addResult(scalaResult, ref)
        case _ =>
      }
    case simple: ScSimpleTypeElement =>
      // Type positions can also require implicit parameters (e.g. context bounds).
      simple.findImplicitParameters match {
        case Some(parameters) =>
          parameters.foreach {
            case r: ScalaResolveResult => addResult(r, simple)
            case _ =>
          }
        case _ =>
      }
    case _ =>
  }
  //separate match to have reference expressions processed
  element match {
    case e: ScExpression => addFromExpression(e)
    case _ =>
  }
}
//quite heavy computation, is really needed only for dealing with wildcard imports
// Walks the whole holder and returns the set of imported names referenced
// anywhere inside it (offsets are discarded — only membership matters here).
def collectUsedImportedNames(holder: ScImportsHolder): Set[String] = {
  val imports = new util.HashSet[ImportUsed]()
  val names = new util.HashSet[UsedName]()

  holder.depthFirst.foreach {
    case ImportUser(elem) => collectImportsUsed(elem, imports, names)
    case _ =>
  }
  names.toSet.map((x: UsedName) => x.name)
}
// One ImportInfo per import expression of the statement; `isImportUsed`
// decides which selectors survive (defaults to keeping everything).
def createInfo(imp: ScImportStmt, isImportUsed: ImportUsed => Boolean = _ => true): Seq[ImportInfo] =
  imp.importExprs.flatMap(ImportInfo(_, isImportUsed))
} | Im-dex/intellij-scala | src/org/jetbrains/plugins/scala/editor/importOptimizer/ScalaImportOptimizer.scala | Scala | apache-2.0 | 32,034 |
/*
* Copyright (c) 2015 Contributor. All rights reserved.
*/
package org.scalaide.debug.internal.expression.features
import org.junit.Ignore
import org.junit.Test
import org.scalaide.debug.internal.expression.BaseIntegrationTest
import org.scalaide.debug.internal.expression.Names.Java
import org.scalaide.debug.internal.expression.Names.Scala
/**
 * Integration tests checking that plain methods can be passed where functions are
 * expected inside evaluated debugger expressions (eta-expansion, `andThen`/`compose`,
 * higher-order collection methods). Mixed into a concrete [[BaseIntegrationTest]].
 *
 * Helpers such as `zero`, `inc`, `dec`, `sum` and `nat` are assumed to be defined in
 * the debugged test program — TODO confirm against the test fixture sources.
 */
trait MethodsAsFunctionsTest { self: BaseIntegrationTest =>

  // Methods referenced through an object rather than the local frame.
  @Test
  def methodsFromObject(): Unit = {
    eval("List(1, 2).foldLeft(ObjectMethods.zero)(ObjectMethods.sum)", 3, Java.primitives.int)
    eval("List(-1, 1).filter(_ > ObjectMethods.zero)", List(1), Scala.::)
  }

  @Ignore("Potential bug in Toolbox.")
  @Test
  def methodAsMapParam(): Unit = eval("nat.map(inc)", Array(3, 4, 5), Scala.Array(Scala.primitives.Int))

  @Test
  def methodCall(): Unit = eval("zero", 0, Java.primitives.int)

  @Ignore("Potential bug in Toolbox.")
  @Test
  def methodAsFilterParam(): Unit = eval("nat.filter(_ > inc(inc(zero)))", Array(3, 4), Scala.Array(Scala.primitives.Int))

  @Ignore("Potential bug in Toolbox.")
  @Test
  def methodsAsFoldParam(): Unit = eval("nat.foldLeft(zero)(sum)", 9, Java.primitives.int)

  @Test
  def methodAsGetOrElseParam(): Unit = eval("None.getOrElse(zero)", 0, Java.primitives.int)

  // Explicit eta-expansion (`inc _`) combined with Function1 composition.
  @Test
  def andThenMethods(): Unit = eval("(inc _ andThen inc _)(zero)", 2, Java.primitives.int)

  @Ignore("Needs investigation.")
  @Test
  def composeMethods(): Unit = eval("(inc _ compose dec)(zero)", 0, Java.primitives.int)
}
| scala-ide/scala-ide | org.scala-ide.sdt.debug.expression.tests/src/org/scalaide/debug/internal/expression/features/MethodsAsFunctionsTest.scala | Scala | bsd-3-clause | 1,496 |
package com.trafficland.augmentsbt.distribute
import com.typesafe.sbt.packager.Keys._
import com.typesafe.sbt.packager.linux.LinuxPlugin.autoImport.{Linux, daemonUser}
import com.typesafe.sbt.packager.rpm.RpmPlugin.autoImport.{rpmDaemonLogFile, rpmVendor}
import sbt._
import com.trafficland.augmentsbt.rpm.RPMPlugin
/**
 * sbt auto-plugin that configures the sbt-native-packager start script:
 * daemon user, log locations, and the JVM/application arguments baked into
 * the generated bash launcher.
 */
object StartupScriptPlugin extends AutoPlugin {

  override def requires = RPMPlugin

  object autoImport extends DistributeKeys
  import autoImport._

  override lazy val projectSettings = Seq(
    startScriptJavaOptions := Seq.empty,
    startScriptMainArguments := Seq.empty,
    daemonUser in Linux := "coreservices",
    daemonStdoutLogFile := Some("stdout.log"),
    defaultLinuxLogsLocation := s"/var/log/${rpmVendor.value}",
    startScriptConfigFileName := "prod.conf",
    loggingConfigFileName := Some("logback.xml"),
    executableScriptName := "start",
    bashScriptExtraDefines ++= {
      // Optional -Dlogback.configurationFile define, present only when a logging config is set.
      val logbackDefine = loggingConfigFileName.value.map { fileName =>
        s"-Dlogback.configurationFile=$$app_home/../conf/$fileName"
      }
      val configDefine = s"-Dconfig.file=$$app_home/../conf/${startScriptConfigFileName.value}"
      val jvmDefines = (startScriptJavaOptions.value ++ logbackDefine :+ configDefine).map(opt => s"addJava $opt")
      val appDefines = startScriptMainArguments.value.map(opt => s"addApp $opt")
      appDefines ++ jvmDefines
    }
  )
}
| trafficland/augment-sbt | src/main/scala/com/trafficland/augmentsbt/distribute/StartupScriptPlugin.scala | Scala | apache-2.0 | 1,419 |
package puck.parser
import com.nativelibs4java.opencl._
import puck.linalg.CLMatrix
import java.io.Closeable
import breeze.collection.mutable.TriangularArray
import scala.collection.mutable.ArrayBuffer
import com.typesafe.scalalogging.slf4j.LazyLogging
/**
 * Pre-allocated OpenCL storage shared by the parser: the P/L/R work matrices for
 * binary-rule relaxations, the inside/outside chart cells, pruning masks, the work
 * queue and its pointer buffers.
 *
 * @param numWorkCells  rows in the work matrices (parallel relaxations per pass)
 * @param numChartCells chart cells available across all sentences of a batch
 * @param cellSize      floats per chart cell
 * @param maskSize      ints per pruning-mask cell
 *
 * @author dlwh
 */
class WorkSpace(val numWorkCells: Int,
                val numChartCells: Int,
                val cellSize: Int,
                val maskSize: Int)(implicit context: CLContext, queue: CLQueue) extends LazyLogging with Closeable {

  /**
   * Greedily packs sentences into batches, closing a batch whenever the next sentence
   * would overflow the chart storage (`devInside.cols`) or exceed `numWorkCells`
   * sentences. Each sentence needs `TriangularArray.arraySize(len) * 2` chart cells
   * (inside + outside).
   */
  def getBatches[W](sentences: IndexedSeq[IndexedSeq[W]], masks: PruningMask = NoPruningMask): IndexedSeq[Batch[W]] = {
    val result = ArrayBuffer[Batch[W]]()
    var current = ArrayBuffer[IndexedSeq[W]]()
    var currentCellTotal = 0
    for ( (s, i) <- sentences.zipWithIndex) {
      currentCellTotal += TriangularArray.arraySize(s.length) * 2
      if (currentCellTotal > devInside.cols || current.length >= numWorkCells) {
        // Close out the current batch; the sentence that overflowed it starts the next one.
        currentCellTotal -= TriangularArray.arraySize(s.length) * 2
        assert(current.nonEmpty)
        result += createBatch(current, masks.slice(i - current.length, i))
        currentCellTotal = TriangularArray.arraySize(s.length) * 2
        current = ArrayBuffer()
      }
      current += s
    }
    if (current.nonEmpty) {
      result += createBatch(current, masks.slice(sentences.length - current.length, sentences.length))
    }
    result
  }

  /** Wraps the sentences in a [[Batch]] bound to this workspace's charts and logs its size. */
  private def createBatch[W](sentences: IndexedSeq[IndexedSeq[W]], masks: PruningMask): Batch[W] = {
    val batch = Batch[W](sentences, devInside, devOutside, masks)
    logger.info(f"Batch size of ${sentences.length}, ${batch.numCellsUsed} cells used, total inside ${batch.numCellsUsed * cellSize * 4.0/1024/1024}%.2fM ")
    batch
  }

  // Two work arrays for computing: L * R * rules, for fixed spans and split points (the "bot")
  // One is the L part of the above
  val devLeft = new CLMatrix[Float]( numWorkCells, cellSize)
  // Another is the R part.
  val devRight = new CLMatrix[Float]( numWorkCells, cellSize)

  // finally, we have the array of parse charts
  val devInside = new CLMatrix[Float](cellSize, numChartCells)
  val devOutside = new CLMatrix[Float](cellSize, numChartCells)
  val maskCharts = new CLMatrix[Int](maskSize, numChartCells)
  val devInsideScale, devOutsideScale = context.createFloatBuffer(CLMem.Usage.InputOutput, numChartCells)

  // work queue stuff
  val workQueue = new ParseItemQueue(numChartCells)
  // val parentQueue, leftQueue, rightQueue = context.createIntBuffer(CLMem.Usage.Input, numWorkCells)
  val pPtrBuffer, lPtrBuffer, rPtrBuffer = context.createIntBuffer(CLMem.Usage.Input, numChartCells)
  val queueOffsets = context.createIntBuffer(CLMem.Usage.Input, numWorkCells)

  def close() = release()

  /** Releases all GPU memory owned by this workspace. */
  def release() {
    devLeft.release()
    devRight.release()
    devInside.release()
    devOutside.release()
    // Bug fix: these allocations were previously never released, leaking GPU
    // memory on every close()/release().
    maskCharts.release()
    devInsideScale.release()
    devOutsideScale.release()
    pPtrBuffer.release()
    lPtrBuffer.release()
    rPtrBuffer.release()
    queueOffsets.release()
  }
}
object WorkSpace {

  /**
   * Allocates a [[WorkSpace]] sized from the device's global memory.
   *
   * @param cellSize floats per chart cell
   * @param maskSize ints per pruning-mask cell
   * @param maxAllocSize optional cap (bytes) on memory to consider; negative means "use device size"
   * @param ratioOfChartsToWorkSpace how many chart cells to allocate per work cell
   */
  def allocate( cellSize: Int,
                maskSize: Int,
                maxAllocSize: Long = -1, ratioOfChartsToWorkSpace: Int = 7)(implicit context: CLContext, queue: CLQueue): WorkSpace = {
    var maxMemToUse = context.getDevices.head.getGlobalMemSize
    if(maxAllocSize >= 0) maxMemToUse = math.min(maxAllocSize, maxMemToUse)
    val sizeOfFloat = 4
    val fractionOfMemoryToUse = 0.8 // slack!
    val maxSentencesPerBatch: Long = 400 // just for calculation's sake
    // NOTE(review): the .toInt here saturates at Int.MaxValue for devices with more than
    // ~2.6GB of global memory, effectively capping the budget at ~2GB — confirm intended.
    val sizeToAllocate = (maxMemToUse * fractionOfMemoryToUse).toInt - maxSentencesPerBatch * 3 * 4;
    val maxPossibleNumberOfCells = ((sizeToAllocate / sizeOfFloat) / (cellSize + 4 + maskSize)).toInt // + 4 for each kind of offset
    // We want numGPUCells and numGPUChartCells to be divisible by 16, so that we get aligned strided access:
    //       On devices of compute capability 1.0 or 1.1, the k-th thread in a half warp must access the
    //       k-th word in a segment aligned to 16 times the size of the elements being accessed; however,
    //       not all threads need to participate... If sequential threads in a half warp access memory that is
    //       sequential but not aligned with the segments, then a separate transaction results for each element
    //       requested on a device with compute capability 1.1 or lower.
    val numberOfUnitsOf32 = maxPossibleNumberOfCells / 32
    // average sentence length of sentence, let's say n.
    // for the gpu charts, we'll need (n choose 2) * 2 * 2 =
    // for the "P/L/R" parts, the maximum number of relaxations (P = L * R * rules) for a fixed span
    // in a fixed sentence is (n/2)^2= n^2/4.
    // Take n = 32, then we want our P/L/R arrays to be of the ratio (3 * 256):992 \approx 3/4 (3/4 exaclty if we exclude the - n term)
    // doesn't quite work the way we want (outside), so we'll bump the number to 4/5
    val baseSize = numberOfUnitsOf32 / (2 + 2 * ratioOfChartsToWorkSpace)
    val extra = numberOfUnitsOf32 % (2 + 2 * ratioOfChartsToWorkSpace)
    val plrSize = baseSize
    // TODO, can probably do a better job of these calculations?
    val (workCells, chartCells) = (plrSize * 32, (baseSize * ratioOfChartsToWorkSpace + extra) * 32)
    // Per-buffer cap: a single CL buffer may not exceed the device's max allocation size.
    val maxFloatsPerBuffer = (context.getDevices.head.getMaxMemAllocSize / sizeOfFloat / cellSize).toInt
    new WorkSpace(workCells min maxFloatsPerBuffer, chartCells min maxFloatsPerBuffer, cellSize, maskSize)
  }
}
| dlwh/puck | src/main/scala/puck/parser/WorkSpace.scala | Scala | apache-2.0 | 5,345 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.reactivestreams
/**
* Mirrors the `Publisher` interface from the
* [[http://www.reactive-streams.org/ Reactive Streams]] project.
*
* A `Publisher` is a provider of a potentially unbounded number of sequenced
* elements, publishing them according to the demand received from its
* [[Subscriber Subscribers]].
*
* A `Publisher` can serve multiple Subscribers subscribed
* dynamically at various points in time.
*/
// Extends Any so implementations may be universal traits / value classes.
trait Publisher[T] extends Any {
  /**
    * Request the publisher to start emitting data.
    *
    * This is a factory method and can be called multiple times, each time
    * starting a new [[Subscription]]. Each [[Subscription]] will work
    * for only a single [[Subscriber]]. A [[Subscriber]] should only
    * subscribe once to a single [[Publisher]].
    *
    * If the [[Publisher]] rejects the subscription attempt or otherwise fails
    * it will signal the error via [[Subscriber.onError]].
    *
    * @param subscriber the [[Subscriber]] that will consume signals
    *                   from this [[Publisher]]
    */
  def subscribe(subscriber: Subscriber[_ >: T]): Unit
}
| monifu/monifu | monix-execution/js/src/main/scala/org/reactivestreams/Publisher.scala | Scala | apache-2.0 | 1,811 |
/* vim: set ts=2 et sw=2 sts=2 fileencoding=utf-8: */
import exceler.test._
import org.scalatest.FunSuite
class ExcelerConfigTest extends FunSuite with TestResource {

  test("MyProperties") {
    // Loading the bundled test properties file should expose its entries by key.
    val properties = MyProperties("hehe", getURI(testProperties1))
    assert(properties.get("test.item").get == "HelloWorld")
  }
}
| wak109/exceler | jvm/src/test/scala/MyPropertiesTest.scala | Scala | mit | 317 |
package org.chiflink.ctaprocessor.loaders.ctaloader
/**
* Created by ubuntu on 2/20/17.
*/
import com.beust.jcommander.JCommander
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.api.scala._
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer010
import java.util.Properties
import org.apache.flink.streaming.util.serialization.SimpleStringSchema
/**
 * Flink batch-style job: reads raw CTA fix records from a local file, runs them
 * through [[FixProcessor]], and publishes the results to a Kafka topic.
 */
object LoadFixes {

  // Command-line options, populated by JCommander in main().
  val config = new LoadFixesArgs

  def main(args: Array[String]): Unit = {
    new JCommander(config, args: _*)

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
    env.setParallelism(1)
    env.getConfig.setGlobalJobParameters(config)

    val producerProps = new Properties
    producerProps.setProperty("zookeeper.connect", config.kafkaZookeeperHost)
    producerProps.setProperty("bootstrap.servers", config.kafkaBootStrapServer)

    env.readTextFile("file://" + config.inputFile).name("CTAStream")
      .flatMap(new FixProcessor).name("FixProcessor")
      .addSink(new FlinkKafkaProducer010[String](config.kafkaTopic, new SimpleStringSchema(), producerProps))

    env.execute("ctaLoader")
  }
}
| chi-apache-flink-meetup/ctaProcessor | code/loaders/ctaLoader/src/main/scala/org/chiflink/ctaprocessor/loaders/ctaloader/LoadFixes.scala | Scala | apache-2.0 | 1,318 |
package utils
import com.typesafe.config.ConfigFactory
/**
* Created by Engin Yoeyen on 04/10/14.
*/
/** Typed access to application settings loaded via Typesafe Config. */
object Config {

  private val config = ConfigFactory.load()

  val baseUrl: String = config.getString("application.baseUrl")
  val baseAuthenticationEnabled: Boolean = config.hasPath("basic.auth")
  val getAuthenticationEnabled: Boolean = config.hasPath("get.auth")

  override def toString = s"Config($config)"
}
| stylight/postgresql-rest-api | app/utils/Config.scala | Scala | mit | 394 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.index
import org.geotools.data.Query
import org.geotools.factory.CommonFactoryFinder
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.TestWithMultipleSfts
import org.locationtech.geomesa.accumulo.data.AccumuloQueryPlan.BatchScanPlan
import org.locationtech.geomesa.accumulo.iterators.{AttributeKeyValueIterator, FilterTransformIterator}
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.index.conf.QueryHints
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.geotools.Conversions._
import org.locationtech.geomesa.utils.text.WKTUtils
import org.opengis.feature.simple.SimpleFeatureType
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
// Verifies that attribute-index queries selecting only the attribute plus dtg/geom
// are answered with a single batch scan using AttributeKeyValueIterator (i.e. no
// join back to the record table).
class AttrKeyPlusValueIteratorTest extends Specification with TestWithMultipleSfts {

  // Schema: 'name' has a join attribute index with high cardinality; age/count unindexed.
  val spec =
    "name:String:index=join:cardinality=high," +
      "age:Integer:index=false," +
      "count:Long:index=false," +
      "dtg:Date:default=true," +
      "*geom:Point:srid=4326"

  // Four features, one per day; 'charles' has a null age.
  def features(sft: SimpleFeatureType): Seq[ScalaSimpleFeature] = Seq(
    Array("alice",   20,   1, "2014-01-01T12:00:00.000Z", WKTUtils.read("POINT(45.0 49.0)")),
    Array("bill",    21,   2, "2014-01-02T12:00:00.000Z", WKTUtils.read("POINT(46.0 49.0)")),
    Array("bob",     30,   3, "2014-01-03T12:00:00.000Z", WKTUtils.read("POINT(47.0 49.0)")),
    Array("charles", null, 4, "2014-01-04T12:00:00.000Z", WKTUtils.read("POINT(48.0 49.0)"))
  ).map { entry =>
    // Feature id is the name attribute (entry.head).
    val feature = new ScalaSimpleFeature(sft, entry.head.toString)
    feature.setAttributes(entry.asInstanceOf[Array[AnyRef]])
    feature
  }

  lazy val sft = createNewSchema(spec)

  step {
    addFeatures(sft, features(sft))
  }

  "Query planning" should {
    "work with table sharing off" >> {
      import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
      sft.isTableSharing must beFalse

      "do a single scan for attribute idx queries" >> {
        val filterName = "(((name = 'alice') or name = 'bill') or name = 'bob')"
        val filter = ECQL.toFilter(s"$filterName AND BBOX(geom, 40, 40, 60, 60) and " +
            s"dtg during 2014-01-01T00:00:00.000Z/2014-01-05T00:00:00.000Z ")
        val query = new Query(sft.getTypeName, filter, Array[String]("dtg", "geom", "name"))
        val plans = ds.getQueryPlan(query)
        // Expect exactly one batch scan with the filter/transform + attribute KV iterators.
        plans.size mustEqual 1
        plans.head must beAnInstanceOf[BatchScanPlan]
        val bsp = plans.head.asInstanceOf[BatchScanPlan]
        bsp.iterators must haveLength(2)
        bsp.iterators.map(_.getIteratorClass) must containTheSameElementsAs {
          Seq(classOf[FilterTransformIterator].getName, classOf[AttributeKeyValueIterator].getName)
        }

        val fs = ds.getFeatureSource(sft.getTypeName)
        val rws = SelfClosingIterator(fs.getFeatures(query).features).toList
        rws must haveLength(3)
        val alice = rws.filter(_.get[String]("name") == "alice").head
        alice.getID mustEqual "alice"
        // Only the three requested transform attributes come back.
        alice.getAttributeCount mustEqual 3
      }

      "work with 150 attrs" >> {
        import scala.collection.JavaConversions._
        val ff = CommonFactoryFinder.getFilterFactory2
        // Large OR union should still collapse to a single attribute-index scan.
        val filterName = ff.or((0 to 150).map(i => ff.equals(ff.property("name"), ff.literal(i.toString))))
        val filter = ff.and(filterName, ECQL.toFilter("BBOX(geom, 40, 40, 60, 60) and " +
            "dtg during 2014-01-01T00:00:00.000Z/2014-01-05T00:00:00.000Z "))
        val query = new Query(sft.getTypeName, filter, Array[String]("dtg", "geom", "name"))
        val plans = ds.getQueryPlan(query)
        plans.size mustEqual 1
        plans.head must beAnInstanceOf[BatchScanPlan]
        val bsp = plans.head.asInstanceOf[BatchScanPlan]
        bsp.iterators must haveLength(2)
        bsp.iterators.map(_.getIteratorClass) must containTheSameElementsAs {
          Seq(classOf[FilterTransformIterator].getName, classOf[AttributeKeyValueIterator].getName)
        }
      }

      "support sampling" >> {
        // note: sampling is per-iterator, so an 'OR =' query usually won't reduce the results
        val filterName = "name > 'alice'"
        val filter = ECQL.toFilter(s"$filterName AND BBOX(geom, 40, 40, 60, 60) and " +
            s"dtg during 2014-01-01T00:00:00.000Z/2014-01-05T00:00:00.000Z ")
        val query = new Query(sft.getTypeName, filter, Array[String]("dtg", "geom", "name"))
        query.getHints.put(QueryHints.SAMPLING, new java.lang.Float(.5f))
        val plans = ds.getQueryPlan(query)
        plans.size mustEqual 1
        plans.head must beAnInstanceOf[BatchScanPlan]
        val bsp = plans.head.asInstanceOf[BatchScanPlan]
        bsp.iterators must haveLength(2)
        bsp.iterators.map(_.getIteratorClass) must containTheSameElementsAs {
          Seq(classOf[FilterTransformIterator].getName, classOf[AttributeKeyValueIterator].getName)
        }

        val fs = ds.getFeatureSource(sft.getTypeName)
        val rws = SelfClosingIterator(fs.getFeatures(query).features).toList
        // Half of the 3 matching features (sampled), rounded up to 2.
        rws must haveLength(2)
        rws.head.getAttributeCount mustEqual 3
      }
    }
  }
}
| elahrvivaz/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/index/AttrKeyPlusValueIteratorTest.scala | Scala | apache-2.0 | 5,758 |
package net.lshift.diffa.agent.auth
import net.lshift.diffa.kernel.config.system.PolicyStatement
/**
 * Base contract for a Diffa privilege. A privilege pairs a name with the logic that
 * decides whether a policy statement in a given space grants it for a given target.
 */
trait Privilege {
  /** The name of the privilege **/
  def name:String
  /** Determines whether the given statement provides access to the given target for this privilege */
  def isValidForTarget(space:Long, stmt:PolicyStatement, target:TargetObject):Boolean
}
/** A privilege applied against a user. */
case class UserPrivilege(name: String) extends Privilege {
  // Target evaluation is handled specially for users, so statements never match directly here.
  def isValidForTarget(space: Long, stmt: PolicyStatement, target: TargetObject): Boolean = false
}
/**
 * A privilege that is applied against a space. Grants access when the target lives in
 * the given space or lists it among its parent spaces.
 */
class SpacePrivilege(val name: String) extends Privilege {
  def isValidForTarget(space: Long, stmt: PolicyStatement, target: TargetObject) = target match {
    case spaceTarget: SpaceTarget => spaceTarget.space == space || spaceTarget.parents.contains(space)
    case _                        => false
  }
}
/**
 * A privilege that is applied against a pair. Requires the space check to pass, and —
 * when the target is a pair — the statement to name that pair.
 */
class PairPrivilege(name: String) extends SpacePrivilege(name) {
  override def isValidForTarget(space: Long, stmt: PolicyStatement, target: TargetObject) =
    super.isValidForTarget(space, stmt, target) && {
      target match {
        case pairTarget: PairTarget => stmt.appliesTo("pair", pairTarget.pair)
        case _                      => true // non-pair targets pass on the space check alone
      }
    }
}
/**
 * A privilege that is applied against an endpoint. Requires the space check to pass,
 * and — when the target is an endpoint — the statement to name that endpoint.
 */
class EndpointPrivilege(name: String) extends SpacePrivilege(name) {
  override def isValidForTarget(space: Long, stmt: PolicyStatement, target: TargetObject) =
    super.isValidForTarget(space, stmt, target) && {
      target match {
        case endpointTarget: EndpointTarget => stmt.appliesTo("endpoint", endpointTarget.endpoint)
        case _                              => true // non-endpoint targets pass on the space check alone
      }
    }
}
/**
 * A privilege that is applied against an action. Requires the pair check to pass,
 * and — when the target is an action — the statement to name that action.
 */
class ActionPrivilege(name: String) extends PairPrivilege(name) {
  override def isValidForTarget(space: Long, stmt: PolicyStatement, target: TargetObject) =
    super.isValidForTarget(space, stmt, target) && {
      target match {
        case actionTarget: ActionTarget => stmt.appliesTo("action", actionTarget.action)
        case _                          => true // non-action targets pass on the pair check alone
      }
    }
}
/**
 * A privilege that is applied against a report. Requires the pair check to pass,
 * and — when the target is a report — the statement to name that report.
 */
class ReportPrivilege(name: String) extends PairPrivilege(name) {
  override def isValidForTarget(space: Long, stmt: PolicyStatement, target: TargetObject) =
    super.isValidForTarget(space, stmt, target) && {
      target match {
        case reportTarget: ReportTarget => stmt.appliesTo("report", reportTarget.report)
        case _                          => true // non-report targets pass on the pair check alone
      }
    }
}
/** Catalogue of privileges supported by Diffa */
object Privileges {
  // Space membership / configuration
  val SPACE_USER = new SpacePrivilege("space-user")
  val READ_DIFFS = new PairPrivilege("read-diffs")    // Ability to query for differences, access overviews, etc
  val CONFIGURE = new SpacePrivilege("configure")     // Ability to configure a space
  // Scanning lifecycle
  val INITIATE_SCAN = new PairPrivilege("initiate-scan")
  val CANCEL_SCAN = new PairPrivilege("cancel-scan")
  // Endpoint data submission
  val POST_CHANGE_EVENT = new EndpointPrivilege("post-change-event")
  val POST_INVENTORY = new EndpointPrivilege("post-inventory")
  // Visibility / diagnostics
  val SCAN_STATUS = new PairPrivilege("view-scan-status")
  val DIAGNOSTICS = new PairPrivilege("view-diagnostics")
  val INVOKE_ACTIONS = new ActionPrivilege("invoke-actions")
  val IGNORE_DIFFS = new PairPrivilege("ignore-diffs")
  val VIEW_EXPLANATIONS = new PairPrivilege("view-explanations")
  val EXECUTE_REPORT = new ReportPrivilege("execute-report")
  val VIEW_ACTIONS = new PairPrivilege("view-actions")
  val VIEW_REPORTS = new PairPrivilege("view-reports")
  val READ_EVENT_DETAILS = new PairPrivilege("read-event-details")
  // Per-user settings
  val USER_PREFERENCES = new UserPrivilege("user-preferences")
} | 0x6e6562/diffa | agent/src/main/scala/net/lshift/diffa/agent/auth/Privilege.scala | Scala | apache-2.0 | 3,920 |
/* Copyright 2015 UniCredit S.p.A.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eu.unicredit.riffel
import Chisel._
/**
 * Cycle-by-cycle test for [[RiffelTXComm]]: feeds three data words (11, 22, 33) of
 * length 3 into the TX buffer, acknowledges the transmit request, then drains the
 * buffered words with TX_DATA_REN and checks that the module returns to idle.
 */
case class RiffelTXCommTester(rc: RiffelTXComm) extends Tester(rc) {

  /**
   * Drives one clock cycle: applies the five inputs, checks the four outputs
   * (before the clock edge, as in the original step-by-step sequence), then steps.
   */
  private def cycle(dataValid: Int, dataLen: Int, data: Int, txAck: Int, txDataRen: Int)
                   (tx: Int, txLen: Int, txData: Int, txDataValid: Int): Unit = {
    poke(rc.io.data_valid, dataValid)
    poke(rc.io.data_len, dataLen)
    poke(rc.io.data, data)
    poke(rc.io.TX_ACK, txAck)
    poke(rc.io.TX_DATA_REN, txDataRen)
    expect(rc.io.TX, tx)
    expect(rc.io.TX_LEN, txLen)
    expect(rc.io.TX_DATA, txData)
    expect(rc.io.TX_DATA_VALID, txDataValid)
    step(1)
  }

  // STEP 1: idle — no inputs, no outputs.
  cycle(0, 0, 0, 0, 0)(0, 0, 0, 0)
  // STEP 2: first word (11) written; outputs not asserted yet.
  cycle(1, 3, 11, 0, 0)(0, 0, 0, 0)
  // STEP 3: second word (22) written; TX request raised with total length 6, head word 11.
  cycle(1, 3, 22, 0, 0)(1, 6, 11, 0)
  // STEP 4: third word (33) written and TX acknowledged; data becomes valid.
  cycle(1, 3, 33, 1, 0)(1, 6, 11, 1)
  // STEP 5: reader enables TX_DATA_REN; head word 11 still presented.
  cycle(0, 0, 0, 0, 1)(1, 6, 11, 1)
  // STEP 6: read advances to the second word (22).
  cycle(0, 0, 0, 0, 1)(1, 6, 22, 1)
  // STEP 7: read advances to the last word (33).
  cycle(0, 0, 0, 0, 1)(1, 6, 33, 1)
  // STEP 8: buffer drained; module back to idle.
  cycle(0, 0, 0, 0, 0)(0, 0, 0, 0)
}
| unicredit/riffel | src/test/scala/eu/unicredit/riffel/RiffelTXCommTester.scala | Scala | apache-2.0 | 2,790 |
package com.eevolution.context.dictionary.infrastructure.repository
import java.util.UUID
import com.eevolution.context.dictionary.domain._
import com.eevolution.context.dictionary.domain.model.AttributeProcessPara
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
import com.eevolution.utils.PaginatedSequence
import com.lightbend.lagom.scaladsl.persistence.jdbc.JdbcSession
import scala.concurrent.{ExecutionContext, Future}
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: emeris.hernandez@e-evolution.com, http://www.e-evolution.com , http://github.com/EmerisScala
* Created by emeris.hernandez@e-evolution.com , www.e-evolution.com on 07/11/17.
*/
/**
 * Attribute Process Para Repository — Quill-backed CRUD/query access for
 * [[AttributeProcessPara]] rows.
 *
 * @param session JDBC session (required by Lagom's persistence wiring)
 * @param executionContext context on which the blocking query futures run
 */
class AttributeProcessParaRepository (session: JdbcSession)(implicit executionContext: ExecutionContext)
  extends api.repository.AttributeProcessParaRepository[AttributeProcessPara , Int]
    with AttributeProcessParaMapping {

  /** Looks up a single row by id. Fails the future (NoSuchElementException) when absent. */
  def getById(id: Int): Future[AttributeProcessPara] = {
    Future(run(queryAttributeProcessPara.filter(_.attributeProcessParaId == lift(id))).headOption.get)
  }

  /** Looks up a single row by UUID. Fails the future (NoSuchElementException) when absent. */
  def getByUUID(uuid: UUID): Future[AttributeProcessPara] = {
    Future(run(queryAttributeProcessPara.filter(_.uuid == lift(uuid.toString))).headOption.get)
  }

  /** Returns all rows matching the given id (empty list when none match). */
  def getByAttributeProcessParaId(id : Int) : Future[List[AttributeProcessPara]] = {
    // Bug fix: previously ignored `id` and returned every row in the table.
    Future(run(queryAttributeProcessPara.filter(_.attributeProcessParaId == lift(id))))
  }

  /** Returns every row in the table. */
  def getAll() : Future[List[AttributeProcessPara]] = {
    Future(run(queryAttributeProcessPara))
  }

  /**
   * Returns one page of rows together with pagination metadata.
   *
   * @param page     zero-based page index
   * @param pageSize number of elements per page
   */
  def getAllByPage(page: Int, pageSize: Int): Future[PaginatedSequence[AttributeProcessPara]] = {
    val offset = page * pageSize
    val limit = (page + 1) * pageSize
    for {
      count <- countAttributeProcessPara()
      elements <- if (offset > count) Future.successful(Nil)
      else selectAttributeProcessPara(offset, limit)
    } yield {
      PaginatedSequence(elements, page, pageSize, count)
    }
  }

  /** Total number of rows in the table. */
  private def countAttributeProcessPara() = {
    Future(run(queryAttributeProcessPara.size).toInt)
  }

  /**
   * Fetches rows [offset, limit) of the table. NOTE: pagination is applied in memory
   * after loading all rows — acceptable for dictionary-sized tables only.
   */
  private def selectAttributeProcessPara(offset: Int, limit: Int): Future[Seq[AttributeProcessPara]] = {
    // Bug fix: `limit` is an exclusive end index, so the page holds (limit - offset)
    // elements; taking `limit` elements returned oversized pages.
    Future(run(queryAttributeProcessPara).drop(offset).take(limit - offset).toSeq)
  }
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/AttributeProcessParaRepository.scala | Scala | gpl-3.0 | 2,983 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.apigw.actions.test
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import common.rest.WskRest
/**
 * Tests for basic CLI usage. Some of these tests require a deployed backend.
 * Runs the shared [[ApiGwRoutemgmtActionTests]] suite against the REST-based
 * Wsk client implementation instead of the default one.
 */
@RunWith(classOf[JUnitRunner])
class ApiGwRestRoutemgmtActionTests extends ApiGwRoutemgmtActionTests {
  override lazy val wsk = new WskRest
}
| duynguyen/incubator-openwhisk | tests/src/test/scala/whisk/core/apigw/actions/test/ApiGwRestRoutemgmtActionTests.scala | Scala | apache-2.0 | 1,171 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.holdenkarau.spark.testing
import org.apache.spark._
import org.scalatest.BeforeAndAfterAll
import org.scalatest.BeforeAndAfterEach
import org.scalatest.Suite
/**
 * Manages a local `sc` [[SparkContext]] variable,
 * correctly stopping it after each test.
 */
trait LocalSparkContext extends BeforeAndAfterEach
    with BeforeAndAfterAll { self: Suite =>

  // transient so the context is never captured in serialized closures
  @transient var sc: SparkContext = _

  override def afterEach() {
    // Stop our context before delegating to any other afterEach behaviour.
    resetSparkContext()
    super.afterEach()
  }

  /** Stops the current context (if any) and clears the `sc` reference. */
  def resetSparkContext() {
    LocalSparkContext.stop(sc)
    sc = null
  }

}
object LocalSparkContext {

  /** Stops `sc` when non-null and clears the driver port system property. */
  def stop(sc: SparkContext) {
    if (sc != null) {
      sc.stop()
    }
    // To avoid Akka rebinding to the same port, since it doesn't
    // unbind immediately on shutdown.
    System.clearProperty("spark.driver.port")
  }

  /** Runs `f` by passing in `sc` and ensures that `sc` is stopped. */
  def withSpark[T](sc: SparkContext)(f: SparkContext => T): T = {
    try f(sc)
    finally stop(sc)
  }

}
| snithish/spark-testing-base | src/main/1.3-only/scala/com/holdenkarau/spark/testing/LocalSparkContext.scala | Scala | apache-2.0 | 1,826 |
package scoutagent.controller
import scoutagent._
import scoutagent.State._
/** Contract for a scout-agent controller: chooses actions from states and observes episode lifecycle. */
trait Controller {
  /** Returns an independent copy of this controller. */
  def copy: Controller
  /** Called once before an episode with the dimensions of the map to explore. */
  def setup(mapHeight: Int, mapWidth: Int): Unit
  /** Chooses one of the offered action names given the agent's current state. */
  def selectAction(actions: List[String], state: AgentState): String
  /** Called at episode end with the full trajectory of state/action pairs. */
  def shutDown(stateActionPairs: List[StateActionPair]): Unit
}
| KeithCissell/SCOUt | app/src/main/scala/agent/Controller.scala | Scala | mit | 307 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.io._
import java.net.URI
import java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}
import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}
import scala.collection.JavaConverters._
import scala.collection.Map
import scala.collection.mutable.HashMap
import scala.language.implicitConversions
import scala.reflect.{classTag, ClassTag}
import scala.util.control.NonFatal
import com.google.common.collect.MapMaker
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}
import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}
import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}
import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}
import org.apache.spark.deploy.StandaloneResourceUtils._
import org.apache.spark.executor.{ExecutorMetrics, ExecutorMetricsSource}
import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Tests._
import org.apache.spark.internal.config.UI._
import org.apache.spark.internal.plugin.PluginContainer
import org.apache.spark.io.CompressionCodec
import org.apache.spark.metrics.source.JVMCPUSource
import org.apache.spark.partial.{ApproximateEvaluator, PartialResult}
import org.apache.spark.rdd._
import org.apache.spark.resource.{ResourceID, ResourceInformation}
import org.apache.spark.resource.ResourceUtils._
import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend
import org.apache.spark.scheduler.local.LocalSchedulerBackend
import org.apache.spark.shuffle.ShuffleDataIOUtils
import org.apache.spark.shuffle.api.ShuffleDriverComponents
import org.apache.spark.status.{AppStatusSource, AppStatusStore}
import org.apache.spark.status.api.v1.ThreadStackTrace
import org.apache.spark.storage._
import org.apache.spark.storage.BlockManagerMessages.TriggerThreadDump
import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}
import org.apache.spark.util._
import org.apache.spark.util.logging.DriverLogger
/**
* Main entry point for Spark functionality. A SparkContext represents the connection to a Spark
* cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster.
*
* @note Only one `SparkContext` should be active per JVM. You must `stop()` the
* active `SparkContext` before creating a new one.
* @param config a Spark Config object describing the application configuration. Any settings in
* this config overrides the default configs as well as system properties.
*/
class SparkContext(config: SparkConf) extends Logging {
// The call site where this SparkContext was constructed.
private val creationSite: CallSite = Utils.getCallSite()

// In order to prevent multiple SparkContexts from being active at the same time, mark this
// context as having started construction.
// NOTE: this must be placed at the beginning of the SparkContext constructor.
SparkContext.markPartiallyConstructed(this)

// Wall-clock time (ms) when construction began; later handed to the SparkUI.
val startTime = System.currentTimeMillis()

// Set once when the context is stopped; checked by assertNotStopped() before most operations.
private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false)
/**
 * Throw an IllegalStateException if this context has already been stopped.
 * The message includes both this (stopped) context's creation site and that of the
 * currently active context, if any, to help diagnose accidental use of a stale context.
 */
private[spark] def assertNotStopped(): Unit = {
  if (stopped.get()) {
    val activeContext = SparkContext.activeContext.get()
    val activeCreationSite =
      if (activeContext == null) {
        "(No active SparkContext.)"
      } else {
        activeContext.creationSite.longForm
      }
    throw new IllegalStateException(
      s"""Cannot call methods on a stopped SparkContext.
|This stopped SparkContext was created at:
|
|${creationSite.longForm}
|
|The currently active SparkContext was created at:
|
|$activeCreationSite
""".stripMargin)
  }
}
/**
 * Create a SparkContext that loads settings from system properties (for instance, when
 * launching with ./bin/spark-submit).
 */
def this() = this(new SparkConf())

/**
 * Alternative constructor that allows setting common Spark properties directly
 *
 * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
 * @param appName A name for your application, to display on the cluster web UI
 * @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters
 */
def this(master: String, appName: String, conf: SparkConf) =
  this(SparkContext.updatedConf(conf, master, appName))

/**
 * Alternative constructor that allows setting common Spark properties directly
 *
 * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
 * @param appName A name for your application, to display on the cluster web UI.
 * @param sparkHome Location where Spark is installed on cluster nodes.
 * @param jars Collection of JARs to send to the cluster. These can be paths on the local file
 *             system or HDFS, HTTP, HTTPS, or FTP URLs.
 * @param environment Environment variables to set on worker nodes.
 */
def this(
    master: String,
    appName: String,
    sparkHome: String = null,
    jars: Seq[String] = Nil,
    environment: Map[String, String] = Map()) = {
  // All auxiliary constructors funnel into the primary constructor via updatedConf.
  this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment))
}
// The following constructors are required when Java code accesses SparkContext directly.
// Please see SI-4278
/**
 * Alternative constructor that allows setting common Spark properties directly
 *
 * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
 * @param appName A name for your application, to display on the cluster web UI.
 */
private[spark] def this(master: String, appName: String) =
  this(master, appName, null, Nil, Map())

/**
 * Alternative constructor that allows setting common Spark properties directly
 *
 * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
 * @param appName A name for your application, to display on the cluster web UI.
 * @param sparkHome Location where Spark is installed on cluster nodes.
 */
private[spark] def this(master: String, appName: String, sparkHome: String) =
  this(master, appName, sparkHome, Nil, Map())

/**
 * Alternative constructor that allows setting common Spark properties directly
 *
 * @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
 * @param appName A name for your application, to display on the cluster web UI.
 * @param sparkHome Location where Spark is installed on cluster nodes.
 * @param jars Collection of JARs to send to the cluster. These can be paths on the local file
 *             system or HDFS, HTTP, HTTPS, or FTP URLs.
 */
private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) =
  this(master, appName, sparkHome, jars, Map())
// log out Spark Version in Spark driver log
logInfo(s"Running Spark version $SPARK_VERSION")

/* ------------------------------------------------------------------------------------- *
 | Private variables. These variables keep the internal state of the context, and are   |
 | not accessible by the outside world. They're mutable since we want to initialize all |
 | of them to some neutral value ahead of time, so that calling "stop()" while the      |
 | constructor is still running is safe.                                                |
 * ------------------------------------------------------------------------------------- */

// Cloned, validated copy of the user-supplied config (assigned in the init block below).
private var _conf: SparkConf = _
// Event-log destination and compression codec; populated only when event logging is enabled.
private var _eventLogDir: Option[URI] = None
private var _eventLogCodec: Option[String] = None
private var _listenerBus: LiveListenerBus = _
private var _env: SparkEnv = _
private var _statusTracker: SparkStatusTracker = _
private var _progressBar: Option[ConsoleProgressBar] = None
private var _ui: Option[SparkUI] = None
private var _hadoopConfiguration: Configuration = _
// Per-executor memory in MB (derived from config / env vars during init).
private var _executorMemory: Int = _
// Scheduling components, created together by SparkContext.createTaskScheduler.
private var _schedulerBackend: SchedulerBackend = _
private var _taskScheduler: TaskScheduler = _
private var _heartbeatReceiver: RpcEndpointRef = _
@volatile private var _dagScheduler: DAGScheduler = _
private var _applicationId: String = _
private var _applicationAttemptId: Option[String] = None
private var _eventLogger: Option[EventLoggingListener] = None
private var _driverLogger: Option[DriverLogger] = None
private var _executorAllocationManager: Option[ExecutorAllocationManager] = None
private var _cleaner: Option[ContextCleaner] = None
private var _listenerBusStarted: Boolean = false
private var _jars: Seq[String] = _
private var _files: Seq[String] = _
private var _shutdownHookRef: AnyRef = _
private var _statusStore: AppStatusStore = _
private var _heartbeater: Heartbeater = _
// Driver resources (e.g. accelerators) discovered/acquired during init.
private var _resources: scala.collection.immutable.Map[String, ResourceInformation] = _
private var _shuffleDriverComponents: ShuffleDriverComponents = _
private var _plugins: Option[PluginContainer] = None
// Internal view of the live config; external callers get a defensive copy via getConf.
private[spark] def conf: SparkConf = _conf

/**
 * Return a copy of this SparkContext's configuration. The configuration ''cannot'' be
 * changed at runtime.
 */
def getConf: SparkConf = conf.clone()

// Resources (name -> info) discovered for or acquired by this driver during init.
def resources: Map[String, ResourceInformation] = _resources

def jars: Seq[String] = _jars
def files: Seq[String] = _files
def master: String = _conf.get("spark.master")
def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE)
def appName: String = _conf.get("spark.app.name")

private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED)
private[spark] def eventLogDir: Option[URI] = _eventLogDir
private[spark] def eventLogCodec: Option[String] = _eventLogCodec

// True when the master URL denotes a local (in-process) deployment.
def isLocal: Boolean = Utils.isLocalMaster(_conf)
/**
 * Whether this driver was submitted in client deploy mode against a standalone
 * Spark cluster (a `spark://...` master or a `local-cluster[...]` test master).
 */
private def isClientStandalone: Boolean = {
  val onStandaloneCluster = master match {
    // Both regexes identify standalone-cluster master URLs; no bindings are needed.
    case SparkMasterRegex.SPARK_REGEX(_) | SparkMasterRegex.LOCAL_CLUSTER_REGEX(_, _, _) => true
    case _ => false
  }
  onStandaloneCluster && deployMode == "client"
}
/**
 * @return true if context is stopped or in the midst of stopping.
 */
def isStopped: Boolean = stopped.get()

// Backing store for application status, fed by the listener bus (see init block).
private[spark] def statusStore: AppStatusStore = _statusStore

// An asynchronous listener bus for Spark events
private[spark] def listenerBus: LiveListenerBus = _listenerBus
// This function allows components created by SparkEnv to be mocked in unit tests:
private[spark] def createSparkEnv(
    conf: SparkConf,
    isLocal: Boolean,
    listenerBus: LiveListenerBus): SparkEnv = {
  // Delegates to the driver-side env factory, sizing it by the driver core count.
  SparkEnv.createDriverEnv(conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf))
}

private[spark] def env: SparkEnv = _env

// Used to store a URL for each static file/jar together with the file's local timestamp
private[spark] val addedFiles = new ConcurrentHashMap[String, Long]().asScala
private[spark] val addedJars = new ConcurrentHashMap[String, Long]().asScala

// Keeps track of all persisted RDDs
private[spark] val persistentRdds = {
  // Weak values let RDDs that are no longer strongly referenced elsewhere be GC'd.
  val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]()
  map.asScala
}
def statusTracker: SparkStatusTracker = _statusTracker

private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar

private[spark] def ui: Option[SparkUI] = _ui

// Web URL of the Spark UI, when the UI is enabled.
def uiWebUrl: Option[String] = _ui.map(_.webUrl)

/**
 * A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse.
 *
 * @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you
 * plan to set some global configurations for all Hadoop RDDs.
 */
def hadoopConfiguration: Configuration = _hadoopConfiguration

private[spark] def executorMemory: Int = _executorMemory

// Environment variables to pass to our executors.
private[spark] val executorEnvs = HashMap[String, String]()

// Set SPARK_USER for user who is running SparkContext.
val sparkUser = Utils.getCurrentUserName()

private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend

// Getter/setter pair so tests can swap in a mock scheduler.
private[spark] def taskScheduler: TaskScheduler = _taskScheduler
private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = {
  _taskScheduler = ts
}

private[spark] def dagScheduler: DAGScheduler = _dagScheduler
private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = {
  _dagScheduler = ds
}

private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents

/**
 * A unique identifier for the Spark application.
 * Its format depends on the scheduler implementation.
 * (i.e.
 *  in case of local spark app something like 'local-1433865536131'
 *  in case of YARN something like 'application_1433865536131_34483'
 *  in case of MESOS something like 'driver-20170926223339-0001'
 * )
 */
def applicationId: String = _applicationId
def applicationAttemptId: Option[String] = _applicationAttemptId

private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger

private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] =
  _executorAllocationManager

private[spark] def cleaner: Option[ContextCleaner] = _cleaner

// RDD checkpoint directory; None until one is configured.
private[spark] var checkpointDir: Option[String] = None

// Thread Local variable that can be used by users to pass information down the stack
protected[spark] val localProperties = new InheritableThreadLocal[Properties] {
  override protected def childValue(parent: Properties): Properties = {
    // Note: make a clone such that changes in the parent properties aren't reflected in
    // the those of the children threads, which has confusing semantics (SPARK-10563).
    Utils.cloneProperties(parent)
  }
  override protected def initialValue(): Properties = new Properties()
}
/* ------------------------------------------------------------------------------------- *
 | Initialization. This code initializes the context in a manner that is exception-safe. |
 | All internal fields holding state are initialized here, and any error prompts the    |
 | stop() method to be called.                                                          |
 * ------------------------------------------------------------------------------------- */

/**
 * Log a deprecation warning for the legacy SPARK_MEM environment variable and return the
 * value unchanged, so this can be used inline in an Option.map chain.
 */
private def warnSparkMem(value: String): String = {
  logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " +
    "deprecated, please use spark.executor.memory instead.")
  value
}
/**
 * Control our logLevel. This overrides any user-defined log settings.
 *
 * @param logLevel The desired log level as a string.
 *                 Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
 */
def setLogLevel(logLevel: String): Unit = {
  // Normalize so callers may pass lowercase or mixed-case level names.
  val normalized = logLevel.toUpperCase(Locale.ROOT)
  // `require` keeps the IllegalArgumentException contract for invalid levels.
  require(SparkContext.VALID_LOG_LEVELS.contains(normalized),
    s"Supplied level $logLevel did not match one of:" +
      s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}")
  Utils.setLogLevel(org.apache.log4j.Level.toLevel(normalized))
}
// Exception-safe initialization of all context state. The ordering of these steps is
// significant (several later steps depend on earlier ones); any failure triggers stop()
// in the catch block so partially-created services are torn down before rethrowing.
try {
  _conf = config.clone()
  _conf.validateSettings()

  if (!_conf.contains("spark.master")) {
    throw new SparkException("A master URL must be set in your configuration")
  }
  if (!_conf.contains("spark.app.name")) {
    throw new SparkException("An application name must be set in your configuration")
  }

  _driverLogger = DriverLogger(_conf)

  // Discover driver resources (e.g. accelerators), optionally from a resources file.
  val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE)
  val allResources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt)
  _resources = {
    // driver submitted in client mode under Standalone may have conflicting resources with
    // other drivers/workers on this host. We should sync driver's resources info into
    // SPARK_RESOURCES/SPARK_RESOURCES_COORDINATE_DIR/ to avoid collision.
    if (isClientStandalone) {
      acquireResources(_conf, SPARK_DRIVER_PREFIX, allResources, Utils.getProcessId)
    } else {
      allResources
    }
  }
  logResourceInfo(SPARK_DRIVER_PREFIX, _resources)

  // log out spark.app.name in the Spark driver logs
  logInfo(s"Submitted application: $appName")

  // System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster
  if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) {
    throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " +
      "Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.")
  }

  if (_conf.getBoolean("spark.logConf", false)) {
    logInfo("Spark configuration:\\n" + _conf.toDebugString)
  }

  // Set Spark driver host and port system properties. This explicitly sets the configuration
  // instead of relying on the default value of the config constant.
  _conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS))
  _conf.setIfMissing(DRIVER_PORT, 0)

  _conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER)

  _jars = Utils.getUserJars(_conf)
  _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty))
    .toSeq.flatten

  _eventLogDir =
    if (isEventLogEnabled) {
      val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/")
      Some(Utils.resolveURI(unresolvedDir))
    } else {
      None
    }

  _eventLogCodec = {
    val compress = _conf.get(EVENT_LOG_COMPRESS)
    if (compress && isEventLogEnabled) {
      Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName)
    } else {
      None
    }
  }

  _listenerBus = new LiveListenerBus(_conf)

  // Initialize the app status store and listener before SparkEnv is created so that it gets
  // all events.
  val appStatusSource = AppStatusSource.createSource(conf)
  _statusStore = AppStatusStore.createLiveStore(conf, appStatusSource)
  listenerBus.addToStatusQueue(_statusStore.listener.get)

  // Create the Spark execution environment (cache, map output tracker, etc)
  _env = createSparkEnv(_conf, isLocal, listenerBus)
  SparkEnv.set(_env)

  // If running the REPL, register the repl's output dir with the file server.
  _conf.getOption("spark.repl.class.outputDir").foreach { path =>
    val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path))
    _conf.set("spark.repl.class.uri", replUri)
  }

  _statusTracker = new SparkStatusTracker(this, _statusStore)

  _progressBar =
    if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) {
      Some(new ConsoleProgressBar(this))
    } else {
      None
    }

  _ui =
    if (conf.get(UI_ENABLED)) {
      Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "",
        startTime))
    } else {
      // For tests, do not enable the UI
      None
    }
  // Bind the UI before starting the task scheduler to communicate
  // the bound port to the cluster manager properly
  _ui.foreach(_.bind())

  _hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf)
  // Performance optimization: this dummy call to .size() triggers eager evaluation of
  // Configuration's internal `properties` field, guaranteeing that it will be computed and
  // cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create
  // a new per-session Configuration. If `properties` has not been computed by that time
  // then each newly-created Configuration will perform its own expensive IO and XML
  // parsing to load configuration defaults and populate its own properties. By ensuring
  // that we've pre-computed the parent's properties, the child Configuration will simply
  // clone the parent's properties.
  _hadoopConfiguration.size()

  // Add each JAR given through the constructor
  if (jars != null) {
    jars.foreach(addJar)
  }

  if (files != null) {
    files.foreach(addFile)
  }

  // Resolve executor memory (MB) from config, then env vars, defaulting to 1024 MB.
  _executorMemory = _conf.getOption(EXECUTOR_MEMORY.key)
    .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY")))
    .orElse(Option(System.getenv("SPARK_MEM"))
      .map(warnSparkMem))
    .map(Utils.memoryStringToMb)
    .getOrElse(1024)

  // Convert java options to env vars as a work around
  // since we can't set env vars directly in sbt.
  for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key))
    value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} {
    executorEnvs(envKey) = value
  }

  Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v =>
    executorEnvs("SPARK_PREPEND_CLASSES") = v
  }

  // The Mesos scheduler backend relies on this environment variable to set executor memory.
  // TODO: Set this only in the Mesos scheduler.
  executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m"
  executorEnvs ++= _conf.getExecutorEnv
  executorEnvs("SPARK_USER") = sparkUser

  // Load pluggable shuffle I/O and propagate any config it exposes into the Spark conf.
  _shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(config).driver()
  _shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) =>
    _conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v)
  }

  // We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will
  // retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640)
  _heartbeatReceiver = env.rpcEnv.setupEndpoint(
    HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this))

  // Initialize any plugins before the task scheduler is initialized.
  _plugins = PluginContainer(this)

  // Create and start the scheduler
  val (sched, ts) = SparkContext.createTaskScheduler(this, master, deployMode)
  _schedulerBackend = sched
  _taskScheduler = ts
  _dagScheduler = new DAGScheduler(this)
  _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet)

  val _executorMetricsSource =
    if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) {
      Some(new ExecutorMetricsSource)
    } else {
      None
    }

  // create and start the heartbeater for collecting memory metrics
  _heartbeater = new Heartbeater(
    () => SparkContext.this.reportHeartBeat(_executorMetricsSource),
    "driver-heartbeater",
    conf.get(EXECUTOR_HEARTBEAT_INTERVAL))
  _heartbeater.start()

  // start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's
  // constructor
  _taskScheduler.start()

  _applicationId = _taskScheduler.applicationId()
  _applicationAttemptId = _taskScheduler.applicationAttemptId()
  _conf.set("spark.app.id", _applicationId)
  if (_conf.get(UI_REVERSE_PROXY)) {
    System.setProperty("spark.ui.proxyBase", "/proxy/" + _applicationId)
  }
  _ui.foreach(_.setAppId(_applicationId))
  _env.blockManager.initialize(_applicationId)

  // The metrics system for Driver need to be set spark.app.id to app ID.
  // So it should start after we get app ID from the task scheduler and set spark.app.id.
  _env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED))
  // Attach the driver metrics servlet handler to the web ui after the metrics system is started.
  _env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler)))

  _eventLogger =
    if (isEventLogEnabled) {
      val logger =
        new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get,
          _conf, _hadoopConfiguration)
      logger.start()
      listenerBus.addToEventLogQueue(logger)
      Some(logger)
    } else {
      None
    }

  _cleaner =
    if (_conf.get(CLEANER_REFERENCE_TRACKING)) {
      Some(new ContextCleaner(this, _shuffleDriverComponents))
    } else {
      None
    }
  _cleaner.foreach(_.start())

  // Dynamic allocation only works with backends that can request/kill executors.
  val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf)
  _executorAllocationManager =
    if (dynamicAllocationEnabled) {
      schedulerBackend match {
        case b: ExecutorAllocationClient =>
          Some(new ExecutorAllocationManager(
            schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf,
            cleaner = cleaner))
        case _ =>
          None
      }
    } else {
      None
    }
  _executorAllocationManager.foreach(_.start())

  setupAndStartListenerBus()
  postEnvironmentUpdate()
  postApplicationStart()

  // Post init
  _taskScheduler.postStartHook()
  _env.metricsSystem.registerSource(_dagScheduler.metricsSource)
  _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager))
  _env.metricsSystem.registerSource(new JVMCPUSource())
  _executorMetricsSource.foreach(_.register(_env.metricsSystem))
  _executorAllocationManager.foreach { e =>
    _env.metricsSystem.registerSource(e.executorAllocationManagerSource)
  }
  appStatusSource.foreach(_env.metricsSystem.registerSource(_))
  _plugins.foreach(_.registerMetrics(applicationId))
  // Make sure the context is stopped if the user forgets about it. This avoids leaving
  // unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM
  // is killed, though.
  logDebug("Adding shutdown hook") // force eager creation of logger
  _shutdownHookRef = ShutdownHookManager.addShutdownHook(
    ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () =>
    logInfo("Invoking stop() from shutdown hook")
    try {
      stop()
    } catch {
      case e: Throwable =>
        logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e)
    }
  }
} catch {
  case NonFatal(e) =>
    logError("Error initializing SparkContext.", e)
    try {
      stop()
    } catch {
      case NonFatal(inner) =>
        logError("Error stopping SparkContext after init error.", inner)
    } finally {
      // Always rethrow the original initialization failure.
      throw e
    }
}
/**
 * Called by the web UI to obtain executor thread dumps. This method may be expensive.
 * Logs an error and returns None if we failed to obtain a thread dump, which could occur due
 * to an executor being dead or unresponsive or due to network issues while sending the thread
 * dump message back to the driver.
 */
private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = {
  try {
    executorId match {
      case SparkContext.DRIVER_IDENTIFIER =>
        // The driver's own threads can be dumped locally without any RPC.
        Some(Utils.getThreadDump())
      case _ =>
        // Look up the executor's RPC endpoint and request a dump synchronously.
        val executorEndpoint = env.blockManager.master.getExecutorEndpointRef(executorId).get
        Some(executorEndpoint.askSync[Array[ThreadStackTrace]](TriggerThreadDump))
    }
  } catch {
    case e: Exception =>
      logError(s"Exception getting thread dump from executor $executorId", e)
      None
  }
}
// Returns the current thread's live Properties object (not a copy).
private[spark] def getLocalProperties: Properties = localProperties.get()

// Replace the current thread's local properties wholesale.
private[spark] def setLocalProperties(props: Properties): Unit = {
  localProperties.set(props)
}
/**
 * Set a local property that affects jobs submitted from this thread, such as the Spark fair
 * scheduler pool. User-defined properties may also be set here. These properties are propagated
 * through to worker tasks and can be accessed there via
 * [[org.apache.spark.TaskContext#getLocalProperty]].
 *
 * These properties are inherited by child threads spawned from this thread, which can lead
 * to surprising propagation when thread pools reuse and spawn worker threads.
 */
def setLocalProperty(key: String, value: String): Unit = {
  val threadProps = localProperties.get
  if (value != null) {
    threadProps.setProperty(key, value)
  } else {
    // A null value clears the property for the current thread.
    threadProps.remove(key)
  }
}
/**
 * Get a local property set in this thread, or null if it is missing. See
 * `org.apache.spark.SparkContext.setLocalProperty`.
 */
def getLocalProperty(key: String): String = {
  val threadProps = localProperties.get
  // The thread-local may be null; getProperty itself returns null for absent keys.
  if (threadProps == null) null else threadProps.getProperty(key)
}
/** Set a human readable description of the current job. */
def setJobDescription(value: String): Unit = {
  // Stored as a thread-local property, so it applies to jobs submitted from this thread.
  setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value)
}
/**
 * Assigns a group ID to all the jobs started by this thread until the group ID is set to a
 * different value or cleared.
 *
 * Often, a unit of execution in an application consists of multiple Spark actions or jobs.
 * Application programmers can use this method to group all those jobs together and give a
 * group description. Once set, the Spark web UI will associate such jobs with this group.
 *
 * The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all
 * running jobs in this group. For example,
 * {{{
 * // In the main thread:
 * sc.setJobGroup("some_job_to_cancel", "some job description")
 * sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count()
 *
 * // In a separate thread:
 * sc.cancelJobGroup("some_job_to_cancel")
 * }}}
 *
 * @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()`
 * being called on the job's executor threads. This is useful to help ensure that the tasks
 * are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS
 * may respond to Thread.interrupt() by marking nodes as dead.
 */
def setJobGroup(groupId: String,
    description: String, interruptOnCancel: Boolean = false): Unit = {
  // All three settings are thread-local properties (see setLocalProperty).
  setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description)
  setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId)
  // Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids
  // changing several public APIs and allows Spark cancellations outside of the cancelJobGroup
  // APIs to also take advantage of this property (e.g., internal job failures or canceling from
  // JobProgressTab UI) on a per-job basis.
  setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString)
}
/** Clear the current thread's job group ID and its description. */
def clearJobGroup(): Unit = {
  // Setting a thread-local property to null removes it (see setLocalProperty).
  Seq(
    SparkContext.SPARK_JOB_DESCRIPTION,
    SparkContext.SPARK_JOB_GROUP_ID,
    SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL
  ).foreach(setLocalProperty(_, null))
}
/**
 * Execute a block of code in a scope such that all new RDDs created in this body will
 * be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}.
 *
 * @note Return statements are NOT allowed in the given body.
 */
private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body)
// Methods for creating RDDs

/** Distribute a local Scala collection to form an RDD.
 *
 * @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call
 * to parallelize and before the first action on the RDD, the resultant RDD will reflect the
 * modified collection. Pass a copy of the argument to avoid this.
 * @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an
 * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions.
 * @param seq Scala collection to distribute
 * @param numSlices number of partitions to divide the collection into
 * @return RDD representing distributed collection
 */
def parallelize[T: ClassTag](
    seq: Seq[T],
    numSlices: Int = defaultParallelism): RDD[T] = withScope {
  assertNotStopped()
  // No location preferences are supplied here (empty map).
  new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]())
}
/**
 * Creates a new RDD[Long] containing elements from `start` to `end`(exclusive), increased by
 * `step` every element.
 *
 * @note if we need to cache this RDD, we should make sure each partition does not exceed limit.
 *
 * @param start the start value.
 * @param end the end value.
 * @param step the incremental step
 * @param numSlices number of partitions to divide the collection into
 * @return RDD representing distributed range
 */
def range(
    start: Long,
    end: Long,
    step: Long = 1,
    numSlices: Int = defaultParallelism): RDD[Long] = withScope {
  assertNotStopped()
  // when step is 0, range will run infinitely
  require(step != 0, "step cannot be 0")
  // Total element count, computed in BigInt space so extreme Long ranges cannot overflow.
  val numElements: BigInt = {
    val safeStart = BigInt(start)
    val safeEnd = BigInt(end)
    if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) {
      (safeEnd - safeStart) / step
    } else {
      // the remainder has the same sign with range, could add 1 more
      (safeEnd - safeStart) / step + 1
    }
  }
  parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) =>
    // Partition i covers [partitionStart, partitionEnd), still in BigInt space.
    val partitionStart = (i * numElements) / numSlices * step + start
    val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start
    // Clamp a BigInt bound back into the Long domain.
    def getSafeMargin(bi: BigInt): Long =
      if (bi.isValidLong) {
        bi.toLong
      } else if (bi > 0) {
        Long.MaxValue
      } else {
        Long.MinValue
      }
    val safePartitionStart = getSafeMargin(partitionStart)
    val safePartitionEnd = getSafeMargin(partitionEnd)

    new Iterator[Long] {
      private[this] var number: Long = safePartitionStart
      // Set when `number += step` wraps past Long.MaxValue/MinValue; ends iteration.
      private[this] var overflow: Boolean = false

      override def hasNext =
        if (!overflow) {
          if (step > 0) {
            number < safePartitionEnd
          } else {
            number > safePartitionEnd
          }
        } else false

      override def next() = {
        val ret = number
        number += step
        if (number < ret ^ step < 0) {
          // we have Long.MaxValue + Long.MaxValue < Long.MaxValue
          // and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step
          // back, we are pretty sure that we have an overflow.
          overflow = true
        }
        ret
      }
    }
  }
}
/** Distribute a local Scala collection to form an RDD.
 *
 * This method is identical to `parallelize`.
 * @param seq Scala collection to distribute
 * @param numSlices number of partitions to divide the collection into
 * @return RDD representing distributed collection
 */
def makeRDD[T: ClassTag](
    seq: Seq[T],
    numSlices: Int = defaultParallelism): RDD[T] = withScope {
  // Thin alias for parallelize, kept for API compatibility.
  parallelize(seq, numSlices)
}
/**
 * Distribute a local Scala collection to form an RDD, with one or more
 * location preferences (hostnames of Spark nodes) for each object.
 * Create a new partition for each collection item.
 *
 * @param seq list of tuples of data and location preferences (hostnames of Spark nodes)
 * @return RDD representing data partitioned according to location preferences
 */
def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope {
  assertNotStopped()
  // Map each element's partition index to the location preferences supplied for it.
  val indexToPrefs = seq.iterator.zipWithIndex.map { case ((_, prefs), i) => i -> prefs }.toMap
  val values = seq.map { case (value, _) => value }
  // One partition per element; at least one partition even for an empty seq.
  new ParallelCollectionRDD[T](this, values, math.max(seq.size, 1), indexToPrefs)
}
/**
 * Read a text file from HDFS, a local file system (available on all nodes), or any
 * Hadoop-supported file system URI, and return it as an RDD of Strings.
 * The text files must be encoded as UTF-8.
 *
 * @param path path to the text file on a supported file system
 * @param minPartitions suggested minimum number of partitions for the resulting RDD
 * @return RDD of lines of the text file
 */
def textFile(
    path: String,
    minPartitions: Int = defaultMinPartitions): RDD[String] = withScope {
  assertNotStopped()
  // Read through the old Hadoop API; the key is the byte offset of each line, so we
  // keep only the value (the line text).
  val lines = hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text],
    minPartitions)
  lines.map { case (_, text) => text.toString }.setName(path)
}
/**
 * Read a directory of text files from HDFS, a local file system (available on all nodes), or any
 * Hadoop-supported file system URI. Each file is read as a single record and returned in a
 * key-value pair, where the key is the path of each file, the value is the content of each file.
 * The text files must be encoded as UTF-8.
 *
 * <p> For example, if you have the following files:
 * {{{
 *   hdfs://a-hdfs-path/part-00000
 *   hdfs://a-hdfs-path/part-00001
 *   ...
 *   hdfs://a-hdfs-path/part-nnnnn
 * }}}
 *
 * Do `val rdd = sparkContext.wholeTextFile("hdfs://a-hdfs-path")`,
 *
 * <p> then `rdd` contains
 * {{{
 *   (a-hdfs-path/part-00000, its content)
 *   (a-hdfs-path/part-00001, its content)
 *   ...
 *   (a-hdfs-path/part-nnnnn, its content)
 * }}}
 *
 * @note Small files are preferred, large file is also allowable, but may cause bad performance.
 * @note On some filesystems, `.../path/*` can be a more efficient way to read all files
 *       in a directory rather than `.../path/` or `.../path`
 * @note Partitioning is determined by data locality. This may result in too few partitions
 *       by default.
 *
 * @param path Directory to the input data files, the path can be comma separated paths as the
 *             list of inputs.
 * @param minPartitions A suggestion value of the minimal splitting number for input data.
 * @return RDD representing tuples of file path and the corresponding file content
 */
def wholeTextFiles(
    path: String,
    minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope {
  assertNotStopped()
  val hadoopJob = NewHadoopJob.getInstance(hadoopConfiguration)
  // Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking
  // comma separated files as input. (see SPARK-7155)
  NewFileInputFormat.setInputPaths(hadoopJob, path)
  val confWithInputPaths = hadoopJob.getConfiguration
  val rdd = new WholeTextFileRDD(
    this,
    classOf[WholeTextFileInputFormat],
    classOf[Text],
    classOf[Text],
    confWithInputPaths,
    minPartitions)
  // Convert the Hadoop Text key/value pair (file path, file contents) to plain Strings.
  rdd.map { case (filePath, contents) => (filePath.toString, contents.toString) }.setName(path)
}
/**
 * Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file
 * (useful for binary data)
 *
 * For example, if you have the following files:
 * {{{
 *   hdfs://a-hdfs-path/part-00000
 *   hdfs://a-hdfs-path/part-00001
 *   ...
 *   hdfs://a-hdfs-path/part-nnnnn
 * }}}
 *
 * Do
 * `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`,
 *
 * then `rdd` contains
 * {{{
 *   (a-hdfs-path/part-00000, its content)
 *   (a-hdfs-path/part-00001, its content)
 *   ...
 *   (a-hdfs-path/part-nnnnn, its content)
 * }}}
 *
 * @note Small files are preferred; very large files may cause bad performance.
 * @note On some filesystems, `.../path/*` can be a more efficient way to read all files
 *       in a directory rather than `.../path/` or `.../path`
 * @note Partitioning is determined by data locality. This may result in too few partitions
 *       by default.
 *
 * @param path Directory to the input data files, the path can be comma separated paths as the
 *             list of inputs.
 * @param minPartitions A suggestion value of the minimal splitting number for input data.
 * @return RDD representing tuples of file path and corresponding file content
 */
def binaryFiles(
    path: String,
    minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope {
  assertNotStopped()
  val hadoopJob = NewHadoopJob.getInstance(hadoopConfiguration)
  // Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking
  // comma separated files as input. (see SPARK-7155)
  NewFileInputFormat.setInputPaths(hadoopJob, path)
  val confWithInputPaths = hadoopJob.getConfiguration
  val rdd = new BinaryFileRDD(
    this,
    classOf[StreamInputFormat],
    classOf[String],
    classOf[PortableDataStream],
    confWithInputPaths,
    minPartitions)
  rdd.setName(path)
}
/**
 * Load data from a flat binary file, assuming the length of each record is constant.
 *
 * @note We ensure that the byte array for each record in the resulting RDD
 * has the provided record length.
 *
 * @param path Directory to the input data files, the path can be comma separated paths as the
 *             list of inputs.
 * @param recordLength The length at which to split the records
 * @param conf Configuration for setting up the dataset.
 *
 * @return An RDD of data with values, represented as byte arrays
 */
def binaryRecords(
    path: String,
    recordLength: Int,
    conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope {
  assertNotStopped()
  // Tell the fixed-length input format how wide each record is.
  conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength)
  val records = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path,
    classOf[FixedLengthBinaryInputFormat],
    classOf[LongWritable],
    classOf[BytesWritable],
    conf = conf)
  records.map { case (_, writable) =>
    // copyBytes() trims the Writable's backing buffer to the record's exact length.
    val bytes = writable.copyBytes()
    assert(bytes.length == recordLength, "Byte array does not have correct length")
    bytes
  }
}
/**
 * Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other
 * necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable),
 * using the older MapReduce API (`org.apache.hadoop.mapred`).
 *
 * @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast.
 *             Therefore if you plan to reuse this conf to create multiple RDDs, you need to make
 *             sure you won't modify the conf. A safe approach is always creating a new conf for
 *             a new RDD.
 * @param inputFormatClass storage format of the data to be read
 * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter
 * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter
 * @param minPartitions Minimum number of Hadoop Splits to generate.
 * @return RDD of tuples of key and corresponding value
 *
 * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 */
def hadoopRDD[K, V](
    conf: JobConf,
    inputFormatClass: Class[_ <: InputFormat[K, V]],
    keyClass: Class[K],
    valueClass: Class[V],
    minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope {
  assertNotStopped()
  // This is a hack to enforce loading hdfs-site.xml.
  // See SPARK-11227 for details.
  FileSystem.getLocal(conf)
  // Add necessary security credentials to the JobConf before broadcasting it.
  // This mutates `conf` in place, which is why callers must not reuse it (see scaladoc).
  SparkHadoopUtil.get.addCredentials(conf)
  new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions)
}
/** Get an RDD for a Hadoop file with an arbitrary InputFormat
 *
 * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 * @param path directory to the input data files, the path can be comma separated paths
 * as a list of inputs
 * @param inputFormatClass storage format of the data to be read
 * @param keyClass `Class` of the key associated with the `inputFormatClass` parameter
 * @param valueClass `Class` of the value associated with the `inputFormatClass` parameter
 * @param minPartitions suggested minimum number of partitions for the resulting RDD
 * @return RDD of tuples of key and corresponding value
 */
def hadoopFile[K, V](
    path: String,
    inputFormatClass: Class[_ <: InputFormat[K, V]],
    keyClass: Class[K],
    valueClass: Class[V],
    minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope {
  assertNotStopped()
  // This is a hack to enforce loading hdfs-site.xml.
  // See SPARK-11227 for details.
  FileSystem.getLocal(hadoopConfiguration)
  // A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it
  // once rather than shipping a copy inside every task closure.
  val broadcastedConf = broadcast(new SerializableConfiguration(hadoopConfiguration))
  // Deferred so the input paths are applied to each task-side JobConf copy.
  val initJobConf = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path)
  new HadoopRDD(
    this,
    broadcastedConf,
    Some(initJobConf),
    inputFormatClass,
    keyClass,
    valueClass,
    minPartitions).setName(path)
}
/**
 * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys,
 * values and the InputFormat so that users don't need to pass them directly. Instead, callers
 * can just write, for example,
 * {{{
 * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions)
 * }}}
 *
 * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 * @param path directory to the input data files, the path can be comma separated paths
 * as a list of inputs
 * @param minPartitions suggested minimum number of partitions for the resulting RDD
 * @return RDD of tuples of key and corresponding value
 */
def hadoopFile[K, V, F <: InputFormat[K, V]]
    (path: String, minPartitions: Int)
    (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
  // Recover the runtime Class objects from the implicit class tags, then delegate to the
  // fully-explicit overload.
  val formatClass = fm.runtimeClass.asInstanceOf[Class[F]]
  val kClass = km.runtimeClass.asInstanceOf[Class[K]]
  val vClass = vm.runtimeClass.asInstanceOf[Class[V]]
  hadoopFile(path, formatClass, kClass, vClass, minPartitions)
}
/**
 * Smarter version of hadoopFile() that uses class tags to figure out the classes of keys,
 * values and the InputFormat so that users don't need to pass them directly. Instead, callers
 * can just write, for example,
 * {{{
 * val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path)
 * }}}
 *
 * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 * @param path directory to the input data files, the path can be comma separated paths as
 * a list of inputs
 * @return RDD of tuples of key and corresponding value
 */
def hadoopFile[K, V, F <: InputFormat[K, V]](path: String)
    (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
  // Convenience overload: same as the two-argument version with the default partition count.
  hadoopFile[K, V, F](path, defaultMinPartitions)
}
/**
 * Smarter version of `newApiHadoopFile` that uses class tags to figure out the classes of keys,
 * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that user
 * don't need to pass them directly. Instead, callers can just write, for example:
 * {{{
 * val file = sparkContext.newAPIHadoopFile[LongWritable, Text, TextInputFormat](path)
 * }}}
 *
 * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 * @param path directory to the input data files, the path can be comma separated paths
 * as a list of inputs
 * @return RDD of tuples of key and corresponding value
 */
def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]
    (path: String)
    (implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
  // Recover the runtime classes from the class tags and delegate to the explicit overload.
  newAPIHadoopFile(
    path,
    fm.runtimeClass.asInstanceOf[Class[F]],
    km.runtimeClass.asInstanceOf[Class[K]],
    vm.runtimeClass.asInstanceOf[Class[V]])
}
/**
 * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat
 * and extra configuration options to pass to the input format.
 *
 * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 * @param path directory to the input data files, the path can be comma separated paths
 * as a list of inputs
 * @param fClass storage format of the data to be read
 * @param kClass `Class` of the key associated with the `fClass` parameter
 * @param vClass `Class` of the value associated with the `fClass` parameter
 * @param conf Hadoop configuration
 * @return RDD of tuples of key and corresponding value
 */
def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]](
    path: String,
    fClass: Class[F],
    kClass: Class[K],
    vClass: Class[V],
    conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope {
  assertNotStopped()
  // This is a hack to enforce loading hdfs-site.xml.
  // See SPARK-11227 for details.
  FileSystem.getLocal(hadoopConfiguration)
  // The call to NewHadoopJob automatically adds security credentials to conf,
  // so we don't need to explicitly add them ourselves
  val hadoopJob = NewHadoopJob.getInstance(conf)
  // Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking
  // comma separated files as input. (see SPARK-7155)
  NewFileInputFormat.setInputPaths(hadoopJob, path)
  val confWithInputPaths = hadoopJob.getConfiguration
  new NewHadoopRDD(this, fClass, kClass, vClass, confWithInputPaths).setName(path)
}
/**
 * Get an RDD for a given Hadoop file with an arbitrary new API InputFormat
 * and extra configuration options to pass to the input format.
 *
 * @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast.
 *             Therefore if you plan to reuse this conf to create multiple RDDs, you need to make
 *             sure you won't modify the conf. A safe approach is always creating a new conf for
 *             a new RDD.
 * @param fClass storage format of the data to be read
 * @param kClass `Class` of the key associated with the `fClass` parameter
 * @param vClass `Class` of the value associated with the `fClass` parameter
 *
 * @note The defaulted `conf` parameter comes first, so callers passing the remaining
 * arguments positionally must still supply `conf` (or use named arguments). Kept as-is
 * for source compatibility.
 *
 * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 */
def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]](
    conf: Configuration = hadoopConfiguration,
    fClass: Class[F],
    kClass: Class[K],
    vClass: Class[V]): RDD[(K, V)] = withScope {
  assertNotStopped()
  // This is a hack to enforce loading hdfs-site.xml.
  // See SPARK-11227 for details.
  FileSystem.getLocal(conf)
  // Add necessary security credentials to the JobConf. Required to access secure HDFS.
  val jconf = new JobConf(conf)
  SparkHadoopUtil.get.addCredentials(jconf)
  new NewHadoopRDD(this, fClass, kClass, vClass, jconf)
}
/**
 * Get an RDD for a Hadoop SequenceFile with given key and value types.
 *
 * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 * @param path directory to the input data files, the path can be comma separated paths
 * as a list of inputs
 * @param keyClass `Class` of the key associated with `SequenceFileInputFormat`
 * @param valueClass `Class` of the value associated with `SequenceFileInputFormat`
 * @param minPartitions suggested minimum number of partitions for the resulting RDD
 * @return RDD of tuples of key and corresponding value
 */
def sequenceFile[K, V](path: String,
    keyClass: Class[K],
    valueClass: Class[V],
    minPartitions: Int
    ): RDD[(K, V)] = withScope {
  assertNotStopped()
  // SequenceFiles are always read through the old-API SequenceFileInputFormat.
  hadoopFile(path, classOf[SequenceFileInputFormat[K, V]], keyClass, valueClass, minPartitions)
}
/**
 * Get an RDD for a Hadoop SequenceFile with given key and value types.
 *
 * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 * @param path directory to the input data files, the path can be comma separated paths
 * as a list of inputs
 * @param keyClass `Class` of the key associated with `SequenceFileInputFormat`
 * @param valueClass `Class` of the value associated with `SequenceFileInputFormat`
 * @return RDD of tuples of key and corresponding value
 */
def sequenceFile[K, V](
    path: String,
    keyClass: Class[K],
    valueClass: Class[V]): RDD[(K, V)] = withScope {
  assertNotStopped()
  // Convenience overload using the default minimum partition count.
  sequenceFile(path, keyClass, valueClass, defaultMinPartitions)
}
/**
 * Version of sequenceFile() for types implicitly convertible to Writables through a
 * WritableConverter. For example, to access a SequenceFile where the keys are Text and the
 * values are IntWritable, you could simply write
 * {{{
 * sparkContext.sequenceFile[String, Int](path, ...)
 * }}}
 *
 * WritableConverters are provided in a somewhat strange way (by an implicit function) to support
 * both subclasses of Writable and types for which we define a converter (e.g. Int to
 * IntWritable). The most natural thing would've been to have implicit objects for the
 * converters, but then we couldn't have an object for every subclass of Writable (you can't
 * have a parameterized singleton object). We use functions instead to create a new converter
 * for the appropriate type. In addition, we pass the converter a ClassTag of its type to
 * allow it to figure out the Writable class to use in the subclass case.
 *
 * @note Because Hadoop's RecordReader class re-uses the same Writable object for each
 * record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
 * operation will create many references to the same object.
 * If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
 * copy them using a `map` function.
 * @param path directory to the input data files, the path can be comma separated paths
 * as a list of inputs
 * @param minPartitions suggested minimum number of partitions for the resulting RDD
 * @return RDD of tuples of key and corresponding value
 */
def sequenceFile[K, V]
    (path: String, minPartitions: Int = defaultMinPartitions)
    (implicit km: ClassTag[K], vm: ClassTag[V],
        kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = {
  withScope {
    assertNotStopped()
    // Clean the converter factories so their closures are serializable, then instantiate
    // one converter per call (converters cannot be singletons; see scaladoc above).
    val kc = clean(kcf)()
    val vc = clean(vcf)()
    val format = classOf[SequenceFileInputFormat[Writable, Writable]]
    // Read the raw Writables using the Writable classes the converters report ...
    val writables = hadoopFile(path, format,
      kc.writableClass(km).asInstanceOf[Class[Writable]],
      vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions)
    // ... and convert each record to the user-facing K/V types.
    writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) }
  }
}
/**
 * Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and
 * BytesWritable values that contain a serialized partition. This is still an experimental
 * storage format and may not be supported exactly as is in future Spark releases. It will also
 * be pretty slow if you use the default serializer (Java serialization),
 * though the nice thing about it is that there's very little effort required to save arbitrary
 * objects.
 *
 * @param path directory to the input data files, the path can be comma separated paths
 * as a list of inputs
 * @param minPartitions suggested minimum number of partitions for the resulting RDD
 * @return RDD representing deserialized data from the file(s)
 */
def objectFile[T: ClassTag](
    path: String,
    minPartitions: Int = defaultMinPartitions): RDD[T] = withScope {
  assertNotStopped()
  // Each record value is a serialized Array[T]; deserialize and flatten the arrays.
  val raw = sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions)
  raw.flatMap { case (_, bytes) =>
    Utils.deserialize[Array[T]](bytes.getBytes, Utils.getContextOrSparkClassLoader)
  }
}
/**
 * Load an RDD from checkpoint files previously written with reliable checkpointing.
 *
 * @param path directory containing the checkpoint files of a single RDD
 * @return RDD reconstructed from the checkpoint data
 */
protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope {
  new ReliableCheckpointRDD[T](this, path)
}
/** Build the union of a list of RDDs. */
def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope {
  // Empty RDDs contribute nothing; dropping them also keeps the partitioner check honest.
  val nonEmptyRdds = rdds.filter(_.partitions.nonEmpty)
  val distinctPartitioners = nonEmptyRdds.flatMap(_.partitioner).toSet
  val allSharePartitioner =
    nonEmptyRdds.forall(_.partitioner.isDefined) && distinctPartitioners.size == 1
  // If every input shares the same partitioner we can preserve partitioning awareness.
  if (allSharePartitioner) {
    new PartitionerAwareUnionRDD(this, nonEmptyRdds)
  } else {
    new UnionRDD(this, nonEmptyRdds)
  }
}
/** Build the union of a list of RDDs passed as variable-length arguments. */
def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope {
  // Prepend the mandatory first RDD and delegate to the Seq overload.
  union(first +: rest)
}
/** Get an RDD that has no partitions or elements. */
def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this)
// Methods for creating shared variables
/**
 * Register the given accumulator.
 *
 * @note Accumulators must be registered before use, or it will throw exception.
 */
def register(acc: AccumulatorV2[_, _]): Unit = {
  // Registration binds the accumulator to this SparkContext.
  acc.register(this)
}
/**
 * Register the given accumulator with given name.
 *
 * @note Accumulators must be registered before use, or it will throw exception.
 */
def register(acc: AccumulatorV2[_, _], name: String): Unit = {
  // Option(name) maps a null name to None rather than failing.
  acc.register(this, name = Option(name))
}
/**
 * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`.
 */
def longAccumulator: LongAccumulator = {
  val accumulator = new LongAccumulator
  register(accumulator)
  accumulator
}
/**
 * Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`.
 */
def longAccumulator(name: String): LongAccumulator = {
  val accumulator = new LongAccumulator
  register(accumulator, name)
  accumulator
}
/**
 * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`.
 */
def doubleAccumulator: DoubleAccumulator = {
  val accumulator = new DoubleAccumulator
  register(accumulator)
  accumulator
}
/**
 * Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`.
 */
def doubleAccumulator(name: String): DoubleAccumulator = {
  val accumulator = new DoubleAccumulator
  register(accumulator, name)
  accumulator
}
/**
 * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates
 * inputs by adding them into the list.
 */
def collectionAccumulator[T]: CollectionAccumulator[T] = {
  val accumulator = new CollectionAccumulator[T]
  register(accumulator)
  accumulator
}
/**
 * Create and register a `CollectionAccumulator`, which starts with empty list and accumulates
 * inputs by adding them into the list.
 */
def collectionAccumulator[T](name: String): CollectionAccumulator[T] = {
  val accumulator = new CollectionAccumulator[T]
  register(accumulator, name)
  accumulator
}
/**
 * Broadcast a read-only variable to the cluster, returning a
 * [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions.
 * The variable will be sent to each cluster only once.
 *
 * @param value value to broadcast to the Spark nodes
 * @return `Broadcast` object, a read-only variable cached on each machine
 */
def broadcast[T: ClassTag](value: T): Broadcast[T] = {
  assertNotStopped()
  // Broadcasting an RDD's handle is almost always a mistake: the data lives in the cluster,
  // not in the handle. Force callers to collect() first.
  require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass),
    "Can not directly broadcast RDDs; instead, call collect() and broadcast the result.")
  val broadcastVar = env.broadcastManager.newBroadcast[T](value, isLocal)
  logInfo("Created broadcast " + broadcastVar.id + " from " + getCallSite.shortForm)
  // Let the ContextCleaner reclaim the broadcast's blocks once it is garbage collected.
  cleaner.foreach(_.registerBroadcastForCleanup(broadcastVar))
  broadcastVar
}
/**
 * Add a file to be downloaded with this Spark job on every node.
 *
 * If a file is added during execution, it will not be available until the next TaskSet starts.
 *
 * @param path can be either a local file, a file in HDFS (or other Hadoop-supported
 * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
 * use `SparkFiles.get(fileName)` to find its download location.
 *
 * @note A path can be added only once. Subsequent additions of the same path are ignored.
 */
def addFile(path: String): Unit = {
  // Non-recursive by default; directories require the two-argument overload.
  addFile(path, recursive = false)
}
/**
 * Returns a list of file paths that are added to resources.
 */
def listFiles(): Seq[String] = addedFiles.keySet.toSeq
/**
 * Add a file to be downloaded with this Spark job on every node.
 *
 * If a file is added during execution, it will not be available until the next TaskSet starts.
 *
 * @param path can be either a local file, a file in HDFS (or other Hadoop-supported
 * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
 * use `SparkFiles.get(fileName)` to find its download location.
 * @param recursive if true, a directory can be given in `path`. Currently directories are
 * only supported for Hadoop-supported filesystems.
 *
 * @note A path can be added only once. Subsequent additions of the same path are ignored.
 */
def addFile(path: String, recursive: Boolean): Unit = {
  val uri = new Path(path).toUri
  // Normalize the URI: no scheme means a driver-local file path; "local" scheme files
  // are already present on every node, so there is nothing to distribute.
  val schemeCorrectedURI = uri.getScheme match {
    case null => new File(path).getCanonicalFile.toURI
    case "local" =>
      logWarning("File with 'local' scheme is not supported to add to file server, since " +
        "it is already available on every node.")
      return
    case _ => uri
  }
  val hadoopPath = new Path(schemeCorrectedURI)
  val scheme = schemeCorrectedURI.getScheme
  if (!Array("http", "https", "ftp").contains(scheme)) {
    // Hadoop-accessible path: validate directory handling up front.
    val fs = hadoopPath.getFileSystem(hadoopConfiguration)
    val isDir = fs.getFileStatus(hadoopPath).isDirectory
    if (!isLocal && scheme == "file" && isDir) {
      throw new SparkException(s"addFile does not support local directories when not running " +
        "local mode.")
    }
    if (!recursive && isDir) {
      throw new SparkException(s"Added file $hadoopPath is a directory and recursive is not " +
        "turned on.")
    }
  } else {
    // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies
    Utils.validateURL(uri)
  }
  // Key under which executors will fetch the file: driver-local files are served through
  // the RPC file server; everything else is fetched directly from its URI.
  val key = if (!isLocal && scheme == "file") {
    env.rpcEnv.fileServer.addFile(new File(uri.getPath))
  } else {
    if (uri.getScheme == null) {
      schemeCorrectedURI.toString
    } else {
      path
    }
  }
  val timestamp = System.currentTimeMillis
  // putIfAbsent makes re-adding the same path a no-op (logged below).
  if (addedFiles.putIfAbsent(key, timestamp).isEmpty) {
    logInfo(s"Added file $path at $key with timestamp $timestamp")
    // Fetch the file locally so that closures which are run on the driver can still use the
    // SparkFiles API to access files.
    Utils.fetchFile(uri.toString, new File(SparkFiles.getRootDirectory()), conf,
      env.securityManager, hadoopConfiguration, timestamp, useCache = false)
    postEnvironmentUpdate()
  } else {
    logWarning(s"The path $path has been added already. Overwriting of added paths " +
      "is not supported in the current version.")
  }
}
/**
 * :: DeveloperApi ::
 * Register a listener to receive up-calls from events that happen during execution.
 */
@DeveloperApi
def addSparkListener(listener: SparkListenerInterface): Unit = {
  // Listeners added here go on the shared queue of the async listener bus.
  listenerBus.addToSharedQueue(listener)
}
/**
 * :: DeveloperApi ::
 * Deregister the listener from Spark's listener bus.
 */
@DeveloperApi
def removeSparkListener(listener: SparkListenerInterface): Unit = {
  listenerBus.removeListener(listener)
}
/**
 * Return the identifiers of all executors currently known to the scheduler backend,
 * or `Nil` when the backend does not implement [[ExecutorAllocationClient]].
 */
private[spark] def getExecutorIds(): Seq[String] = {
  schedulerBackend match {
    case b: ExecutorAllocationClient =>
      b.getExecutorIds()
    case _ =>
      // Message previously said "Requesting executors", copy-pasted from the
      // request/kill methods; this method only lists executor ids.
      logWarning("Listing executor ids is not supported by current scheduler.")
      Nil
  }
}
/**
 * Get the max number of tasks that can be concurrent launched currently.
 * Note that please don't cache the value returned by this method, because the number can change
 * due to add/remove executors.
 *
 * @return The max number of tasks that can be concurrent launched currently.
 */
private[spark] def maxNumConcurrentTasks(): Int = schedulerBackend.maxNumConcurrentTasks()
/**
 * Update the cluster manager on our scheduling needs. Three bits of information are included
 * to help it make decisions.
 * @param numExecutors The total number of executors we'd like to have. The cluster manager
 *                     shouldn't kill any running executor to reach this number, but,
 *                     if all existing executors were to die, this is the number of executors
 *                     we'd want to be allocated.
 * @param localityAwareTasks The number of tasks in all active stages that have a locality
 *                           preferences. This includes running, pending, and completed tasks.
 * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages
 *                             that would like to like to run on that host.
 *                             This includes running, pending, and completed tasks.
 * @return whether the request is acknowledged by the cluster manager.
 */
@DeveloperApi
def requestTotalExecutors(
    numExecutors: Int,
    localityAwareTasks: Int,
    hostToLocalTaskCount: scala.collection.immutable.Map[String, Int]
    ): Boolean = {
  schedulerBackend match {
    case allocationClient: ExecutorAllocationClient =>
      allocationClient.requestTotalExecutors(
        numExecutors, localityAwareTasks, hostToLocalTaskCount)
    case _ =>
      // Backends without dynamic allocation support cannot honor the request.
      logWarning("Requesting executors is not supported by current scheduler.")
      false
  }
}
/**
 * :: DeveloperApi ::
 * Request an additional number of executors from the cluster manager.
 * @return whether the request is received.
 */
@DeveloperApi
def requestExecutors(numAdditionalExecutors: Int): Boolean = {
  schedulerBackend match {
    case allocationClient: ExecutorAllocationClient =>
      allocationClient.requestExecutors(numAdditionalExecutors)
    case _ =>
      // Backends without dynamic allocation support cannot honor the request.
      logWarning("Requesting executors is not supported by current scheduler.")
      false
  }
}
/**
 * :: DeveloperApi ::
 * Request that the cluster manager kill the specified executors.
 *
 * This is not supported when dynamic allocation is turned on.
 *
 * @note This is an indication to the cluster manager that the application wishes to adjust
 * its resource usage downwards. If the application wishes to replace the executors it kills
 * through this method with new ones, it should follow up explicitly with a call to
 * {{SparkContext#requestExecutors}}.
 *
 * @return whether the request is received.
 */
@DeveloperApi
def killExecutors(executorIds: Seq[String]): Boolean = {
  schedulerBackend match {
    case allocationClient: ExecutorAllocationClient =>
      // Manual kills would fight with the dynamic allocation manager's own decisions.
      require(executorAllocationManager.isEmpty,
        "killExecutors() unsupported with Dynamic Allocation turned on")
      // Shrink the executor target so the cluster manager does not immediately replace them.
      val killed = allocationClient.killExecutors(
        executorIds, adjustTargetNumExecutors = true, countFailures = false, force = true)
      killed.nonEmpty
    case _ =>
      logWarning("Killing executors is not supported by current scheduler.")
      false
  }
}
/**
 * :: DeveloperApi ::
 * Request that the cluster manager kill the specified executor.
 *
 * @note This is an indication to the cluster manager that the application wishes to adjust
 * its resource usage downwards. If the application wishes to replace the executor it kills
 * through this method with a new one, it should follow up explicitly with a call to
 * {{SparkContext#requestExecutors}}.
 *
 * @return whether the request is received.
 */
@DeveloperApi
def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId))
/**
 * Request that the cluster manager kill the specified executor without adjusting the
 * application resource requirements.
 *
 * The effect is that a new executor will be launched in place of the one killed by
 * this request. This assumes the cluster manager will automatically and eventually
 * fulfill all missing application resource requests.
 *
 * @note The replace is by no means guaranteed; another application on the same cluster
 * can steal the window of opportunity and acquire this application's resources in the
 * mean time.
 *
 * @return whether the request is received.
 */
private[spark] def killAndReplaceExecutor(executorId: String): Boolean = {
  schedulerBackend match {
    case client: ExecutorAllocationClient =>
      // Keep the executor target unchanged (so a replacement is launched) and count
      // this kill as a failure for the executor.
      client.killExecutors(Seq(executorId), adjustTargetNumExecutors = false,
        countFailures = true, force = true).nonEmpty
    case _ =>
      logWarning("Killing executors is not supported by current scheduler.")
      false
  }
}
/** The version of Spark on which this application is running (e.g. "2.4.0"). */
def version: String = SPARK_VERSION
/**
 * Return a map keyed by "host:port" of each block manager, whose value is a pair of
 * (maximum memory available for caching, memory currently remaining for caching).
 */
def getExecutorMemoryStatus: Map[String, (Long, Long)] = {
  assertNotStopped()
  env.blockManager.master.getMemoryStatus.map { case (blockManagerId, mem) =>
    (s"${blockManagerId.host}:${blockManagerId.port}", mem)
  }
}
/**
 * :: DeveloperApi ::
 * Return information about what RDDs are cached, if they are in mem or on disk, how much space
 * they take, etc. Equivalent to the filtered variant with an all-pass filter.
 */
@DeveloperApi
def getRDDStorageInfo: Array[RDDInfo] = getRDDStorageInfo(_ => true)
/**
 * Build storage information for the persisted RDDs accepted by `filter`, filling in the
 * live cache statistics from the status store, and keep only the RDDs that are actually
 * cached somewhere.
 */
private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = {
  assertNotStopped()
  val infos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray
  infos.foreach { info =>
    // The status store may not know about an RDD yet; default its stats to zero.
    val stored = statusStore.asOption(statusStore.rdd(info.id))
    info.numCachedPartitions = stored.fold(0)(_.numCachedPartitions)
    info.memSize = stored.fold(0L)(_.memoryUsed)
    info.diskSize = stored.fold(0L)(_.diskUsed)
  }
  infos.filter(_.isCached)
}
/**
 * Returns an immutable snapshot map (RDD id -> RDD) of RDDs that have marked themselves
 * as persistent via cache() call.
 *
 * @note This does not necessarily mean the caching or computation was successful.
 */
def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap
/**
 * :: DeveloperApi ::
 * Return the top-level pools of the fair scheduler.
 */
@DeveloperApi
def getAllPools: Seq[Schedulable] = {
  assertNotStopped()
  // TODO(xiajunluan): We should take nested pools into account
  taskScheduler.rootPool.schedulableQueue.asScala.toSeq
}
/**
 * :: DeveloperApi ::
 * Return the pool associated with the given name, if one exists.
 */
@DeveloperApi
def getPoolForName(pool: String): Option[Schedulable] = {
  assertNotStopped()
  // The underlying map returns null for an unknown pool; Option(...) maps that to None.
  Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool))
}
/**
 * Return the current scheduling mode (e.g. FIFO or FAIR) reported by the task scheduler.
 */
def getSchedulingMode: SchedulingMode.SchedulingMode = {
  assertNotStopped()
  taskScheduler.schedulingMode
}
/**
 * Gets the locality information associated with the partition in a particular rdd.
 * Thin delegate to the DAG scheduler's locality computation.
 *
 * @param rdd of interest
 * @param partition to be looked up for locality
 * @return list of preferred locations for the partition
 */
private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = {
  dagScheduler.getPreferredLocs(rdd, partition)
}
/**
 * Register an RDD to be persisted in memory and/or disk storage.
 * Records the RDD in the driver-side map keyed by RDD id.
 */
private[spark] def persistRDD(rdd: RDD[_]): Unit = {
  persistentRdds(rdd.id) = rdd
}
/**
 * Unpersist an RDD from memory and/or disk storage: drops its blocks from the block
 * managers, forgets it driver-side, and notifies listeners.
 *
 * @param blocking whether to wait for block removal to complete before returning
 */
private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = {
  env.blockManager.master.removeRdd(rddId, blocking)
  persistentRdds.remove(rddId)
  listenerBus.post(SparkListenerUnpersistRDD(rddId))
}
/**
 * Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future.
 *
 * If a jar is added during execution, it will not be available until the next TaskSet starts.
 *
 * @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems),
 * an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node.
 *
 * @note A path can be added only once. Subsequent additions of the same path are ignored.
 */
def addJar(path: String): Unit = {
  // Serve a driver-local jar through the RPC file server so executors can fetch it.
  // Returns the served URI, or null (with a logged error) on failure.
  def addLocalJarFile(file: File): String = {
    try {
      if (!file.exists()) {
        throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found")
      }
      if (file.isDirectory) {
        throw new IllegalArgumentException(
          s"Directory ${file.getAbsoluteFile} is not allowed for addJar")
      }
      env.rpcEnv.fileServer.addJar(file)
    } catch {
      case NonFatal(e) =>
        logError(s"Failed to add $path to Spark environment", e)
        null
    }
  }
  // Validate that a remote path exists and is a regular file. http/https/ftp URIs are
  // accepted without a check (no Hadoop FileSystem for them); returns the path, or null.
  def checkRemoteJarFile(path: String): String = {
    val hadoopPath = new Path(path)
    val scheme = hadoopPath.toUri.getScheme
    if (!Array("http", "https", "ftp").contains(scheme)) {
      try {
        val fs = hadoopPath.getFileSystem(hadoopConfiguration)
        if (!fs.exists(hadoopPath)) {
          throw new FileNotFoundException(s"Jar ${path} not found")
        }
        if (fs.isDirectory(hadoopPath)) {
          throw new IllegalArgumentException(
            s"Directory ${path} is not allowed for addJar")
        }
        path
      } catch {
        case NonFatal(e) =>
          logError(s"Failed to add $path to Spark environment", e)
          null
      }
    } else {
      path
    }
  }
  if (path == null || path.isEmpty) {
    logWarning("null or empty path specified as parameter to addJar")
  } else {
    // For local paths with backslashes on Windows, URI throws an exception.
    // Fix: test for a single backslash ("\\" in source) — the previous check looked for
    // a doubled backslash and missed ordinary Windows paths like C:\foo\bar.jar.
    val key = if (path.contains("\\")) {
      addLocalJarFile(new File(path))
    } else {
      val uri = new Path(path).toUri
      // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies
      Utils.validateURL(uri)
      uri.getScheme match {
        // A JAR file which exists only on the driver node
        case null =>
          // SPARK-22585 path without schema is not url encoded
          addLocalJarFile(new File(uri.getPath))
        // A JAR file which exists only on the driver node
        case "file" => addLocalJarFile(new File(uri.getPath))
        // A JAR file which exists locally on every worker node
        case "local" => "file:" + uri.getPath
        case _ => checkRemoteJarFile(path)
      }
    }
    if (key != null) {
      val timestamp = System.currentTimeMillis
      // putIfAbsent makes repeated addJar calls with the same key a no-op.
      if (addedJars.putIfAbsent(key, timestamp).isEmpty) {
        logInfo(s"Added JAR $path at $key with timestamp $timestamp")
        postEnvironmentUpdate()
      } else {
        logWarning(s"The jar $path has been added already. Overwriting of added jars " +
          "is not supported in the current version.")
      }
    }
  }
}
/**
 * Returns the list of jar paths/URIs that have been registered via `addJar`.
 */
def listJars(): Seq[String] = addedJars.keySet.toSeq
/**
 * When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark
 * may wait for some internal threads to finish. It's better to use this method to stop
 * SparkContext instead: it runs stop() on a dedicated daemon thread.
 */
private[spark] def stopInNewThread(): Unit = {
  val stopper = new Thread("stop-spark-context") {
    override def run(): Unit = {
      try {
        SparkContext.this.stop()
      } catch {
        case e: Throwable =>
          // Log before rethrowing so the failure is visible even if the thread dies.
          logError(e.getMessage, e)
          throw e
      }
    }
  }
  stopper.setDaemon(true)
  stopper.start()
}
/**
 * Shut down the SparkContext. Safe to call more than once: only the first caller performs
 * the teardown. Must not be called from a listener-bus thread, which would deadlock.
 * Components are stopped in a deliberate order (events first, environment last); each step
 * is wrapped so one failure does not abort the rest of the shutdown.
 */
def stop(): Unit = {
  if (LiveListenerBus.withinListenerThread.value) {
    throw new SparkException(s"Cannot stop SparkContext within listener bus thread.")
  }
  // Use the stopping variable to ensure no contention for the stop scenario.
  // Still track the stopped variable for use elsewhere in the code.
  if (!stopped.compareAndSet(false, true)) {
    logInfo("SparkContext already stopped.")
    return
  }
  // Unhook the JVM shutdown hook so it does not try to stop us a second time.
  if (_shutdownHookRef != null) {
    ShutdownHookManager.removeShutdownHook(_shutdownHookRef)
  }
  // Post the application-end event while the listener bus is still running.
  if (listenerBus != null) {
    Utils.tryLogNonFatalError {
      postApplicationEnd()
    }
  }
  Utils.tryLogNonFatalError {
    _driverLogger.foreach(_.stop())
  }
  Utils.tryLogNonFatalError {
    _ui.foreach(_.stop())
  }
  // Flush metrics before the environment is torn down.
  if (env != null) {
    Utils.tryLogNonFatalError {
      env.metricsSystem.report()
    }
  }
  Utils.tryLogNonFatalError {
    _cleaner.foreach(_.stop())
  }
  Utils.tryLogNonFatalError {
    _executorAllocationManager.foreach(_.stop())
  }
  if (_dagScheduler != null) {
    Utils.tryLogNonFatalError {
      _dagScheduler.stop()
    }
    _dagScheduler = null
  }
  // Stop the bus only if it was ever started; flips the flag back so this is idempotent.
  if (_listenerBusStarted) {
    Utils.tryLogNonFatalError {
      listenerBus.stop()
      _listenerBusStarted = false
    }
  }
  Utils.tryLogNonFatalError {
    _plugins.foreach(_.shutdown())
  }
  Utils.tryLogNonFatalError {
    _eventLogger.foreach(_.stop())
  }
  if (_heartbeater != null) {
    Utils.tryLogNonFatalError {
      _heartbeater.stop()
    }
    _heartbeater = null
  }
  if (_shuffleDriverComponents != null) {
    Utils.tryLogNonFatalError {
      _shuffleDriverComponents.cleanupApplication()
    }
  }
  if (env != null && _heartbeatReceiver != null) {
    Utils.tryLogNonFatalError {
      env.rpcEnv.stop(_heartbeatReceiver)
    }
  }
  Utils.tryLogNonFatalError {
    _progressBar.foreach(_.stop())
  }
  // Return custom resources to the standalone master if we acquired them as a client.
  if (isClientStandalone) {
    releaseResources(_conf, SPARK_DRIVER_PREFIX, _resources, Utils.getProcessId)
  }
  _taskScheduler = null
  // TODO: Cache.stop()?
  if (_env != null) {
    Utils.tryLogNonFatalError {
      _env.stop()
    }
    SparkEnv.set(null)
  }
  if (_statusStore != null) {
    _statusStore.close()
  }
  // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even this
  // `SparkContext` is stopped.
  localProperties.remove()
  // Unset YARN mode system env variable, to allow switching between cluster types.
  SparkContext.clearActiveContext()
  logInfo("Successfully stopped SparkContext")
}
/**
 * Get Spark's home location from either a value set through the constructor,
 * or the spark.home Java property, or the SPARK_HOME environment variable
 * (in that order of preference). If neither of these is set, return None.
 */
private[spark] def getSparkHome(): Option[String] = {
  conf.getOption("spark.home").orElse(sys.env.get("SPARK_HOME"))
}
/**
 * Set the thread-local property for overriding the call sites
 * of actions and RDDs. Only the short form is overridden here.
 */
def setCallSite(shortCallSite: String): Unit = {
  setLocalProperty(CallSite.SHORT_FORM, shortCallSite)
}
/**
 * Set the thread-local property for overriding the call sites
 * of actions and RDDs. Overrides both the short and long forms.
 */
private[spark] def setCallSite(callSite: CallSite): Unit = {
  setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm)
  setLocalProperty(CallSite.LONG_FORM, callSite.longForm)
}
/**
 * Clear the thread-local property for overriding the call sites
 * of actions and RDDs (both the short and long forms).
 */
def clearCallSite(): Unit = {
  Seq(CallSite.SHORT_FORM, CallSite.LONG_FORM).foreach(setLocalProperty(_, null))
}
/**
 * Capture the current user callsite and return a formatted version for printing. If the user
 * has overridden the call site using `setCallSite()`, this will return the user's version.
 */
private[spark] def getCallSite(): CallSite = {
  // Computing the real call site walks the stack, so only do it when no override is set.
  lazy val default = Utils.getCallSite()
  val short = Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(default.shortForm)
  val long = Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(default.longForm)
  CallSite(short, long)
}
/**
 * Run a function on a given set of partitions in an RDD and pass the results to the given
 * handler function. This is the main entry point for all actions in Spark.
 *
 * @param rdd target RDD to run tasks on
 * @param func a function to run on each partition of the RDD
 * @param partitions set of partitions to run on; some jobs may not want to compute on all
 * partitions of the target RDD, e.g. for operations like `first()`
 * @param resultHandler callback to pass each result to
 */
def runJob[T, U: ClassTag](
    rdd: RDD[T],
    func: (TaskContext, Iterator[T]) => U,
    partitions: Seq[Int],
    resultHandler: (Int, U) => Unit): Unit = {
  if (stopped.get()) {
    throw new IllegalStateException("SparkContext has been shutdown")
  }
  val callSite = getCallSite
  // Clean the closure eagerly so serialization problems surface on the driver.
  val cleanedFunc = clean(func)
  logInfo("Starting job: " + callSite.shortForm)
  if (conf.getBoolean("spark.logLineage", false)) {
    // Fix: emit a real newline before the debug string (the message previously
    // contained a literal backslash-n).
    logInfo("RDD's recursive dependencies:\n" + rdd.toDebugString)
  }
  dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get)
  progressBar.foreach(_.finishAll())
  rdd.doCheckpoint()
}
/**
 * Run a function on a given set of partitions in an RDD and return the results as an array.
 * The function that is run against each partition additionally takes `TaskContext` argument.
 *
 * @param rdd target RDD to run tasks on
 * @param func a function to run on each partition of the RDD
 * @param partitions set of partitions to run on; some jobs may not want to compute on all
 * partitions of the target RDD, e.g. for operations like `first()`
 * @return in-memory collection with a result of the job (each collection element will contain
 * a result from one partition)
 */
def runJob[T, U: ClassTag](
    rdd: RDD[T],
    func: (TaskContext, Iterator[T]) => U,
    partitions: Seq[Int]): Array[U] = {
  // Collect each partition's result into its slot as results arrive.
  val out = new Array[U](partitions.size)
  runJob[T, U](rdd, func, partitions, (i, value) => out(i) = value)
  out
}
/**
 * Run a function on a given set of partitions in an RDD and return the results as an array.
 *
 * @param rdd target RDD to run tasks on
 * @param func a function to run on each partition of the RDD
 * @param partitions set of partitions to run on; some jobs may not want to compute on all
 * partitions of the target RDD, e.g. for operations like `first()`
 * @return in-memory collection with a result of the job (each collection element will contain
 * a result from one partition)
 */
def runJob[T, U: ClassTag](
    rdd: RDD[T],
    func: Iterator[T] => U,
    partitions: Seq[Int]): Array[U] = {
  val cleaned = clean(func)
  // Adapt the iterator-only function to the (TaskContext, Iterator) shape.
  runJob(rdd, (_: TaskContext, it: Iterator[T]) => cleaned(it), partitions)
}
/**
 * Run a job on all partitions in an RDD and return the results in an array. The function
 * that is run against each partition additionally takes `TaskContext` argument.
 *
 * @param rdd target RDD to run tasks on
 * @param func a function to run on each partition of the RDD
 * @return in-memory collection with a result of the job (each collection element will contain
 * a result from one partition)
 */
def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = {
  runJob(rdd, func, rdd.partitions.indices)
}
/**
 * Run a job on all partitions in an RDD and return the results in an array.
 *
 * @param rdd target RDD to run tasks on
 * @param func a function to run on each partition of the RDD
 * @return in-memory collection with a result of the job (each collection element will contain
 * a result from one partition)
 */
def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = {
  runJob(rdd, func, rdd.partitions.indices)
}
/**
 * Run a job on all partitions in an RDD and pass the results to a handler function. The function
 * that is run against each partition additionally takes `TaskContext` argument.
 *
 * @param rdd target RDD to run tasks on
 * @param processPartition a function to run on each partition of the RDD
 * @param resultHandler callback to pass each result to
 */
def runJob[T, U: ClassTag](
    rdd: RDD[T],
    processPartition: (TaskContext, Iterator[T]) => U,
    resultHandler: (Int, U) => Unit): Unit = {
  runJob[T, U](rdd, processPartition, rdd.partitions.indices, resultHandler)
}
/**
 * Run a job on all partitions in an RDD and pass the results to a handler function.
 *
 * @param rdd target RDD to run tasks on
 * @param processPartition a function to run on each partition of the RDD
 * @param resultHandler callback to pass each result to
 */
def runJob[T, U: ClassTag](
    rdd: RDD[T],
    processPartition: Iterator[T] => U,
    resultHandler: (Int, U) => Unit): Unit = {
  // Ignore the TaskContext; only the iterator is needed by the caller's function.
  val withContext = (_: TaskContext, iter: Iterator[T]) => processPartition(iter)
  runJob[T, U](rdd, withContext, rdd.partitions.indices, resultHandler)
}
/**
 * :: DeveloperApi ::
 * Run a job that can return approximate results.
 *
 * @param rdd target RDD to run tasks on
 * @param func a function to run on each partition of the RDD
 * @param evaluator `ApproximateEvaluator` to receive the partial results
 * @param timeout maximum time to wait for the job, in milliseconds
 * @return partial result (how partial depends on whether the job was finished before or
 * after timeout)
 */
@DeveloperApi
def runApproximateJob[T, U, R](
    rdd: RDD[T],
    func: (TaskContext, Iterator[T]) => U,
    evaluator: ApproximateEvaluator[U, R],
    timeout: Long): PartialResult[R] = {
  assertNotStopped()
  val callSite = getCallSite
  logInfo("Starting job: " + callSite.shortForm)
  // Wall-clock the job for the completion log line below.
  val startNanos = System.nanoTime
  val cleanedFunc = clean(func)
  val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout,
    localProperties.get)
  val elapsedSeconds = (System.nanoTime - startNanos) / 1e9
  logInfo(
    "Job finished: " + callSite.shortForm + ", took " + elapsedSeconds + " s")
  result
}
/**
 * Submit a job for execution and return a FutureJob holding the result.
 *
 * @param rdd target RDD to run tasks on
 * @param processPartition a function to run on each partition of the RDD
 * @param partitions set of partitions to run on; some jobs may not want to compute on all
 * partitions of the target RDD, e.g. for operations like `first()`
 * @param resultHandler callback to pass each result to
 * @param resultFunc function to be executed when the result is ready
 */
def submitJob[T, U, R](
    rdd: RDD[T],
    processPartition: Iterator[T] => U,
    partitions: Seq[Int],
    resultHandler: (Int, U) => Unit,
    resultFunc: => R): SimpleFutureAction[R] = {
  assertNotStopped()
  val cleanedFunc = clean(processPartition)
  val callSite = getCallSite
  // Submit asynchronously; the waiter tracks job completion for the returned future.
  val waiter = dagScheduler.submitJob(
    rdd,
    (_: TaskContext, iter: Iterator[T]) => cleanedFunc(iter),
    partitions,
    callSite,
    resultHandler,
    localProperties.get)
  new SimpleFutureAction(waiter, resultFunc)
}
/**
 * Submit a map stage for execution. This is currently an internal API only, but might be
 * promoted to DeveloperApi in the future.
 */
private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C])
  : SimpleFutureAction[MapOutputStatistics] = {
  assertNotStopped()
  val callSite = getCallSite()
  // The callback below assigns into this var when the stage completes.
  // NOTE(review): this appears to rely on SimpleFutureAction evaluating its second
  // argument lazily (after the waiter completes) — otherwise `result` would always be
  // read as null here. Confirm against SimpleFutureAction's constructor signature.
  var result: MapOutputStatistics = null
  val waiter = dagScheduler.submitMapStage(
    dependency,
    (r: MapOutputStatistics) => { result = r },
    callSite,
    localProperties.get)
  new SimpleFutureAction[MapOutputStatistics](waiter, result)
}
/**
 * Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup`
 * for more information. Delegates to the DAG scheduler.
 */
def cancelJobGroup(groupId: String): Unit = {
  assertNotStopped()
  dagScheduler.cancelJobGroup(groupId)
}
/** Cancel all jobs that have been scheduled or are running. */
def cancelAllJobs(): Unit = {
  assertNotStopped()
  dagScheduler.cancelAllJobs()
}
/**
 * Cancel a given job if it's scheduled or running.
 *
 * @param jobId the job ID to cancel
 * @param reason optional reason for cancellation; null is treated as "no reason"
 * @note Throws `InterruptedException` if the cancel message cannot be sent
 */
def cancelJob(jobId: Int, reason: String): Unit = {
  dagScheduler.cancelJob(jobId, Option(reason))
}
/**
 * Cancel a given job if it's scheduled or running, with no cancellation reason.
 *
 * @param jobId the job ID to cancel
 * @note Throws `InterruptedException` if the cancel message cannot be sent
 */
def cancelJob(jobId: Int): Unit = {
  dagScheduler.cancelJob(jobId, None)
}
/**
 * Cancel a given stage and all jobs associated with it.
 *
 * @param stageId the stage ID to cancel
 * @param reason reason for cancellation; null is treated as "no reason"
 * @note Throws `InterruptedException` if the cancel message cannot be sent
 */
def cancelStage(stageId: Int, reason: String): Unit = {
  dagScheduler.cancelStage(stageId, Option(reason))
}
/**
 * Cancel a given stage and all jobs associated with it, with no cancellation reason.
 *
 * @param stageId the stage ID to cancel
 * @note Throws `InterruptedException` if the cancel message cannot be sent
 */
def cancelStage(stageId: Int): Unit = {
  dagScheduler.cancelStage(stageId, None)
}
/**
 * Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI
 * or through SparkListener.onTaskStart.
 *
 * @param taskId the task ID to kill. This id uniquely identifies the task attempt.
 * @param interruptThread whether to interrupt the thread running the task.
 * @param reason the reason for killing the task, which should be a short string. If a task
 * is killed multiple times with different reasons, only one reason will be reported.
 *
 * @return Whether the task was successfully killed.
 */
def killTaskAttempt(
    taskId: Long,
    interruptThread: Boolean = true,
    reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = {
  dagScheduler.killTaskAttempt(taskId, interruptThread, reason)
}
/**
 * Clean a closure to make it ready to be serialized and sent to tasks
 * (removes unreferenced variables in $outer's, updates REPL variables)
 * If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively
 * check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt>
 * if not.
 *
 * @param f the closure to clean
 * @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability
 * @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not
 * serializable
 * @return the cleaned closure (the same reference, mutated in place by ClosureCleaner)
 */
private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = {
  ClosureCleaner.clean(f, checkSerializable)
  f
}
/**
 * Set the directory under which RDDs are going to be checkpointed. A fresh random
 * subdirectory is created under the given path and used as the actual checkpoint root.
 *
 * @param directory path to the directory where checkpoint files will be stored
 * (must be HDFS path if running in cluster)
 */
def setCheckpointDir(directory: String): Unit = {
  // If we are running on a cluster, log a warning if the directory is local.
  // Otherwise, the driver may attempt to reconstruct the checkpointed RDD from
  // its own local file system, which is incorrect because the checkpoint files
  // are actually on the executor machines.
  if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) {
    logWarning("Spark is not running in local mode, therefore the checkpoint directory " +
      s"must not be on the local filesystem. Directory '$directory' " +
      "appears to be on the local filesystem.")
  }
  checkpointDir = Option(directory).map { base =>
    // Use a random UUID so multiple applications sharing a root don't collide.
    val checkpointPath = new Path(base, UUID.randomUUID().toString)
    val fs = checkpointPath.getFileSystem(hadoopConfiguration)
    fs.mkdirs(checkpointPath)
    fs.getFileStatus(checkpointPath).getPath.toString
  }
}
/** Return the directory RDDs are checkpointed under, if one has been set. */
def getCheckpointDir: Option[String] = checkpointDir
/** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */
def defaultParallelism: Int = {
  assertNotStopped()
  taskScheduler.defaultParallelism
}
/**
 * Default min number of partitions for Hadoop RDDs when not given by user
 * Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2.
 * The reasons for this are discussed in https://github.com/mesos/spark/pull/718
 */
def defaultMinPartitions: Int = math.min(defaultParallelism, 2)
// Monotonically increasing counter used to allocate unique shuffle IDs.
private val nextShuffleId = new AtomicInteger(0)
/** Allocate and return the next unique shuffle ID. */
private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement()
// Monotonically increasing counter used to allocate unique RDD IDs.
private val nextRddId = new AtomicInteger(0)
/** Register a new RDD, returning its RDD ID */
private[spark] def newRddId(): Int = nextRddId.getAndIncrement()
/**
 * Registers listeners specified in spark.extraListeners, then starts the listener bus.
 * This should be called after all internal listeners have been registered with the listener bus
 * (e.g. after the web UI and event logging listeners have been registered).
 * If any user listener fails to load, the whole context is stopped and a SparkException
 * is thrown.
 */
private def setupAndStartListenerBus(): Unit = {
  try {
    conf.get(EXTRA_LISTENERS).foreach { classNames =>
      // Instantiate each configured listener class via reflection.
      val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf)
      listeners.foreach { listener =>
        listenerBus.addToSharedQueue(listener)
        logInfo(s"Registered listener ${listener.getClass().getName()}")
      }
    }
  } catch {
    case e: Exception =>
      // A bad listener is fatal to the context: tear down, then rethrow wrapped.
      try {
        stop()
      } finally {
        throw new SparkException(s"Exception when registering SparkListener", e)
      }
  }
  listenerBus.start(this, _env.metricsSystem)
  _listenerBusStarted = true
}
/** Post the application start event and begin syncing driver logs, if configured. */
private def postApplicationStart(): Unit = {
  // Note: this code assumes that the task scheduler has been initialized and has contacted
  // the cluster manager to get an application ID (in case the cluster manager provides one).
  listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId),
    startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls,
    schedulerBackend.getDriverAttributes))
  _driverLogger.foreach(_.startSync(_hadoopConfiguration))
}
/** Post the application end event, stamped with the current wall-clock time. */
private def postApplicationEnd(): Unit = {
  listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis))
}
/**
 * Post the environment update event once the task scheduler is ready. A no-op while the
 * scheduler has not been initialized yet.
 */
private def postEnvironmentUpdate(): Unit = {
  if (taskScheduler != null) {
    // Snapshot the scheduling mode plus currently registered jars/files for the event.
    val details = SparkEnv.environmentDetails(
      conf,
      hadoopConfiguration,
      getSchedulingMode.toString,
      addedJars.keys.toSeq,
      addedFiles.keys.toSeq)
    listenerBus.post(SparkListenerEnvironmentUpdate(details))
  }
}
/** Reports heartbeat metrics for the driver. */
private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = {
  // Snapshot the driver's current executor-level metrics from the memory manager.
  val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager)
  executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics))
  val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics]
  // In the driver, we do not track per-stage metrics, so use a dummy stage for the key
  driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics))
  // The driver has no task accumulator updates to report.
  val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0)
  listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates,
    driverUpdates))
}
// In order to prevent multiple SparkContexts from being active at the same time, mark this
// context as having finished construction.
// NOTE: this must be placed at the end of the SparkContext constructor.
SparkContext.setActiveContext(this)
}
/**
* The SparkContext object contains a number of implicit conversions and parameters for use with
* various Spark features.
*/
object SparkContext extends Logging {
// The set of log level names accepted for runtime log-level changes.
private val VALID_LOG_LEVELS =
  Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN")
/**
 * Lock that guards access to global variables that track SparkContext construction.
 */
private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object()
/**
 * The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`.
 *
 * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`.
 */
private val activeContext: AtomicReference[SparkContext] =
  new AtomicReference[SparkContext](null)
/**
 * Points to a partially-constructed SparkContext if another thread is in the SparkContext
 * constructor, or `None` if no SparkContext is being constructed.
 *
 * Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`.
 */
private var contextBeingConstructed: Option[SparkContext] = None
/**
 * Called to ensure that no other SparkContext is running in this JVM.
 *
 * Throws an exception if a running context is detected and logs a warning if another thread is
 * constructing a SparkContext. This warning is necessary because the current locking scheme
 * prevents us from reliably distinguishing between cases where another context is being
 * constructed and cases where another constructor threw an exception.
 */
private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = {
  SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
    Option(activeContext.get()).filter(_ ne sc).foreach { ctx =>
      // Fix: insert the missing space between the two sentences and emit a real
      // newline before the creation site (the message previously printed a literal
      // backslash-n and ran the sentences together).
      val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." +
        s" The currently running SparkContext was created at:\n${ctx.creationSite.longForm}"
      throw new SparkException(errMsg)
    }
    contextBeingConstructed.filter(_ ne sc).foreach { otherContext =>
      // Since otherContext might point to a partially-constructed context, guard against
      // its creationSite field being null:
      val otherContextCreationSite =
        Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location")
      val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" +
        " constructor). This may indicate an error, since only one SparkContext should be" +
        " running in this JVM (see SPARK-2243)." +
        s" The other SparkContext was created at:\n$otherContextCreationSite"
      logWarning(warnMsg)
    }
  }
}
/**
 * This function may be used to get or instantiate a SparkContext and register it as a
 * singleton object. Because we can only have one active SparkContext per JVM,
 * this is useful when applications may wish to share a SparkContext.
 *
 * @param config `SparkConfig` that will be used for initialisation of the `SparkContext`
 * @return current `SparkContext` (or a new one if it wasn't created before the function call)
 */
def getOrCreate(config: SparkConf): SparkContext = {
  // Synchronize to ensure that multiple create requests don't trigger an exception
  // from assertNoOtherContextIsRunning within setActiveContext
  SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
    if (activeContext.get() == null) {
      setActiveContext(new SparkContext(config))
    } else {
      // A context already exists; the supplied config cannot be applied to it.
      if (config.getAll.nonEmpty) {
        logWarning("Using an existing SparkContext; some configuration may not take effect.")
      }
    }
    activeContext.get()
  }
}
/**
 * This function may be used to get or instantiate a SparkContext and register it as a
 * singleton object. Because we can only have one active SparkContext per JVM,
 * this is useful when applications may wish to share a SparkContext.
 *
 * This method allows not passing a SparkConf (useful if just retrieving).
 *
 * @return current `SparkContext` (or a new one if wasn't created before the function call)
 */
def getOrCreate(): SparkContext = {
  SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
    if (activeContext.get() == null) {
      // No existing context: build one with a default SparkConf.
      setActiveContext(new SparkContext())
    }
    activeContext.get()
  }
}
/** Return the current active [[SparkContext]] if any. */
private[spark] def getActive: Option[SparkContext] = {
  SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
    // Option(...) maps a null active context to None.
    Option(activeContext.get())
  }
}
/**
 * Called at the beginning of the SparkContext constructor to ensure that no SparkContext is
 * running. Throws an exception if a running context is detected and logs a warning if another
 * thread is constructing a SparkContext. This warning is necessary because the current locking
 * scheme prevents us from reliably distinguishing between cases where another context is being
 * constructed and cases where another constructor threw an exception.
 */
private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = {
  SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
    assertNoOtherContextIsRunning(sc)
    // Record the in-flight construction so a racing constructor can warn about it.
    contextBeingConstructed = Some(sc)
  }
}
/**
 * Called at the end of the SparkContext constructor to ensure that no other SparkContext has
 * raced with this constructor and started.
 */
private[spark] def setActiveContext(sc: SparkContext): Unit = {
  SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
    assertNoOtherContextIsRunning(sc)
    // Construction finished: clear the in-flight marker and publish the context.
    contextBeingConstructed = None
    activeContext.set(sc)
  }
}
/**
 * Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's
 * also called in unit tests to prevent a flood of warnings from test suites that don't / can't
 * properly clean up their SparkContexts.
 */
private[spark] def clearActiveContext(): Unit = {
  SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
    activeContext.set(null)
  }
}
// Local-property key holding a human-readable description of the current job.
private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description"
// Local-property key holding the job group ID (see setJobGroup / cancelJobGroup).
private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id"
// Local-property key: whether tasks of a cancelled job group should be interrupted.
private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel"
// Local-property key selecting the fair-scheduler pool for jobs from this thread.
private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool"
// Local-property keys recording RDD operation scopes.
private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope"
private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride"
/**
 * Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was
 * changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see
 * SPARK-6716 for more details).
 */
private[spark] val DRIVER_IDENTIFIER = "driver"
// Implicit conversion wrapping an Iterable of Writables into a Hadoop ArrayWritable
// whose element class is T's runtime class.
// NOTE(review): presumably consumed by the Hadoop sequence-file write paths — confirm
// against the callers elsewhere in this file.
private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T])
  : ArrayWritable = {
  // Upcast helper so the mapped collection is typed as Iterable[Writable].
  def anyToWritable[U <: Writable](u: U): Writable = u
  new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]],
    arr.map(x => anyToWritable(x)).toArray)
}
/**
 * Find the JAR from which a given class was loaded, to make it easy for users to pass
 * their JARs to SparkContext.
 *
 * @param cls class that should be inside of the jar
 * @return jar that contains the Class, `None` if not found
 */
def jarOfClass(cls: Class[_]): Option[String] = {
  val resourceName = "/" + cls.getName.replace('.', '/') + ".class"
  // A class loaded from a jar has a location of the form
  // "jar:file:/path/foo.jar!/package/cls.class"; extract the "/path/foo.jar" part.
  // Any other location (missing resource, exploded directory, jrt image) yields None.
  Option(cls.getResource(resourceName))
    .map(_.toString)
    .filter(_.startsWith("jar:file:"))
    .map(location => location.substring("jar:file:".length, location.indexOf('!')))
}
/**
 * Find the JAR that contains the class of a particular object, to make it easy for users
 * to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in
 * your driver program.
 *
 * @param obj reference to an instance which class should be inside of the jar
 * @return jar that contains the class of the instance, `None` if not found
 */
// Thin convenience wrapper: resolves the runtime class and delegates to jarOfClass.
def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass)
/**
 * Creates a modified version of a SparkConf with the parameters that can be passed separately
 * to SparkContext, to make it easier to write SparkContext's constructors. This ignores
 * parameters that are passed as the default value of null, instead of throwing an exception
 * like SparkConf would.
 */
private[spark] def updatedConf(
    conf: SparkConf,
    master: String,
    appName: String,
    sparkHome: String = null,
    jars: Seq[String] = Nil,
    environment: Map[String, String] = Map()): SparkConf = {
  val merged = conf.clone()
  merged.setMaster(master)
  merged.setAppName(appName)
  // A null sparkHome means "not supplied": leave the cloned conf's value untouched.
  Option(sparkHome).foreach(home => merged.setSparkHome(home))
  // Guard against both a null list and an empty one before overriding the jar list.
  if (jars != null && jars.nonEmpty) {
    merged.setJars(jars)
  }
  merged.setExecutorEnv(environment.toSeq)
  merged
}
/**
 * The number of cores available to the driver to use for tasks such as I/O with Netty
 */
private[spark] def numDriverCores(master: String): Int = {
  // Delegate to the two-argument overload with no SparkConf available.
  numDriverCores(master, null)
}
/**
 * The number of cores available to the driver to use for tasks such as I/O with Netty.
 * Returns 0 when the driver does not run tasks locally (or its core count is resolved
 * elsewhere).
 */
private[spark] def numDriverCores(master: String, conf: SparkConf): Int = {
  // "*" means "use every available processor"; otherwise parse the explicit count.
  def threadsToInt(threads: String): Int =
    if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt

  master match {
    case "local" => 1
    case SparkMasterRegex.LOCAL_N_REGEX(threads) => threadsToInt(threads)
    case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => threadsToInt(threads)
    case "yarn" =>
      // Only yarn-cluster mode dedicates configurable cores to the driver.
      val clusterDeploy = conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster"
      if (clusterDeploy) conf.getInt(DRIVER_CORES.key, 0) else 0
    case _ => 0 // Either driver is not being used, or its core count will be interpolated later
  }
}
/**
 * Create a task scheduler based on a given master URL.
 * Return a 2-tuple of the scheduler backend and the task scheduler.
 */
private def createTaskScheduler(
    sc: SparkContext,
    master: String,
    deployMode: String): (SchedulerBackend, TaskScheduler) = {
  import SparkMasterRegex._

  // When running locally, don't try to re-execute tasks on failure.
  val MAX_LOCAL_TASK_FAILURES = 1

  // Ensure that executor's resources satisfies one or more tasks requirement.
  // Throws SparkException when an executor cannot run even one task; warns (or throws in
  // tests) when a non-limiting resource would be wasted.
  def checkResourcesPerTask(clusterMode: Boolean, executorCores: Option[Int]): Unit = {
    val taskCores = sc.conf.get(CPUS_PER_TASK)
    // NOTE(review): in local modes every caller below passes Some(...), so the bare
    // `executorCores.get` on the non-cluster branch is safe.
    val execCores = if (clusterMode) {
      executorCores.getOrElse(sc.conf.get(EXECUTOR_CORES))
    } else {
      executorCores.get
    }

    // Number of cores per executor must meet at least one task requirement.
    if (execCores < taskCores) {
      throw new SparkException(s"The number of cores per executor (=$execCores) has to be >= " +
        s"the task config: ${CPUS_PER_TASK.key} = $taskCores when run on $master.")
    }

    // Calculate the max slots each executor can provide based on resources available on each
    // executor and resources required by each task.
    val taskResourceRequirements = parseResourceRequirements(sc.conf, SPARK_TASK_PREFIX)
    val executorResourcesAndAmounts =
      parseAllResourceRequests(sc.conf, SPARK_EXECUTOR_PREFIX)
        .map(request => (request.id.resourceName, request.amount)).toMap

    // Start with CPU as the limiting resource, then let any custom resource lower the bound.
    var numSlots = execCores / taskCores
    var limitingResourceName = "CPU"

    taskResourceRequirements.foreach { taskReq =>
      // Make sure the executor resources were specified through config.
      val execAmount = executorResourcesAndAmounts.getOrElse(taskReq.resourceName,
        throw new SparkException("The executor resource config: " +
          ResourceID(SPARK_EXECUTOR_PREFIX, taskReq.resourceName).amountConf +
          " needs to be specified since a task requirement config: " +
          ResourceID(SPARK_TASK_PREFIX, taskReq.resourceName).amountConf +
          " was specified")
      )
      // Make sure the executor resources are large enough to launch at least one task.
      if (execAmount < taskReq.amount) {
        throw new SparkException("The executor resource config: " +
          ResourceID(SPARK_EXECUTOR_PREFIX, taskReq.resourceName).amountConf +
          s" = $execAmount has to be >= the requested amount in task resource config: " +
          ResourceID(SPARK_TASK_PREFIX, taskReq.resourceName).amountConf +
          s" = ${taskReq.amount}")
      }
      // Compare and update the max slots each executor can provide.
      // If the configured amount per task was < 1.0, a task is subdividing
      // executor resources. If the amount per task was > 1.0, the task wants
      // multiple executor resources.
      val resourceNumSlots = Math.floor(execAmount * taskReq.numParts / taskReq.amount).toInt
      if (resourceNumSlots < numSlots) {
        numSlots = resourceNumSlots
        limitingResourceName = taskReq.resourceName
      }
    }

    // There have been checks above to make sure the executor resources were specified and are
    // large enough if any task resources were specified.
    taskResourceRequirements.foreach { taskReq =>
      val execAmount = executorResourcesAndAmounts(taskReq.resourceName)
      // If this resource allows more slots than the limiting resource permits, part of it
      // can never be used: surface that as a warning (or a test failure).
      if ((numSlots * taskReq.amount / taskReq.numParts) < execAmount) {
        val taskReqStr = if (taskReq.numParts > 1) {
          s"${taskReq.amount}/${taskReq.numParts}"
        } else {
          s"${taskReq.amount}"
        }
        val resourceNumSlots = Math.floor(execAmount * taskReq.numParts / taskReq.amount).toInt
        val message = s"The configuration of resource: ${taskReq.resourceName} " +
          s"(exec = ${execAmount}, task = ${taskReqStr}, " +
          s"runnable tasks = ${resourceNumSlots}) will " +
          s"result in wasted resources due to resource ${limitingResourceName} limiting the " +
          s"number of runnable tasks per executor to: ${numSlots}. Please adjust " +
          s"your configuration."
        if (Utils.isTesting) {
          throw new SparkException(message)
        } else {
          logWarning(message)
        }
      }
    }
  }

  master match {
    case "local" =>
      checkResourcesPerTask(clusterMode = false, Some(1))
      val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true)
      val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1)
      scheduler.initialize(backend)
      (backend, scheduler)

    case LOCAL_N_REGEX(threads) =>
      def localCpuCount: Int = Runtime.getRuntime.availableProcessors()
      // local[*] estimates the number of cores on the machine; local[N] uses exactly N threads.
      val threadCount = if (threads == "*") localCpuCount else threads.toInt
      if (threadCount <= 0) {
        throw new SparkException(s"Asked to run locally with $threadCount threads")
      }
      checkResourcesPerTask(clusterMode = false, Some(threadCount))
      val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true)
      val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount)
      scheduler.initialize(backend)
      (backend, scheduler)

    case LOCAL_N_FAILURES_REGEX(threads, maxFailures) =>
      def localCpuCount: Int = Runtime.getRuntime.availableProcessors()
      // local[*, M] means the number of cores on the computer with M failures
      // local[N, M] means exactly N threads with M failures
      val threadCount = if (threads == "*") localCpuCount else threads.toInt
      checkResourcesPerTask(clusterMode = false, Some(threadCount))
      val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true)
      val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount)
      scheduler.initialize(backend)
      (backend, scheduler)

    case SPARK_REGEX(sparkUrl) =>
      checkResourcesPerTask(clusterMode = true, None)
      val scheduler = new TaskSchedulerImpl(sc)
      // A comma-separated list of masters is supported for standalone HA setups.
      val masterUrls = sparkUrl.split(",").map("spark://" + _)
      val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls)
      scheduler.initialize(backend)
      (backend, scheduler)

    case LOCAL_CLUSTER_REGEX(numSlaves, coresPerSlave, memoryPerSlave) =>
      checkResourcesPerTask(clusterMode = true, Some(coresPerSlave.toInt))
      // Check to make sure memory requested <= memoryPerSlave. Otherwise Spark will just hang.
      val memoryPerSlaveInt = memoryPerSlave.toInt
      if (sc.executorMemory > memoryPerSlaveInt) {
        throw new SparkException(
          "Asked to launch cluster with %d MiB RAM / worker but requested %d MiB/worker".format(
            memoryPerSlaveInt, sc.executorMemory))
      }
      // For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED
      // to false because this mode is intended to be used for testing and in this case all the
      // executors are running on the same host. So if host local reading was enabled here then
      // testing of the remote fetching would be secondary as setting this config explicitly to
      // false would be required in most of the unit test (despite the fact that remote fetching
      // is much more frequent in production).
      sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false)

      val scheduler = new TaskSchedulerImpl(sc)
      val localCluster = new LocalSparkCluster(
        numSlaves.toInt, coresPerSlave.toInt, memoryPerSlaveInt, sc.conf)
      val masterUrls = localCluster.start()
      val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls)
      scheduler.initialize(backend)
      // Tear the in-process cluster down together with the backend.
      backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => {
        localCluster.stop()
      }
      (backend, scheduler)

    case masterUrl =>
      // Anything else is delegated to a pluggable ExternalClusterManager.
      checkResourcesPerTask(clusterMode = true, None)
      val cm = getClusterManager(masterUrl) match {
        case Some(clusterMgr) => clusterMgr
        case None => throw new SparkException("Could not parse Master URL: '" + master + "'")
      }
      try {
        val scheduler = cm.createTaskScheduler(sc, masterUrl)
        val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler)
        cm.initialize(scheduler, backend)
        (backend, scheduler)
      } catch {
        case se: SparkException => throw se
        case NonFatal(e) =>
          throw new SparkException("External scheduler cannot be instantiated", e)
      }
  }
}
/**
 * Resolve the external cluster manager registered (via ServiceLoader) for the given
 * master URL. Returns None when no manager can handle the URL and throws when more
 * than one claims it.
 */
private def getClusterManager(url: String): Option[ExternalClusterManager] = {
  val classLoader = Utils.getContextOrSparkClassLoader
  val serviceLoaders = ServiceLoader
    .load(classOf[ExternalClusterManager], classLoader)
    .asScala
    .filter(_.canCreate(url))
  if (serviceLoaders.size > 1) {
    throw new SparkException(
      s"Multiple external cluster managers registered for the url $url: $serviceLoaders")
  }
  serviceLoaders.headOption
}
}
/**
 * A collection of regexes for extracting information from the master string.
 */
private object SparkMasterRegex {
  // FIX: the previous patterns used doubled backslashes inside triple-quoted strings.
  // Triple-quoted Scala strings perform NO escape processing, so """local\\[...""" produced
  // a regex requiring a literal backslash, and master strings like "local[4]" could never
  // match. A single backslash is the correct escape for the regex metacharacters.

  // Regular expression used for local[N] and local[*] master formats
  val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r
  // Regular expression for local[N, maxRetries], used in tests with failing tasks
  val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r
  // Regular expression for simulating a Spark cluster of [N, cores, memory] locally
  val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r
  // Regular expression for connecting to Spark deploy clusters
  val SPARK_REGEX = """spark://(.*)""".r
}
/**
 * A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable`
 * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the
 * conversion.
 * The getter for the writable class takes a `ClassTag[T]` in case this is a generic object
 * that doesn't know the type of `T` when it is created. This sounds strange but is necessary to
 * support converting subclasses of `Writable` to themselves (`writableWritableConverter()`).
 *
 * @param writableClass given T's ClassTag, yields the concrete Writable class to read
 * @param convert converts a raw Writable instance into a value of type T
 */
private[spark] class WritableConverter[T](
    val writableClass: ClassTag[T] => Class[_ <: Writable],
    val convert: Writable => T)
  extends Serializable
object WritableConverter {

  // Helper objects for converting common types to Writable
  // Builds a converter for T from a function W => T; the Writable class is recovered
  // from W's ClassTag, and the incoming Writable is downcast to W before converting.
  private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T)
    : WritableConverter[T] = {
    val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]]
    new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W]))
  }

  // The following implicit functions were in SparkContext before 1.3 and users had to
  // `import SparkContext._` to enable them. Now we move them here to make the compiler find
  // them automatically. However, we still keep the old functions in SparkContext for backward
  // compatibility and forward to the following functions directly.

  // The following implicit declarations have been added on top of the very similar ones
  // below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta
  // expansion of zero-arg methods and thus won't match a no-arg method where it expects
  // an implicit that is a function of no args.

  implicit val intWritableConverterFn: () => WritableConverter[Int] =
    () => simpleWritableConverter[Int, IntWritable](_.get)

  implicit val longWritableConverterFn: () => WritableConverter[Long] =
    () => simpleWritableConverter[Long, LongWritable](_.get)

  implicit val doubleWritableConverterFn: () => WritableConverter[Double] =
    () => simpleWritableConverter[Double, DoubleWritable](_.get)

  implicit val floatWritableConverterFn: () => WritableConverter[Float] =
    () => simpleWritableConverter[Float, FloatWritable](_.get)

  implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] =
    () => simpleWritableConverter[Boolean, BooleanWritable](_.get)

  implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = {
    () => simpleWritableConverter[Array[Byte], BytesWritable] { bw =>
      // getBytes method returns array which is longer then data to be returned
      Arrays.copyOfRange(bw.getBytes, 0, bw.getLength)
    }
  }

  implicit val stringWritableConverterFn: () => WritableConverter[String] =
    () => simpleWritableConverter[String, Text](_.toString)

  implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] =
    () => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])

  // These implicits remain included for backwards-compatibility. They fulfill the
  // same role as those above.

  implicit def intWritableConverter(): WritableConverter[Int] =
    simpleWritableConverter[Int, IntWritable](_.get)

  implicit def longWritableConverter(): WritableConverter[Long] =
    simpleWritableConverter[Long, LongWritable](_.get)

  implicit def doubleWritableConverter(): WritableConverter[Double] =
    simpleWritableConverter[Double, DoubleWritable](_.get)

  implicit def floatWritableConverter(): WritableConverter[Float] =
    simpleWritableConverter[Float, FloatWritable](_.get)

  implicit def booleanWritableConverter(): WritableConverter[Boolean] =
    simpleWritableConverter[Boolean, BooleanWritable](_.get)

  implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = {
    simpleWritableConverter[Array[Byte], BytesWritable] { bw =>
      // getBytes method returns array which is longer then data to be returned
      Arrays.copyOfRange(bw.getBytes, 0, bw.getLength)
    }
  }

  implicit def stringWritableConverter(): WritableConverter[String] =
    simpleWritableConverter[String, Text](_.toString)

  implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] =
    new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])
}
/**
 * A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable`
 * class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the
 * conversion.
 * The `Writable` class will be used in `SequenceFileRDDFunctions`.
 *
 * @param writableClass given T's ClassTag, yields the concrete Writable class to write
 * @param convert converts a value of type T into its Writable representation
 */
private[spark] class WritableFactory[T](
    val writableClass: ClassTag[T] => Class[_ <: Writable],
    val convert: T => Writable) extends Serializable
object WritableFactory {

  // Builds a factory for T from a function T => W; the target Writable class is recovered
  // from W's ClassTag.
  private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W)
    : WritableFactory[T] = {
    val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]]
    new WritableFactory[T](_ => writableClass, convert)
  }

  // Implicit factories for the common primitive, byte-array and String types.
  implicit def intWritableFactory: WritableFactory[Int] =
    simpleWritableFactory(new IntWritable(_))

  implicit def longWritableFactory: WritableFactory[Long] =
    simpleWritableFactory(new LongWritable(_))

  implicit def floatWritableFactory: WritableFactory[Float] =
    simpleWritableFactory(new FloatWritable(_))

  implicit def doubleWritableFactory: WritableFactory[Double] =
    simpleWritableFactory(new DoubleWritable(_))

  implicit def booleanWritableFactory: WritableFactory[Boolean] =
    simpleWritableFactory(new BooleanWritable(_))

  implicit def bytesWritableFactory: WritableFactory[Array[Byte]] =
    simpleWritableFactory(new BytesWritable(_))

  implicit def stringWritableFactory: WritableFactory[String] =
    simpleWritableFactory(new Text(_))

  // Identity factory: a Writable subclass converts to itself.
  implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] =
    simpleWritableFactory(w => w)
}
| jkbradley/spark | core/src/main/scala/org/apache/spark/SparkContext.scala | Scala | apache-2.0 | 126,907 |
package org.openurp.edu.eams.system.web.action
import org.beangle.commons.collection.Order
import org.beangle.data.jpa.dao.OqlBuilder
import org.beangle.commons.lang.Strings
import org.beangle.security.blueprint.User
import org.beangle.security.blueprint.model.UserBean
import org.beangle.struts2.helper.QueryHelper
import org.openurp.edu.base.Student
import org.openurp.edu.eams.core.service.StudentService
import org.openurp.edu.eams.system.security.EamsUserService
import org.openurp.edu.eams.util.DataRealmUtils
import org.openurp.edu.eams.web.action.common.RestrictionSupportAction
/**
 * Admin action for managing student user accounts: listing students together with their
 * accounts, creating accounts, activating/deactivating them and editing account details.
 */
class StudentUserAction extends RestrictionSupportAction {

  var eamsUserService: EamsUserService = _

  var studentService: StudentService = _

  /** Lists students within the current user's data realm together with their accounts. */
  def search(): String = {
    val entityQuery = OqlBuilder.from(classOf[Student], "student")
    QueryHelper.populateConditions(entityQuery, "student.type.id")
    entityQuery.join("left", "student.major", "major")
    entityQuery.limit(getPageLimit)
    entityQuery.orderBy(Order.parse(get("orderBy")))
    val stdTypeId = getLong("student.stdType.id")
    DataRealmUtils.addDataRealms(entityQuery, Array("student.type.id", "student.department.id"),
      restrictionHelper.getDataRealmsWith(stdTypeId))
    val stds = entityDao.search(entityQuery)
    // FIX: the original `new HashMap()` referenced an unimported, raw HashMap (no HashMap
    // import exists in this file, so it did not compile, and the raw type loses all type
    // information). Use a fully-qualified, typed java.util.HashMap instead.
    val stdUserMap = new java.util.HashMap[String, User]()
    var iter = stds.iterator()
    while (iter.hasNext) {
      val std = iter.next().asInstanceOf[Student]
      val user = eamsUserService.get(std.getCode)
      if (null != user) {
        stdUserMap.put(std.id.toString, user)
      }
    }
    put("stds", stds)
    put("stdUserMap", stdUserMap)
    forward()
  }

  /** Enables or disables the accounts of the students listed in the `stdCodes` parameter. */
  def activate(): String = {
    val stdCodes = get("stdCodes")
    val activate = getBoolean("isActivate")
    // A missing parameter means "deactivate".
    val isActivate = if (activate == null) false else activate.booleanValue()
    try {
      val users = entityDao.get(classOf[User], "name", Strings.split(stdCodes, ","))
      var iter = users.iterator()
      while (iter.hasNext) {
        val user = iter.next().asInstanceOf[User]
        // FIX: `if (isActivate) true else false` was a redundant conditional.
        user.setEnabled(isActivate)
      }
      entityDao.saveOrUpdate(users)
    } catch {
      case e: Exception => {
        logHelper.info("Failure in alert status stdUser nos:" + stdCodes, e)
        return forwardError("error.occurred")
      }
    }
    var msg = "info.activate.success"
    if (!isActivate) msg = "info.unactivate.success"
    redirect("search", msg)
  }

  /** Entry page: populates the department and student-type filter lists. */
  def index(): String = {
    put("departmentList", getColleges)
    put("stdTypeList", getStdTypes)
    forward()
  }

  /** Shows the details of a single user account identified by `userId`. */
  def info(): String = {
    val userId = getLong("userId")
    var user: User = null
    if (null != userId && userId.intValue() != 0) user = entityDao.get(classOf[User], userId).asInstanceOf[User] else {
      return forwardError("error.model.notExist")
    }
    put("user", user)
    forward()
  }

  /** Opens the edit form for a student's account, creating the account on demand. */
  def edit(): String = {
    val stdCode = get("stdCode")
    var stdUser: User = null
    var std: Student = null
    if (Strings.isNotEmpty(stdCode)) {
      std = studentService.getStudent(stdCode)
      if (null == std) return forwardError(Array("entity.student", "error.model.notExsits"))
      stdUser = eamsUserService.get(stdCode)
      if (null == stdUser) {
        // No account yet: create one on behalf of the current (admin) user.
        val curUser = entityDao.get(classOf[User], getUserId)
        stdUser = eamsUserService.createStdUser(curUser, std)
      }
    }
    put("user", stdUser)
    forward()
  }

  /** Persists mail and password changes made on the edit form. */
  def save(): String = {
    val userId = getLong("user.id")
    val savedUser = eamsUserService.get(userId).asInstanceOf[UserBean]
    if (null == savedUser) return forwardError(Array("entity.student", "error.model.notExsits"))
    savedUser.setMail(get("user.email"))
    // NOTE(review): the password from the request is stored as-is; confirm that
    // eamsUserService.saveOrUpdate hashes it before persisting.
    savedUser.setPassword(get("user.password"))
    try {
      logHelper.info("Update stdUser acount:" + savedUser.getName)
      eamsUserService.saveOrUpdate(savedUser)
    } catch {
      case e: Exception => {
        logHelper.info("Failure in Update stdUser :" + savedUser.getName)
        redirect("search", "info.save.failure")
      }
    }
    redirect("search", "info.save.success")
  }

  /** Creates accounts for all students listed in the `stdCodes` parameter. */
  def add(): String = {
    val stdCodeSeq = get("stdCodes")
    if (Strings.isEmpty(stdCodeSeq)) return forwardError(Array("entity.student", "error.model.id.needed"))
    val stdCodes = Strings.split(stdCodeSeq, ",")
    val curUser = entityDao.get(classOf[User], getUserId)
    try {
      logHelper.info("Add count for std Nos:" + stdCodeSeq)
      val stds = entityDao.get(classOf[Student], "code", stdCodes)
      var it = stds.iterator()
      while (it.hasNext) {
        val one = it.next().asInstanceOf[Student]
        eamsUserService.createStdUser(curUser, one)
      }
    } catch {
      case e: Exception => {
        logHelper.info("Failure Add count for std Nos:" + stdCodes, e)
        return forwardError("error.occurred")
      }
    }
    redirect("search", "info.add.success")
  }

  /** Ensures accounts exist for the listed students, creating missing ones, then re-saves them. */
  def promptToManager(): String = {
    val names = Strings.split(get("stdCodes"), ",")
    val manager = entityDao.get(classOf[User], getUserId)
    for (i <- 0 until names.length) {
      var one = eamsUserService.get(names(i))
      if (null == one) {
        logHelper.info("Add teacher acount for:" + names(i))
        val std = studentService.getStudent(names(i))
        one = eamsUserService.createStdUser(manager, std)
      }
      eamsUserService.saveOrUpdate(one)
    }
    eamsUserService.saveOrUpdate(manager.asInstanceOf[User])
    redirect("search", "info.update.success")
  }
}
| openurp/edu-eams-webapp | web/src/main/scala/org/openurp/edu/eams/system/web/action/StudentUserAction.scala | Scala | gpl-3.0 | 5,482 |
package scodec
import shapeless._
import shapeless.ops.hlist._
/** Operations on `HList`s that are not provided by Shapeless. */
object HListOps {

  /**
   * Computes the inverse of `(l: L).filterNot[Unit]` -- i.e., inserts unit values wherever the unit type
   * appears in `L`.
   * @tparam K HList type containing no `Unit` types
   * @tparam L equivalent to `K` with `Unit` types added in arbitrary positions
   * @param k list to insert unit values in to
   * @return new list with unit values inserted
   */
  def reUnit[K <: HList, L <: HList](k: K)(implicit ru: ReUnit[K, L]): L = ru(k)

  /** Provides the `reUnit` method on an `HList`. */
  implicit class ReUnitSyntax[K <: HList](val k: K) extends AnyVal {
    def reUnit[L <: HList](implicit ru: ReUnit[K, L]): L = ru(k)
  }

  /** Typeclass that allows computation of the inverse of calling `filterNot[Unit]` on a `L`. */
  sealed trait ReUnit[K <: HList, L <: HList] {
    def apply(l: K): L
  }

  object ReUnit {
    // Base case: the empty list maps to the empty list.
    implicit lazy val base: ReUnit[HNil, HNil] = new ReUnit[HNil, HNil] {
      def apply(l: HNil): HNil = HNil
    }

    // Inductive case: when both heads are the same non-Unit type, keep the head
    // and recurse on the tails.
    implicit def `non-empty K and L where head of K and L are same type`[H, KT <: HList, LT <: HList](implicit
      reUnit: ReUnit[KT, LT],
      notUnit: H =:!= Unit
    ): ReUnit[H :: KT, H :: LT] = new ReUnit[H :: KT, H :: LT] {
      def apply(k: H :: KT): H :: LT =
        k.head :: reUnit(k.tail)
    }

    // Inductive case: when the target head is Unit, synthesize a `()` and recurse
    // with all of K still to be consumed.
    implicit def `non-empty K and any L where head of L is Unit`[K <: HList, LT <: HList](implicit
      reUnit: ReUnit[K, LT]
    ): ReUnit[K, Unit :: LT] = new ReUnit[K, Unit :: LT] {
      def apply(k: K): Unit :: LT =
        () :: reUnit(k)
    }
  }
}
| danielwegener/scodec | src/main/scala/scodec/HListOps.scala | Scala | bsd-3-clause | 1,673 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnet
import org.apache.mxnet.init.Base.{CPtrAddress, RefInt, RefLong, RefString, _LIB}
import org.apache.mxnet.utils.CToScalaUtils
import scala.collection.mutable.ListBuffer
import scala.reflect.macros.blackbox
// Base class for the MXNet macro generators: queries the C backend for the operator
// registry and turns it into Scala function signatures to be spliced into classes/objects.
private[mxnet] abstract class GeneratorBase {

  // A single operator argument as reported by the MXNet C backend.
  case class Arg(argName: String, argType: String, argDesc: String, isOptional: Boolean) {
    /**
     * Filter the arg name with the Scala keyword that are not allow to use as arg name,
     * such as var and type listed in here. This is due to the diff between C and Scala
     * @return argname that works in Scala
     */
    def safeArgName: String = argName match {
      case "var" => "vari"
      case "type" => "typeOf"
      case _ => argName
    }
  }

  // An operator signature: name, description, argument list and Scala return type.
  case class Func(name: String, desc: String, listOfArgs: List[Arg], returnType: String)

  /**
   * Non Type-safe function generation method
   * This method will filter all "_" functions
   * @param isSymbol Check if generate the Symbol method
   * @param isContrib Check if generate the contrib method
   * @param isJava Check if generate Corresponding Java method
   * @return List of functions
   */
  def functionsToGenerate(isSymbol: Boolean, isContrib: Boolean,
                          isJava: Boolean = false): List[Func] = {
    val l = getBackEndFunctions(isSymbol, isJava)
    if (isContrib) {
      l.filter(func => func.name.startsWith("_contrib_") || !func.name.startsWith("_"))
    } else {
      l.filterNot(_.name.startsWith("_"))
    }
  }

  /**
   * Filter the operators to generate in the type-safe Symbol.api and NDArray.api
   * @param isSymbol Check if generate the Symbol method
   * @param isContrib Check if generate the contrib method
   * @return List of functions
   */
  protected def typeSafeFunctionsToGenerate(isSymbol: Boolean, isContrib: Boolean): List[Func] = {
    // Operators that should not be generated
    val notGenerated = Set("Custom")
    val l = getBackEndFunctions(isSymbol)
    val res = if (isContrib) {
      l.filter(func => func.name.startsWith("_contrib_") || !func.name.startsWith("_"))
    } else {
      l.filterNot(_.name.startsWith("_"))
    }
    res.filterNot(ele => notGenerated.contains(ele.name))
  }

  /**
   * Extract and format the functions obtained from C API
   * @param isSymbol Check if generate for Symbol
   * @param isJava Check if extracting in Java format
   * @return List of functions
   */
  protected def getBackEndFunctions(isSymbol: Boolean, isJava: Boolean = false): List[Func] = {
    val opNames = ListBuffer.empty[String]
    // Fills opNames with every operator registered in the native library.
    _LIB.mxListAllOpNames(opNames)
    opNames.map(opName => {
      val opHandle = new RefLong
      _LIB.nnGetOpHandle(opName, opHandle)
      makeAtomicFunction(opHandle.value, opName, isSymbol, isJava)
    }).toList
  }

  // Builds a Func description for a single operator by querying the C backend.
  private def makeAtomicFunction(handle: CPtrAddress, aliasName: String,
                                 isSymbol: Boolean, isJava: Boolean): Func = {
    val name = new RefString
    val desc = new RefString
    val keyVarNumArgs = new RefString
    val numArgs = new RefInt
    val argNames = ListBuffer.empty[String]
    val argTypes = ListBuffer.empty[String]
    val argDescs = ListBuffer.empty[String]
    // Populates the refs and buffers above from the native symbol info.
    _LIB.mxSymbolGetAtomicSymbolInfo(
      handle, name, desc, numArgs, argNames, argTypes, argDescs, keyVarNumArgs)
    // NOTE(review): extraDoc is computed but never used in this method.
    val extraDoc: String = if (keyVarNumArgs.value != null && keyVarNumArgs.value.length > 0) {
      s"This function support variable length of positional input (${keyVarNumArgs.value})."
    } else {
      ""
    }
    val argList = argNames zip argTypes zip argDescs map { case ((argName, argType), argDesc) =>
      // The "family" selects which tensor type raw C types are mapped onto.
      val family = if (isJava) "org.apache.mxnet.javaapi.NDArray"
      else if (isSymbol) "org.apache.mxnet.Symbol"
      else "org.apache.mxnet.NDArray"
      val typeAndOption =
        CToScalaUtils.argumentCleaner(argName, argType, family, isJava)
      Arg(argName, typeAndOption._1, argDesc, typeAndOption._2)
    }
    val returnType =
      if (isJava) "Array[org.apache.mxnet.javaapi.NDArray]"
      else if (isSymbol) "org.apache.mxnet.Symbol"
      else "org.apache.mxnet.NDArrayFuncReturn"
    Func(aliasName, desc.value, argList.toList, returnType)
  }

  /**
   * Generate class structure for all function APIs
   *
   * @param c Context used for generation
   * @param funcDef DefDef type of function definitions
   * @param annottees Annottees used to define Class or Module
   * @return Expr used for code generation
   */
  protected def structGeneration(c: blackbox.Context)
                                (funcDef: List[c.universe.DefDef], annottees: c.Expr[Any]*)
  : c.Expr[Nothing] = {
    import c.universe._
    val inputs = annottees.map(_.tree).toList
    // pattern match on the inputs: append the generated defs to the body of the
    // annotated class or object, rejecting anything else.
    val modDefs = inputs map {
      case ClassDef(mods, name, something, template) =>
        val q = template match {
          case Template(superMaybe, emptyValDef, defs) =>
            Template(superMaybe, emptyValDef, defs ++ funcDef)
          case ex =>
            throw new IllegalArgumentException(s"Invalid template: $ex")
        }
        ClassDef(mods, name, something, q)
      case ModuleDef(mods, name, template) =>
        val q = template match {
          case Template(superMaybe, emptyValDef, defs) =>
            Template(superMaybe, emptyValDef, defs ++ funcDef)
          case ex =>
            throw new IllegalArgumentException(s"Invalid template: $ex")
        }
        ModuleDef(mods, name, q)
      case ex =>
        throw new IllegalArgumentException(s"Invalid macro input: $ex")
    }
    // wrap the result up in an Expr, and return it
    val result = c.Expr(Block(modDefs, Literal(Constant(()))))
    result
  }

  /**
   * Build function argument definition, with optionality, and safe names
   * @param func Functions
   * @return List of string representing the functions interface
   */
  protected def typedFunctionCommonArgDef(func: Func): List[String] = {
    func.listOfArgs.map(arg =>
      if (arg.isOptional) {
        // let's avoid a stupid Option[Array[...]]
        if (arg.argType.startsWith("Array[")) {
          s"${arg.safeArgName} : ${arg.argType} = Array.empty"
        } else {
          s"${arg.safeArgName} : Option[${arg.argType}] = None"
        }
      }
      else {
        s"${arg.safeArgName} : ${arg.argType}"
      }
    )
  }
}
// a mixin to ease generating the Random module
private[mxnet] trait RandomHelpers {
  self: GeneratorBase =>

  /**
   * A generic type spec used in Symbol.random and NDArray.random modules
   * @param isSymbol Check if generate for Symbol
   * @param fullPackageSpec Check if leave the full name of the classTag
   * @return A formatted string for random Symbol/NDArray
   */
  protected def randomGenericTypeSpec(isSymbol: Boolean, fullPackageSpec: Boolean): String = {
    val classTag = if (fullPackageSpec) "scala.reflect.ClassTag" else "ClassTag"
    if (isSymbol) s"[T: SymbolOrScalar : $classTag]"
    else s"[T: NDArrayOrScalar : $classTag]"
  }

  /**
   * Filter the operators to generate in the type-safe Symbol.random and NDArray.random
   * @param isSymbol Check if generate Symbol functions
   * @return List of functions
   */
  protected def typeSafeRandomFunctionsToGenerate(isSymbol: Boolean): List[Func] = {
    getBackEndFunctions(isSymbol)
      .filter(f => f.name.startsWith("_sample_") || f.name.startsWith("_random_"))
      .map(f => f.copy(name = f.name.stripPrefix("_")))
      // unify _random and _sample
      .map(f => unifyRandom(f, isSymbol))
      // deduplicate
      .groupBy(_.name)
      .mapValues(_.head)
      .values
      .toList
  }

  // unify call targets (random_xyz and sample_xyz) and unify their argument types
  private def unifyRandom(func: Func, isSymbol: Boolean): Func = {
    // FIX: was declared as a `var` although it is never reassigned; `val` states the intent.
    val typeConv = Set("org.apache.mxnet.NDArray", "org.apache.mxnet.Symbol",
      "Float", "Int")

    func.copy(
      name = func.name.replaceAll("(random|sample)_", ""),
      listOfArgs = func.listOfArgs
        .map(hackNormalFunc)
        .map(arg =>
          // Replace concrete scalar/tensor argument types with the unified type parameter T.
          if (typeConv(arg.argType)) arg.copy(argType = "T")
          else arg
        )
      // TODO: some functions are non consistent in random_ vs sample_ regarding optionality
      // we may try to unify that as well here.
    )
  }

  /**
   * Hacks to manage the fact that random_normal and sample_normal have
   * non-consistent parameter naming in the back-end
   * this first one, merge loc/scale and mu/sigma
   * @param arg Argument need to modify
   * @return Arg case class with clean arg names
   */
  protected def hackNormalFunc(arg: Arg): Arg = {
    if (arg.argName == "loc") arg.copy(argName = "mu")
    else if (arg.argName == "scale") arg.copy(argName = "sigma")
    else arg
  }

  /**
   * This second one reverts this merge prior to back-end call
   * @param func Function case class
   * @return A string contains the implementation of random args
   */
  protected def unhackNormalFunc(func: Func): String = {
    if (func.name.equals("normal")) {
      s"""if(target.equals("random_normal")) {
         | if(map.contains("mu")) { map("loc") = map("mu"); map.remove("mu") }
         | if(map.contains("sigma")) { map("scale") = map("sigma"); map.remove("sigma") }
         |}
      """.stripMargin
    } else {
      ""
    }
  }
}
| eric-haibin-lin/mxnet | scala-package/macros/src/main/scala/org/apache/mxnet/GeneratorBase.scala | Scala | apache-2.0 | 10,136 |
package suzaku.ui.style
import suzaku.ui.Keywords
import scala.language.implicitConversions
// A length is either a unit-less number (LengthU) or a concrete unit (LengthUnit).
sealed trait LengthDimension

// A concrete length unit. The arithmetic operators do not compute a value;
// they build a symbolic expression tree (LengthAdd/LengthSub/LengthMul/LengthDiv)
// to be rendered elsewhere — NOTE(review): presumably as a CSS calc()
// expression; the renderer is not visible in this file.
sealed trait LengthUnit extends LengthDimension {
  def +(b: LengthUnit) = LengthAdd(this, b)
  def -(b: LengthUnit) = LengthSub(this, b)
  def *(b: Double) = LengthMul(this, b)
  def /(b: Double) = LengthDiv(this, b)
}

// Unit-less length value (not a LengthUnit, so no arithmetic operators).
case class LengthU(value: Double) extends LengthDimension

// Concrete units; names mirror the CSS units px, %, em, rem, vw, vh, fr.
case class LengthPx(value: Double) extends LengthUnit
case class LengthPct(value: Double) extends LengthUnit
case class LengthEm(value: Double) extends LengthUnit
case class LengthRem(value: Double) extends LengthUnit
case class LengthVw(value: Double) extends LengthUnit
case class LengthVh(value: Double) extends LengthUnit
case class LengthFr(value: Int) extends LengthUnit // fr takes an Int, unlike the others
case object LengthAuto extends LengthUnit // the "auto" keyword

// Symbolic arithmetic nodes produced by the operators on LengthUnit.
case class LengthAdd(a: LengthUnit, b: LengthUnit) extends LengthUnit
case class LengthSub(a: LengthUnit, b: LengthUnit) extends LengthUnit
case class LengthMul(a: LengthUnit, b: Double) extends LengthUnit
case class LengthDiv(a: LengthUnit, b: Double) extends LengthUnit
object LengthDimension {
  import boopickle.Default._

  // Binary pickler for the LengthUnit hierarchy.
  // NOTE(review): with boopickle's CompositePickler, registration order
  // determines the wire-format type ids — do not reorder or insert entries;
  // append new types at the end only, or serialized data breaks.
  implicit val lengthPickler = compositePickler[LengthUnit]
  lengthPickler
    .addTransform[LengthPx, Double](_.value, LengthPx)
    .addTransform[LengthPct, Double](_.value, LengthPct)
    .addTransform[LengthEm, Double](_.value, LengthEm)
    .addTransform[LengthRem, Double](_.value, LengthRem)
    .addTransform[LengthVw, Double](_.value, LengthVw)
    .addTransform[LengthVh, Double](_.value, LengthVh)
    .addTransform[LengthFr, Int](_.value, LengthFr)
    .addConcreteType[LengthAdd]
    .addConcreteType[LengthSub]
    .addConcreteType[LengthMul]
    .addConcreteType[LengthDiv]
    .addConcreteType[LengthAuto.type]

  // Pickler for the full LengthDimension hierarchy: everything from
  // lengthPickler plus the unit-less LengthU (encoded as a plain Double).
  implicit val lengthDimensionPickler = compositePickler[LengthDimension]
    .join(lengthPickler)
    .addTransform[LengthU, Double](_.value, LengthU)
}
// Width of a border/line: one of the keyword widths or an explicit length.
sealed trait WidthDimension
case object WidthThin extends WidthDimension // "thin" keyword
case object WidthMedium extends WidthDimension // "medium" keyword
case object WidthThick extends WidthDimension // "thick" keyword
case class WidthLength(w: LengthUnit) extends WidthDimension // explicit length value
object WidthDimension {
  import boopickle.Default._
  import LengthDimension._ // brings lengthPickler into scope for WidthLength's LengthUnit field

  // Pickler for WidthDimension.
  // NOTE(review): registration order defines wire-format type ids — append only.
  implicit val widthPickler = compositePickler[WidthDimension]
    .addConcreteType[WidthThin.type]
    .addConcreteType[WidthMedium.type]
    .addConcreteType[WidthThick.type]
    .addConcreteType[WidthLength]
}
// Font size: one of the keyword sizes (mirroring the CSS font-size keywords)
// or an explicit length.
sealed trait FontDimension
case object FontXXSmall extends FontDimension
case object FontXSmall extends FontDimension
case object FontSmall extends FontDimension
case object FontSmaller extends FontDimension // relative to parent
case object FontMedium extends FontDimension
case object FontLarge extends FontDimension
case object FontLarger extends FontDimension // relative to parent
case object FontXLarge extends FontDimension
case object FontXXLarge extends FontDimension
// Explicit size; takes any LengthDimension (including unit-less LengthU).
case class FontLength(size: LengthDimension) extends FontDimension
object FontDimension {
  import boopickle.Default._
  import LengthDimension._ // brings lengthDimensionPickler into scope for FontLength's field

  // Pickler for FontDimension.
  // NOTE(review): registration order defines wire-format type ids — append only.
  implicit val fontPickler = compositePickler[FontDimension]
    .addConcreteType[FontXXSmall.type]
    .addConcreteType[FontXSmall.type]
    .addConcreteType[FontSmall.type]
    .addConcreteType[FontSmaller.type]
    .addConcreteType[FontMedium.type]
    .addConcreteType[FontLarge.type]
    .addConcreteType[FontLarger.type]
    .addConcreteType[FontXLarge.type]
    .addConcreteType[FontXXLarge.type]
    .addConcreteType[FontLength]
}
// Font weight: keyword weights (mirroring CSS font-weight) or a numeric value.
sealed trait WeightDimension
case object WeightNormal extends WeightDimension
case object WeightBold extends WeightDimension
case object WeightBolder extends WeightDimension // relative to parent
case object WeightLighter extends WeightDimension // relative to parent
// Numeric weight; range is not validated here — NOTE(review): presumably
// the CSS 100–900 scale, confirm at the rendering side.
case class WeightValue(weight: Int) extends WeightDimension
object WeightDimension {
  import boopickle.Default._

  // Pickler for WeightDimension.
  // NOTE(review): registration order defines wire-format type ids — append only.
  implicit val weightPickler = compositePickler[WeightDimension]
    .addConcreteType[WeightNormal.type]
    .addConcreteType[WeightBold.type]
    .addConcreteType[WeightBolder.type]
    .addConcreteType[WeightLighter.type]
    .addConcreteType[WeightValue]
}
// DSL syntax for building the dimension types above from plain numbers and
// keywords, e.g. `10.px`, `50.%%`, `2.em`, `Keywords.auto`, `Keywords.bold`.
trait LengthImplicits {
  // Integer literal syntax for length units.
  implicit class int2length(v: Int) {
    def px = LengthPx(v)
    def %% = LengthPct(v) // doubled `%` — presumably to avoid clashing with numeric modulo; confirm
    def em = LengthEm(v)
    def rem = LengthRem(v)
    def vw = LengthVw(v)
    def vh = LengthVh(v)
    def fr = LengthFr(v) // only available on Int, since LengthFr takes an Int
  }
  // Double literal syntax; deliberately no `fr` here (LengthFr is Int-only).
  implicit class double2length(v: Double) {
    def px = LengthPx(v)
    def %% = LengthPct(v)
    def em = LengthEm(v)
    def rem = LengthRem(v)
    def vw = LengthVw(v)
    def vh = LengthVh(v)
  }
  // Bare Int as a unit-less length.
  implicit def int2u(v: Int): LengthU = LengthU(v)
  implicit def autoLength(a: Keywords.auto.type): LengthUnit = LengthAuto
  // Keyword and length conversions to border widths.
  implicit def thin2width(a: Keywords.thin.type): WidthDimension = WidthThin
  implicit def medium2width(a: Keywords.medium.type): WidthDimension = WidthMedium
  implicit def thick2width(a: Keywords.thick.type): WidthDimension = WidthThick
  implicit def length2width(l: LengthUnit): WidthDimension = WidthLength(l)
  implicit def length2font(l: LengthUnit): FontDimension = FontLength(l)
  // NOTE(review): the `*2width` names below actually produce FontDimension
  // values — `*2font` would be more accurate. Implicit conversions are
  // resolved by type, not name, so renaming would be safe for most callers,
  // but names are left unchanged here to avoid breaking explicit references.
  implicit def xxsmall2width(a: Keywords.xxsmall.type): FontDimension = FontXXSmall
  implicit def xsmall2width(a: Keywords.xsmall.type): FontDimension = FontXSmall
  implicit def small2width(a: Keywords.small.type): FontDimension = FontSmall
  implicit def smaller2width(a: Keywords.smaller.type): FontDimension = FontSmaller
  implicit def xxlarge2width(a: Keywords.xxlarge.type): FontDimension = FontXXLarge
  implicit def xlarge2width(a: Keywords.xlarge.type): FontDimension = FontXLarge
  implicit def large2width(a: Keywords.large.type): FontDimension = FontLarge
  implicit def larger2width(a: Keywords.larger.type): FontDimension = FontLarger
  implicit def medium2fontwidth(a: Keywords.medium.type): FontDimension = FontMedium
  // Bare Int as a numeric font weight.
  implicit def int2weight(w: Int): WeightDimension = WeightValue(w)
}
| suzaku-io/suzaku | core-shared/shared/src/main/scala/suzaku/ui/style/LengthDimension.scala | Scala | apache-2.0 | 6,406 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset to code snippets that match specific criteria and returns a sample of them, giving a quick overview of the dataset's content.