| code (string, 5–1M chars) | repo_name (string, 5–109 chars) | path (string, 6–208 chars) | language (1 class) | license (15 classes) | size (int64, 5–1M) |
|---|---|---|---|---|---|
package com.pauldoo.euler.puzzle
import com.pauldoo.euler.common.Primes
import com.pauldoo.euler.common.LowestCommonMultiple.lowestCommonMultiple
import java.math.BigInteger
object Puzzle5 extends Puzzle {
def answer(): BigInt = {
lowestCommonMultiple(Range(1, 20).map{n => new BigInt(BigInteger.valueOf(n))});
}
}
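// Hedged sketch: the com.pauldoo.euler.common.LowestCommonMultiple helper is not shown in this
// file. A minimal implementation would fold lcm(a, b) = a * b / gcd(a, b) over the sequence;
// this is an illustration only, not the project's actual code.
object LowestCommonMultipleSketch {
  private def lcm(a: BigInt, b: BigInt): BigInt = (a * b) / a.gcd(b)
  def lowestCommonMultiple(xs: Seq[BigInt]): BigInt = xs.foldLeft(BigInt(1))(lcm)
}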
| pauldoo/projecteuler | src/com/pauldoo/euler/puzzle/Puzzle5.scala | Scala | isc | 323 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.parquet.io
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName
import org.apache.parquet.schema.Type.Repetition
import org.apache.parquet.schema.{MessageType, OriginalType, Type, Types}
import org.locationtech.geomesa.utils.geotools.ObjectType
import org.locationtech.geomesa.utils.geotools.ObjectType.ObjectType
import org.opengis.feature.`type`.AttributeDescriptor
import org.opengis.feature.simple.SimpleFeatureType
/**
* Original parquet mapping - not versioned. Only supports points
*
* Of note, the FID field was marked as REPEATED, which seems to be an error and does not work when reading
* parquet files as Avro GenericRecords (the main way to read an unknown parquet file)
*/
object SimpleFeatureParquetSchemaV0 {
import scala.collection.JavaConverters._
def apply(sft: SimpleFeatureType): MessageType = {
val idField =
Types.primitive(PrimitiveTypeName.BINARY, Repetition.REPEATED)
.as(OriginalType.UTF8)
.named(SimpleFeatureParquetSchema.FeatureIdField)
// NOTE: idField goes at the end of the record
val fields = sft.getAttributeDescriptors.asScala.map(convertField) :+ idField
new MessageType(sft.getTypeName, fields.asJava)
}
private def convertField(ad: AttributeDescriptor): Type = {
val bindings = ObjectType.selectType(ad)
val builder = bindings.head match {
case ObjectType.GEOMETRY =>
Types.buildGroup(Repetition.REQUIRED)
.primitive(PrimitiveTypeName.DOUBLE, Repetition.REQUIRED).named(SimpleFeatureParquetSchema.GeometryColumnX)
.primitive(PrimitiveTypeName.DOUBLE, Repetition.REQUIRED).named(SimpleFeatureParquetSchema.GeometryColumnY)
case ObjectType.DATE => Types.primitive(PrimitiveTypeName.INT64, Repetition.OPTIONAL)
case ObjectType.STRING => Types.primitive(PrimitiveTypeName.BINARY, Repetition.OPTIONAL).as(OriginalType.UTF8)
case ObjectType.INT => Types.primitive(PrimitiveTypeName.INT32, Repetition.OPTIONAL)
case ObjectType.DOUBLE => Types.primitive(PrimitiveTypeName.DOUBLE, Repetition.OPTIONAL)
case ObjectType.LONG => Types.primitive(PrimitiveTypeName.INT64, Repetition.OPTIONAL)
case ObjectType.FLOAT => Types.primitive(PrimitiveTypeName.FLOAT, Repetition.OPTIONAL)
case ObjectType.BOOLEAN => Types.primitive(PrimitiveTypeName.BOOLEAN, Repetition.OPTIONAL)
case ObjectType.BYTES => Types.primitive(PrimitiveTypeName.BINARY, Repetition.OPTIONAL)
case ObjectType.UUID => Types.primitive(PrimitiveTypeName.BINARY, Repetition.OPTIONAL)
case ObjectType.LIST => Types.optionalList().optionalElement(matchType(bindings(1)))
case ObjectType.MAP => Types.optionalMap().key(matchType(bindings(1))).optionalValue(matchType(bindings(2)))
}
builder.named(ad.getLocalName)
}
private def matchType(objType: ObjectType): PrimitiveTypeName = {
objType match {
case ObjectType.DATE => PrimitiveTypeName.INT64
case ObjectType.STRING => PrimitiveTypeName.BINARY
case ObjectType.INT => PrimitiveTypeName.INT32
case ObjectType.DOUBLE => PrimitiveTypeName.DOUBLE
case ObjectType.LONG => PrimitiveTypeName.INT64
case ObjectType.FLOAT => PrimitiveTypeName.FLOAT
case ObjectType.BOOLEAN => PrimitiveTypeName.BOOLEAN
case ObjectType.BYTES => PrimitiveTypeName.BINARY
case ObjectType.UUID => PrimitiveTypeName.BINARY
}
}
}
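// Illustrative only: the REQUIRED variant of the FID field that the comment above suggests was
// intended, built with the same Parquet Types builder. This is a sketch for comparison, not part
// of the original V0 schema.
object SimpleFeatureParquetSchemaV0Sketch {
  def requiredIdField: Type =
    Types.primitive(PrimitiveTypeName.BINARY, Repetition.REQUIRED)
      .as(OriginalType.UTF8)
      .named(SimpleFeatureParquetSchema.FeatureIdField)
}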
| locationtech/geomesa | geomesa-fs/geomesa-fs-storage/geomesa-fs-storage-parquet/src/main/scala/org/locationtech/geomesa/parquet/io/SimpleFeatureParquetSchemaV0.scala | Scala | apache-2.0 | 3,954 |
package pigpio
import org.scalatest.Tag
package object scaladsl {
/**
* a test tagged as pitest will only be run once deployed to a Pi
*/
val pitest = Tag("pigpio.scaladsl.PiIntegrationTest")
}
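// Hypothetical spec (not part of this file, assuming a pre-3.1 ScalaTest where
// org.scalatest.FlatSpec is available) showing how the tag is applied; tagged tests can then be
// excluded with ScalaTest's exclude-tags option, e.g. `-l pigpio.scaladsl.PiIntegrationTest`,
// when the suite is not running on a Pi.
import org.scalatest.FlatSpec
class PiOnlySpecSketch extends FlatSpec {
  "gpio" should "only be exercised on a pi" taggedAs scaladsl.pitest in {
    assert(true) // placeholder body
  }
}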
| jw3/pigpio-scala | src/test/scala/pigpio/scaladsl/package.scala | Scala | apache-2.0 | 192 |
package com.twitter.finagle.stats
/**
* The default formatting behavior, aligned with Commons Metrics.
*/
private[twitter] object HistogramFormatter {
def labelPercentile(p: Double): String = {
// this has a strange quirk that p999 gets formatted as p9990
// Round for precision issues; e.g. 0.9998999... converts to "p9998" with a direct int cast.
val gname: String = "p" + (p * 10000).round
if (3 < gname.length && ("00" == gname.substring(3))) {
gname.substring(0, 3)
} else {
gname
}
}
val labelMin: String = "min"
val labelMax: String = "max"
val labelAverage: String = "avg"
val labelCount: String = "count"
val labelSum: String = "sum"
}
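// A quick trace of the quirk described above (illustrative, not part of the original file):
// stepping through labelPercentile by hand gives
//   labelPercentile(0.9)    == "p90"     // 9000 ends in "00", so it is truncated to "p90"
//   labelPercentile(0.99)   == "p99"
//   labelPercentile(0.999)  == "p9990"   // the quirk: 9990 does not end in "00"
//   labelPercentile(0.9999) == "p9999"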
| twitter/util | util-stats/src/main/scala/com/twitter/finagle/stats/HistogramFormatter.scala | Scala | apache-2.0 | 705 |
package jk_5.nailed.ipc.codec
import io.netty.handler.codec.MessageToMessageEncoder
import io.netty.handler.codec.http.websocketx.TextWebSocketFrame
import io.netty.channel.ChannelHandlerContext
import com.nexus.data.json.JsonObject
import java.util
/**
* No description given
*
* @author jk-5
*/
class JsonObjectEncoder extends MessageToMessageEncoder[JsonObject] {
override def encode(ctx: ChannelHandlerContext, data: JsonObject, out: util.List[AnyRef]){
out.add(new TextWebSocketFrame(data.stringify))
}
}
| nailed/nailed-legacy | src/main/scala/jk_5/nailed/ipc/codec/JsonObjectEncoder.scala | Scala | unlicense | 525 |
package jp.co.bizreach.play2stub
import java.net.URL
import jp.co.bizreach.play2stub.RoutesCompiler.Route
import org.apache.commons.io.FileUtils
import play.api.Play._
import play.api._
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
/**
*
*/
class StubPlugin(app: Application) extends Plugin {
private val logger = Logger("jp.co.bizreach.play2stub.StubPlugin")
private val basePath = "play2stub"
val engineConf = app.configuration.getString(basePath + ".engine").getOrElse("hbs")
val dataRootConf = app.configuration.getString(basePath + ".data-root").getOrElse("/app/data")
val viewRootConf = app.configuration.getString(basePath + ".view-root").getOrElse("/app/views")
val proxyRootConf = app.configuration.getString(basePath + ".proxy-root")
val enableProxyConf = app.configuration.getBoolean(basePath + ".enable-proxy")
val beforePluginList = app.configuration.getStringSeq(basePath + ".filters.before").getOrElse(Seq.empty)
val afterPluginList = app.configuration.getStringSeq(basePath + ".filters.after")
val rendererList = app.configuration.getStringSeq(basePath + ".renderers")
val processorList = app.configuration.getStringSeq(basePath + ".processors")
val paramBuilderList = app.configuration.getStringSeq(basePath + ".param-builders")
val templateResolverConf = app.configuration.getString(basePath + ".template-resolver")
val loadClassPathConf = app.configuration.getBoolean(basePath + ".loadClassPath").getOrElse(Play.isProd(current))
val parameterSymbol = app.configuration.getString(basePath + ".syntax.parameter").getOrElse("~")
val wildcardSymbol = app.configuration.getString(basePath + ".syntax.wildcard").getOrElse("~~")
private def defaultRenderers =
Seq(new HandlebarsRenderer)
private def defaultProcessors =
Seq(new ProxyProcessor, new TemplateProcessor, new StaticHtmlProcessor, new JsonProcessor)
private def defaultParamBuilders =
Seq(new PathAndQueryStringParamBuilder)
private def defaultAfterFilters =
Seq(new RedirectFilter)
trait RouteHolder {
val routes: Seq[StubRouteConfig]
val engine: String = engineConf
val dataRoot: String = dataRootConf
val viewRoot: String = viewRootConf
val proxyRoot: Option[String] = proxyRootConf
val isProxyEnabled: Boolean = enableProxyConf.getOrElse(false)
val beforeFilters: Seq[BeforeFilter] = loadFilters[BeforeFilter](beforePluginList)
val afterFilters: Seq[AfterFilter] = afterPluginList.map(loadFilters[AfterFilter]).getOrElse(defaultAfterFilters)
val renderers: Seq[Renderer] = rendererList.map(loadFilters[Renderer]).getOrElse(defaultRenderers)
val processors: Seq[Processor] = processorList.map(loadFilters[Processor]).getOrElse(defaultProcessors)
val paramBuilders: Seq[ParamBuilder] = paramBuilderList.map(loadFilters[ParamBuilder]).getOrElse(defaultParamBuilders)
val templateResolver: TemplateResolver = loadTemplateResolver(templateResolverConf)
val fileLoader: FileLoader = new FileLoader(dataRootConf, viewRootConf, loadClassPathConf)
}
/**
* Holds stub configuration values
*/
lazy val holder = new RouteHolder {
private val routeList =
current.configuration.getConfigList(basePath + ".routes")
.map(_.asScala).getOrElse(Seq.empty)
override val routes = routeList.map{ route =>
val path = route.subKeys.mkString
route.getConfig(path).map { inner =>
StubRouteConfig(
route = parseRoute(inner.getString("path").getOrElse(path).replace(wildcardSymbol, "*").replace(parameterSymbol, ":")),
template = toTemplate(inner),
proxy = inner.getString("proxy"),
redirect = inner.getString("redirect"),
data = inner.getString("data"),
status = inner.getInt("status"),
noResponse = inner.getBoolean("noResponse").getOrElse(false),
headers = toMap(inner.getConfig("headers")),
params = toMap(inner.getConfig("params"))
)
}.get
}
}
/**
* Instantiate stub configuration holder on starting up
*/
override def onStart(): Unit = {
logger.debug("Initializing Play2Stub ...")
holder
logger.debug("Play2Stub is initialized !")
}
/**
*
*/
private def loadFilters[T](filters: Seq[String])(implicit ct: ClassTag[T]): Seq[T] =
filters.map(f => app.classloader.loadClass(f).newInstance().asInstanceOf[T])
/**
*
*/
private def loadTemplateResolver(conf: Option[String]): TemplateResolver =
conf.map(app.classloader.loadClass(_).newInstance()
.asInstanceOf[TemplateResolver]).getOrElse(new DefaultTemplateResolver)
/**
*
*/
private def parseRoute(path: String): Route =
RoutesCompiler.parse(path) match {
case Right(r: Route) => r
case Right(unexpected) =>
throw new RuntimeException(unexpected.toString)
case Left(err) =>
throw new RuntimeException(err)
}
/**
*
*/
private def toTemplate(c: Configuration): Option[Template] =
if (c.subKeys.contains("template")) {
val path =
if (c.keys.contains("template.path"))
c.getString("template.path").get
else
c.getString("template").get
val engine =
if (c.keys.contains("template.engine"))
c.getString("template.engine").get
else
engineConf
Some(Template(path, engine))
} else
None
/**
*
*/
private def toMap(conf: Option[Configuration]): Map[String, String] =
conf.map(_.entrySet
.map(e => e._1 -> e._2.render()))
.getOrElse(Map.empty).toMap
}
class FileLoader(
dataRoot: String, viewRoot: String, loadClassPath: Boolean) {
def load(pathWithExt: String, isData: Boolean = false): Option[URL] =
if (loadClassPath)
loadByClassPath(pathWithExt, isData)
else
loadByFilePath(pathWithExt, isData)
def loadByClassPath(pathWithExt: String, isData: Boolean): Option[URL] =
Option(getClass.getResource(concat(rootDir(isData), pathWithExt)))
def loadByFilePath(pathWithExt: String, isData: Boolean): Option[URL] = {
val file = FileUtils.getFile(
System.getProperty("user.dir"), rootDir(isData), pathWithExt)
if (file.exists())
Some(file.toURI.toURL)
else
None
}
def rootDir(isData: Boolean): String =
if (isData) dataRoot else viewRoot
def concat(path1: String, path2 :String): String =
(if (path1.endsWith("/")) path1 else path1 + "/") +
(if (path2.startsWith("/")) path2.substring(1) else path2)
}
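// Illustrative sketch of the HOCON keys this plugin reads (inferred from the configuration reads
// above; the exact shape of each `routes` entry is an assumption, not taken from project docs):
//
//   play2stub {
//     engine = "hbs"            // template engine, defaults to "hbs"
//     data-root = "/app/data"
//     view-root = "/app/views"
//     enable-proxy = false
//     routes = [
//       { "GET /index" { template = "index", status = 200 } }
//     ]
//   }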
| bizreach/play2-stub | src/main/scala/jp/co/bizreach/play2stub/StubPlugin.scala | Scala | apache-2.0 | 6,556 |
/* *\
** _____ __ _____ __ ____ **
** / ___/ / / /____/ / / / \ FieldKit **
** / ___/ /_/ /____/ / /__ / / / (c) 2009, field.io **
** /_/ /____/ /____/ /_____/ http://www.field.io **
\* */
/* created February 24, 2010 */
package field.kit.physics
import field.kit._
import field.kit.math.geometry._
import scala.collection.mutable.ArrayBuffer
/**
* Represents a cubic space and also provides an interface (through subclassing)
* for various spatial optimisation techniques (Octree, Quadtree, ...)
*
* @author Marcus Wendt
*/
abstract class Space(position:Vec3, val dimension:Vec3)
extends AABB(position, dimension * 0.5f) {
type T = Particle
}
/**
* A space that uses an Octree to find neighbouring particles
*/
class OctreeSpace(position:Vec3, dimension:Vec3) extends Space(position, dimension) {
val tree = new Octree[Particle](Vec3(), dimension)
def this(dimension:Vec3) = this(new Vec3, dimension)
def clear = tree.clear
def insert(p:Particle) = tree insert p
def apply(point:Vec, radius:Float, result:ArrayBuffer[Particle]) {
result.clear
tree(new Sphere(point, radius), result)
}
def apply(bounds:BoundingVolume, result:ArrayBuffer[Particle]) {
result.clear
tree(bounds, result)
}
}
///**
//* A space that uses a Quadtree to find neighbouring particles
//*/
//class QuadtreeSpace(dimension:Vec3) extends Space(dimension) {
//
// var tree = new Quadtree(null, (x,y), (width/2f, height/2f))
//
// override def apply(point:Vec, radius:Float) = {
// result.clear
// tree(new Circle(point, radius), result)
// }
//
// override def insert(particle:Vec) = tree.insert(particle)
//
// override def clear = tree.clear
//}
| field/FieldKit.scala | src.physics/field/kit/physics/Space.scala | Scala | lgpl-3.0 | 1,966 |
/*
* Copyright 2008-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package reactivestreams
import java.util.concurrent.atomic.AtomicBoolean
import scala.language.implicitConversions
import org.mongodb.{ scala => mongoDB }
import org.{ reactivestreams => rxStreams }
object Implicits {
implicit def observableToPublisher[T](observable: mongoDB.Observable[T]): rxStreams.Publisher[T] = ObservableToPublisher(observable)
case class ObservableToPublisher[T](observable: mongoDB.Observable[T]) extends rxStreams.Publisher[T] {
def subscribe(subscriber: rxStreams.Subscriber[_ >: T]): Unit = {
observable.subscribe(
new mongoDB.Observer[T]() {
override def onSubscribe(subscription: mongoDB.Subscription): Unit = {
subscriber.onSubscribe(new rxStreams.Subscription() {
private final val cancelled: AtomicBoolean = new AtomicBoolean
def request(n: Long) {
if (!subscription.isUnsubscribed && n < 1) {
subscriber.onError(new IllegalArgumentException(
"""3.9 While the Subscription is not cancelled,
|Subscription.request(long n) MUST throw a java.lang.IllegalArgumentException if the
|argument is <= 0.""".stripMargin
))
} else {
subscription.request(n)
}
}
def cancel() {
if (!cancelled.getAndSet(true)) subscription.unsubscribe()
}
})
}
def onNext(result: T): Unit = subscriber.onNext(result)
def onError(e: Throwable): Unit = subscriber.onError(e)
def onComplete(): Unit = subscriber.onComplete()
}
)
}
}
}
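// Illustrative use of the implicit conversion above (assumed setup: a MongoCollection[Document]
// named `collection` and a Reactive Streams Subscriber named `subscriber` already in scope):
//
//   import reactivestreams.Implicits._
//   val publisher: rxStreams.Publisher[org.mongodb.scala.Document] = collection.find()
//   publisher.subscribe(subscriber)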
| rozza/mongo-scala-driver | examples/src/test/scala/reactivestreams/Implicits.scala | Scala | apache-2.0 | 2,319 |
package splendid.execution
import java.util.concurrent.ArrayBlockingQueue
import org.openrdf.query.BindingSet
import org.openrdf.query.QueryEvaluationException
import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.ActorSystem
import akka.actor.Props
import info.aduna.iteration.CloseableIteration
import splendid.execution.util.ResultCollector.Done
import splendid.execution.util.ResultCollector.Result
/**
* Bridge between the actors' push-based reactive result generation and the pull-based result processing in Sesame's Iterations.
*
* NOT THREAD-SAFE!
*/
class BindingSetIteration(props: Props, uri: String, query: String, bindings: BindingSet) extends CloseableIteration[BindingSet, QueryEvaluationException] {
var done = false
var peek: Option[BindingSet] = None
val resultQueue = new ArrayBlockingQueue[Option[BindingSet]](100)
val system = ActorSystem("my_operators")
val rootNode = system.actorOf(Props(new ResultCollector(props)), "root")
override def hasNext(): Boolean = !done && (peek.nonEmpty || (resultQueue.take() match {
case Some(bs) => {
peek = Some(bs); true
}
case None => { // end of queue
done = true
false
}
}))
@throws(classOf[QueryEvaluationException])
override def next(): BindingSet = if (done) {
throw new NoSuchElementException
} else {
peek match {
case Some(bs) => {
peek = None
bs
}
// end of queue
case None => resultQueue.take() getOrElse {
done = true
throw new NoSuchElementException
}
}
}
override def remove(): Unit = throw new UnsupportedOperationException()
// TODO: stop actors
override def close(): Unit = throw new UnsupportedOperationException()
class ResultCollector(props: Props) extends Actor with ActorLogging {
val child = context.actorOf(props)
def receive = {
case Result(bindings: BindingSet) => resultQueue.put(Some(bindings))
case Done => {
resultQueue.put(None)
context.system.terminate();
}
case msg if sender != child => child forward msg
case msg => log.warning(s"unknown message $msg")
}
}
}
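// Illustrative consumption pattern (assuming an already constructed BindingSetIteration named
// `iteration`): hasNext blocks on the internal queue until the ResultCollector delivers a
// Result or Done message.
//
//   while (iteration.hasNext()) {
//     val bs: BindingSet = iteration.next()
//     // ... process the binding set ...
//   }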
| goerlitz/splendid | src/main/scala/splendid/execution/BindingSetIteration.scala | Scala | lgpl-3.0 | 2,204 |
package linkchecker.errorKernel
import akka.actor.Actor
import scala.concurrent.duration._
import akka.actor.ReceiveTimeout
import akka.actor.ActorSystem
import akka.actor.Props
object MainActor {
def props = Props[MainActor]
}
class MainActor extends Actor {
import Receptionist._
val receptionist = context.actorOf(Receptionist.props, "receptionist")
context.watch(receptionist) // sign death pact
// receptionist ! Get("http://www.google.it")
// receptionist ! Get("http://www.google.it/1")
// receptionist ! Get("http://www.google.it/2")
// receptionist ! Get("http://www.google.it/3")
// receptionist ! Get("http://www.google.it/4")
receptionist ! Get("http://www.repubblica.it")
context.setReceiveTimeout(60.seconds)
def receive = {
case Result(url, set) =>
println(set.toVector.sorted.mkString(s"Results for '$url':\n", "\n", "\n"))
case Failed(url) =>
println(s"Failed to fetch '$url'\n")
case ReceiveTimeout =>
context.stop(self)
}
}
object Main {
def main(args: Array[String]) {
val system = ActorSystem("LinkChecker")
val mainActor = system.actorOf(MainActor.props)
}
}
| fabiofumarola/akka-tutorial | src/main/scala/linkchecker/errorKernel/MainActor.scala | Scala | cc0-1.0 | 1,158 |
/*
* Copyright (c) 2011-13 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
package ops
object nat {
/**
* Type class witnessing that `B` is the predecessor of `A`.
*
* @author Miles Sabin
*/
trait Pred[A <: Nat] { type Out <: Nat }
object Pred {
def apply[A <: Nat](implicit pred: Pred[A]): Aux[A, pred.Out] = pred
type Aux[A <: Nat, B <: Nat] = Pred[A] { type Out = B }
implicit def pred[B <: Nat]: Aux[Succ[B], B] = new Pred[Succ[B]] { type Out = B }
}
/**
* Type class witnessing that `C` is the sum of `A` and `B`.
*
* @author Miles Sabin
*/
trait Sum[A <: Nat, B <: Nat] { type Out <: Nat }
object Sum {
def apply[A <: Nat, B <: Nat](implicit sum: Sum[A, B]): Aux[A, B, sum.Out] = sum
type Aux[A <: Nat, B <: Nat, C <: Nat] = Sum[A, B] { type Out = C }
implicit def sum1[B <: Nat]: Aux[_0, B, B] = new Sum[_0, B] { type Out = B }
implicit def sum2[A <: Nat, B <: Nat]
(implicit sum : Sum[A, Succ[B]]): Aux[Succ[A], B, sum.Out] = new Sum[Succ[A], B] { type Out = sum.Out }
}
/**
* Type class witnessing that `C` is the difference of `A` and `B`.
*
* @author Miles Sabin
*/
trait Diff[A <: Nat, B <: Nat] { type Out <: Nat }
object Diff {
def apply[A <: Nat, B <: Nat](implicit diff: Diff[A, B]): Aux[A, B, diff.Out] = diff
type Aux[A <: Nat, B <: Nat, C <: Nat] = Diff[A, B] { type Out = C }
implicit def diff1[A <: Nat]: Aux[A, _0, A] = new Diff[A, _0] { type Out = A }
implicit def diff2[A <: Nat, B <: Nat]
(implicit diff : Diff[A, B]): Aux[Succ[A], Succ[B], diff.Out] = new Diff[Succ[A], Succ[B]] { type Out = diff.Out }
}
/**
* Type class witnessing that `C` is the product of `A` and `B`.
*
* @author Miles Sabin
*/
trait Prod[A <: Nat, B <: Nat] { type Out <: Nat }
object Prod {
def apply[A <: Nat, B <: Nat](implicit prod: Prod[A, B]): Aux[A, B, prod.Out] = prod
type Aux[A <: Nat, B <: Nat, C <: Nat] = Prod[A, B] { type Out = C }
implicit def prod1[B <: Nat]: Aux[_0, B, _0] = new Prod[_0, B] { type Out = _0 }
implicit def prod2[A <: Nat, B <: Nat, C <: Nat]
(implicit prod: Prod.Aux[A, B, C], sum: Sum[B, C]): Aux[Succ[A], B, sum.Out] = new Prod[Succ[A], B] { type Out = sum.Out }
}
/**
* Type class witnessing that `Out` is the quotient of `A` and `B`.
*
* @author Tom Switzer
*/
trait Div[A <: Nat, B <: Nat] { type Out <: Nat }
object Div {
def apply[A <: Nat, B <: Nat](implicit div: Div[A, B]): Aux[A, B, div.Out] = div
import LT._
type Aux[A <: Nat, B <: Nat, C <: Nat] = Div[A, B] { type Out = C }
implicit def div1[A <: Nat]: Aux[_0, A, _0] = new Div[_0, A] { type Out = _0 }
implicit def div2[A <: Nat, B <: Nat](implicit lt: A < B): Aux[A, B, _0] =
new Div[A, B] { type Out = _0 }
implicit def div3[A <: Nat, B <: Nat, C <: Nat, D <: Nat]
(implicit diff: Diff.Aux[Succ[A], B, C], div: Div.Aux[C, B, D]): Aux[Succ[A], B, Succ[D]] =
new Div[Succ[A], B] { type Out = Succ[D] }
}
/**
* Typeclass witnessing that `Out` is `A` mod `B`.
*
* @author Tom Switzer
*/
trait Mod[A <: Nat, B <: Nat] { type Out <: Nat }
object Mod {
def apply[A <: Nat, B <: Nat](implicit mod: Mod[A, B]): Aux[A, B, mod.Out] = mod
type Aux[A <: Nat, B <: Nat, C <: Nat] = Mod[A, B] { type Out = C }
implicit def modAux[A <: Nat, B <: Nat, C <: Nat, D <: Nat, E <: Nat]
(implicit div: Div.Aux[A, B, C], prod: Prod.Aux[C, B, D], diff: Diff.Aux[A, D, E]): Aux[A, B, E] =
new Mod[A, B] { type Out = E }
}
/**
* Type class witnessing that `A` is less than `B`.
*
* @author Miles Sabin
*/
trait LT[A <: Nat, B <: Nat]
object LT {
def apply[A <: Nat, B <: Nat](implicit lt: A < B) = lt
type <[A <: Nat, B <: Nat] = LT[A, B]
implicit def lt1[B <: Nat] = new <[_0, Succ[B]] {}
implicit def lt2[A <: Nat, B <: Nat](implicit lt : A < B) = new <[Succ[A], Succ[B]] {}
}
/**
* Type class witnessing that `A` is less than or equal to `B`.
*
* @author Miles Sabin
*/
trait LTEq[A <: Nat, B <: Nat]
object LTEq {
def apply[A <: Nat, B <: Nat](implicit lteq: A <= B) = lteq
type <=[A <: Nat, B <: Nat] = LTEq[A, B]
implicit def ltEq1 = new <=[_0, _0] {}
implicit def ltEq2[B <: Nat] = new <=[_0, Succ[B]] {}
implicit def ltEq3[A <: Nat, B <: Nat](implicit lteq : A <= B) = new <=[Succ[A], Succ[B]] {}
}
/**
* Type class witnessing that `Out` is `A` min `B`.
*
* @author George Leontiev
*/
trait Min[A <: Nat, B <: Nat] { type Out <: Nat }
object Min {
def apply[A <: Nat, B <: Nat](implicit min: Min[A, B]): Aux[A, B, min.Out] = min
type Aux[A <: Nat, B <: Nat, C <: Nat] = Min[A, B] { type Out = C }
implicit def minAux0[A <: Nat, B <: Nat, C <: Nat]
(implicit lteq: LTEq[A, B]): Aux[A, B, A] = new Min[A, B] { type Out = A }
implicit def minAux1[A <: Nat, B <: Nat, C <: Nat]
(implicit lteq: LT[B, A]): Aux[A, B, B] = new Min[A, B] { type Out = B }
}
/**
* Type class witnessing that `Out` is `X` raised to the power `N`.
*
* @author George Leontiev
*/
trait Pow[N <: Nat, X <: Nat] { type Out <: Nat }
object Pow {
def apply[A <: Nat, B <: Nat](implicit pow: Pow[A, B]): Aux[A, B, pow.Out] = pow
import shapeless.nat._1
type Aux[N <: Nat, X <: Nat, Z <: Nat] = Pow[N, X] { type Out = Z }
implicit def pow1[A <: Nat]: Aux[Succ[A], _0, _0] = new Pow[Succ[A], _0] { type Out = _0 }
implicit def pow2[A <: Nat]: Aux[_0, Succ[A], _1] = new Pow[_0, Succ[A]] { type Out = _1 }
implicit def pow3[N <: Nat, X <: Nat, Z <: Nat, Y <: Nat]
(implicit ev : Pow.Aux[N, X, Z], ev2 : Prod.Aux[Z, X, Y]): Aux[Succ[N], X, Y] = new Pow[Succ[N], X] { type Out = Y }
}
/**
* Type class supporting conversion of type-level Nats to value level Ints.
*
* @author Miles Sabin
*/
trait ToInt[N <: Nat] {
def apply() : Int
}
object ToInt {
def apply[N <: Nat](implicit toInt: ToInt[N]): ToInt[N] = toInt
implicit val toInt0 = new ToInt[_0] {
def apply() = 0
}
implicit def toIntSucc[N <: Nat](implicit toIntN : ToInt[N]) = new ToInt[Succ[N]] {
def apply() = toIntN()+1
}
}
}
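// Usage sketch (assuming shapeless 2.x with `shapeless._`, `shapeless.nat._` and
// `shapeless.ops.nat._` in scope): the implicits above resolve type-level arithmetic at
// compile time, for example
//
//   implicitly[Sum.Aux[_2, _3, _5]]    // 2 + 3 = 5
//   implicitly[Prod.Aux[_2, _3, _6]]   // 2 * 3 = 6
//   implicitly[Mod.Aux[_5, _2, _1]]    // 5 mod 2 = 1
//   Nat.toInt[_5]                      // 5, via the ToInt type class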
| mandubian/shapeless | core/src/main/scala/shapeless/ops/nat.scala | Scala | apache-2.0 | 6,831 |
import System.{currentTimeMillis => now}
import scala.util.Random
object par extends App {
val n = 50 * 1000 * 1000
val max = 2 * n
def random = Random nextInt max
val col = Vector.fill(n)(random).par
val target = random
val start = now
col.count(math.sqrt(_) == target)
val middle = now
col.count(math.sqrt(_) == target)
val end = now
println("par results:")
println("1st run: " + (middle-start) + "ms")
println("2nd run: " + (end-middle) + "ms")
}
| bwmcadams/lambdaconf-2015 | speakers/marconilanna/whats_new_in_scala/par.scala | Scala | artistic-2.0 | 470 |
import sbt._
class UsePlugin(info: ProjectInfo) extends DefaultProject(info)
{
import antlr.Tool // verify that antlr is on compile classpath
lazy val check = task { Class.forName("antlr.Tool"); None } // verify antlr is on runtime classpath
}
| sbt/sbt-zero-seven | src/sbt-test/project/plugins/project/build/UsePlugin.scala | Scala | bsd-3-clause | 246 |
package example
import org.scalatest._
import scala.collection.mutable.ListBuffer
import scala.language.reflectiveCalls
import example.model.Card
import example.model.Deck
import example.model.Player
import example.model.Prefab
class SimSpec extends FlatSpec with Matchers with BeforeAndAfterEach {
var cards = new ListBuffer[Card]()
var cards2 = new ListBuffer[Card]()
var deck: Deck = _
var deck2: Deck = _
var sim: Sim = _
override def beforeEach() {
cards += (
new Card(generic_type = "Power", cost = 0),
new Card(generic_type = "Power", cost = 0),
new Card(generic_type = "Unit", cost = 2),
new Card(generic_type = "Unit", cost = 3),
new Card(generic_type = "Spell", cost = 1),
new Card(generic_type = "Power", cost = 0),
new Card(generic_type = "Power", cost = 0),
new Card(generic_type = "Unit", cost = 2),
new Card(generic_type = "Unit", cost = 3),
new Card(generic_type = "Spell", cost = 1)
)
cards2 += (
new Card(generic_type = "Power", cost = 0),
new Card(generic_type = "Power", cost = 0),
new Card(generic_type = "Unit", cost = 2),
new Card(generic_type = "Unit", cost = 3),
new Card(generic_type = "Spell", cost = 1),
new Card(generic_type = "Power", cost = 0),
new Card(generic_type = "Power", cost = 0),
new Card(generic_type = "Unit", cost = 2),
new Card(generic_type = "Unit", cost = 3),
new Card(generic_type = "Spell", cost = 1)
)
deck = new Deck(cards)
deck2 = new Deck(cards2)
sim = new Sim(new Player("Bob", 25, deck), new Player("Sue", 25, deck2))
}
"The Sim" should "initialize a game of cards" in {
sim.start
sim.playerOne.hand.size should be (7)
sim.playerOne.deck.cards.size should be (3)
val card = sim.playerOne.deck.draw.get
sim.playerOne.deck.cards.size should be (2)
sim.playerOne.deck.replace(card)
sim.playerOne.deck.cards.size should be (3)
}
it should "play a unit on the board" in {
sim.start
sim.playerOne.maxPower = 5 // force power to a medium value
sim.playerOne.currentPower = 5
// The fixture guarantees we draw a unit in 7 cards
sim.playerOne.hand foreach { c =>
if (c.generic_type == "Unit") {
sim.playerOne.play(c)
sim.playerOne.board.size should be > 0
}
}
}
it should "play a power in the pool" in {
sim.start
// The fixture guarantees we draw a power in 7 cards
sim.playerOne.hand foreach { c =>
if (c.generic_type == "Power") {
sim.playerOne.play(c)
sim.playerOne.pool.size should be > 0
}
}
}
it should "not play a card you don't have" in {
val trickCard = new Card(generic_type = "Spell", cost = 99)
sim.start
sim.playerOne.play(trickCard)
sim.playerOne.hand.size should be (7)
}
it should "not discard a card you don't have" in {
val trickCard = new Card(generic_type = "Spell", cost = 99)
sim.start
sim.playerOne.discard(trickCard)
sim.playerOne.hand.size should be (7)
}
it should "not be able to play an expensive unit without sufficient power" in {
val trickCard = new Card(generic_type = "Unit", cost = 99)
sim.start
sim.playerOne.hand += trickCard // force expensive card into hand
sim.playerOne.maxPower = 5 // force power to a medium value
sim.playerOne.currentPower = 5
sim.playerOne.play(trickCard)
sim.playerOne.board should be ('empty)
sim.playerOne.hand.size should be (8)
}
it should "not test this function, but I'm lazy" in {
val c = Prefab.testCard
c.cost should be (3) // because we know stuff
}
it should "not test this function either, but I'm lazy" in {
val c = Prefab.testCollection
c.size should be > 480 // because we know more stuff
}
}
| osake/EternalCardGameSimulator | src/test/scala/example/SimSpec.scala | Scala | gpl-3.0 | 3,845 |
/*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.eval.graph
import akka.http.scaladsl.model.Uri
import com.netflix.atlas.chart.model.PlotBound
import com.netflix.atlas.core.model.DataExpr
import com.netflix.atlas.core.model.Query
import com.netflix.atlas.core.model.StyleExpr
import com.typesafe.config.ConfigFactory
import munit.FunSuite
class GraphUriSuite extends FunSuite {
private val grapher = Grapher(ConfigFactory.load())
private def parseUri(uri: String): GraphConfig = {
grapher.toGraphConfig(Uri(uri))
}
test("simple expr") {
val cfg = parseUri("/api/v1/graph?q=name,foo,:eq,:sum")
assertEquals(cfg.exprs, List(StyleExpr(DataExpr.Sum(Query.Equal("name", "foo")), Map.empty)))
}
test("empty title") {
val cfg = parseUri("/api/v1/graph?q=name,foo,:eq,:sum&title=")
assertEquals(cfg.flags.title, None)
}
test("with title") {
val cfg = parseUri("/api/v1/graph?q=name,foo,:eq,:sum&title=foo")
assertEquals(cfg.flags.title, Some("foo"))
}
test("empty ylabel") {
val cfg = parseUri("/api/v1/graph?q=name,foo,:eq,:sum&ylabel=")
assertEquals(cfg.flags.axes(0).ylabel, None)
}
test("with ylabel") {
val cfg = parseUri("/api/v1/graph?q=name,foo,:eq,:sum&ylabel=foo")
assertEquals(cfg.flags.axes(0).ylabel, Some("foo"))
}
test("empty ylabel.1") {
val cfg = parseUri("/api/v1/graph?q=name,foo,:eq,:sum&ylabel.1=")
assertEquals(cfg.flags.axes(1).ylabel, None)
}
test("empty ylabel.1 with ylabel") {
val cfg = parseUri("/api/v1/graph?q=name,foo,:eq,:sum&ylabel.1=&ylabel=foo")
assertEquals(cfg.flags.axes(1).ylabel, None)
}
test("lower bound") {
val cfg = parseUri("/api/v1/graph?q=name,foo,:eq,:sum&l=0")
assertEquals(cfg.flags.axes(0).newPlotDef().lower, PlotBound.Explicit(0.0))
}
test("lower bound auto-data") {
val cfg = parseUri("/api/v1/graph?q=name,foo,:eq,:sum&l=auto-data")
assertEquals(cfg.flags.axes(0).newPlotDef().lower, PlotBound.AutoData)
}
test("lower bound auto-style") {
val cfg = parseUri("/api/v1/graph?q=name,foo,:eq,:sum&l=auto-style")
assertEquals(cfg.flags.axes(0).newPlotDef().lower, PlotBound.AutoStyle)
}
test("lower bound default") {
val cfg = parseUri("/api/v1/graph?q=name,foo,:eq,:sum")
assertEquals(cfg.flags.axes(0).newPlotDef().lower, PlotBound.AutoStyle)
}
}
| Netflix/atlas | atlas-eval/src/test/scala/com/netflix/atlas/eval/graph/GraphUriSuite.scala | Scala | apache-2.0 | 2,927 |
package us.theatr.akka.quartz
import org.specs2.execute._
import org.specs2.mutable._
import akka.testkit.TestActorRef
import akka.actor.{Props, ActorSystem, Actor}
import akka.util.Timeout
import akka.pattern.ask
import scala.concurrent.duration.Duration
import scala.util.{Success}
class QuartzActor$Test extends Specification {
object SpecActors {
case class Tickle(id: Int)
case class PopTickle()
class RecvActor extends Actor {
var lastMsg : Option[Tickle] = None
def receive = {
case a : Tickle => lastMsg = Some(a)
case PopTickle() => {
context.sender ! lastMsg
lastMsg = None
}
case c => println("Unknown message on the recvactor!!" + c)
}
}
}
def withSystem(b : ActorSystem => ResultLike) = {
implicit val system = ActorSystem("GAT")
try {
b(system).toResult
} finally {
system.shutdown()
}
}
"Basic single actors should" should {
implicit val timeout = Timeout(Duration(5, "seconds"))
"add a cron job" in {
withSystem { implicit system =>
val ar = TestActorRef(new QuartzActor)
val recv = TestActorRef(new SpecActors.RecvActor)
val f = (ar ? AddCronSchedule(recv, "* * * * * ?", SpecActors.Tickle(100), true))
f.value.get must beLike {
case Success(t : AddCronScheduleResult) => ok
}
Thread.sleep(5000)
(recv ? SpecActors.PopTickle()).value.get must beLike {
case Success(Some(SpecActors.Tickle(100))) => ok
}
}
}
"add a cron job with open spigot" in {
withSystem { implicit system =>
val ar = TestActorRef(new QuartzActor)
val recv = TestActorRef(new SpecActors.RecvActor)
ar ? AddCronSchedule(recv, "* * * * * ?", SpecActors.Tickle(150), true, new Spigot(){val open = true})
Thread.sleep(5000)
(recv ? SpecActors.PopTickle()).value.get must beLike {
case Success(Some(SpecActors.Tickle(150))) => ok
}
}
}
"add a cron job with closed spigot" in {
withSystem { implicit system =>
val ar = TestActorRef(new QuartzActor)
val recv = TestActorRef(new SpecActors.RecvActor)
val f = (ar ? AddCronSchedule(recv, "* * * * * ?", SpecActors.Tickle(100), true, new Spigot(){val open = false}))
f.value.get must beLike {
case Success(t : AddCronScheduleResult) => ok
}
Thread.sleep(3000)
(recv ? SpecActors.PopTickle()).value.get must beLike {
case Success(None) => ok
}
}
}
"add then cancel messages" in {
withSystem { implicit system =>
val ar = TestActorRef(new QuartzActor)
val recv = TestActorRef(new SpecActors.RecvActor)
val d = ar ? AddCronSchedule(recv, "4 4 * * * ?", SpecActors.Tickle(200), true)
val cancel = d.value.get match {
case Success(AddCronScheduleSuccess(cancel)) => cancel
}
cancel.cancel()
Thread.sleep(100)
cancel.isCancelled must beEqualTo(true)
}
}
"fail with invalid cron expressions" in {
withSystem { implicit system =>
val ar = TestActorRef(new QuartzActor)
val recv = TestActorRef(new SpecActors.RecvActor)
(ar ? AddCronSchedule(recv, "clearly invalid", SpecActors.Tickle(300), true)).value.get must beLike {
case Success(AddCronScheduleFailure(e)) => ok
}
}
}
}
}
| theatrus/akka-quartz | src/test/scala/us/theatr/akka/quartz/QuartzActor$Test.scala | Scala | apache-2.0 | 3,202 |
package pl.pholda.malpompaaligxilo.i18n
object EmptytTranslationsJSTest extends EmptyTranslationsTest
| pholda/MalpompaAligxilo | core/js/src/test/scala/pl/pholda/malpompaaligxilo/i18n/EmptytTranslationsJSTest.scala | Scala | gpl-3.0 | 103 |
import scala.io.StdIn
object TreasureHunting {
def main(args: Array[String]): Unit = {
val Array(x, y) = StdIn.readLine().split(" ").map(_.toDouble)
val Array(a, b) = StdIn.readLine().split(" ").map(_.toDouble)
val k = ((a * x) + (b * y)) / (math.pow(a, 2)+math.pow(b, 2))
val n = ((a * y) - (b * x)) / (math.pow(a, 2)+math.pow(b, 2))
println("%.12f".format(k))
println("%.12f".format(n))
}
}
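// The two printed values come from decomposing the point (x, y) in the orthogonal basis
// {(a, b), (-b, a)}:
//   (x, y) = k * (a, b) + n * (-b, a)
// Solving that 2x2 system gives k = (a*x + b*y) / (a^2 + b^2) and n = (a*y - b*x) / (a^2 + b^2),
// which is exactly what main computes.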
| everyevery/programming_study | hackerrank/contest/2016w23/TreasureHunting/TreasureHunting.scala | Scala | mit | 437 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package utils
import com.github.tomakehurst.wiremock.WireMockServer
import com.github.tomakehurst.wiremock.client.WireMock
import com.github.tomakehurst.wiremock.client.WireMock._
import com.github.tomakehurst.wiremock.core.WireMockConfiguration
import com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig
import iht.connector.IhtConnector
import org.scalatestplus.play.guice.GuiceOneAppPerSuite
import play.api.libs.ws.WSClient
object WiremockHelper {
val wiremockPort = 11111
val wiremockHost = "localhost"
val url = s"http://$wiremockHost:$wiremockPort"
}
trait WiremockHelper {
self: GuiceOneAppPerSuite =>
import WiremockHelper._
val wmConfig: WireMockConfiguration = wireMockConfig().port(wiremockPort)
val wireMockServer = new WireMockServer(wmConfig)
def startWiremock() = {
wireMockServer.start()
WireMock.configureFor(wiremockHost, wiremockPort)
}
def stopWiremock() = wireMockServer.stop()
def resetWiremock() = WireMock.reset()
lazy val ws: WSClient = app.injector.instanceOf(classOf[WSClient])
lazy val injectedIhtConnector: IhtConnector = app.injector.instanceOf(classOf[IhtConnector])
def stubGet(url: String, status: Integer, body: String) =
stubFor(get(urlMatching(url))
.willReturn(
aResponse().
withStatus(status).
withBody(body)
)
)
def stubPost(url: String, status: Integer, responseBody: String) =
stubFor(post(urlMatching(url))
.willReturn(
aResponse().
withStatus(status).
withBody(responseBody)
)
)
def stubPut(url: String, status: Integer, responseBody: String) =
stubFor(put(urlMatching(url))
.willReturn(
aResponse().
withStatus(status).
withBody(responseBody)
)
)
def stubPatch(url: String, status: Integer, responseBody: String) =
stubFor(patch(urlMatching(url))
.willReturn(
aResponse().
withStatus(status).
withBody(responseBody)
)
)
def stubDelete(url: String, status: Integer, responseBody: String) =
stubFor(delete(urlMatching(url))
.willReturn(
aResponse().
withStatus(status).
withBody(responseBody)
)
)
}
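// Illustrative use from a test that mixes in this trait (hypothetical endpoint and body, not
// taken from the actual test suite):
//
//   startWiremock()
//   stubGet("/some/endpoint/.*", 200, """{"status": "OK"}""")
//   // ... exercise injectedIhtConnector against the stubbed endpoint ...
//   stopWiremock()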
| hmrc/iht-frontend | it/utils/WiremockHelper.scala | Scala | apache-2.0 | 2,867 |
package com.aristocrat.mandrill.services
import com.aristocrat.mandrill.MandrillClient
import com.google.inject.{Inject, Singleton}
import com.aristocrat.mandrill.requests.Tags._
import com.twitter.finagle.httpx.Response
import com.twitter.util.Future
@Singleton
class TagsService @Inject()(client: MandrillClient) {
def list(body: List): Future[Response] = client.post("tags/list.json", body)
def delete(body: Delete): Future[Response] = client.post("tags/delete.json", body)
def info(body: Info): Future[Response] = client.post("tags/info.json", body)
def timeSeries(body: TimeSeries): Future[Response] = client.post("tags/time-series.json", body)
def allTimeSeries(body: AllTimeSeries): Future[Response] = client.post("tags/all-time-series.json", body)
}
| aristocratic/mandrill | src/main/scala/com/aristocrat/mandrill/services/TagsService.scala | Scala | mit | 786 |
/**
* Copyright 2015, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.doperations.examples
import io.deepsense.deeplang.doperations.{Split, SplitModeChoice}
class SplitExample extends AbstractOperationExample[Split] {
override def dOperation: Split =
new Split()
.setSplitMode(
SplitModeChoice.Random()
.setSeed(0)
.setSplitRatio(0.2))
override def fileNames: Seq[String] = Seq("example_city_beds_price")
}
| deepsense-io/seahorse-workflow-executor | deeplang/src/it/scala/io/deepsense/deeplang/doperations/examples/SplitExample.scala | Scala | apache-2.0 | 1,012 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.nio.charset.StandardCharsets
import java.sql.{Date, Timestamp}
import java.util.TimeZone
import scala.util.Random
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenFallback
import org.apache.spark.sql.catalyst.plans.logical.OneRowRelation
import org.apache.spark.sql.catalyst.util.DateTimeTestUtils
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
/**
* Test suite for functions in [[org.apache.spark.sql.functions]].
*/
class DataFrameFunctionsSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("array with column name") {
val df = Seq((0, 1)).toDF("a", "b")
val row = df.select(array("a", "b")).first()
val expectedType = ArrayType(IntegerType, containsNull = false)
assert(row.schema(0).dataType === expectedType)
assert(row.getAs[Seq[Int]](0) === Seq(0, 1))
}
test("array with column expression") {
val df = Seq((0, 1)).toDF("a", "b")
val row = df.select(array(col("a"), col("b") + col("b"))).first()
val expectedType = ArrayType(IntegerType, containsNull = false)
assert(row.schema(0).dataType === expectedType)
assert(row.getSeq[Int](0) === Seq(0, 2))
}
test("map with column expressions") {
val df = Seq(1 -> "a").toDF("a", "b")
val row = df.select(map($"a" + 1, $"b")).first()
val expectedType = MapType(IntegerType, StringType, valueContainsNull = true)
assert(row.schema(0).dataType === expectedType)
assert(row.getMap[Int, String](0) === Map(2 -> "a"))
}
test("map with arrays") {
val df1 = Seq((Seq(1, 2), Seq("a", "b"))).toDF("k", "v")
val expectedType = MapType(IntegerType, StringType, valueContainsNull = true)
val row = df1.select(map_from_arrays($"k", $"v")).first()
assert(row.schema(0).dataType === expectedType)
assert(row.getMap[Int, String](0) === Map(1 -> "a", 2 -> "b"))
checkAnswer(df1.select(map_from_arrays($"k", $"v")), Seq(Row(Map(1 -> "a", 2 -> "b"))))
val df2 = Seq((Seq(1, 2), Seq(null, "b"))).toDF("k", "v")
checkAnswer(df2.select(map_from_arrays($"k", $"v")), Seq(Row(Map(1 -> null, 2 -> "b"))))
val df3 = Seq((null, null)).toDF("k", "v")
checkAnswer(df3.select(map_from_arrays($"k", $"v")), Seq(Row(null)))
val df4 = Seq((1, "a")).toDF("k", "v")
intercept[AnalysisException] {
df4.select(map_from_arrays($"k", $"v"))
}
val df5 = Seq((Seq("a", null), Seq(1, 2))).toDF("k", "v")
val msg1 = intercept[Exception] {
df5.select(map_from_arrays($"k", $"v")).collect
}.getMessage
assert(msg1.contains("Cannot use null as map key"))
val df6 = Seq((Seq(1, 2), Seq("a"))).toDF("k", "v")
val msg2 = intercept[Exception] {
df6.select(map_from_arrays($"k", $"v")).collect
}.getMessage
assert(msg2.contains("The key array and value array of MapData must have the same length"))
}
test("struct with column name") {
val df = Seq((1, "str")).toDF("a", "b")
val row = df.select(struct("a", "b")).first()
val expectedType = StructType(Seq(
StructField("a", IntegerType, nullable = false),
StructField("b", StringType)
))
assert(row.schema(0).dataType === expectedType)
assert(row.getAs[Row](0) === Row(1, "str"))
}
test("struct with column expression") {
val df = Seq((1, "str")).toDF("a", "b")
val row = df.select(struct((col("a") * 2).as("c"), col("b"))).first()
val expectedType = StructType(Seq(
StructField("c", IntegerType, nullable = false),
StructField("b", StringType)
))
assert(row.schema(0).dataType === expectedType)
assert(row.getAs[Row](0) === Row(2, "str"))
}
test("struct with column expression to be automatically named") {
val df = Seq((1, "str")).toDF("a", "b")
val result = df.select(struct((col("a") * 2), col("b")))
val expectedType = StructType(Seq(
StructField("col1", IntegerType, nullable = false),
StructField("b", StringType)
))
assert(result.first.schema(0).dataType === expectedType)
checkAnswer(result, Row(Row(2, "str")))
}
test("struct with literal columns") {
val df = Seq((1, "str1"), (2, "str2")).toDF("a", "b")
val result = df.select(struct((col("a") * 2), lit(5.0)))
val expectedType = StructType(Seq(
StructField("col1", IntegerType, nullable = false),
StructField("col2", DoubleType, nullable = false)
))
assert(result.first.schema(0).dataType === expectedType)
checkAnswer(result, Seq(Row(Row(2, 5.0)), Row(Row(4, 5.0))))
}
test("struct with all literal columns") {
val df = Seq((1, "str1"), (2, "str2")).toDF("a", "b")
val result = df.select(struct(lit("v"), lit(5.0)))
val expectedType = StructType(Seq(
StructField("col1", StringType, nullable = false),
StructField("col2", DoubleType, nullable = false)
))
assert(result.first.schema(0).dataType === expectedType)
checkAnswer(result, Seq(Row(Row("v", 5.0)), Row(Row("v", 5.0))))
}
test("constant functions") {
checkAnswer(
sql("SELECT E()"),
Row(scala.math.E)
)
checkAnswer(
sql("SELECT PI()"),
Row(scala.math.Pi)
)
}
test("bitwiseNOT") {
checkAnswer(
testData2.select(bitwiseNOT($"a")),
testData2.collect().toSeq.map(r => Row(~r.getInt(0))))
}
test("bin") {
val df = Seq[(Integer, Integer)]((12, null)).toDF("a", "b")
checkAnswer(
df.select(bin("a"), bin("b")),
Row("1100", null))
checkAnswer(
df.selectExpr("bin(a)", "bin(b)"),
Row("1100", null))
}
test("if function") {
val df = Seq((1, 2)).toDF("a", "b")
checkAnswer(
df.selectExpr("if(a = 1, 'one', 'not_one')", "if(b = 1, 'one', 'not_one')"),
Row("one", "not_one"))
}
test("misc md5 function") {
val df = Seq(("ABC", Array[Byte](1, 2, 3, 4, 5, 6))).toDF("a", "b")
checkAnswer(
df.select(md5($"a"), md5($"b")),
Row("902fbdd2b1df0c4f70b4a5d23525e932", "6ac1e56bc78f031059be7be854522c4c"))
checkAnswer(
df.selectExpr("md5(a)", "md5(b)"),
Row("902fbdd2b1df0c4f70b4a5d23525e932", "6ac1e56bc78f031059be7be854522c4c"))
}
test("misc sha1 function") {
val df = Seq(("ABC", "ABC".getBytes(StandardCharsets.UTF_8))).toDF("a", "b")
checkAnswer(
df.select(sha1($"a"), sha1($"b")),
Row("3c01bdbb26f358bab27f267924aa2c9a03fcfdb8", "3c01bdbb26f358bab27f267924aa2c9a03fcfdb8"))
val dfEmpty = Seq(("", "".getBytes(StandardCharsets.UTF_8))).toDF("a", "b")
checkAnswer(
dfEmpty.selectExpr("sha1(a)", "sha1(b)"),
Row("da39a3ee5e6b4b0d3255bfef95601890afd80709", "da39a3ee5e6b4b0d3255bfef95601890afd80709"))
}
test("misc sha2 function") {
val df = Seq(("ABC", Array[Byte](1, 2, 3, 4, 5, 6))).toDF("a", "b")
checkAnswer(
df.select(sha2($"a", 256), sha2($"b", 256)),
Row("b5d4045c3f466fa91fe2cc6abe79232a1a57cdf104f7a26e716e0a1e2789df78",
"7192385c3c0605de55bb9476ce1d90748190ecb32a8eed7f5207b30cf6a1fe89"))
checkAnswer(
df.selectExpr("sha2(a, 256)", "sha2(b, 256)"),
Row("b5d4045c3f466fa91fe2cc6abe79232a1a57cdf104f7a26e716e0a1e2789df78",
"7192385c3c0605de55bb9476ce1d90748190ecb32a8eed7f5207b30cf6a1fe89"))
intercept[IllegalArgumentException] {
df.select(sha2($"a", 1024))
}
}
test("misc crc32 function") {
val df = Seq(("ABC", Array[Byte](1, 2, 3, 4, 5, 6))).toDF("a", "b")
checkAnswer(
df.select(crc32($"a"), crc32($"b")),
Row(2743272264L, 2180413220L))
checkAnswer(
df.selectExpr("crc32(a)", "crc32(b)"),
Row(2743272264L, 2180413220L))
}
test("string function find_in_set") {
val df = Seq(("abc,b,ab,c,def", "abc,b,ab,c,def")).toDF("a", "b")
checkAnswer(
df.selectExpr("find_in_set('ab', a)", "find_in_set('x', b)"),
Row(3, 0))
}
test("conditional function: least") {
checkAnswer(
testData2.select(least(lit(-1), lit(0), col("a"), col("b"))).limit(1),
Row(-1)
)
checkAnswer(
sql("SELECT least(a, 2) as l from testData2 order by l"),
Seq(Row(1), Row(1), Row(2), Row(2), Row(2), Row(2))
)
}
test("conditional function: greatest") {
checkAnswer(
testData2.select(greatest(lit(2), lit(3), col("a"), col("b"))).limit(1),
Row(3)
)
checkAnswer(
sql("SELECT greatest(a, 2) as g from testData2 order by g"),
Seq(Row(2), Row(2), Row(2), Row(2), Row(3), Row(3))
)
}
test("pmod") {
val intData = Seq((7, 3), (-7, 3)).toDF("a", "b")
checkAnswer(
intData.select(pmod('a, 'b)),
Seq(Row(1), Row(2))
)
checkAnswer(
intData.select(pmod('a, lit(3))),
Seq(Row(1), Row(2))
)
checkAnswer(
intData.select(pmod(lit(-7), 'b)),
Seq(Row(2), Row(2))
)
checkAnswer(
intData.selectExpr("pmod(a, b)"),
Seq(Row(1), Row(2))
)
checkAnswer(
intData.selectExpr("pmod(a, 3)"),
Seq(Row(1), Row(2))
)
checkAnswer(
intData.selectExpr("pmod(-7, b)"),
Seq(Row(2), Row(2))
)
val doubleData = Seq((7.2, 4.1)).toDF("a", "b")
checkAnswer(
doubleData.select(pmod('a, 'b)),
Seq(Row(3.1000000000000005)) // same as hive
)
checkAnswer(
doubleData.select(pmod(lit(2), lit(Int.MaxValue))),
Seq(Row(2))
)
}
test("sort_array/array_sort functions") {
val df = Seq(
(Array[Int](2, 1, 3), Array("b", "c", "a")),
(Array.empty[Int], Array.empty[String]),
(null, null)
).toDF("a", "b")
checkAnswer(
df.select(sort_array($"a"), sort_array($"b")),
Seq(
Row(Seq(1, 2, 3), Seq("a", "b", "c")),
Row(Seq.empty[Int], Seq.empty[String]),
Row(null, null))
)
checkAnswer(
df.select(sort_array($"a", false), sort_array($"b", false)),
Seq(
Row(Seq(3, 2, 1), Seq("c", "b", "a")),
Row(Seq.empty[Int], Seq.empty[String]),
Row(null, null))
)
checkAnswer(
df.selectExpr("sort_array(a)", "sort_array(b)"),
Seq(
Row(Seq(1, 2, 3), Seq("a", "b", "c")),
Row(Seq.empty[Int], Seq.empty[String]),
Row(null, null))
)
checkAnswer(
df.selectExpr("sort_array(a, true)", "sort_array(b, false)"),
Seq(
Row(Seq(1, 2, 3), Seq("c", "b", "a")),
Row(Seq.empty[Int], Seq.empty[String]),
Row(null, null))
)
val df2 = Seq((Array[Array[Int]](Array(2), Array(1), Array(2, 4), null), "x")).toDF("a", "b")
checkAnswer(
df2.selectExpr("sort_array(a, true)", "sort_array(a, false)"),
Seq(
Row(
Seq[Seq[Int]](null, Seq(1), Seq(2), Seq(2, 4)),
Seq[Seq[Int]](Seq(2, 4), Seq(2), Seq(1), null)))
)
val df3 = Seq(("xxx", "x")).toDF("a", "b")
assert(intercept[AnalysisException] {
df3.selectExpr("sort_array(a)").collect()
}.getMessage().contains("only supports array input"))
checkAnswer(
df.select(array_sort($"a"), array_sort($"b")),
Seq(
Row(Seq(1, 2, 3), Seq("a", "b", "c")),
Row(Seq.empty[Int], Seq.empty[String]),
Row(null, null))
)
checkAnswer(
df.selectExpr("array_sort(a)", "array_sort(b)"),
Seq(
Row(Seq(1, 2, 3), Seq("a", "b", "c")),
Row(Seq.empty[Int], Seq.empty[String]),
Row(null, null))
)
checkAnswer(
df2.selectExpr("array_sort(a)"),
Seq(Row(Seq[Seq[Int]](Seq(1), Seq(2), Seq(2, 4), null)))
)
assert(intercept[AnalysisException] {
df3.selectExpr("array_sort(a)").collect()
}.getMessage().contains("only supports array input"))
}
def testSizeOfArray(sizeOfNull: Any): Unit = {
val df = Seq(
(Seq[Int](1, 2), "x"),
(Seq[Int](), "y"),
(Seq[Int](1, 2, 3), "z"),
(null, "empty")
).toDF("a", "b")
checkAnswer(df.select(size($"a")), Seq(Row(2), Row(0), Row(3), Row(sizeOfNull)))
checkAnswer(df.selectExpr("size(a)"), Seq(Row(2), Row(0), Row(3), Row(sizeOfNull)))
checkAnswer(df.selectExpr("cardinality(a)"), Seq(Row(2L), Row(0L), Row(3L), Row(sizeOfNull)))
}
test("array size function - legacy") {
withSQLConf(SQLConf.LEGACY_SIZE_OF_NULL.key -> "true") {
testSizeOfArray(sizeOfNull = -1)
}
}
test("array size function") {
withSQLConf(SQLConf.LEGACY_SIZE_OF_NULL.key -> "false") {
testSizeOfArray(sizeOfNull = null)
}
}
test("dataframe arrays_zip function") {
val df1 = Seq((Seq(9001, 9002, 9003), Seq(4, 5, 6))).toDF("val1", "val2")
val df2 = Seq((Seq("a", "b"), Seq(true, false), Seq(10, 11))).toDF("val1", "val2", "val3")
val df3 = Seq((Seq("a", "b"), Seq(4, 5, 6))).toDF("val1", "val2")
val df4 = Seq((Seq("a", "b", null), Seq(4L))).toDF("val1", "val2")
val df5 = Seq((Seq(-1), Seq(null), Seq(), Seq(null, null))).toDF("val1", "val2", "val3", "val4")
val df6 = Seq((Seq(192.toByte, 256.toByte), Seq(1.1), Seq(), Seq(null, null)))
.toDF("v1", "v2", "v3", "v4")
val df7 = Seq((Seq(Seq(1, 2, 3), Seq(4, 5)), Seq(1.1, 2.2))).toDF("v1", "v2")
val df8 = Seq((Seq(Array[Byte](1.toByte, 5.toByte)), Seq(null))).toDF("v1", "v2")
val expectedValue1 = Row(Seq(Row(9001, 4), Row(9002, 5), Row(9003, 6)))
checkAnswer(df1.select(arrays_zip($"val1", $"val2")), expectedValue1)
checkAnswer(df1.selectExpr("arrays_zip(val1, val2)"), expectedValue1)
val expectedValue2 = Row(Seq(Row("a", true, 10), Row("b", false, 11)))
checkAnswer(df2.select(arrays_zip($"val1", $"val2", $"val3")), expectedValue2)
checkAnswer(df2.selectExpr("arrays_zip(val1, val2, val3)"), expectedValue2)
val expectedValue3 = Row(Seq(Row("a", 4), Row("b", 5), Row(null, 6)))
checkAnswer(df3.select(arrays_zip($"val1", $"val2")), expectedValue3)
checkAnswer(df3.selectExpr("arrays_zip(val1, val2)"), expectedValue3)
val expectedValue4 = Row(Seq(Row("a", 4L), Row("b", null), Row(null, null)))
checkAnswer(df4.select(arrays_zip($"val1", $"val2")), expectedValue4)
checkAnswer(df4.selectExpr("arrays_zip(val1, val2)"), expectedValue4)
val expectedValue5 = Row(Seq(Row(-1, null, null, null), Row(null, null, null, null)))
checkAnswer(df5.select(arrays_zip($"val1", $"val2", $"val3", $"val4")), expectedValue5)
checkAnswer(df5.selectExpr("arrays_zip(val1, val2, val3, val4)"), expectedValue5)
val expectedValue6 = Row(Seq(
Row(192.toByte, 1.1, null, null), Row(256.toByte, null, null, null)))
checkAnswer(df6.select(arrays_zip($"v1", $"v2", $"v3", $"v4")), expectedValue6)
checkAnswer(df6.selectExpr("arrays_zip(v1, v2, v3, v4)"), expectedValue6)
val expectedValue7 = Row(Seq(
Row(Seq(1, 2, 3), 1.1), Row(Seq(4, 5), 2.2)))
checkAnswer(df7.select(arrays_zip($"v1", $"v2")), expectedValue7)
checkAnswer(df7.selectExpr("arrays_zip(v1, v2)"), expectedValue7)
val expectedValue8 = Row(Seq(
Row(Array[Byte](1.toByte, 5.toByte), null)))
checkAnswer(df8.select(arrays_zip($"v1", $"v2")), expectedValue8)
checkAnswer(df8.selectExpr("arrays_zip(v1, v2)"), expectedValue8)
}
testWithWholeStageCodegenOnAndOff("SPARK-24633: arrays_zip splits input " +
"processing correctly") { _ =>
val df = spark.range(1)
val exprs = (0 to 5).map(x => array($"id" + lit(x)))
checkAnswer(df.select(arrays_zip(exprs: _*)),
Row(Seq(Row(0, 1, 2, 3, 4, 5))))
}
def testSizeOfMap(sizeOfNull: Any): Unit = {
val df = Seq(
(Map[Int, Int](1 -> 1, 2 -> 2), "x"),
(Map[Int, Int](), "y"),
(Map[Int, Int](1 -> 1, 2 -> 2, 3 -> 3), "z"),
(null, "empty")
).toDF("a", "b")
checkAnswer(df.select(size($"a")), Seq(Row(2), Row(0), Row(3), Row(sizeOfNull)))
checkAnswer(df.selectExpr("size(a)"), Seq(Row(2), Row(0), Row(3), Row(sizeOfNull)))
}
test("map size function - legacy") {
withSQLConf(SQLConf.LEGACY_SIZE_OF_NULL.key -> "true") {
testSizeOfMap(sizeOfNull = -1: Int)
}
}
test("map size function") {
withSQLConf(SQLConf.LEGACY_SIZE_OF_NULL.key -> "false") {
testSizeOfMap(sizeOfNull = null)
}
}
test("map_keys/map_values function") {
val df = Seq(
(Map[Int, Int](1 -> 100, 2 -> 200), "x"),
(Map[Int, Int](), "y"),
(Map[Int, Int](1 -> 100, 2 -> 200, 3 -> 300), "z")
).toDF("a", "b")
checkAnswer(
df.selectExpr("map_keys(a)"),
Seq(Row(Seq(1, 2)), Row(Seq.empty), Row(Seq(1, 2, 3)))
)
checkAnswer(
df.selectExpr("map_values(a)"),
Seq(Row(Seq(100, 200)), Row(Seq.empty), Row(Seq(100, 200, 300)))
)
}
test("map_entries") {
// Primitive-type elements
val idf = Seq(
Map[Int, Int](1 -> 100, 2 -> 200, 3 -> 300),
Map[Int, Int](),
null
).toDF("m")
val iExpected = Seq(
Row(Seq(Row(1, 100), Row(2, 200), Row(3, 300))),
Row(Seq.empty),
Row(null)
)
def testPrimitiveType(): Unit = {
checkAnswer(idf.select(map_entries('m)), iExpected)
checkAnswer(idf.selectExpr("map_entries(m)"), iExpected)
checkAnswer(idf.selectExpr("map_entries(map(1, null, 2, null))"),
Seq.fill(iExpected.length)(Row(Seq(Row(1, null), Row(2, null)))))
}
// Test with local relation, the Project will be evaluated without codegen
testPrimitiveType()
// Test with cached relation, the Project will be evaluated with codegen
idf.cache()
testPrimitiveType()
// Non-primitive-type elements
val sdf = Seq(
Map[String, String]("a" -> "f", "b" -> "o", "c" -> "o"),
Map[String, String]("a" -> null, "b" -> null),
Map[String, String](),
null
).toDF("m")
val sExpected = Seq(
Row(Seq(Row("a", "f"), Row("b", "o"), Row("c", "o"))),
Row(Seq(Row("a", null), Row("b", null))),
Row(Seq.empty),
Row(null)
)
def testNonPrimitiveType(): Unit = {
checkAnswer(sdf.select(map_entries('m)), sExpected)
checkAnswer(sdf.selectExpr("map_entries(m)"), sExpected)
}
// Test with local relation, the Project will be evaluated without codegen
testNonPrimitiveType()
// Test with cached relation, the Project will be evaluated with codegen
sdf.cache()
testNonPrimitiveType()
}
test("map_concat function") {
val df1 = Seq(
(Map[Int, Int](1 -> 100, 2 -> 200), Map[Int, Int](3 -> 300, 4 -> 400)),
(Map[Int, Int](1 -> 100, 2 -> 200), Map[Int, Int](3 -> 300, 1 -> 400)),
(null, Map[Int, Int](3 -> 300, 4 -> 400))
).toDF("map1", "map2")
val expected1a = Seq(
Row(Map(1 -> 100, 2 -> 200, 3 -> 300, 4 -> 400)),
Row(Map(1 -> 400, 2 -> 200, 3 -> 300)),
Row(null)
)
checkAnswer(df1.selectExpr("map_concat(map1, map2)"), expected1a)
checkAnswer(df1.select(map_concat('map1, 'map2)), expected1a)
val expected1b = Seq(
Row(Map(1 -> 100, 2 -> 200)),
Row(Map(1 -> 100, 2 -> 200)),
Row(null)
)
checkAnswer(df1.selectExpr("map_concat(map1)"), expected1b)
checkAnswer(df1.select(map_concat('map1)), expected1b)
val df2 = Seq(
(
Map[Array[Int], Int](Array(1) -> 100, Array(2) -> 200),
Map[String, Int]("3" -> 300, "4" -> 400)
)
).toDF("map1", "map2")
val expected2 = Seq(Row(Map()))
checkAnswer(df2.selectExpr("map_concat()"), expected2)
checkAnswer(df2.select(map_concat()), expected2)
val df3 = {
val schema = StructType(
StructField("map1", MapType(StringType, IntegerType, true), false) ::
StructField("map2", MapType(StringType, IntegerType, false), false) :: Nil
)
val data = Seq(
Row(Map[String, Any]("a" -> 1, "b" -> null), Map[String, Any]("c" -> 3, "d" -> 4)),
Row(Map[String, Any]("a" -> 1, "b" -> 2), Map[String, Any]("c" -> 3, "d" -> 4))
)
spark.createDataFrame(spark.sparkContext.parallelize(data), schema)
}
val expected3 = Seq(
Row(Map[String, Any]("a" -> 1, "b" -> null, "c" -> 3, "d" -> 4)),
Row(Map[String, Any]("a" -> 1, "b" -> 2, "c" -> 3, "d" -> 4))
)
checkAnswer(df3.selectExpr("map_concat(map1, map2)"), expected3)
checkAnswer(df3.select(map_concat('map1, 'map2)), expected3)
val expectedMessage1 = "input to function map_concat should all be the same type"
assert(intercept[AnalysisException] {
df2.selectExpr("map_concat(map1, map2)").collect()
}.getMessage().contains(expectedMessage1))
assert(intercept[AnalysisException] {
df2.select(map_concat('map1, 'map2)).collect()
}.getMessage().contains(expectedMessage1))
val expectedMessage2 = "input to function map_concat should all be of type map"
assert(intercept[AnalysisException] {
df2.selectExpr("map_concat(map1, 12)").collect()
}.getMessage().contains(expectedMessage2))
assert(intercept[AnalysisException] {
df2.select(map_concat('map1, lit(12))).collect()
}.getMessage().contains(expectedMessage2))
}
test("map_from_entries function") {
// Test cases with primitive-type keys and values
val idf = Seq(
Seq((1, 10), (2, 20), (3, 10)),
Seq((1, 10), null, (2, 20)),
Seq.empty,
null
).toDF("a")
val iExpected = Seq(
Row(Map(1 -> 10, 2 -> 20, 3 -> 10)),
Row(null),
Row(Map.empty),
Row(null))
def testPrimitiveType(): Unit = {
checkAnswer(idf.select(map_from_entries('a)), iExpected)
checkAnswer(idf.selectExpr("map_from_entries(a)"), iExpected)
checkAnswer(idf.selectExpr("map_from_entries(array(struct(1, null), struct(2, null)))"),
Seq.fill(iExpected.length)(Row(Map(1 -> null, 2 -> null))))
}
// Test with local relation, the Project will be evaluated without codegen
testPrimitiveType()
// Test with cached relation, the Project will be evaluated with codegen
idf.cache()
testPrimitiveType()
// Test cases with non-primitive-type keys and values
val sdf = Seq(
Seq(("a", "aa"), ("b", "bb"), ("c", "aa")),
Seq(("a", "aa"), null, ("b", "bb")),
Seq(("a", null), ("b", null)),
Seq.empty,
null
).toDF("a")
val sExpected = Seq(
Row(Map("a" -> "aa", "b" -> "bb", "c" -> "aa")),
Row(null),
Row(Map("a" -> null, "b" -> null)),
Row(Map.empty),
Row(null))
def testNonPrimitiveType(): Unit = {
checkAnswer(sdf.select(map_from_entries('a)), sExpected)
checkAnswer(sdf.selectExpr("map_from_entries(a)"), sExpected)
}
// Test with local relation, the Project will be evaluated without codegen
testNonPrimitiveType()
// Test with cached relation, the Project will be evaluated with codegen
sdf.cache()
testNonPrimitiveType()
}
test("array contains function") {
val df = Seq(
(Seq[Int](1, 2), "x", 1),
(Seq[Int](), "x", 1)
).toDF("a", "b", "c")
// Simple test cases
checkAnswer(
df.select(array_contains(df("a"), 1)),
Seq(Row(true), Row(false))
)
checkAnswer(
df.selectExpr("array_contains(a, 1)"),
Seq(Row(true), Row(false))
)
checkAnswer(
df.select(array_contains(df("a"), df("c"))),
Seq(Row(true), Row(false))
)
checkAnswer(
df.selectExpr("array_contains(a, c)"),
Seq(Row(true), Row(false))
)
    // In Hive, this errors because null has no type information
intercept[AnalysisException] {
df.select(array_contains(df("a"), null))
}
intercept[AnalysisException] {
df.selectExpr("array_contains(a, null)")
}
intercept[AnalysisException] {
df.selectExpr("array_contains(null, 1)")
}
checkAnswer(
df.selectExpr("array_contains(array(array(1), null)[0], 1)"),
Seq(Row(true), Row(true))
)
checkAnswer(
df.selectExpr("array_contains(array(1, null), array(1, null)[0])"),
Seq(Row(true), Row(true))
)
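    // With mixed numeric types the comparison is done after widening to double, so 1 matches
    // 1.0D but not 1.23D in the checks below.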
checkAnswer(
OneRowRelation().selectExpr("array_contains(array(1), 1.23D)"),
Seq(Row(false))
)
checkAnswer(
OneRowRelation().selectExpr("array_contains(array(1), 1.0D)"),
Seq(Row(true))
)
checkAnswer(
OneRowRelation().selectExpr("array_contains(array(1.0D), 1)"),
Seq(Row(true))
)
checkAnswer(
OneRowRelation().selectExpr("array_contains(array(1.23D), 1)"),
Seq(Row(false))
)
checkAnswer(
OneRowRelation().selectExpr("array_contains(array(array(1)), array(1.0D))"),
Seq(Row(true))
)
checkAnswer(
OneRowRelation().selectExpr("array_contains(array(array(1)), array(1.23D))"),
Seq(Row(false))
)
val e1 = intercept[AnalysisException] {
OneRowRelation().selectExpr("array_contains(array(1), .01234567890123456790123456780)")
}
val errorMsg1 =
s"""
|Input to function array_contains should have been array followed by a
|value with same element type, but it's [array<int>, decimal(29,29)].
""".stripMargin.replace("\\n", " ").trim()
assert(e1.message.contains(errorMsg1))
val e2 = intercept[AnalysisException] {
OneRowRelation().selectExpr("array_contains(array(1), 'foo')")
}
val errorMsg2 =
s"""
|Input to function array_contains should have been array followed by a
|value with same element type, but it's [array<int>, string].
""".stripMargin.replace("\\n", " ").trim()
assert(e2.message.contains(errorMsg2))
}
test("arrays_overlap function") {
val df = Seq(
(Seq[Option[Int]](Some(1), Some(2)), Seq[Option[Int]](Some(-1), Some(10))),
(Seq[Option[Int]](Some(1), Some(2)), Seq[Option[Int]](Some(-1), None)),
(Seq[Option[Int]](Some(3), Some(2)), Seq[Option[Int]](Some(1), Some(2)))
).toDF("a", "b")
val answer = Seq(Row(false), Row(null), Row(true))
checkAnswer(df.select(arrays_overlap(df("a"), df("b"))), answer)
checkAnswer(df.selectExpr("arrays_overlap(a, b)"), answer)
checkAnswer(
Seq((Seq(1, 2, 3), Seq(2.0, 2.5))).toDF("a", "b").selectExpr("arrays_overlap(a, b)"),
Row(true))
intercept[AnalysisException] {
sql("select arrays_overlap(array(1, 2, 3), array('a', 'b', 'c'))")
}
intercept[AnalysisException] {
sql("select arrays_overlap(null, null)")
}
intercept[AnalysisException] {
sql("select arrays_overlap(map(1, 2), map(3, 4))")
}
}
test("slice function") {
val df = Seq(
Seq(1, 2, 3),
Seq(4, 5)
).toDF("x")
val answer = Seq(Row(Seq(2, 3)), Row(Seq(5)))
checkAnswer(df.select(slice(df("x"), 2, 2)), answer)
checkAnswer(df.selectExpr("slice(x, 2, 2)"), answer)
val answerNegative = Seq(Row(Seq(3)), Row(Seq(5)))
checkAnswer(df.select(slice(df("x"), -1, 1)), answerNegative)
checkAnswer(df.selectExpr("slice(x, -1, 1)"), answerNegative)
}
test("array_join function") {
val df = Seq(
(Seq[String]("a", "b"), ","),
(Seq[String]("a", null, "b"), ","),
(Seq.empty[String], ",")
).toDF("x", "delimiter")
checkAnswer(
df.select(array_join(df("x"), ";")),
Seq(Row("a;b"), Row("a;b"), Row(""))
)
checkAnswer(
df.select(array_join(df("x"), ";", "NULL")),
Seq(Row("a;b"), Row("a;NULL;b"), Row(""))
)
checkAnswer(
df.selectExpr("array_join(x, delimiter)"),
Seq(Row("a,b"), Row("a,b"), Row("")))
checkAnswer(
df.selectExpr("array_join(x, delimiter, 'NULL')"),
Seq(Row("a,b"), Row("a,NULL,b"), Row("")))
val idf = Seq(Seq(1, 2, 3)).toDF("x")
checkAnswer(
idf.select(array_join(idf("x"), ", ")),
Seq(Row("1, 2, 3"))
)
checkAnswer(
idf.selectExpr("array_join(x, ', ')"),
Seq(Row("1, 2, 3"))
)
intercept[AnalysisException] {
idf.selectExpr("array_join(x, 1)")
}
intercept[AnalysisException] {
idf.selectExpr("array_join(x, ', ', 1)")
}
}
test("array_min function") {
val df = Seq(
Seq[Option[Int]](Some(1), Some(3), Some(2)),
Seq.empty[Option[Int]],
Seq[Option[Int]](None),
Seq[Option[Int]](None, Some(1), Some(-100))
).toDF("a")
val answer = Seq(Row(1), Row(null), Row(null), Row(-100))
checkAnswer(df.select(array_min(df("a"))), answer)
checkAnswer(df.selectExpr("array_min(a)"), answer)
}
test("array_max function") {
val df = Seq(
Seq[Option[Int]](Some(1), Some(3), Some(2)),
Seq.empty[Option[Int]],
Seq[Option[Int]](None),
Seq[Option[Int]](None, Some(1), Some(-100))
).toDF("a")
val answer = Seq(Row(3), Row(null), Row(null), Row(1))
checkAnswer(df.select(array_max(df("a"))), answer)
checkAnswer(df.selectExpr("array_max(a)"), answer)
}
test("sequence") {
checkAnswer(Seq((-2, 2)).toDF().select(sequence('_1, '_2)), Seq(Row(Array(-2, -1, 0, 1, 2))))
checkAnswer(Seq((7, 2, -2)).toDF().select(sequence('_1, '_2, '_3)), Seq(Row(Array(7, 5, 3))))
checkAnswer(
spark.sql("select sequence(" +
" cast('2018-01-01 00:00:00' as timestamp)" +
", cast('2018-01-02 00:00:00' as timestamp)" +
", interval 12 hours)"),
Seq(Row(Array(
Timestamp.valueOf("2018-01-01 00:00:00"),
Timestamp.valueOf("2018-01-01 12:00:00"),
Timestamp.valueOf("2018-01-02 00:00:00")))))
DateTimeTestUtils.withDefaultTimeZone(TimeZone.getTimeZone("UTC")) {
checkAnswer(
spark.sql("select sequence(" +
" cast('2018-01-01' as date)" +
", cast('2018-03-01' as date)" +
", interval 1 month)"),
Seq(Row(Array(
Date.valueOf("2018-01-01"),
Date.valueOf("2018-02-01"),
Date.valueOf("2018-03-01")))))
}
// test type coercion
checkAnswer(
Seq((1.toByte, 3L, 1)).toDF().select(sequence('_1, '_2, '_3)),
Seq(Row(Array(1L, 2L, 3L))))
checkAnswer(
spark.sql("select sequence(" +
" cast('2018-01-01' as date)" +
", cast('2018-01-02 00:00:00' as timestamp)" +
", interval 12 hours)"),
Seq(Row(Array(
Timestamp.valueOf("2018-01-01 00:00:00"),
Timestamp.valueOf("2018-01-01 12:00:00"),
Timestamp.valueOf("2018-01-02 00:00:00")))))
// test invalid data types
intercept[AnalysisException] {
Seq((true, false)).toDF().selectExpr("sequence(_1, _2)")
}
intercept[AnalysisException] {
Seq((true, false, 42)).toDF().selectExpr("sequence(_1, _2, _3)")
}
intercept[AnalysisException] {
Seq((1, 2, 0.5)).toDF().selectExpr("sequence(_1, _2, _3)")
}
}
test("reverse function - string") {
val oneRowDF = Seq(("Spark", 3215)).toDF("s", "i")
def testString(): Unit = {
checkAnswer(oneRowDF.select(reverse('s)), Seq(Row("krapS")))
checkAnswer(oneRowDF.selectExpr("reverse(s)"), Seq(Row("krapS")))
checkAnswer(oneRowDF.select(reverse('i)), Seq(Row("5123")))
checkAnswer(oneRowDF.selectExpr("reverse(i)"), Seq(Row("5123")))
checkAnswer(oneRowDF.selectExpr("reverse(null)"), Seq(Row(null)))
}
// Test with local relation, the Project will be evaluated without codegen
testString()
// Test with cached relation, the Project will be evaluated with codegen
oneRowDF.cache()
testString()
}
test("reverse function - array for primitive type not containing null") {
val idfNotContainsNull = Seq(
Seq(1, 9, 8, 7),
Seq(5, 8, 9, 7, 2),
Seq.empty,
null
).toDF("i")
def testArrayOfPrimitiveTypeNotContainsNull(): Unit = {
checkAnswer(
idfNotContainsNull.select(reverse('i)),
Seq(Row(Seq(7, 8, 9, 1)), Row(Seq(2, 7, 9, 8, 5)), Row(Seq.empty), Row(null))
)
checkAnswer(
idfNotContainsNull.selectExpr("reverse(i)"),
Seq(Row(Seq(7, 8, 9, 1)), Row(Seq(2, 7, 9, 8, 5)), Row(Seq.empty), Row(null))
)
}
// Test with local relation, the Project will be evaluated without codegen
testArrayOfPrimitiveTypeNotContainsNull()
// Test with cached relation, the Project will be evaluated with codegen
idfNotContainsNull.cache()
testArrayOfPrimitiveTypeNotContainsNull()
}
test("reverse function - array for primitive type containing null") {
val idfContainsNull = Seq[Seq[Integer]](
Seq(1, 9, 8, null, 7),
Seq(null, 5, 8, 9, 7, 2),
Seq.empty,
null
).toDF("i")
def testArrayOfPrimitiveTypeContainsNull(): Unit = {
checkAnswer(
idfContainsNull.select(reverse('i)),
Seq(Row(Seq(7, null, 8, 9, 1)), Row(Seq(2, 7, 9, 8, 5, null)), Row(Seq.empty), Row(null))
)
checkAnswer(
idfContainsNull.selectExpr("reverse(i)"),
Seq(Row(Seq(7, null, 8, 9, 1)), Row(Seq(2, 7, 9, 8, 5, null)), Row(Seq.empty), Row(null))
)
}
// Test with local relation, the Project will be evaluated without codegen
testArrayOfPrimitiveTypeContainsNull()
// Test with cached relation, the Project will be evaluated with codegen
idfContainsNull.cache()
testArrayOfPrimitiveTypeContainsNull()
}
test("reverse function - array for non-primitive type") {
val sdf = Seq(
Seq("c", "a", "b"),
Seq("b", null, "c", null),
Seq.empty,
null
).toDF("s")
def testArrayOfNonPrimitiveType(): Unit = {
checkAnswer(
sdf.select(reverse('s)),
Seq(Row(Seq("b", "a", "c")), Row(Seq(null, "c", null, "b")), Row(Seq.empty), Row(null))
)
checkAnswer(
sdf.selectExpr("reverse(s)"),
Seq(Row(Seq("b", "a", "c")), Row(Seq(null, "c", null, "b")), Row(Seq.empty), Row(null))
)
checkAnswer(
sdf.selectExpr("reverse(array(array(1, 2), array(3, 4)))"),
Seq.fill(sdf.count().toInt)(Row(Seq(Seq(3, 4), Seq(1, 2))))
)
}
// Test with local relation, the Project will be evaluated without codegen
testArrayOfNonPrimitiveType()
// Test with cached relation, the Project will be evaluated with codegen
sdf.cache()
testArrayOfNonPrimitiveType()
}
test("reverse function - data type mismatch") {
val ex1 = intercept[AnalysisException] {
sql("select reverse(struct(1, 'a'))")
}
assert(ex1.getMessage.contains("data type mismatch"))
val ex2 = intercept[AnalysisException] {
sql("select reverse(map(1, 'a'))")
}
assert(ex2.getMessage.contains("data type mismatch"))
}
test("array position function") {
val df = Seq(
(Seq[Int](1, 2), "x", 1),
(Seq[Int](), "x", 1)
).toDF("a", "b", "c")
checkAnswer(
df.select(array_position(df("a"), 1)),
Seq(Row(1L), Row(0L))
)
checkAnswer(
df.selectExpr("array_position(a, 1)"),
Seq(Row(1L), Row(0L))
)
checkAnswer(
df.selectExpr("array_position(a, c)"),
Seq(Row(1L), Row(0L))
)
checkAnswer(
df.select(array_position(df("a"), df("c"))),
Seq(Row(1L), Row(0L))
)
checkAnswer(
df.select(array_position(df("a"), null)),
Seq(Row(null), Row(null))
)
checkAnswer(
df.selectExpr("array_position(a, null)"),
Seq(Row(null), Row(null))
)
checkAnswer(
OneRowRelation().selectExpr("array_position(array(1), 1.23D)"),
Seq(Row(0L))
)
checkAnswer(
OneRowRelation().selectExpr("array_position(array(1), 1.0D)"),
Seq(Row(1L))
)
checkAnswer(
OneRowRelation().selectExpr("array_position(array(1.D), 1)"),
Seq(Row(1L))
)
checkAnswer(
OneRowRelation().selectExpr("array_position(array(1.23D), 1)"),
Seq(Row(0L))
)
checkAnswer(
OneRowRelation().selectExpr("array_position(array(array(1)), array(1.0D))"),
Seq(Row(1L))
)
checkAnswer(
OneRowRelation().selectExpr("array_position(array(array(1)), array(1.23D))"),
Seq(Row(0L))
)
checkAnswer(
OneRowRelation().selectExpr("array_position(array(array(1), null)[0], 1)"),
Seq(Row(1L))
)
checkAnswer(
OneRowRelation().selectExpr("array_position(array(1, null), array(1, null)[0])"),
Seq(Row(1L))
)
val e1 = intercept[AnalysisException] {
Seq(("a string element", "a")).toDF().selectExpr("array_position(_1, _2)")
}
val errorMsg1 =
s"""
|Input to function array_position should have been array followed by a
|value with same element type, but it's [string, string].
""".stripMargin.replace("\\n", " ").trim()
assert(e1.message.contains(errorMsg1))
val e2 = intercept[AnalysisException] {
OneRowRelation().selectExpr("array_position(array(1), '1')")
}
val errorMsg2 =
s"""
|Input to function array_position should have been array followed by a
|value with same element type, but it's [array<int>, string].
""".stripMargin.replace("\\n", " ").trim()
assert(e2.message.contains(errorMsg2))
}
test("element_at function") {
val df = Seq(
(Seq[String]("1", "2", "3"), 1),
(Seq[String](null, ""), -1),
(Seq[String](), 2)
).toDF("a", "b")
    val e0 = intercept[Exception] {
      checkAnswer(
        df.select(element_at(df("a"), 0)),
        Seq(Row(null), Row(null), Row(null))
      )
    }
    assert(e0.getMessage.contains("SQL array indices start at 1"))
intercept[Exception] {
checkAnswer(
df.select(element_at(df("a"), 1.1)),
Seq(Row(null), Row(null), Row(null))
)
}
checkAnswer(
df.select(element_at(df("a"), 4)),
Seq(Row(null), Row(null), Row(null))
)
checkAnswer(
df.select(element_at(df("a"), df("b"))),
Seq(Row("1"), Row(""), Row(null))
)
checkAnswer(
df.selectExpr("element_at(a, b)"),
Seq(Row("1"), Row(""), Row(null))
)
checkAnswer(
df.select(element_at(df("a"), 1)),
Seq(Row("1"), Row(null), Row(null))
)
checkAnswer(
df.select(element_at(df("a"), -1)),
Seq(Row("3"), Row(""), Row(null))
)
checkAnswer(
df.selectExpr("element_at(a, 4)"),
Seq(Row(null), Row(null), Row(null))
)
checkAnswer(
df.selectExpr("element_at(a, 1)"),
Seq(Row("1"), Row(null), Row(null))
)
checkAnswer(
df.selectExpr("element_at(a, -1)"),
Seq(Row("3"), Row(""), Row(null))
)
val e1 = intercept[AnalysisException] {
Seq(("a string element", 1)).toDF().selectExpr("element_at(_1, _2)")
}
val errorMsg1 =
s"""
|The first argument to function element_at should have been array or map type, but
|its string type.
""".stripMargin.replace("\\n", " ").trim()
assert(e1.message.contains(errorMsg1))
checkAnswer(
OneRowRelation().selectExpr("element_at(array(2, 1), 2S)"),
Seq(Row(1))
)
checkAnswer(
OneRowRelation().selectExpr("element_at(array('a', 'b'), 1Y)"),
Seq(Row("a"))
)
checkAnswer(
OneRowRelation().selectExpr("element_at(array(1, 2, 3), 3)"),
Seq(Row(3))
)
val e2 = intercept[AnalysisException] {
OneRowRelation().selectExpr("element_at(array('a', 'b'), 1L)")
}
val errorMsg2 =
s"""
|Input to function element_at should have been array followed by a int, but it's
|[array<string>, bigint].
""".stripMargin.replace("\\n", " ").trim()
assert(e2.message.contains(errorMsg2))
checkAnswer(
OneRowRelation().selectExpr("element_at(map(1, 'a', 2, 'b'), 2Y)"),
Seq(Row("b"))
)
checkAnswer(
OneRowRelation().selectExpr("element_at(map(1, 'a', 2, 'b'), 1S)"),
Seq(Row("a"))
)
checkAnswer(
OneRowRelation().selectExpr("element_at(map(1, 'a', 2, 'b'), 2)"),
Seq(Row("b"))
)
checkAnswer(
OneRowRelation().selectExpr("element_at(map(1, 'a', 2, 'b'), 2L)"),
Seq(Row("b"))
)
checkAnswer(
OneRowRelation().selectExpr("element_at(map(1, 'a', 2, 'b'), 1.0D)"),
Seq(Row("a"))
)
checkAnswer(
OneRowRelation().selectExpr("element_at(map(1, 'a', 2, 'b'), 1.23D)"),
Seq(Row(null))
)
val e3 = intercept[AnalysisException] {
OneRowRelation().selectExpr("element_at(map(1, 'a', 2, 'b'), '1')")
}
val errorMsg3 =
s"""
|Input to function element_at should have been map followed by a value of same
|key type, but it's [map<int,string>, string].
""".stripMargin.replace("\\n", " ").trim()
assert(e3.message.contains(errorMsg3))
}
test("array_union functions") {
val df1 = Seq((Array(1, 2, 3), Array(4, 2))).toDF("a", "b")
val ans1 = Row(Seq(1, 2, 3, 4))
checkAnswer(df1.select(array_union($"a", $"b")), ans1)
checkAnswer(df1.selectExpr("array_union(a, b)"), ans1)
val df2 = Seq((Array[Integer](1, 2, null, 4, 5), Array(-5, 4, -3, 2, -1))).toDF("a", "b")
val ans2 = Row(Seq(1, 2, null, 4, 5, -5, -3, -1))
checkAnswer(df2.select(array_union($"a", $"b")), ans2)
checkAnswer(df2.selectExpr("array_union(a, b)"), ans2)
val df3 = Seq((Array(1L, 2L, 3L), Array(4L, 2L))).toDF("a", "b")
val ans3 = Row(Seq(1L, 2L, 3L, 4L))
checkAnswer(df3.select(array_union($"a", $"b")), ans3)
checkAnswer(df3.selectExpr("array_union(a, b)"), ans3)
val df4 = Seq((Array[java.lang.Long](1L, 2L, null, 4L, 5L), Array(-5L, 4L, -3L, 2L, -1L)))
.toDF("a", "b")
val ans4 = Row(Seq(1L, 2L, null, 4L, 5L, -5L, -3L, -1L))
checkAnswer(df4.select(array_union($"a", $"b")), ans4)
checkAnswer(df4.selectExpr("array_union(a, b)"), ans4)
val df5 = Seq((Array("b", "a", "c"), Array("b", null, "a", "g"))).toDF("a", "b")
val ans5 = Row(Seq("b", "a", "c", null, "g"))
checkAnswer(df5.select(array_union($"a", $"b")), ans5)
checkAnswer(df5.selectExpr("array_union(a, b)"), ans5)
val df6 = Seq((null, Array("a"))).toDF("a", "b")
assert(intercept[AnalysisException] {
df6.select(array_union($"a", $"b"))
}.getMessage.contains("data type mismatch"))
assert(intercept[AnalysisException] {
df6.selectExpr("array_union(a, b)")
}.getMessage.contains("data type mismatch"))
val df7 = Seq((null, null)).toDF("a", "b")
assert(intercept[AnalysisException] {
df7.select(array_union($"a", $"b"))
}.getMessage.contains("data type mismatch"))
assert(intercept[AnalysisException] {
df7.selectExpr("array_union(a, b)")
}.getMessage.contains("data type mismatch"))
val df8 = Seq((Array(Array(1)), Array("a"))).toDF("a", "b")
assert(intercept[AnalysisException] {
df8.select(array_union($"a", $"b"))
}.getMessage.contains("data type mismatch"))
assert(intercept[AnalysisException] {
df8.selectExpr("array_union(a, b)")
}.getMessage.contains("data type mismatch"))
}
test("concat function - arrays") {
val nseqi : Seq[Int] = null
val nseqs : Seq[String] = null
val df = Seq(
(Seq(1), Seq(2, 3), Seq(5L, 6L), nseqi, Seq("a", "b", "c"), Seq("d", "e"), Seq("f"), nseqs),
(Seq(1, 0), Seq.empty[Int], Seq(2L), nseqi, Seq("a"), Seq.empty[String], Seq(null), nseqs)
).toDF("i1", "i2", "i3", "in", "s1", "s2", "s3", "sn")
// Simple test cases
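    // Concatenating an int array with a string array coerces all elements to string, as the
    // first expected row of simpleTest shows.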
def simpleTest(): Unit = {
      checkAnswer(
df.select(concat($"i1", $"s1")),
Seq(Row(Seq("1", "a", "b", "c")), Row(Seq("1", "0", "a")))
)
checkAnswer(
df.select(concat($"i1", $"i2", $"i3")),
Seq(Row(Seq(1, 2, 3, 5, 6)), Row(Seq(1, 0, 2)))
)
checkAnswer(
df.selectExpr("concat(array(1, null), i2, i3)"),
Seq(Row(Seq(1, null, 2, 3, 5, 6)), Row(Seq(1, null, 2)))
)
checkAnswer(
df.select(concat($"s1", $"s2", $"s3")),
Seq(Row(Seq("a", "b", "c", "d", "e", "f")), Row(Seq("a", null)))
)
checkAnswer(
df.selectExpr("concat(s1, s2, s3)"),
Seq(Row(Seq("a", "b", "c", "d", "e", "f")), Row(Seq("a", null)))
)
}
// Test with local relation, the Project will be evaluated without codegen
simpleTest()
// Test with cached relation, the Project will be evaluated with codegen
df.cache()
simpleTest()
// Null test cases
def nullTest(): Unit = {
checkAnswer(
df.select(concat($"i1", $"in")),
Seq(Row(null), Row(null))
)
checkAnswer(
df.select(concat($"in", $"i1")),
Seq(Row(null), Row(null))
)
checkAnswer(
df.select(concat($"s1", $"sn")),
Seq(Row(null), Row(null))
)
checkAnswer(
df.select(concat($"sn", $"s1")),
Seq(Row(null), Row(null))
)
}
// Test with local relation, the Project will be evaluated without codegen
df.unpersist(blocking = true)
nullTest()
// Test with cached relation, the Project will be evaluated with codegen
df.cache()
nullTest()
// Type error test cases
intercept[AnalysisException] {
df.selectExpr("concat(i1, i2, null)")
}
intercept[AnalysisException] {
df.selectExpr("concat(i1, array(i1, i2))")
}
val e = intercept[AnalysisException] {
df.selectExpr("concat(map(1, 2), map(3, 4))")
}
assert(e.getMessage.contains("string, binary or array"))
}
test("flatten function") {
// Test cases with a primitive type
val intDF = Seq(
(Seq(Seq(1, 2, 3), Seq(4, 5), Seq(6))),
(Seq(Seq(1, 2))),
(Seq(Seq(1), Seq.empty)),
(Seq(Seq.empty, Seq(1))),
(Seq(Seq.empty, Seq.empty)),
(Seq(Seq(1), null)),
(Seq(null, Seq(1))),
(Seq(null, null))
).toDF("i")
val intDFResult = Seq(
Row(Seq(1, 2, 3, 4, 5, 6)),
Row(Seq(1, 2)),
Row(Seq(1)),
Row(Seq(1)),
Row(Seq.empty),
Row(null),
Row(null),
Row(null))
def testInt(): Unit = {
checkAnswer(intDF.select(flatten($"i")), intDFResult)
checkAnswer(intDF.selectExpr("flatten(i)"), intDFResult)
}
// Test with local relation, the Project will be evaluated without codegen
testInt()
// Test with cached relation, the Project will be evaluated with codegen
intDF.cache()
testInt()
// Test cases with non-primitive types
val strDF = Seq(
(Seq(Seq("a", "b"), Seq("c"), Seq("d", "e", "f"))),
(Seq(Seq("a", "b"))),
(Seq(Seq("a", null), Seq(null, "b"), Seq(null, null))),
(Seq(Seq("a"), Seq.empty)),
(Seq(Seq.empty, Seq("a"))),
(Seq(Seq.empty, Seq.empty)),
(Seq(Seq("a"), null)),
(Seq(null, Seq("a"))),
(Seq(null, null))
).toDF("s")
val strDFResult = Seq(
Row(Seq("a", "b", "c", "d", "e", "f")),
Row(Seq("a", "b")),
Row(Seq("a", null, null, "b", null, null)),
Row(Seq("a")),
Row(Seq("a")),
Row(Seq.empty),
Row(null),
Row(null),
Row(null))
def testString(): Unit = {
checkAnswer(strDF.select(flatten($"s")), strDFResult)
checkAnswer(strDF.selectExpr("flatten(s)"), strDFResult)
}
// Test with local relation, the Project will be evaluated without codegen
testString()
// Test with cached relation, the Project will be evaluated with codegen
strDF.cache()
testString()
val arrDF = Seq((1, "a", Seq(1, 2, 3))).toDF("i", "s", "arr")
def testArray(): Unit = {
checkAnswer(
arrDF.selectExpr("flatten(array(arr, array(null, 5), array(6, null)))"),
Seq(Row(Seq(1, 2, 3, null, 5, 6, null))))
checkAnswer(
arrDF.selectExpr("flatten(array(array(arr, arr), array(arr)))"),
Seq(Row(Seq(Seq(1, 2, 3), Seq(1, 2, 3), Seq(1, 2, 3)))))
}
// Test with local relation, the Project will be evaluated without codegen
testArray()
// Test with cached relation, the Project will be evaluated with codegen
arrDF.cache()
testArray()
// Error test cases
val oneRowDF = Seq((1, "a", Seq(1, 2, 3))).toDF("i", "s", "arr")
intercept[AnalysisException] {
oneRowDF.select(flatten($"arr"))
}
intercept[AnalysisException] {
oneRowDF.select(flatten($"i"))
}
intercept[AnalysisException] {
oneRowDF.select(flatten($"s"))
}
intercept[AnalysisException] {
oneRowDF.selectExpr("flatten(null)")
}
}
test("array_repeat function") {
val strDF = Seq(
("hi", 2),
(null, 2)
).toDF("a", "b")
val strDFTwiceResult = Seq(
Row(Seq("hi", "hi")),
Row(Seq(null, null))
)
def testString(): Unit = {
checkAnswer(strDF.select(array_repeat($"a", 2)), strDFTwiceResult)
checkAnswer(strDF.select(array_repeat($"a", $"b")), strDFTwiceResult)
checkAnswer(strDF.selectExpr("array_repeat(a, 2)"), strDFTwiceResult)
checkAnswer(strDF.selectExpr("array_repeat(a, b)"), strDFTwiceResult)
}
// Test with local relation, the Project will be evaluated without codegen
testString()
// Test with cached relation, the Project will be evaluated with codegen
strDF.cache()
testString()
val intDF = {
val schema = StructType(Seq(
StructField("a", IntegerType),
StructField("b", IntegerType)))
val data = Seq(
Row(3, 2),
Row(null, 2)
)
spark.createDataFrame(spark.sparkContext.parallelize(data), schema)
}
val intDFTwiceResult = Seq(
Row(Seq(3, 3)),
Row(Seq(null, null))
)
def testInt(): Unit = {
checkAnswer(intDF.select(array_repeat($"a", 2)), intDFTwiceResult)
checkAnswer(intDF.select(array_repeat($"a", $"b")), intDFTwiceResult)
checkAnswer(intDF.selectExpr("array_repeat(a, 2)"), intDFTwiceResult)
checkAnswer(intDF.selectExpr("array_repeat(a, b)"), intDFTwiceResult)
}
// Test with local relation, the Project will be evaluated without codegen
testInt()
// Test with cached relation, the Project will be evaluated with codegen
intDF.cache()
testInt()
val nullCountDF = {
val schema = StructType(Seq(
StructField("a", StringType),
StructField("b", IntegerType)))
val data = Seq(
Row("hi", null),
Row(null, null)
)
spark.createDataFrame(spark.sparkContext.parallelize(data), schema)
}
def testNull(): Unit = {
checkAnswer(
nullCountDF.select(array_repeat($"a", $"b")),
Seq(Row(null), Row(null))
)
}
// Test with local relation, the Project will be evaluated without codegen
testNull()
// Test with cached relation, the Project will be evaluated with codegen
nullCountDF.cache()
testNull()
// Error test cases
val invalidTypeDF = Seq(("hi", "1")).toDF("a", "b")
intercept[AnalysisException] {
invalidTypeDF.select(array_repeat($"a", $"b"))
}
intercept[AnalysisException] {
invalidTypeDF.select(array_repeat($"a", lit("1")))
}
intercept[AnalysisException] {
invalidTypeDF.selectExpr("array_repeat(a, 1.0)")
}
}
test("array remove") {
val df = Seq(
(Array[Int](2, 1, 2, 3), Array("a", "b", "c", "a"), Array("", ""), 2),
(Array.empty[Int], Array.empty[String], Array.empty[String], 2),
(null, null, null, 2)
).toDF("a", "b", "c", "d")
checkAnswer(
df.select(array_remove($"a", 2), array_remove($"b", "a"), array_remove($"c", "")),
Seq(
Row(Seq(1, 3), Seq("b", "c"), Seq.empty[String]),
Row(Seq.empty[Int], Seq.empty[String], Seq.empty[String]),
Row(null, null, null))
)
checkAnswer(
df.select(array_remove($"a", $"d")),
Seq(
Row(Seq(1, 3)),
Row(Seq.empty[Int]),
Row(null))
)
checkAnswer(
df.selectExpr("array_remove(a, d)"),
Seq(
Row(Seq(1, 3)),
Row(Seq.empty[Int]),
Row(null))
)
checkAnswer(
OneRowRelation().selectExpr("array_remove(array(1, 2), 1.23D)"),
Seq(
Row(Seq(1.0, 2.0))
)
)
checkAnswer(
OneRowRelation().selectExpr("array_remove(array(1, 2), 1.0D)"),
Seq(
Row(Seq(2.0))
)
)
checkAnswer(
OneRowRelation().selectExpr("array_remove(array(1.0D, 2.0D), 2)"),
Seq(
Row(Seq(1.0))
)
)
checkAnswer(
OneRowRelation().selectExpr("array_remove(array(1.1D, 1.2D), 1)"),
Seq(
Row(Seq(1.1, 1.2))
)
)
checkAnswer(
df.selectExpr("array_remove(a, 2)", "array_remove(b, \\"a\\")",
"array_remove(c, \\"\\")"),
Seq(
Row(Seq(1, 3), Seq("b", "c"), Seq.empty[String]),
Row(Seq.empty[Int], Seq.empty[String], Seq.empty[String]),
Row(null, null, null))
)
val e1 = intercept[AnalysisException] {
Seq(("a string element", "a")).toDF().selectExpr("array_remove(_1, _2)")
}
val errorMsg1 =
s"""
|Input to function array_remove should have been array followed by a
|value with same element type, but it's [string, string].
""".stripMargin.replace("\\n", " ").trim()
assert(e1.message.contains(errorMsg1))
val e2 = intercept[AnalysisException] {
OneRowRelation().selectExpr("array_remove(array(1, 2), '1')")
}
val errorMsg2 =
s"""
|Input to function array_remove should have been array followed by a
|value with same element type, but it's [array<int>, string].
""".stripMargin.replace("\\n", " ").trim()
assert(e2.message.contains(errorMsg2))
}
test("array_distinct functions") {
val df = Seq(
(Array[Int](2, 1, 3, 4, 3, 5), Array("b", "c", "a", "c", "b", "", "")),
(Array.empty[Int], Array.empty[String]),
(null, null)
).toDF("a", "b")
checkAnswer(
df.select(array_distinct($"a"), array_distinct($"b")),
Seq(
Row(Seq(2, 1, 3, 4, 5), Seq("b", "c", "a", "")),
Row(Seq.empty[Int], Seq.empty[String]),
Row(null, null))
)
checkAnswer(
df.selectExpr("array_distinct(a)", "array_distinct(b)"),
Seq(
Row(Seq(2, 1, 3, 4, 5), Seq("b", "c", "a", "")),
Row(Seq.empty[Int], Seq.empty[String]),
Row(null, null))
)
}
  // Shuffle expressions should produce the same results across retries within the same DataFrame.
private def checkShuffleResult(df: DataFrame): Unit = {
checkAnswer(df, df.collect())
}
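  // Hypothetical usage:
  //   checkShuffleResult(spark.range(3).select(shuffle(array($"id", $"id" + 1))))
  // compares the shuffled column against its own collected result.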
test("shuffle function - array for primitive type not containing null") {
val idfNotContainsNull = Seq(
Seq(1, 9, 8, 7),
Seq(5, 8, 9, 7, 2),
Seq.empty,
null
).toDF("i")
def testArrayOfPrimitiveTypeNotContainsNull(): Unit = {
checkShuffleResult(idfNotContainsNull.select(shuffle('i)))
checkShuffleResult(idfNotContainsNull.selectExpr("shuffle(i)"))
}
// Test with local relation, the Project will be evaluated without codegen
testArrayOfPrimitiveTypeNotContainsNull()
// Test with cached relation, the Project will be evaluated with codegen
idfNotContainsNull.cache()
testArrayOfPrimitiveTypeNotContainsNull()
}
test("shuffle function - array for primitive type containing null") {
val idfContainsNull = Seq[Seq[Integer]](
Seq(1, 9, 8, null, 7),
Seq(null, 5, 8, 9, 7, 2),
Seq.empty,
null
).toDF("i")
def testArrayOfPrimitiveTypeContainsNull(): Unit = {
checkShuffleResult(idfContainsNull.select(shuffle('i)))
checkShuffleResult(idfContainsNull.selectExpr("shuffle(i)"))
}
// Test with local relation, the Project will be evaluated without codegen
testArrayOfPrimitiveTypeContainsNull()
// Test with cached relation, the Project will be evaluated with codegen
idfContainsNull.cache()
testArrayOfPrimitiveTypeContainsNull()
}
test("shuffle function - array for non-primitive type") {
val sdf = Seq(
Seq("c", "a", "b"),
Seq("b", null, "c", null),
Seq.empty,
null
).toDF("s")
def testNonPrimitiveType(): Unit = {
checkShuffleResult(sdf.select(shuffle('s)))
checkShuffleResult(sdf.selectExpr("shuffle(s)"))
}
// Test with local relation, the Project will be evaluated without codegen
testNonPrimitiveType()
// Test with cached relation, the Project will be evaluated with codegen
sdf.cache()
testNonPrimitiveType()
}
test("array_except functions") {
val df1 = Seq((Array(1, 2, 4), Array(4, 2))).toDF("a", "b")
val ans1 = Row(Seq(1))
checkAnswer(df1.select(array_except($"a", $"b")), ans1)
checkAnswer(df1.selectExpr("array_except(a, b)"), ans1)
val df2 = Seq((Array[Integer](1, 2, null, 4, 5), Array[Integer](-5, 4, null, 2, -1)))
.toDF("a", "b")
val ans2 = Row(Seq(1, 5))
checkAnswer(df2.select(array_except($"a", $"b")), ans2)
checkAnswer(df2.selectExpr("array_except(a, b)"), ans2)
val df3 = Seq((Array(1L, 2L, 4L), Array(4L, 2L))).toDF("a", "b")
val ans3 = Row(Seq(1L))
checkAnswer(df3.select(array_except($"a", $"b")), ans3)
checkAnswer(df3.selectExpr("array_except(a, b)"), ans3)
val df4 = Seq(
(Array[java.lang.Long](1L, 2L, null, 4L, 5L), Array[java.lang.Long](-5L, 4L, null, 2L, -1L)))
.toDF("a", "b")
val ans4 = Row(Seq(1L, 5L))
checkAnswer(df4.select(array_except($"a", $"b")), ans4)
checkAnswer(df4.selectExpr("array_except(a, b)"), ans4)
val df5 = Seq((Array("c", null, "a", "f"), Array("b", null, "a", "g"))).toDF("a", "b")
val ans5 = Row(Seq("c", "f"))
checkAnswer(df5.select(array_except($"a", $"b")), ans5)
checkAnswer(df5.selectExpr("array_except(a, b)"), ans5)
val df6 = Seq((null, null)).toDF("a", "b")
intercept[AnalysisException] {
df6.select(array_except($"a", $"b"))
}
intercept[AnalysisException] {
df6.selectExpr("array_except(a, b)")
}
val df7 = Seq((Array(1), Array("a"))).toDF("a", "b")
intercept[AnalysisException] {
df7.select(array_except($"a", $"b"))
}
intercept[AnalysisException] {
df7.selectExpr("array_except(a, b)")
}
val df8 = Seq((Array("a"), null)).toDF("a", "b")
intercept[AnalysisException] {
df8.select(array_except($"a", $"b"))
}
intercept[AnalysisException] {
df8.selectExpr("array_except(a, b)")
}
val df9 = Seq((null, Array("a"))).toDF("a", "b")
intercept[AnalysisException] {
df9.select(array_except($"a", $"b"))
}
intercept[AnalysisException] {
df9.selectExpr("array_except(a, b)")
}
val df10 = Seq(
(Array[Integer](1, 2), Array[Integer](2)),
(Array[Integer](1, 2), Array[Integer](1, null)),
(Array[Integer](1, null, 3), Array[Integer](1, 2)),
(Array[Integer](1, null), Array[Integer](2, null))
).toDF("a", "b")
val result10 = df10.select(array_except($"a", $"b"))
val expectedType10 = ArrayType(IntegerType, containsNull = true)
assert(result10.first.schema(0).dataType === expectedType10)
}
test("array_intersect functions") {
val df1 = Seq((Array(1, 2, 4), Array(4, 2))).toDF("a", "b")
val ans1 = Row(Seq(2, 4))
checkAnswer(df1.select(array_intersect($"a", $"b")), ans1)
checkAnswer(df1.selectExpr("array_intersect(a, b)"), ans1)
val df2 = Seq((Array[Integer](1, 2, null, 4, 5), Array[Integer](-5, 4, null, 2, -1)))
.toDF("a", "b")
val ans2 = Row(Seq(2, null, 4))
checkAnswer(df2.select(array_intersect($"a", $"b")), ans2)
checkAnswer(df2.selectExpr("array_intersect(a, b)"), ans2)
val df3 = Seq((Array(1L, 2L, 4L), Array(4L, 2L))).toDF("a", "b")
val ans3 = Row(Seq(2L, 4L))
checkAnswer(df3.select(array_intersect($"a", $"b")), ans3)
checkAnswer(df3.selectExpr("array_intersect(a, b)"), ans3)
val df4 = Seq(
(Array[java.lang.Long](1L, 2L, null, 4L, 5L), Array[java.lang.Long](-5L, 4L, null, 2L, -1L)))
.toDF("a", "b")
val ans4 = Row(Seq(2L, null, 4L))
checkAnswer(df4.select(array_intersect($"a", $"b")), ans4)
checkAnswer(df4.selectExpr("array_intersect(a, b)"), ans4)
val df5 = Seq((Array("c", null, "a", "f"), Array("b", "a", null, "g"))).toDF("a", "b")
val ans5 = Row(Seq(null, "a"))
checkAnswer(df5.select(array_intersect($"a", $"b")), ans5)
checkAnswer(df5.selectExpr("array_intersect(a, b)"), ans5)
val df6 = Seq((null, null)).toDF("a", "b")
assert(intercept[AnalysisException] {
df6.select(array_intersect($"a", $"b"))
}.getMessage.contains("data type mismatch"))
assert(intercept[AnalysisException] {
df6.selectExpr("array_intersect(a, b)")
}.getMessage.contains("data type mismatch"))
val df7 = Seq((Array(1), Array("a"))).toDF("a", "b")
assert(intercept[AnalysisException] {
df7.select(array_intersect($"a", $"b"))
}.getMessage.contains("data type mismatch"))
assert(intercept[AnalysisException] {
df7.selectExpr("array_intersect(a, b)")
}.getMessage.contains("data type mismatch"))
val df8 = Seq((null, Array("a"))).toDF("a", "b")
assert(intercept[AnalysisException] {
df8.select(array_intersect($"a", $"b"))
}.getMessage.contains("data type mismatch"))
assert(intercept[AnalysisException] {
df8.selectExpr("array_intersect(a, b)")
}.getMessage.contains("data type mismatch"))
}
test("transform function - array for primitive type not containing null") {
val df = Seq(
Seq(1, 9, 8, 7),
Seq(5, 8, 9, 7, 2),
Seq.empty,
null
).toDF("i")
def testArrayOfPrimitiveTypeNotContainsNull(): Unit = {
checkAnswer(df.selectExpr("transform(i, x -> x + 1)"),
Seq(
Row(Seq(2, 10, 9, 8)),
Row(Seq(6, 9, 10, 8, 3)),
Row(Seq.empty),
Row(null)))
checkAnswer(df.selectExpr("transform(i, (x, i) -> x + i)"),
Seq(
Row(Seq(1, 10, 10, 10)),
Row(Seq(5, 9, 11, 10, 6)),
Row(Seq.empty),
Row(null)))
}
// Test with local relation, the Project will be evaluated without codegen
testArrayOfPrimitiveTypeNotContainsNull()
// Test with cached relation, the Project will be evaluated with codegen
df.cache()
testArrayOfPrimitiveTypeNotContainsNull()
}
test("transform function - array for primitive type containing null") {
val df = Seq[Seq[Integer]](
Seq(1, 9, 8, null, 7),
Seq(5, null, 8, 9, 7, 2),
Seq.empty,
null
).toDF("i")
def testArrayOfPrimitiveTypeContainsNull(): Unit = {
checkAnswer(df.selectExpr("transform(i, x -> x + 1)"),
Seq(
Row(Seq(2, 10, 9, null, 8)),
Row(Seq(6, null, 9, 10, 8, 3)),
Row(Seq.empty),
Row(null)))
checkAnswer(df.selectExpr("transform(i, (x, i) -> x + i)"),
Seq(
Row(Seq(1, 10, 10, null, 11)),
Row(Seq(5, null, 10, 12, 11, 7)),
Row(Seq.empty),
Row(null)))
}
// Test with local relation, the Project will be evaluated without codegen
testArrayOfPrimitiveTypeContainsNull()
// Test with cached relation, the Project will be evaluated with codegen
df.cache()
testArrayOfPrimitiveTypeContainsNull()
}
test("transform function - array for non-primitive type") {
val df = Seq(
Seq("c", "a", "b"),
Seq("b", null, "c", null),
Seq.empty,
null
).toDF("s")
def testNonPrimitiveType(): Unit = {
checkAnswer(df.selectExpr("transform(s, x -> concat(x, x))"),
Seq(
Row(Seq("cc", "aa", "bb")),
Row(Seq("bb", null, "cc", null)),
Row(Seq.empty),
Row(null)))
checkAnswer(df.selectExpr("transform(s, (x, i) -> concat(x, i))"),
Seq(
Row(Seq("c0", "a1", "b2")),
Row(Seq("b0", null, "c2", null)),
Row(Seq.empty),
Row(null)))
}
// Test with local relation, the Project will be evaluated without codegen
testNonPrimitiveType()
// Test with cached relation, the Project will be evaluated with codegen
df.cache()
testNonPrimitiveType()
}
test("transform function - special cases") {
val df = Seq(
Seq("c", "a", "b"),
Seq("b", null, "c", null),
Seq.empty,
null
).toDF("arg")
def testSpecialCases(): Unit = {
checkAnswer(df.selectExpr("transform(arg, arg -> arg)"),
Seq(
Row(Seq("c", "a", "b")),
Row(Seq("b", null, "c", null)),
Row(Seq.empty),
Row(null)))
checkAnswer(df.selectExpr("transform(arg, arg)"),
Seq(
Row(Seq(Seq("c", "a", "b"), Seq("c", "a", "b"), Seq("c", "a", "b"))),
Row(Seq(
Seq("b", null, "c", null),
Seq("b", null, "c", null),
Seq("b", null, "c", null),
Seq("b", null, "c", null))),
Row(Seq.empty),
Row(null)))
checkAnswer(df.selectExpr("transform(arg, x -> concat(arg, array(x)))"),
Seq(
Row(Seq(Seq("c", "a", "b", "c"), Seq("c", "a", "b", "a"), Seq("c", "a", "b", "b"))),
Row(Seq(
Seq("b", null, "c", null, "b"),
Seq("b", null, "c", null, null),
Seq("b", null, "c", null, "c"),
Seq("b", null, "c", null, null))),
Row(Seq.empty),
Row(null)))
}
// Test with local relation, the Project will be evaluated without codegen
testSpecialCases()
// Test with cached relation, the Project will be evaluated with codegen
df.cache()
testSpecialCases()
}
test("transform function - invalid") {
val df = Seq(
(Seq("c", "a", "b"), 1),
(Seq("b", null, "c", null), 2),
(Seq.empty, 3),
(null, 4)
).toDF("s", "i")
val ex1 = intercept[AnalysisException] {
df.selectExpr("transform(s, (x, y, z) -> x + y + z)")
}
assert(ex1.getMessage.contains("The number of lambda function arguments '3' does not match"))
val ex2 = intercept[AnalysisException] {
df.selectExpr("transform(i, x -> x)")
}
assert(ex2.getMessage.contains("data type mismatch: argument 1 requires array type"))
val ex3 = intercept[AnalysisException] {
df.selectExpr("transform(a, x -> x)")
}
assert(ex3.getMessage.contains("cannot resolve '`a`'"))
}
test("map_filter") {
val dfInts = Seq(
Map(1 -> 10, 2 -> 20, 3 -> 30),
Map(1 -> -1, 2 -> -2, 3 -> -3),
Map(1 -> 10, 2 -> 5, 3 -> -3)).toDF("m")
checkAnswer(dfInts.selectExpr(
"map_filter(m, (k, v) -> k * 10 = v)", "map_filter(m, (k, v) -> k = -v)"),
Seq(
Row(Map(1 -> 10, 2 -> 20, 3 -> 30), Map()),
Row(Map(), Map(1 -> -1, 2 -> -2, 3 -> -3)),
Row(Map(1 -> 10), Map(3 -> -3))))
val dfComplex = Seq(
Map(1 -> Seq(Some(1)), 2 -> Seq(Some(1), Some(2)), 3 -> Seq(Some(1), Some(2), Some(3))),
Map(1 -> null, 2 -> Seq(Some(-2), Some(-2)), 3 -> Seq[Option[Int]](None))).toDF("m")
checkAnswer(dfComplex.selectExpr(
"map_filter(m, (k, v) -> k = v[0])", "map_filter(m, (k, v) -> k = size(v))"),
Seq(
Row(Map(1 -> Seq(1)), Map(1 -> Seq(1), 2 -> Seq(1, 2), 3 -> Seq(1, 2, 3))),
Row(Map(), Map(2 -> Seq(-2, -2)))))
// Invalid use cases
val df = Seq(
(Map(1 -> "a"), 1),
(Map.empty[Int, String], 2),
(null, 3)
).toDF("s", "i")
val ex1 = intercept[AnalysisException] {
df.selectExpr("map_filter(s, (x, y, z) -> x + y + z)")
}
assert(ex1.getMessage.contains("The number of lambda function arguments '3' does not match"))
val ex2 = intercept[AnalysisException] {
df.selectExpr("map_filter(s, x -> x)")
}
assert(ex2.getMessage.contains("The number of lambda function arguments '1' does not match"))
val ex3 = intercept[AnalysisException] {
df.selectExpr("map_filter(i, (k, v) -> k > v)")
}
assert(ex3.getMessage.contains("data type mismatch: argument 1 requires map type"))
val ex4 = intercept[AnalysisException] {
df.selectExpr("map_filter(a, (k, v) -> k > v)")
}
assert(ex4.getMessage.contains("cannot resolve '`a`'"))
}
test("filter function - array for primitive type not containing null") {
val df = Seq(
Seq(1, 9, 8, 7),
Seq(5, 8, 9, 7, 2),
Seq.empty,
null
).toDF("i")
def testArrayOfPrimitiveTypeNotContainsNull(): Unit = {
checkAnswer(df.selectExpr("filter(i, x -> x % 2 == 0)"),
Seq(
Row(Seq(8)),
Row(Seq(8, 2)),
Row(Seq.empty),
Row(null)))
}
// Test with local relation, the Project will be evaluated without codegen
testArrayOfPrimitiveTypeNotContainsNull()
// Test with cached relation, the Project will be evaluated with codegen
df.cache()
testArrayOfPrimitiveTypeNotContainsNull()
}
test("filter function - array for primitive type containing null") {
val df = Seq[Seq[Integer]](
Seq(1, 9, 8, null, 7),
Seq(5, null, 8, 9, 7, 2),
Seq.empty,
null
).toDF("i")
def testArrayOfPrimitiveTypeContainsNull(): Unit = {
checkAnswer(df.selectExpr("filter(i, x -> x % 2 == 0)"),
Seq(
Row(Seq(8)),
Row(Seq(8, 2)),
Row(Seq.empty),
Row(null)))
}
// Test with local relation, the Project will be evaluated without codegen
testArrayOfPrimitiveTypeContainsNull()
// Test with cached relation, the Project will be evaluated with codegen
df.cache()
testArrayOfPrimitiveTypeContainsNull()
}
test("filter function - array for non-primitive type") {
val df = Seq(
Seq("c", "a", "b"),
Seq("b", null, "c", null),
Seq.empty,
null
).toDF("s")
def testNonPrimitiveType(): Unit = {
checkAnswer(df.selectExpr("filter(s, x -> x is not null)"),
Seq(
Row(Seq("c", "a", "b")),
Row(Seq("b", "c")),
Row(Seq.empty),
Row(null)))
}
// Test with local relation, the Project will be evaluated without codegen
testNonPrimitiveType()
// Test with cached relation, the Project will be evaluated with codegen
df.cache()
testNonPrimitiveType()
}
test("filter function - invalid") {
val df = Seq(
(Seq("c", "a", "b"), 1),
(Seq("b", null, "c", null), 2),
(Seq.empty, 3),
(null, 4)
).toDF("s", "i")
val ex1 = intercept[AnalysisException] {
df.selectExpr("filter(s, (x, y) -> x + y)")
}
assert(ex1.getMessage.contains("The number of lambda function arguments '2' does not match"))
val ex2 = intercept[AnalysisException] {
df.selectExpr("filter(i, x -> x)")
}
assert(ex2.getMessage.contains("data type mismatch: argument 1 requires array type"))
val ex3 = intercept[AnalysisException] {
df.selectExpr("filter(s, x -> x)")
}
assert(ex3.getMessage.contains("data type mismatch: argument 2 requires boolean type"))
val ex4 = intercept[AnalysisException] {
df.selectExpr("filter(a, x -> x)")
}
assert(ex4.getMessage.contains("cannot resolve '`a`'"))
}
test("exists function - array for primitive type not containing null") {
val df = Seq(
Seq(1, 9, 8, 7),
Seq(5, 9, 7),
Seq.empty,
null
).toDF("i")
def testArrayOfPrimitiveTypeNotContainsNull(): Unit = {
checkAnswer(df.selectExpr("exists(i, x -> x % 2 == 0)"),
Seq(
Row(true),
Row(false),
Row(false),
Row(null)))
}
// Test with local relation, the Project will be evaluated without codegen
testArrayOfPrimitiveTypeNotContainsNull()
// Test with cached relation, the Project will be evaluated with codegen
df.cache()
testArrayOfPrimitiveTypeNotContainsNull()
}
test("exists function - array for primitive type containing null") {
val df = Seq[Seq[Integer]](
Seq(1, 9, 8, null, 7),
Seq(5, null, null, 9, 7, null),
Seq.empty,
null
).toDF("i")
def testArrayOfPrimitiveTypeContainsNull(): Unit = {
checkAnswer(df.selectExpr("exists(i, x -> x % 2 == 0)"),
Seq(
Row(true),
Row(false),
Row(false),
Row(null)))
}
// Test with local relation, the Project will be evaluated without codegen
testArrayOfPrimitiveTypeContainsNull()
// Test with cached relation, the Project will be evaluated with codegen
df.cache()
testArrayOfPrimitiveTypeContainsNull()
}
test("exists function - array for non-primitive type") {
val df = Seq(
Seq("c", "a", "b"),
Seq("b", null, "c", null),
Seq.empty,
null
).toDF("s")
def testNonPrimitiveType(): Unit = {
checkAnswer(df.selectExpr("exists(s, x -> x is null)"),
Seq(
Row(false),
Row(true),
Row(false),
Row(null)))
}
// Test with local relation, the Project will be evaluated without codegen
testNonPrimitiveType()
// Test with cached relation, the Project will be evaluated with codegen
df.cache()
testNonPrimitiveType()
}
test("exists function - invalid") {
val df = Seq(
(Seq("c", "a", "b"), 1),
(Seq("b", null, "c", null), 2),
(Seq.empty, 3),
(null, 4)
).toDF("s", "i")
val ex1 = intercept[AnalysisException] {
df.selectExpr("exists(s, (x, y) -> x + y)")
}
assert(ex1.getMessage.contains("The number of lambda function arguments '2' does not match"))
val ex2 = intercept[AnalysisException] {
df.selectExpr("exists(i, x -> x)")
}
assert(ex2.getMessage.contains("data type mismatch: argument 1 requires array type"))
val ex3 = intercept[AnalysisException] {
df.selectExpr("exists(s, x -> x)")
}
assert(ex3.getMessage.contains("data type mismatch: argument 2 requires boolean type"))
val ex4 = intercept[AnalysisException] {
df.selectExpr("exists(a, x -> x)")
}
assert(ex4.getMessage.contains("cannot resolve '`a`'"))
}
test("aggregate function - array for primitive type not containing null") {
val df = Seq(
Seq(1, 9, 8, 7),
Seq(5, 8, 9, 7, 2),
Seq.empty,
null
).toDF("i")
def testArrayOfPrimitiveTypeNotContainsNull(): Unit = {
checkAnswer(df.selectExpr("aggregate(i, 0, (acc, x) -> acc + x)"),
Seq(
Row(25),
Row(31),
Row(0),
Row(null)))
checkAnswer(df.selectExpr("aggregate(i, 0, (acc, x) -> acc + x, acc -> acc * 10)"),
Seq(
Row(250),
Row(310),
Row(0),
Row(null)))
}
// Test with local relation, the Project will be evaluated without codegen
testArrayOfPrimitiveTypeNotContainsNull()
// Test with cached relation, the Project will be evaluated with codegen
df.cache()
testArrayOfPrimitiveTypeNotContainsNull()
}
test("aggregate function - array for primitive type containing null") {
val df = Seq[Seq[Integer]](
Seq(1, 9, 8, 7),
Seq(5, null, 8, 9, 7, 2),
Seq.empty,
null
).toDF("i")
def testArrayOfPrimitiveTypeContainsNull(): Unit = {
checkAnswer(df.selectExpr("aggregate(i, 0, (acc, x) -> acc + x)"),
Seq(
Row(25),
Row(null),
Row(0),
Row(null)))
checkAnswer(
df.selectExpr("aggregate(i, 0, (acc, x) -> acc + x, acc -> coalesce(acc, 0) * 10)"),
Seq(
Row(250),
Row(0),
Row(0),
Row(null)))
}
// Test with local relation, the Project will be evaluated without codegen
testArrayOfPrimitiveTypeContainsNull()
// Test with cached relation, the Project will be evaluated with codegen
df.cache()
testArrayOfPrimitiveTypeContainsNull()
}
test("aggregate function - array for non-primitive type") {
val df = Seq(
(Seq("c", "a", "b"), "a"),
(Seq("b", null, "c", null), "b"),
(Seq.empty, "c"),
(null, "d")
).toDF("ss", "s")
def testNonPrimitiveType(): Unit = {
checkAnswer(df.selectExpr("aggregate(ss, s, (acc, x) -> concat(acc, x))"),
Seq(
Row("acab"),
Row(null),
Row("c"),
Row(null)))
checkAnswer(
df.selectExpr("aggregate(ss, s, (acc, x) -> concat(acc, x), acc -> coalesce(acc , ''))"),
Seq(
Row("acab"),
Row(""),
Row("c"),
Row(null)))
}
// Test with local relation, the Project will be evaluated without codegen
testNonPrimitiveType()
// Test with cached relation, the Project will be evaluated with codegen
df.cache()
testNonPrimitiveType()
}
test("aggregate function - invalid") {
val df = Seq(
(Seq("c", "a", "b"), 1),
(Seq("b", null, "c", null), 2),
(Seq.empty, 3),
(null, 4)
).toDF("s", "i")
val ex1 = intercept[AnalysisException] {
df.selectExpr("aggregate(s, '', x -> x)")
}
assert(ex1.getMessage.contains("The number of lambda function arguments '1' does not match"))
val ex2 = intercept[AnalysisException] {
df.selectExpr("aggregate(s, '', (acc, x) -> x, (acc, x) -> x)")
}
assert(ex2.getMessage.contains("The number of lambda function arguments '2' does not match"))
val ex3 = intercept[AnalysisException] {
df.selectExpr("aggregate(i, 0, (acc, x) -> x)")
}
assert(ex3.getMessage.contains("data type mismatch: argument 1 requires array type"))
val ex4 = intercept[AnalysisException] {
df.selectExpr("aggregate(s, 0, (acc, x) -> x)")
}
assert(ex4.getMessage.contains("data type mismatch: argument 3 requires int type"))
val ex5 = intercept[AnalysisException] {
df.selectExpr("aggregate(a, 0, (acc, x) -> x)")
}
assert(ex5.getMessage.contains("cannot resolve '`a`'"))
}
test("map_zip_with function - map of primitive types") {
val df = Seq(
(Map(8 -> 6L, 3 -> 5L, 6 -> 2L), Map[Integer, Integer]((6, 4), (8, 2), (3, 2))),
(Map(10 -> 6L, 8 -> 3L), Map[Integer, Integer]((8, 4), (4, null))),
(Map.empty[Int, Long], Map[Integer, Integer]((5, 1))),
(Map(5 -> 1L), null)
).toDF("m1", "m2")
checkAnswer(df.selectExpr("map_zip_with(m1, m2, (k, v1, v2) -> k == v1 + v2)"),
Seq(
Row(Map(8 -> true, 3 -> false, 6 -> true)),
Row(Map(10 -> null, 8 -> false, 4 -> null)),
Row(Map(5 -> null)),
Row(null)))
}
test("map_zip_with function - map of non-primitive types") {
val df = Seq(
(Map("z" -> "a", "y" -> "b", "x" -> "c"), Map("x" -> "a", "z" -> "c")),
(Map("b" -> "a", "c" -> "d"), Map("c" -> "a", "b" -> null, "d" -> "k")),
(Map("a" -> "d"), Map.empty[String, String]),
(Map("a" -> "d"), null)
).toDF("m1", "m2")
checkAnswer(df.selectExpr("map_zip_with(m1, m2, (k, v1, v2) -> (v1, v2))"),
Seq(
Row(Map("z" -> Row("a", "c"), "y" -> Row("b", null), "x" -> Row("c", "a"))),
Row(Map("b" -> Row("a", null), "c" -> Row("d", "a"), "d" -> Row(null, "k"))),
Row(Map("a" -> Row("d", null))),
Row(null)))
}
test("map_zip_with function - invalid") {
val df = Seq(
(Map(1 -> 2), Map(1 -> "a"), Map("a" -> "b"), Map(Map(1 -> 2) -> 2), 1)
).toDF("mii", "mis", "mss", "mmi", "i")
val ex1 = intercept[AnalysisException] {
df.selectExpr("map_zip_with(mii, mis, (x, y) -> x + y)")
}
assert(ex1.getMessage.contains("The number of lambda function arguments '2' does not match"))
val ex2 = intercept[AnalysisException] {
df.selectExpr("map_zip_with(mis, mmi, (x, y, z) -> concat(x, y, z))")
}
assert(ex2.getMessage.contains("The input to function map_zip_with should have " +
"been two maps with compatible key types"))
val ex3 = intercept[AnalysisException] {
df.selectExpr("map_zip_with(i, mis, (x, y, z) -> concat(x, y, z))")
}
assert(ex3.getMessage.contains("type mismatch: argument 1 requires map type"))
val ex4 = intercept[AnalysisException] {
df.selectExpr("map_zip_with(mis, i, (x, y, z) -> concat(x, y, z))")
}
assert(ex4.getMessage.contains("type mismatch: argument 2 requires map type"))
val ex5 = intercept[AnalysisException] {
df.selectExpr("map_zip_with(mmi, mmi, (x, y, z) -> x)")
}
assert(ex5.getMessage.contains("function map_zip_with does not support ordering on type map"))
}
test("transform keys function - primitive data types") {
val dfExample1 = Seq(
Map[Int, Int](1 -> 1, 9 -> 9, 8 -> 8, 7 -> 7)
).toDF("i")
val dfExample2 = Seq(
Map[Int, Double](1 -> 1.0, 2 -> 1.40, 3 -> 1.70)
).toDF("j")
val dfExample3 = Seq(
Map[Int, Boolean](25 -> true, 26 -> false)
).toDF("x")
val dfExample4 = Seq(
Map[Array[Int], Boolean](Array(1, 2) -> false)
).toDF("y")
def testMapOfPrimitiveTypesCombination(): Unit = {
checkAnswer(dfExample1.selectExpr("transform_keys(i, (k, v) -> k + v)"),
Seq(Row(Map(2 -> 1, 18 -> 9, 16 -> 8, 14 -> 7))))
checkAnswer(dfExample2.selectExpr("transform_keys(j, " +
"(k, v) -> map_from_arrays(ARRAY(1, 2, 3), ARRAY('one', 'two', 'three'))[k])"),
Seq(Row(Map("one" -> 1.0, "two" -> 1.4, "three" -> 1.7))))
checkAnswer(dfExample2.selectExpr("transform_keys(j, (k, v) -> CAST(v * 2 AS BIGINT) + k)"),
Seq(Row(Map(3 -> 1.0, 4 -> 1.4, 6 -> 1.7))))
checkAnswer(dfExample2.selectExpr("transform_keys(j, (k, v) -> k + v)"),
Seq(Row(Map(2.0 -> 1.0, 3.4 -> 1.4, 4.7 -> 1.7))))
checkAnswer(dfExample3.selectExpr("transform_keys(x, (k, v) -> k % 2 = 0 OR v)"),
Seq(Row(Map(true -> true, true -> false))))
checkAnswer(dfExample3.selectExpr("transform_keys(x, (k, v) -> if(v, 2 * k, 3 * k))"),
Seq(Row(Map(50 -> true, 78 -> false))))
checkAnswer(dfExample3.selectExpr("transform_keys(x, (k, v) -> if(v, 2 * k, 3 * k))"),
Seq(Row(Map(50 -> true, 78 -> false))))
checkAnswer(dfExample4.selectExpr("transform_keys(y, (k, v) -> array_contains(k, 3) AND v)"),
Seq(Row(Map(false -> false))))
}
// Test with local relation, the Project will be evaluated without codegen
testMapOfPrimitiveTypesCombination()
dfExample1.cache()
dfExample2.cache()
dfExample3.cache()
dfExample4.cache()
// Test with cached relation, the Project will be evaluated with codegen
testMapOfPrimitiveTypesCombination()
}
test("transform keys function - Invalid lambda functions and exceptions") {
val dfExample1 = Seq(
Map[String, String]("a" -> null)
).toDF("i")
val dfExample2 = Seq(
Seq(1, 2, 3, 4)
).toDF("j")
val ex1 = intercept[AnalysisException] {
dfExample1.selectExpr("transform_keys(i, k -> k)")
}
assert(ex1.getMessage.contains("The number of lambda function arguments '1' does not match"))
val ex2 = intercept[AnalysisException] {
dfExample1.selectExpr("transform_keys(i, (k, v, x) -> k + 1)")
}
assert(ex2.getMessage.contains(
"The number of lambda function arguments '3' does not match"))
val ex3 = intercept[Exception] {
dfExample1.selectExpr("transform_keys(i, (k, v) -> v)").show()
}
assert(ex3.getMessage.contains("Cannot use null as map key"))
val ex4 = intercept[AnalysisException] {
dfExample2.selectExpr("transform_keys(j, (k, v) -> k + 1)")
}
assert(ex4.getMessage.contains(
"data type mismatch: argument 1 requires map type"))
}
test("transform values function - test primitive data types") {
val dfExample1 = Seq(
Map[Int, Int](1 -> 1, 9 -> 9, 8 -> 8, 7 -> 7)
).toDF("i")
val dfExample2 = Seq(
Map[Boolean, String](false -> "abc", true -> "def")
).toDF("x")
val dfExample3 = Seq(
Map[String, Int]("a" -> 1, "b" -> 2, "c" -> 3)
).toDF("y")
val dfExample4 = Seq(
Map[Int, Double](1 -> 1.0, 2 -> 1.40, 3 -> 1.70)
).toDF("z")
val dfExample5 = Seq(
Map[Int, Array[Int]](1 -> Array(1, 2))
).toDF("c")
def testMapOfPrimitiveTypesCombination(): Unit = {
checkAnswer(dfExample1.selectExpr("transform_values(i, (k, v) -> k + v)"),
Seq(Row(Map(1 -> 2, 9 -> 18, 8 -> 16, 7 -> 14))))
checkAnswer(dfExample2.selectExpr(
"transform_values(x, (k, v) -> if(k, v, CAST(k AS String)))"),
Seq(Row(Map(false -> "false", true -> "def"))))
checkAnswer(dfExample2.selectExpr("transform_values(x, (k, v) -> NOT k AND v = 'abc')"),
Seq(Row(Map(false -> true, true -> false))))
checkAnswer(dfExample3.selectExpr("transform_values(y, (k, v) -> v * v)"),
Seq(Row(Map("a" -> 1, "b" -> 4, "c" -> 9))))
checkAnswer(dfExample3.selectExpr(
"transform_values(y, (k, v) -> k || ':' || CAST(v as String))"),
Seq(Row(Map("a" -> "a:1", "b" -> "b:2", "c" -> "c:3"))))
checkAnswer(
dfExample3.selectExpr("transform_values(y, (k, v) -> concat(k, cast(v as String)))"),
Seq(Row(Map("a" -> "a1", "b" -> "b2", "c" -> "c3"))))
checkAnswer(
dfExample4.selectExpr(
"transform_values(" +
"z,(k, v) -> map_from_arrays(ARRAY(1, 2, 3), " +
"ARRAY('one', 'two', 'three'))[k] || '_' || CAST(v AS String))"),
Seq(Row(Map(1 -> "one_1.0", 2 -> "two_1.4", 3 ->"three_1.7"))))
checkAnswer(
dfExample4.selectExpr("transform_values(z, (k, v) -> k-v)"),
Seq(Row(Map(1 -> 0.0, 2 -> 0.6000000000000001, 3 -> 1.3))))
checkAnswer(
dfExample5.selectExpr("transform_values(c, (k, v) -> k + cardinality(v))"),
Seq(Row(Map(1 -> 3))))
}
// Test with local relation, the Project will be evaluated without codegen
testMapOfPrimitiveTypesCombination()
dfExample1.cache()
dfExample2.cache()
dfExample3.cache()
dfExample4.cache()
dfExample5.cache()
// Test with cached relation, the Project will be evaluated with codegen
testMapOfPrimitiveTypesCombination()
}
test("transform values function - test empty") {
val dfExample1 = Seq(
Map.empty[Integer, Integer]
).toDF("i")
val dfExample2 = Seq(
Map.empty[BigInt, String]
).toDF("j")
def testEmpty(): Unit = {
checkAnswer(dfExample1.selectExpr("transform_values(i, (k, v) -> NULL)"),
Seq(Row(Map.empty[Integer, Integer])))
checkAnswer(dfExample1.selectExpr("transform_values(i, (k, v) -> k)"),
Seq(Row(Map.empty[Integer, Integer])))
checkAnswer(dfExample1.selectExpr("transform_values(i, (k, v) -> v)"),
Seq(Row(Map.empty[Integer, Integer])))
checkAnswer(dfExample1.selectExpr("transform_values(i, (k, v) -> 0)"),
Seq(Row(Map.empty[Integer, Integer])))
checkAnswer(dfExample1.selectExpr("transform_values(i, (k, v) -> 'value')"),
Seq(Row(Map.empty[Integer, String])))
checkAnswer(dfExample1.selectExpr("transform_values(i, (k, v) -> true)"),
Seq(Row(Map.empty[Integer, Boolean])))
checkAnswer(dfExample2.selectExpr("transform_values(j, (k, v) -> k + cast(v as BIGINT))"),
Seq(Row(Map.empty[BigInt, BigInt])))
}
testEmpty()
dfExample1.cache()
dfExample2.cache()
testEmpty()
}
test("transform values function - test null values") {
val dfExample1 = Seq(
Map[Int, Integer](1 -> 1, 2 -> 2, 3 -> 3, 4 -> 4)
).toDF("a")
val dfExample2 = Seq(
Map[Int, String](1 -> "a", 2 -> "b", 3 -> null)
).toDF("b")
def testNullValue(): Unit = {
checkAnswer(dfExample1.selectExpr("transform_values(a, (k, v) -> null)"),
Seq(Row(Map[Int, Integer](1 -> null, 2 -> null, 3 -> null, 4 -> null))))
checkAnswer(dfExample2.selectExpr(
"transform_values(b, (k, v) -> IF(v IS NULL, k + 1, k + 2))"),
Seq(Row(Map(1 -> 3, 2 -> 4, 3 -> 4))))
}
testNullValue()
dfExample1.cache()
dfExample2.cache()
testNullValue()
}
test("transform values function - test invalid functions") {
val dfExample1 = Seq(
Map[Int, Int](1 -> 1, 9 -> 9, 8 -> 8, 7 -> 7)
).toDF("i")
val dfExample2 = Seq(
Map[String, String]("a" -> "b")
).toDF("j")
val dfExample3 = Seq(
Seq(1, 2, 3, 4)
).toDF("x")
def testInvalidLambdaFunctions(): Unit = {
val ex1 = intercept[AnalysisException] {
dfExample1.selectExpr("transform_values(i, k -> k)")
}
assert(ex1.getMessage.contains("The number of lambda function arguments '1' does not match"))
val ex2 = intercept[AnalysisException] {
dfExample2.selectExpr("transform_values(j, (k, v, x) -> k + 1)")
}
assert(ex2.getMessage.contains("The number of lambda function arguments '3' does not match"))
val ex3 = intercept[AnalysisException] {
dfExample3.selectExpr("transform_values(x, (k, v) -> k + 1)")
}
assert(ex3.getMessage.contains(
"data type mismatch: argument 1 requires map type"))
}
testInvalidLambdaFunctions()
dfExample1.cache()
dfExample2.cache()
dfExample3.cache()
testInvalidLambdaFunctions()
}
test("arrays zip_with function - for primitive types") {
val df1 = Seq[(Seq[Integer], Seq[Integer])](
(Seq(9001, 9002, 9003), Seq(4, 5, 6)),
(Seq(1, 2), Seq(3, 4)),
(Seq.empty, Seq.empty),
(null, null)
).toDF("val1", "val2")
val df2 = Seq[(Seq[Integer], Seq[Long])](
(Seq(1, null, 3), Seq(1L, 2L)),
(Seq(1, 2, 3), Seq(4L, 11L))
).toDF("val1", "val2")
val expectedValue1 = Seq(
Row(Seq(9005, 9007, 9009)),
Row(Seq(4, 6)),
Row(Seq.empty),
Row(null))
checkAnswer(df1.selectExpr("zip_with(val1, val2, (x, y) -> x + y)"), expectedValue1)
val expectedValue2 = Seq(
Row(Seq(Row(1L, 1), Row(2L, null), Row(null, 3))),
Row(Seq(Row(4L, 1), Row(11L, 2), Row(null, 3))))
checkAnswer(df2.selectExpr("zip_with(val1, val2, (x, y) -> (y, x))"), expectedValue2)
}
test("arrays zip_with function - for non-primitive types") {
val df = Seq(
(Seq("a"), Seq("x", "y", "z")),
(Seq("a", null), Seq("x", "y")),
(Seq.empty[String], Seq.empty[String]),
(Seq("a", "b", "c"), null)
).toDF("val1", "val2")
val expectedValue1 = Seq(
Row(Seq(Row("x", "a"), Row("y", null), Row("z", null))),
Row(Seq(Row("x", "a"), Row("y", null))),
Row(Seq.empty),
Row(null))
checkAnswer(df.selectExpr("zip_with(val1, val2, (x, y) -> (y, x))"), expectedValue1)
}
test("arrays zip_with function - invalid") {
val df = Seq(
(Seq("c", "a", "b"), Seq("x", "y", "z"), 1),
(Seq("b", null, "c", null), Seq("x"), 2),
(Seq.empty, Seq("x", "z"), 3),
(null, Seq("x", "z"), 4)
).toDF("a1", "a2", "i")
val ex1 = intercept[AnalysisException] {
df.selectExpr("zip_with(a1, a2, x -> x)")
}
assert(ex1.getMessage.contains("The number of lambda function arguments '1' does not match"))
val ex2 = intercept[AnalysisException] {
df.selectExpr("zip_with(a1, a2, (acc, x) -> x, (acc, x) -> x)")
}
assert(ex2.getMessage.contains("Invalid number of arguments for function zip_with"))
val ex3 = intercept[AnalysisException] {
df.selectExpr("zip_with(i, a2, (acc, x) -> x)")
}
assert(ex3.getMessage.contains("data type mismatch: argument 1 requires array type"))
val ex4 = intercept[AnalysisException] {
df.selectExpr("zip_with(a1, a, (acc, x) -> x)")
}
assert(ex4.getMessage.contains("cannot resolve '`a`'"))
}
private def assertValuesDoNotChangeAfterCoalesceOrUnion(v: Column): Unit = {
import DataFrameFunctionsSuite.CodegenFallbackExpr
for ((codegenFallback, wholeStage) <- Seq((true, false), (false, false), (false, true))) {
val c = if (codegenFallback) {
Column(CodegenFallbackExpr(v.expr))
} else {
v
}
withSQLConf(
(SQLConf.CODEGEN_FALLBACK.key, codegenFallback.toString),
(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key, wholeStage.toString)) {
val df = spark.range(0, 4, 1, 4).withColumn("c", c)
val rows = df.collect()
val rowsAfterCoalesce = df.coalesce(2).collect()
assert(rows === rowsAfterCoalesce, "Values changed after coalesce when " +
s"codegenFallback=$codegenFallback and wholeStage=$wholeStage.")
val df1 = spark.range(0, 2, 1, 2).withColumn("c", c)
val rows1 = df1.collect()
val df2 = spark.range(2, 4, 1, 2).withColumn("c", c)
val rows2 = df2.collect()
val rowsAfterUnion = df1.union(df2).collect()
assert(rowsAfterUnion === rows1 ++ rows2, "Values changed after union when " +
s"codegenFallback=$codegenFallback and wholeStage=$wholeStage.")
}
}
}
test("SPARK-14393: values generated by non-deterministic functions shouldn't change after " +
"coalesce or union") {
Seq(
monotonically_increasing_id(), spark_partition_id(),
rand(Random.nextLong()), randn(Random.nextLong())
).foreach(assertValuesDoNotChangeAfterCoalesceOrUnion(_))
}
test("SPARK-21281 use string types by default if array and map have no argument") {
val ds = spark.range(1)
var expectedSchema = new StructType()
.add("x", ArrayType(StringType, containsNull = false), nullable = false)
assert(ds.select(array().as("x")).schema == expectedSchema)
expectedSchema = new StructType()
.add("x", MapType(StringType, StringType, valueContainsNull = false), nullable = false)
assert(ds.select(map().as("x")).schema == expectedSchema)
}
test("SPARK-21281 fails if functions have no argument") {
val df = Seq(1).toDF("a")
val funcsMustHaveAtLeastOneArg =
("coalesce", (df: DataFrame) => df.select(coalesce())) ::
("coalesce", (df: DataFrame) => df.selectExpr("coalesce()")) ::
("hash", (df: DataFrame) => df.select(hash())) ::
("hash", (df: DataFrame) => df.selectExpr("hash()")) :: Nil
funcsMustHaveAtLeastOneArg.foreach { case (name, func) =>
val errMsg = intercept[AnalysisException] { func(df) }.getMessage
assert(errMsg.contains(s"input to function $name requires at least one argument"))
}
val funcsMustHaveAtLeastTwoArgs =
("greatest", (df: DataFrame) => df.select(greatest())) ::
("greatest", (df: DataFrame) => df.selectExpr("greatest()")) ::
("least", (df: DataFrame) => df.select(least())) ::
("least", (df: DataFrame) => df.selectExpr("least()")) :: Nil
funcsMustHaveAtLeastTwoArgs.foreach { case (name, func) =>
val errMsg = intercept[AnalysisException] { func(df) }.getMessage
assert(errMsg.contains(s"input to function $name requires at least two arguments"))
}
}
test("SPARK-24734: Fix containsNull of Concat for array type") {
val df = Seq((Seq(1), Seq[Integer](null), Seq("a", "b"))).toDF("k1", "k2", "v")
val ex = intercept[Exception] {
df.select(map_from_arrays(concat($"k1", $"k2"), $"v")).show()
}
assert(ex.getMessage.contains("Cannot use null as map key"))
}
test("SPARK-26370: Fix resolution of higher-order function for the same identifier") {
val df = Seq(
(Seq(1, 9, 8, 7), 1, 2),
(Seq(5, 9, 7), 2, 2),
(Seq.empty, 3, 2),
(null, 4, 2)
).toDF("i", "x", "d")
checkAnswer(df.selectExpr("x", "exists(i, x -> x % d == 0)"),
Seq(
Row(1, true),
Row(2, false),
Row(3, false),
Row(4, null)))
checkAnswer(df.filter("exists(i, x -> x % d == 0)"),
Seq(Row(Seq(1, 9, 8, 7), 1, 2)))
checkAnswer(df.select("x").filter("exists(i, x -> x % d == 0)"),
Seq(Row(1)))
}
}
object DataFrameFunctionsSuite {
case class CodegenFallbackExpr(child: Expression) extends Expression with CodegenFallback {
override def children: Seq[Expression] = Seq(child)
override def nullable: Boolean = child.nullable
override def dataType: DataType = child.dataType
override lazy val resolved = true
override def eval(input: InternalRow): Any = child.eval(input)
}
}
|
WindCanDie/spark
|
sql/core/src/test/scala/org/apache/spark/sql/DataFrameFunctionsSuite.scala
|
Scala
|
apache-2.0
| 96,610
|
/*
* Copyright 2015 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala.internal
import org.mongodb.scala.{ Observable, Observer, Subscription }
private[scala] case class FilterObservable[T](observable: Observable[T], p: T => Boolean) extends Observable[T] {
override def subscribe(observer: Observer[_ >: T]): Unit = {
observable.subscribe(
new Observer[T] {
override def onError(throwable: Throwable): Unit = observer.onError(throwable)
override def onSubscribe(subscription: Subscription): Unit = observer.onSubscribe(subscription)
override def onComplete(): Unit = observer.onComplete()
override def onNext(tResult: T): Unit = if (p(tResult)) observer.onNext(tResult)
}
)
}
}
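// Usage sketch (editorial addition, not part of the upstream file): shows how FilterObservable
// wraps an existing source so that only elements satisfying the predicate reach the downstream
// observer. `source` stands for any Observable[Int] the caller already has.
private[scala] object FilterObservableExample {
  def evensOnly(source: Observable[Int]): Observable[Int] =
    FilterObservable[Int](source, (i: Int) => i % 2 == 0)
}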
|
anand-singh/mongo-scala-driver
|
driver/src/main/scala/org/mongodb/scala/internal/FilterObservable.scala
|
Scala
|
apache-2.0
| 1,291
|
package io.getquill.context.sql.norm
import io.getquill.ast.Visibility.Hidden
import io.getquill.ast._
import io.getquill.quat.Quat
import io.getquill.quat.QuatNestingHelper._
object ExpandDistinct {
def apply(q: Ast): Ast =
q match {
case Distinct(q) =>
Distinct(apply(q))
case q =>
Transform(q) {
case Aggregation(op, Distinct(q)) =>
Aggregation(op, Distinct(apply(q)))
case Distinct(Map(q, x, cc @ Tuple(values))) =>
val newIdent = Ident(x.name, valueQuat(cc.quat))
Map(Distinct(Map(q, x, cc)), newIdent,
Tuple(values.zipWithIndex.map {
case (_, i) => Property(newIdent, s"_${i + 1}")
}))
// Situations like this:
// case class AdHocCaseClass(id: Int, name: String)
// val q = quote {
// query[SomeTable].map(st => AdHocCaseClass(st.id, st.name)).distinct
// }
// ... need some special treatment. Otherwise their values will not be correctly expanded.
case Distinct(Map(q, x, cc @ CaseClass(values))) =>
val newIdent = Ident(x.name, valueQuat(cc.quat))
Map(Distinct(Map(q, x, cc)), newIdent,
CaseClass(values.map {
case (name, _) => (name, Property(newIdent, name))
}))
// Need some special handling to address issues with distinct returning a single embedded entity i.e:
// query[Parent].map(p => p.emb).distinct.map(e => (e.name, e.id))
// cannot treat such a case normally or "confused" queries will result e.g:
// SELECT p.embname, p.embid FROM (SELECT DISTINCT emb.name /* Where the heck is 'emb' coming from? */ AS embname, emb.id AS embid FROM Parent p) AS p
case d @ Distinct(Map(q, x, p @ Property.Opinionated(_, _, _, Hidden))) => d
// In situations where a simple, single property is being distinct-ed it is not necessary to translate it into a single-element tuple
      // for example query[Person].map(p => p.name).distinct.map(p => (p.name, p.name)) can be:
// SELECT p.name, p.name FROM (SELECT DISTINCT p.name from Person p) AS p
// This would normally become:
// SELECT p._1, p._1 FROM (SELECT DISTINCT p.name AS _1 from Person p) AS p
case Distinct(Map(q, x, p @ Property(id: Ident, name))) =>
val newQuat = valueQuat(id.quat) // force quat recomputation for perf purposes
Map(Distinct(Map(q, x, p)), x, Property(id.copy(quat = newQuat), name))
// Problems with distinct were first discovered in #1032. Basically, unless
// the distinct is "expanded" adding an outer map, Ident's representing a Table will end up in invalid places
// such as "ORDER BY tableIdent" etc...
case Distinct(Map(q, x, p)) =>
val newMap = Map(q, x, Tuple(List(p)))
val newQuat = Quat.Tuple(valueQuat(p.quat)) // force quat recomputation for perf purposes
val newIdent = Ident(x.name, newQuat)
Map(Distinct(newMap), newIdent, Property(newIdent, "_1"))
}
}
}
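// Worked illustration (editorial addition, not in the upstream source) of the last rewrite case
// in ExpandDistinct above: a query such as
//   query[Person].map(p => p.age * 2).distinct
// reaches this phase as Distinct(Map(q, p, p.age * 2)); the body is wrapped in a single-element
// tuple and re-projected, yielding
//   Map(Distinct(Map(q, p, Tuple(List(p.age * 2)))), p', Property(p', "_1"))
// which is what keeps table identifiers out of positions such as "ORDER BY tableIdent".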
|
getquill/quill
|
quill-sql-portable/src/main/scala/io/getquill/sql/norm/ExpandDistinct.scala
|
Scala
|
apache-2.0
| 3,175
|
package net.hamnaberg.json.collection
import org.json4s._
sealed trait Value[A] extends ToJson {
def value: A
def toJson: JValue = {
Value.toJson(this)
}
}
object Value {
case class StringValue(value: String) extends Value[String]
case class NumberValue(value: BigDecimal) extends Value[BigDecimal]
case class BooleanValue(value: Boolean) extends Value[Boolean]
case object NullValue extends Value[Null] {
def value = null
}
def apply(v: JValue): Option[Value[_]] = {
v match {
case JString(s) => Some(StringValue(s))
case JDouble(d) => Some(NumberValue(BigDecimal(d)))
case JInt(d) => Some(NumberValue(BigDecimal(d)))
case JBool(d) => Some(BooleanValue(d))
case JNull => Some(NullValue)
case _ => throw new IllegalArgumentException("Illegal value type")
}
}
private def toJson(value: Value[_]): JValue = value match {
case StringValue(s) => JString(s)
case NumberValue(n) => if (n.isValidInt) JInt(n.intValue()) else JDouble(n.doubleValue())
case BooleanValue(n) => JBool(n)
case NullValue => JNull
case _ => throw new IllegalArgumentException("Unknown value type")
}
private[collection] def toValue(any: Any): Value[_] = {
import util.control.Exception.allCatch
def toNumberValue(n: Any) = allCatch.opt(NumberValue(BigDecimal(n.toString))).getOrElse(throw new IllegalArgumentException(n + " is not a number"))
any match {
case null => NullValue
case v: String => StringValue(v)
case v: Boolean => BooleanValue(v)
case v => toNumberValue(v)
}
}
}
trait ValueConverter[-A, B] {
def convert(input: A): Value[B]
}
object ValueConverter {
import Value._
implicit def stringToValue = new ValueConverter[String, String] {
def convert(s: String) = StringValue(s)
}
implicit def numericToValue[A: Numeric] = new ValueConverter[A, BigDecimal] {
def convert(s: A) = NumberValue(BigDecimal(s.toString))
}
implicit def booleanToValue = new ValueConverter[Boolean, Boolean] {
def convert(s: Boolean) = BooleanValue(s)
}
implicit def nullToValue = new ValueConverter[Null, Null] {
def convert(s: Null) = NullValue
}
}
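// Illustrative usage sketch (editorial addition, not in the upstream source): exercises the
// conversions defined above. Everything referenced here is declared earlier in this file or
// comes from the org.json4s import at the top.
private[collection] object ValueExamples {
  import Value._
  val parsed: Option[Value[_]] = Value(JString("hello"))       // Some(StringValue("hello"))
  val rendered: JValue = NumberValue(BigDecimal(42)).toJson    // JInt(42), since 42 is a valid Int
  val converted: Value[Boolean] = implicitly[ValueConverter[Boolean, Boolean]].convert(true)
}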
|
hamnis/scala-collection-json
|
src/main/scala/net/hamnaberg/json/collection/Value.scala
|
Scala
|
apache-2.0
| 2,192
|
package dispatch.spec
import java.nio.charset.Charset
import org.scalacheck._
object BasicSpecification
extends Properties("Basic")
with DispatchCleanup {
import java.net.{URLEncoder,URLDecoder}
import Prop.{forAll,AnyOperators}
val server = {
import unfiltered.netty
import unfiltered.response._
import unfiltered.request._
object Echo extends Params.Extract("echo", Params.first)
netty.Http.anylocal.handler(netty.cycle.Planify {
case req @ Path("/echo") & Params(Echo(echo)) =>
PlainTextContent ~> ResponseString(req.method + echo)
case req @ Path(Seg("echopath" :: echo :: _)) =>
PlainTextContent ~> ResponseString(req.method + URLDecoder.decode(echo, "utf-8"))
case req @ Path(Seg("echopath" :: Nil)) =>
PlainTextContent ~> ResponseString(req.method)
case req @ Path(Seg("echobody" :: Nil)) =>
PlainTextContent ~> ResponseString(req.method + Body.string(req))
case Path(Seg("agent" :: Nil)) & UserAgent(agent) =>
PlainTextContent ~> ResponseString(agent)
case Path(Seg("contenttype" :: Nil)) & RequestContentType(contenttype) =>
PlainTextContent ~> ResponseString(contenttype)
}).start()
}
import dispatch._
val localhost = host("127.0.0.1", server.port)
  // a shim until we can update scalacheck to a version that generates non-alpha strings that don't break Java
val syms = "&#$@%"
def cyrillicChars = Gen.choose( 0x0400, 0x04FF) map {_.toChar}
def cyrillic = for {
cs <- Gen.listOf(cyrillicChars)
} yield {
cs.mkString
}
property("url() should encode non-ascii chars in the path") = forAll(cyrillic) { (sample: String) =>
val path = if (sample.isEmpty) "" else "/" + sample
val wiki = "http://wikipedia.com" + path
val uri = url(wiki)
uri.toRequest.getUrl() ?= RawUri(wiki).toString
}
property("Path segments can be before and after query parameters") = forAll(Gen.alphaStr) { (sample: String) =>
val segmentLast = (localhost <<? Map("key" -> "value")) / sample
val segmentFirst = localhost / sample <<? Map("key" -> "value")
segmentLast.toRequest.getUrl() ?= segmentFirst.toRequest.getUrl()
}
property("Path segments can be optional") = forAll(Gen.alphaStr) { (sample: String) =>
val segmentLast = (localhost <<? Map("key" -> "value")) / sample
val segmentOptional = localhost /? Some(sample) /? None <<? Map("key" -> "value")
segmentLast.toRequest.getUrl ?= segmentOptional.toRequest.getUrl
}
property("POST and handle") = forAll(Gen.alphaStr) { (sample: String) =>
val res = Http(
localhost / "echo" << Map("echo" -> sample) > as.String
)
res() ?= ("POST" + sample)
}
property("POST non-ascii chars body and get response") = forAll(cyrillic) { (sample: String) =>
val res = Http(
localhost / "echobody" << sample > as.String
)
res() ?= ("POST" + sample)
}
property("GET and handle") = forAll(Gen.alphaStr) { (sample: String) =>
val res = Http(
localhost / "echo" <<? Map("echo" -> sample) > as.String
)
res() ?= ("GET" + sample)
}
property("GET and get response") = forAll(Gen.alphaStr) { (sample: String) =>
val res = Http(
localhost / "echo" <<? Map("echo" -> sample)
)
res().getResponseBody ?= ("GET" + sample)
}
property("GET with encoded path") = forAll(Gen.alphaStr) { (sample: String) =>
// (second sample in request path is ignored)
val res = Http(
localhost / "echopath" / (sample + syms) / sample OK as.String
)
("GET" + sample + syms) ?= res()
}
property("GET with encoded path as url") = forAll(Gen.alphaStr) { (sample: String) =>
val requesturl = "http://127.0.0.1:%d/echopath/%s".format(server.port, URLEncoder.encode(sample + syms, "utf-8"))
val res = Http(url(requesturl) / sample OK as.String)
res() == ("GET" + sample + syms)
}
property("OPTIONS and handle") = forAll(Gen.alphaStr) { (sample: String) =>
val res = Http(
localhost.OPTIONS / "echo" <<? Map("echo" -> sample) > as.String
)
res() ?= ("OPTIONS" + sample)
}
property("Send Dispatch/%s User-Agent" format BuildInfo.version) = forAll(Gen.alphaStr) { (sample: String) =>
val res = Http(
localhost / "agent" > as.String
)
res() ?= ("Dispatch/%s" format BuildInfo.version)
}
property("Send a default content-type with <<") = forAll(Gen.const("unused")) { (sample: String) =>
val res = Http(
localhost / "contenttype" << "request body" > as.String
)
res() ?= ("text/plain; charset=UTF-8")
}
property("Send a custom content type after <<") = forAll(Gen.oneOf("application/json", "application/foo")) { (sample: String) =>
val res = Http(
(localhost / "contenttype" << "request body").setContentType(sample, Charset.forName("UTF-8")) > as.String
)
res() ?= (sample + "; charset=UTF-8")
}
property("Send a custom content type with <:< after <<") = forAll(Gen.oneOf("application/json", "application/foo")) { (sample: String) =>
val res: Future[String] = Http(
localhost / "contenttype" << "request body" <:< Map("Content-Type" -> sample) > as.String
)
res() ?= (sample)
}
}
|
maiflai/reboot
|
core/src/test/scala/basic.scala
|
Scala
|
lgpl-3.0
| 5,197
|
/*
* Copyright 2013 Maurício Linhares
*
* Maurício Linhares licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.github.mauricio.async.db.pool
import com.github.mauricio.async.db.util.ExecutorServiceUtils
import com.github.mauricio.async.db.{QueryResult, Connection}
import scala.concurrent.{ExecutionContext, Future}
/**
* Pool specialized in database connections that also simplifies connection handling by
* implementing the [[com.github.mauricio.async.db.Connection]] trait and saving clients from having to implement
* the "give back" part of pool management. This lets you do your job without having to worry
* about managing and giving back connection objects to the pool.
*
* The downside of this is that you should not start transactions or any kind of long running process
* in this object as the object will be sent back to the pool right after executing a query. If you
* need to start transactions you will have to take an object from the pool, do it and then give it
* back manually.
*
* @param factory
* @param configuration
*/
class ConnectionPool[T <: Connection](
factory: ObjectFactory[T],
configuration: PoolConfiguration,
executionContext: ExecutionContext = ExecutorServiceUtils.CachedExecutionContext
) extends SingleThreadedAsyncObjectPool[T](factory, configuration)
with Connection {
/**
* Closes the pool, you should discard the object.
*
* @return
*/
def disconnect: Future[Connection] =
if (this.isConnected) {
this.close.map(item => this)(executionContext)
} else {
Future.successful(this)
}
/**
   * Always completes immediately with this pool; no physical connection is acquired here.
*
* @return
*/
def connect: Future[Connection] = Future.successful(this)
def isConnected: Boolean = !this.isClosed
/**
* Picks one connection and runs this query against it. The query should be stateless, it should not
* start transactions and should not leave anything to be cleaned up in the future. The behavior of this
* object is undefined if you start a transaction from this method.
*
* @param query
* @return
*/
def sendQuery(query: String): Future[QueryResult] =
this.use(_.sendQuery(query))(executionContext)
/**
* Picks one connection and runs this query against it. The query should be stateless, it should not
* start transactions and should not leave anything to be cleaned up in the future. The behavior of this
* object is undefined if you start a transaction from this method.
*
* @param query
* @param values
* @return
*/
def sendPreparedStatement(
query: String,
values: Seq[Any] = List()
): Future[QueryResult] =
this.use(_.sendPreparedStatement(query, values))(executionContext)
/**
* Picks one connection and executes an (asynchronous) function on it within a transaction block.
* If the function completes successfully, the transaction is committed, otherwise it is aborted.
* Either way, the connection is returned to the pool on completion.
*
* @param f operation to execute on a connection
* @return result of f, conditional on transaction operations succeeding
*/
override def inTransaction[A](
f: Connection => Future[A]
)(implicit context: ExecutionContext = executionContext): Future[A] =
this.use(_.inTransaction[A](f)(context))(executionContext)
}
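/**
 * Usage sketch (editorial addition, not part of the upstream file): a minimal illustration of the
 * contract documented above. The factory and configuration are deliberately left as parameters,
 * since any driver-specific ObjectFactory[T] and PoolConfiguration would do; only the
 * "stateless query" and "transactions go through inTransaction" rules are shown.
 */
object ConnectionPoolUsageSketch {
  def statelessQuery[T <: Connection](
    factory: ObjectFactory[T],
    configuration: PoolConfiguration
  ): Future[QueryResult] = {
    val pool = new ConnectionPool(factory, configuration)
    // A single self-contained statement: the borrowed connection is returned to the pool as soon
    // as the query completes, so no manual give-back is needed.
    pool.sendQuery("SELECT 1")
  }
  def transactional[T <: Connection](pool: ConnectionPool[T]): Future[QueryResult] =
    // Multi-statement or long-running work must be wrapped in inTransaction rather than issued
    // through sendQuery, because successive sendQuery calls may use different pooled connections.
    pool.inTransaction(connection => connection.sendQuery("SELECT 1"))
}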
|
dripower/postgresql-async
|
db-async-common/src/main/scala/com/github/mauricio/async/db/pool/ConnectionPool.scala
|
Scala
|
apache-2.0
| 3,869
|
package vultura.factor.inference.calibration
import vultura.factor.inference.calibration.ConvergenceStatistic.ValuedEdge
trait ConvergenceStatistic[-E <: Edge, +S]{
def apply(e: E)(old: e.TOut, updated: e.TOut): S
}
object ConvergenceStatistic{
type ValuedEdge[A] = Edge{type TOut <: A}
}
trait ConvergenceTest[-E <: Edge]{
def isConverged(e: E)(old: e.TOut, updated: e.TOut): Boolean
}
object ConvergenceTest{
case class MaxDiff(tol: Double = 1e-12) extends ConvergenceTest[ValuedEdge[Array[Double]]]{
override def isConverged(e: ValuedEdge[Array[Double]])(old: e.type#TOut, updated: e.type#TOut): Boolean =
vultura.util.maxDiff(old,updated) <= tol
}
}
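// Illustrative note (editorial addition, not from the upstream source): MaxDiff declares an edge
// converged once the largest element-wise difference between the old and the updated message
// drops to the tolerance. Given some hypothetical `edge: ConvergenceStatistic.ValuedEdge[Array[Double]]`,
//   ConvergenceTest.MaxDiff(tol = 1e-6).isConverged(edge)(Array(0.1, 0.2), Array(0.1, 0.2 + 1e-7))
// evaluates to true, because vultura.util.maxDiff of the two arrays is about 1e-7 <= 1e-6.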
|
ziggystar/vultura-factor
|
src/main/scala/vultura/factor/inference/calibration/ConvergenceTest.scala
|
Scala
|
mit
| 679
|
/**
* Digi-Lib-Mesh - distributed mesh library for Digi components
*
* Copyright (c) 2012-2013 Alexey Aksenov ezh@ezh.msk.ru
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.digimead.digi.lib.mesh.hexapod
import java.util.UUID
import scala.Option.option2Iterable
import scala.collection.mutable.SynchronizedMap
import scala.collection.mutable.WeakHashMap
import org.digimead.digi.lib.aop.log
import org.digimead.digi.lib.enc.DiffieHellman
import org.digimead.digi.lib.log.api.Loggable
import org.digimead.digi.lib.mesh.communication.Communication
import org.digimead.digi.lib.mesh.communication.Communication.communication2implementation
import org.digimead.digi.lib.mesh.communication.Stimulus
import org.digimead.digi.lib.mesh.endpoint.Endpoint
import org.digimead.digi.lib.mesh.message.Message
class AppHexapod(override val uuid: UUID, val initDiffieHellman: Option[DiffieHellman] = None) extends AppHexapod.Interface(uuid) {
protected val endpointSubscribers = new WeakHashMap[Endpoint[_ <: Endpoint.Nature], Endpoint[Endpoint.Nature]#Sub] with SynchronizedMap[Endpoint[_ <: Endpoint.Nature], Endpoint[Endpoint.Nature]#Sub]
if (initDiffieHellman.nonEmpty) {
authDiffieHellman = initDiffieHellman
} else if (authDiffieHellman.isEmpty) {
log.debug("Diffie Hellman authentification data not found, generate new")
val p = DiffieHellman.randomPrime(128)
val g = 5
authDiffieHellman = Some(new DiffieHellman(g, p))
}
@log
def connect(): Boolean = endpoint.filter(_.connect).nonEmpty
@log
def connected() = endpoint.exists(_.connected)
@log
def disconnect() = endpoint.forall(_.disconnect)
@log
def receive(message: Message) = {
log.debug("receive message " + message)
message.destinationHexapod match {
      case Some(hexapod) if hexapod == this.uuid =>
Communication.react(Stimulus.IncomingMessage(message))
case Some(hexapod) =>
log.fatal("receive message instead of neighbor".format(message))
case None =>
Communication.react(Stimulus.IncomingMessage(message))
}
}
@log
def reconnect(): Boolean = endpoint.forall(_.reconnect)
/* @log
override def registerEndpoint(endpoint: Endpoint[_ <: Endpoint.Nature]) {
super.registerEndpoint(endpoint)
val endpointSubscriber = new endpoint.Sub {
def notify(pub: endpoint.Pub, event: Endpoint.Event): Unit = event match {
case Endpoint.Event.Connect(endpoint) =>
publish(Hexapod.Event.Connect(endpoint))
case Endpoint.Event.Disconnect(endpoint) =>
publish(Hexapod.Event.Disconnect(endpoint))
}
}
endpoint.subscribe(endpointSubscriber)
endpointSubscribers(endpoint) = endpointSubscriber
}*/
@log
def send(message: Message): Option[Endpoint[_ <: Endpoint.Nature]] = {
log.debug("send " + message)
/* val remoteHexapods = Hexapod.getRemoteHexapods(message)
val localEndpoint = endpoint.filter(ep => ep.connected && (ep.direction == Endpoint.Direction.Out ||
ep.direction == Endpoint.Direction.InOut)).sortBy(_.priority)
if (localEndpoint.isEmpty) {
log.warn("AppHexapod: There is no endpoints with direction Out/InOut. Sending aborted.")
} else {
for {
lEndpoint <- localEndpoint
rHexapod <- remoteHexapods
} {
val rEndpoints = Hexapod.getRemoteEndpoints(rHexapod, lEndpoint)
log.___glance("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS")
//localEndpoint.send(message)
None
}
}*/
None
}
override def toString = "AppHexapod[%08X]".format(this.hashCode())
}
object AppHexapod {
abstract class Interface(override val uuid: UUID) extends Hexapod(uuid) with Loggable {
def connect(): Boolean
def connected(): Boolean
def disconnect(): Boolean
def receive(message: Message)
def reconnect(): Boolean
}
}
|
ezh/digi-lib-mesh
|
src/main/scala/org/digimead/digi/lib/mesh/hexapod/AppHexapod.scala
|
Scala
|
apache-2.0
| 4,370
|
/*
* Copyright (C) 2019 Mikhail Vorozhtsov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.mvv.routineer
trait PathParser {
def segment: PathParser.GetSegment
}
object PathParser {
sealed trait GetSegment
final case class Segment(segment: String, parser: PathParser) extends GetSegment
final case class PathEnd(parser: QueryParser) extends GetSegment
final case class Failure(error: String) extends GetSegment
}
|
mvv/routineer
|
core/src/main/scala/com/github/mvv/routineer/PathParser.scala
|
Scala
|
apache-2.0
| 959
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.services
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}
import akka.actor.ActorSystem
import akka.http.scaladsl.model.headers.{HttpChallenge, HttpCookie, HttpCookiePair}
import akka.http.scaladsl.model.{RemoteAddress, StatusCodes, Uri}
import akka.http.scaladsl.server.AuthenticationFailedRejection.{CredentialsMissing, CredentialsRejected}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.http.scaladsl.server.directives.FormFieldDirectives.FieldMagnet
import akka.stream.Materializer
import com.typesafe.config.Config
import com.softwaremill.session.SessionDirectives._
import com.softwaremill.session.SessionOptions._
import com.softwaremill.session.{MultiValueSessionSerializer, SessionConfig, SessionManager}
import upickle.default.write
import org.apache.gearpump.security.{Authenticator => BaseAuthenticator}
import org.apache.gearpump.services.SecurityService.{User, UserSession}
import org.apache.gearpump.services.security.oauth2.OAuth2Authenticator
import org.apache.gearpump.util.{Constants, LogUtil}
// NOTE: This cannot be removed!!!
import org.apache.gearpump.services.util.UpickleUtil._
/**
* Security authentication endpoint.
*
 * - When the user cannot be authenticated, the request is rejected with a 401 AuthenticationFailedRejection.
 * - When the user can be authenticated but is not authorized to access a certain resource,
 *   a 405 AuthorizationFailedRejection is returned.
 * - When the web UI frontend receives a 401, it should redirect the UI to the login page.
 * - When the web UI receives a 405, it should display an error like
 *   "current user is not authorized to access this resource."
*
* The Authenticator used is pluggable, the current Authenticator is resolved by looking up
* config path [[org.apache.gearpump.util.Constants.GEARPUMP_UI_AUTHENTICATOR_CLASS]].
*
* See [[org.apache.gearpump.security.Authenticator]] to find more info on custom Authenticator.
*/
class SecurityService(inner: RouteService, implicit val system: ActorSystem) extends RouteService {
// Use scheme "GearpumpBasic" to avoid popping up web browser native authentication box.
private val challenge = HttpChallenge(scheme = "GearpumpBasic", realm = Some("gearpump"),
params = Map.empty[String, String])
val LOG = LogUtil.getLogger(getClass, "AUDIT")
private val config = system.settings.config
private val sessionConfig = SessionConfig.fromConfig(config)
private implicit val sessionManager: SessionManager[UserSession] =
new SessionManager[UserSession](sessionConfig)
private val authenticator = {
val clazz = Class.forName(config.getString(Constants.GEARPUMP_UI_AUTHENTICATOR_CLASS))
val constructor = clazz.getConstructor(classOf[Config])
val authenticator = constructor.newInstance(config).asInstanceOf[BaseAuthenticator]
authenticator
}
private def configToMap(config: Config, path: String) = {
import scala.collection.JavaConverters._
config.getConfig(path).root.unwrapped.asScala.toMap map { case (k, v) => k -> v.toString }
}
private val oauth2Providers: Map[String, String] = {
if (config.getBoolean(Constants.GEARPUMP_UI_OAUTH2_AUTHENTICATOR_ENABLED)) {
val map = configToMap(config, Constants.GEARPUMP_UI_OAUTH2_AUTHENTICATORS)
map.keys.toList.map { key =>
val iconPath = config.getString(s"${Constants.GEARPUMP_UI_OAUTH2_AUTHENTICATORS}.$key.icon")
(key, iconPath)
}.toMap
} else {
Map.empty[String, String]
}
}
private def authenticate(user: String, pass: String)(implicit ec: ExecutionContext)
: Future[Option[UserSession]] = {
authenticator.authenticate(user, pass, ec).map { result =>
if (result.authenticated) {
Some(UserSession(user, result.permissionLevel))
} else {
None
}
}
}
private def rejectMissingCredentials: Route = {
reject(AuthenticationFailedRejection(CredentialsMissing, challenge))
}
private def rejectWrongCredentials: Route = {
reject(AuthenticationFailedRejection(CredentialsRejected, challenge))
}
private def requireAuthentication(inner: UserSession => Route): Route = {
optionalSession(oneOff, usingCookiesOrHeaders) { sessionOption =>
sessionOption match {
case Some(session) => {
inner(session)
}
case None =>
rejectMissingCredentials
}
}
}
private def login(session: UserSession, ip: String, redirectToRoot: Boolean = false): Route = {
setSession(oneOff, usingCookies, session) {
val user = session.user
// Default: 1 day
val maxAgeMs = 1000 * sessionConfig.sessionMaxAgeSeconds.getOrElse(24 * 3600L)
setCookie(HttpCookie.fromPair(HttpCookiePair("username", user), path = Some("/"),
maxAge = Some(maxAgeMs))) {
LOG.info(s"user $user login from $ip")
if (redirectToRoot) {
redirect(Uri("/"), StatusCodes.TemporaryRedirect)
} else {
complete(write(new User(user)))
}
}
}
}
private def logout(user: UserSession, ip: String): Route = {
invalidateSession(oneOff, usingCookies) { ctx =>
LOG.info(s"user ${user.user} logout from $ip")
ctx.complete(write(new User(user.user)))
}
}
  // Only admins are able to access operations like post/delete/put
private def requireAuthorization(user: UserSession, route: => Route): Route = {
// Valid user
if (user.permissionLevel >= BaseAuthenticator.User.permissionLevel) {
route
} else {
// Possibly a guest or not authenticated.
(put | delete | post) {
// Reject with 405 authorization error
reject(AuthorizationFailedRejection)
} ~
get {
route
}
}
}
private val unknownIp: Directive1[RemoteAddress] = {
Directive[Tuple1[RemoteAddress]]{ inner =>
inner(new Tuple1(RemoteAddress.Unknown))
}
}
override val route: Route = {
extractExecutionContext{implicit ec: ExecutionContext =>
extractMaterializer{implicit mat: Materializer =>
(extractClientIP | unknownIp) { ip =>
pathPrefix("login") {
pathEndOrSingleSlash {
get {
getFromResource("login/login.html")
} ~
post {
              // Guest accounts don't have permission to submit new applications in the UI
formField(FieldMagnet('username.as[String])) {user: String =>
formFields(FieldMagnet('password.as[String])) {pass: String =>
val result = authenticate(user, pass)
onSuccess(result) {
case Some(session) =>
login(session, ip.toString)
case None =>
rejectWrongCredentials
}
}
}
}
} ~
path ("oauth2" / "providers") {
// Responds with a list of OAuth2 providers.
complete(write(oauth2Providers))
} ~
// Support OAUTH Authentication
pathPrefix ("oauth2"/ Segment) {providerName =>
// Resolve OAUTH Authentication Provider
val oauthService = OAuth2Authenticator.get(config, providerName, ec)
if (oauthService == null) {
// OAuth2 is disabled.
complete(StatusCodes.NotFound)
} else {
def loginWithOAuth2Parameters(parameters: Map[String, String]): Route = {
val result = oauthService.authenticate(parameters)
onComplete(result) {
case Success(session) =>
login(session, ip.toString, redirectToRoot = true)
case Failure(ex) => {
LOG.info(s"Failed to login user from ${ip.toString}", ex)
rejectWrongCredentials
}
}
}
path ("authorize") {
// Redirects to OAuth2 service provider for authorization.
redirect(Uri(oauthService.getAuthorizationUrl), StatusCodes.TemporaryRedirect)
} ~
path ("accesstoken") {
post {
                // Guest accounts don't have permission to submit new applications in the UI
formField(FieldMagnet('accesstoken.as[String])) {accesstoken: String =>
loginWithOAuth2Parameters(Map("accesstoken" -> accesstoken))
}
}
} ~
path("callback") {
// Login with authorization code or access token.
parameterMap {parameters =>
loginWithOAuth2Parameters(parameters)
}
}
}
}
} ~
path("logout") {
post {
requireAuthentication {session =>
logout(session, ip.toString())
}
}
} ~
requireAuthentication {user =>
requireAuthorization(user, inner.route)
}
}}}
}
}
object SecurityService {
val SESSION_MANAGER_KEY = "akka.http.session.server-secret"
case class UserSession(user: String, permissionLevel: Int)
object UserSession {
private val User = "user"
private val PermissionLevel = "permissionLevel"
implicit def serializer: MultiValueSessionSerializer[UserSession] = {
new MultiValueSessionSerializer[UserSession](
toMap = {t: UserSession =>
Map(User -> t.user, PermissionLevel -> t.permissionLevel.toString)
},
fromMap = {m: Map[String, String] =>
if (m.contains(User)) {
Try(UserSession(m(User), m(PermissionLevel).toInt))
} else {
Failure[UserSession](new Exception("Fail to parse session "))
}
}
)
}
}
case class User(user: String)
}
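// Descriptive note (editorial addition, not from the upstream source): the contract this file
// expects from a pluggable authenticator, as visible from the code above. The concrete class is
// resolved reflectively from the config key Constants.GEARPUMP_UI_AUTHENTICATOR_CLASS, must expose
// a constructor taking a com.typesafe.config.Config, and its authenticate(user, password, ec) must
// return a Future whose result carries `authenticated: Boolean` and `permissionLevel: Int`
// (see `authenticate` and `requireAuthorization` above). A custom implementation therefore only
// needs to satisfy org.apache.gearpump.security.Authenticator and be named under that config key.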
|
manuzhang/incubator-gearpump
|
services/jvm/src/main/scala/org/apache/gearpump/services/SecurityService.scala
|
Scala
|
apache-2.0
| 10,574
|
package ch.epfl.scala.index
package server
package routes
import model._
import release._
import data.project.ProjectForm
import model.misc._
import com.softwaremill.session._, SessionDirectives._, SessionOptions._
import TwirlSupport._
import akka.http.scaladsl._
import model._
import server.Directives._
import Uri._
import StatusCodes._
import scala.concurrent.Future
class ProjectPages(dataRepository: DataRepository, session: GithubUserSession) {
import session._
private def canEdit(owner: String, repo: String, userState: Option[UserState]) =
userState.map(s => s.isAdmin || s.repos.contains(GithubRepo(owner, repo))).getOrElse(false)
private def editPage(owner: String, repo: String, userState: Option[UserState]) = {
val user = userState.map(_.user)
if (canEdit(owner, repo, userState)) {
for {
keywords <- dataRepository.keywords()
project <- dataRepository.project(Project.Reference(owner, repo))
} yield {
project.map { p =>
val allKeywords = (p.keywords ++ keywords.keys.toSet).toList.sorted
(OK, views.project.html.editproject(p, allKeywords, user))
}.getOrElse((NotFound, views.html.notfound(user)))
}
} else Future.successful((Forbidden, views.html.forbidden(user)))
}
private def projectPage(owner: String,
repo: String,
artifact: Option[String],
version: Option[SemanticVersion],
userState: Option[UserState]) = {
val user = userState.map(_.user)
println(s"$owner $repo $artifact $version")
dataRepository
.projectPage(Project.Reference(owner, repo), ReleaseSelection(artifact, version))
.map(_.map {
case (project, options) =>
import options._
(OK,
views.project.html.project(
project,
artifacts,
versions,
targets,
release,
user,
canEdit(owner, repo, userState)
))
}.getOrElse((NotFound, views.html.notfound(user))))
}
val routes =
post {
path("edit" / Segment / Segment) { (organization, repository) =>
optionalSession(refreshable, usingCookies) { userId =>
pathEnd {
formFieldSeq { fields =>
formFields(
'contributorsWanted.as[Boolean] ? false,
'keywords.*,
'defaultArtifact.?,
'defaultStableVersion.as[Boolean] ? false,
'deprecated.as[Boolean] ? false,
'artifactDeprecations.*,
'cliArtifacts.*,
'customScalaDoc.?
) {
(contributorsWanted, keywords, defaultArtifact, defaultStableVersion, deprecated,
artifactDeprecations, cliArtifacts, customScalaDoc) =>
val documentationLinks = {
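                // Descriptive comment (editorial addition): the form posts indexed fields such as
                // "documentationLinks[0].label" plus a matching url field (the exact suffix comes
                // from the form template); the code below groups them by the index between the
                // brackets and pairs each label with its url.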
val name = "documentationLinks"
val end = "]".head
fields.filter { case (key, _) => key.startsWith(name) }.groupBy {
case (key, _) => key.drop("documentationLinks[".length).takeWhile(_ != end)
}.values.map {
case Vector((a, b), (c, d)) =>
if (a.contains("label")) (b, d)
else (d, b)
}.toList
}
onSuccess(
dataRepository.updateProject(
Project.Reference(organization, repository),
ProjectForm(
contributorsWanted,
keywords.toSet,
defaultArtifact,
defaultStableVersion,
deprecated,
artifactDeprecations.toSet,
cliArtifacts.toSet,
// documentation
customScalaDoc,
documentationLinks
)
)
) { ret =>
Thread.sleep(1000) // oh yeah
redirect(Uri(s"/$organization/$repository"), SeeOther)
}
}
}
}
}
}
} ~
get {
path("edit" / Segment / Segment) { (organization, repository) =>
optionalSession(refreshable, usingCookies) { userId =>
pathEnd {
complete(editPage(organization, repository, getUser(userId)))
}
}
} ~
path(Segment / Segment) { (organization, repository) =>
optionalSession(refreshable, usingCookies) { userId =>
parameters('artifact, 'version.?) { (artifact, version) =>
val rest = version match {
case Some(v) if !v.isEmpty => "/" + v
case _ => ""
}
redirect(s"/$organization/$repository/$artifact$rest",
StatusCodes.PermanentRedirect)
} ~
pathEnd {
complete(projectPage(organization, repository, None, None, getUser(userId)))
}
}
} ~
path(Segment / Segment / Segment) { (organization, repository, artifact) =>
optionalSession(refreshable, usingCookies) { userId =>
complete(
projectPage(organization, repository, Some(artifact), None, getUser(userId))
)
}
} ~
path(Segment / Segment / Segment / Segment) {
(organization, repository, artifact, version) =>
optionalSession(refreshable, usingCookies) { userId =>
complete(
projectPage(organization,
repository,
Some(artifact),
SemanticVersion(version),
getUser(userId))
)
}
}
}
}
|
adamwy/scaladex
|
server/src/main/scala/ch.epfl.scala.index.server/routes/ProjectPages.scala
|
Scala
|
bsd-3-clause
| 6,103
|
package com.cds.learnscala.extractors
import com.cds.learnscala.extractors.Fraction
object Main {
def testcase(): Unit = {
val test = Currency(29.22, "abc")
test match {
case Currency(29.23, "abc") => println("$" + 29.22)
case _ => println("not match")
}
}
case class Currency(value: Double, unit: String)
def main(args: Array[String]) {
testcase()
}
}
|
anancds/scala-project
|
learn-scala/src/main/scala/com/cds/learnscala/extractors/Main.scala
|
Scala
|
mit
| 397
|
package dk.tennis.compare.rating.multiskill.model.pointcormodel
import org.junit.Assert._
import org.junit.Ignore
import org.junit.Test
import breeze.linalg.DenseMatrix
import breeze.linalg.DenseVector
import dk.bayes.math.gaussian.canonical.DenseCanonicalGaussian
import dk.bayes.math.linear.isIdentical
class GenericPointCorModelTest {
private val perfVarianceOnServe = 17
private val perfVarianceOnReturn = 15
private val pointModel = GenericPointCorModel(perfVarianceOnServe, perfVarianceOnReturn)
@Test def skillMarginals_zero_skill_covariance {
val skills = DenseCanonicalGaussian(DenseVector(0.2, -0.2), new DenseMatrix(2, 2, Array(0.7, 0, 0, 0.5)).t)
val skillsMarginal = pointModel.skillMarginals(skills, true)
assertTrue(isIdentical(DenseVector(0.2914, -0.2654), skillsMarginal.mean,0.0001))
assertTrue(isIdentical(new DenseMatrix(2, 2, Array(0.6908, 0.0065, 0.0065, 0.4953)).t, skillsMarginal.variance,0.0001))
}
@Test def skillMarginals_positive_skill_covariance {
val skills = DenseCanonicalGaussian(DenseVector(0.2, -0.2), new DenseMatrix(2, 2, Array(0.7, 0.3, 0.3, 0.5)).t)
val skillsMarginal = pointModel.skillMarginals(skills, true)
assertTrue("actual=" + skillsMarginal.mean,isIdentical(DenseVector(0.2527, -0.2263), skillsMarginal.mean,0.001))
assertTrue(isIdentical(new DenseMatrix(2, 2, Array(0.6969, 0.3015, 0.3015, 0.4992)).t, skillsMarginal.variance,0.0001))
}
@Test def skillMarginals_till_convergence {
var skills = DenseCanonicalGaussian(DenseVector(0.2, -0.2), new DenseMatrix(2, 2, Array(0.7, 0.0, 0.0, 0.5)).t)
for (i <- 1 to 1000) {
skills = pointModel.skillMarginals(skills, i % 5 == 0)
println(pointModel.pointProb(skills))
}
assertTrue("actual=" + skills.mean,isIdentical(DenseVector(-2.642, 1.830), skills.mean,0.001))
assertTrue(isIdentical(new DenseMatrix(2, 2, Array(0.3116, 0.2773, 0.2773, 0.3018)).t, skills.variance,0.001))
}
@Test def skillsMarginals_zero_perf_variance {
val perfVarianceOnServe = 1e-10
val perfVarianceOnReturn = 1e-10
val pointModel = GenericPointCorModel(perfVarianceOnServe, perfVarianceOnReturn)
val skills = DenseCanonicalGaussian(DenseVector(0.8, -0.8), new DenseMatrix(2, 2, Array(1, 0.2, 0.2, 1)).t)
val skillsMarginal = pointModel.skillMarginals(skills, false)
assertTrue(isIdentical(DenseVector(-0.301, 0.301), skillsMarginal.mean,0.001))
assertTrue(isIdentical(new DenseMatrix(2, 2, Array(0.668, 0.532, 0.532, 0.668)).t, skillsMarginal.variance,0.001))
}
@Ignore @Test def skillsMarginals_NaN {
val perfVarianceOnServe = 1
val perfVarianceOnReturn = 1
val pointModel = GenericPointCorModel(perfVarianceOnServe, perfVarianceOnReturn)
var skills = DenseCanonicalGaussian(DenseVector(2.592, 5.251), new DenseMatrix(2, 2, Array(-1d,0,0,-1)).t)
println(pointModel.skillMarginals(skills, true).mean)
}
@Test def pointProb {
assertEquals(0.5276, pointModel.pointProb(DenseCanonicalGaussian(DenseVector(0.2, -0.2), new DenseMatrix(2, 2, Array(0.7, 0.0, 0.0, 0.5)).t)), 0.0001)
assertEquals(0.528, pointModel.pointProb(DenseCanonicalGaussian(DenseVector(0.2, -0.2), new DenseMatrix(2, 2, Array(0.7, 0.45, 0.45, 0.5)).t)), 0.0001)
assertEquals(0.7268, pointModel.pointProb(DenseCanonicalGaussian(DenseVector(1.7, -1.8), new DenseMatrix(2, 2, Array(0.9, 0, 0, 0.8)).t)), 0.0001)
assertEquals(0.7310, pointModel.pointProb(DenseCanonicalGaussian(DenseVector(1.7, -1.8), new DenseMatrix(2, 2, Array(0.9, 0.7, 0.7, 0.8)).t)), 0.0001)
}
}
|
danielkorzekwa/tennis-player-compare
|
multiskill/src/test/scala/dk/tennis/compare/rating/multiskill/model/pointcormodel/GenericPointCorModelTest.scala
|
Scala
|
bsd-2-clause
| 3,581
|
package com.cds.learnscala.json4s
import org.json4s._
import org.json4s.jackson.JsonMethods._
object Json4sDemo {
def main(args: Array[String]) {
    //=========== Parse a string into a json AST ==============
val json1 = """ {"name":"test", "numbers" : [1, 2, 3, 4] } """
println(parse(json1))
    //============= Build a json AST via the DSL ===========
import org.json4s.JsonDSL._
//DSL implicit AST
val json2 = ("name" -> "joe") ~ ("age" -> Some(35))
println(json2)
println(render(json2))
case class Winner(id: Long, numbers: List[Int])
case class Lotto(id: Long, winningNumbers: List[Int], winners: List[Winner], drawDate: Option[java.util.Date])
val winners = List(Winner(23, List(2, 45, 34, 23, 3, 5)), Winner(54, List(52, 3, 12, 11, 18, 22)))
val lotto = Lotto(5, List(2, 45, 34, 23, 7, 5, 3), winners, None)
val json3 =
"lotto" ->
("lotto-id" -> lotto.id) ~
("winning-numbers" -> lotto.winningNumbers) ~
("draw-date" -> lotto.drawDate.map(_.toString)) ~
("winners" ->
lotto.winners.map { w =>
("winner-id" -> w.id) ~
("numbers" -> w.numbers)
})
println(render(json3))
    //=================== Convert to String =============
//println(compact(json1))
println(compact(json2))
    //render normalizes empty values using the default formatting
println(compact(render(json2)))
println(compact(render(json3)))
//println(pretty(json1))
println(pretty(render(json2)))
println(pretty(render(json3)))
//=========== querying json ===============
val json4 = parse( """
{ "name": "joe",
"children": [
{
"name": "Mary",
"age": 5
},
{
"name": "Mazy",
"age": 3
}
]
}
""")
// TODO name:"joe"
val ages = for {
JObject(child) <- json4
JField("age", JInt(age)) <- child
if age > 4
} yield age
val name = for{
JString(name) <- json4
} yield name
println("ages:" + ages)
//List(joe, Mary, Mazy)
println(name)
//{"name":"joe","name":"Mary","name":"Mazy"}
println(compact(render(json4 \\\\ "name")))
//"joe"
println(compact(render(json4 \\ "name")))
//[{"name":"Mary","age":5},{"name":"Mazy","age":3}]
println(compact(render(json4 \\\\ "children")))
//["Mary","Mazy"]
println(compact(render(json4 \\ "children" \\ "name")))
//{"name":"joe"}
println(compact(render(json4 findField {
case JField("name", _) => true
case _ => false
})))
//{"name":"joe","name":"Mary","name":"Mazy"}
println(compact(render(json4 filterField {
case JField("name", _) => true
case _ => false
})))
//============== extract value =================
implicit val formats = DefaultFormats
val json5 = parse("""{"first_name":"Mary"}""")
case class Person(`firstName`: String)
val json6=json5 transformField {
case ("first_name", x) => ("firstName", x)
}
println("json6:" + json6.extract[Person])
println(json5.camelizeKeys.extract[Person])
//================ xml 2 json ===================
import org.json4s.Xml.{toJson, toXml}
val xml =
<users>
<user>
<id>1</id>
<name>Harry</name>
</user>
<user>
<id>2</id>
<name>David</name>
</user>
</users>
val json = toJson(xml)
println(pretty(render(json)))
println(pretty(render(json transformField {
case ("id", JString(s)) => ("id", JInt(s.toInt))
case ("user", x: JObject) => ("user", JArray(x :: Nil))
})))
//================ json 2 xml ===================
println(toXml(json))
}
}
|
anancds/scala-project
|
learn-scala/src/main/scala/com/cds/learnscala/json4s/Json4sDemo.scala
|
Scala
|
mit
| 3,841
|
package com.twitter.finagle.memcached.integration
import collection.JavaConversions._
import com.twitter.conversions.time._
import com.twitter.util.{Stopwatch, Duration, RandomSocket}
import java.net.{InetAddress, BindException, ServerSocket, InetSocketAddress}
import scala.collection._
import scala.util.control.NonFatal
object TestMemcachedServer {
def start(): Option[TestMemcachedServer] = start(None)
def start(address: Option[InetSocketAddress]): Option[TestMemcachedServer] = {
if (!Option(System.getProperty("USE_EXTERNAL_MEMCACHED")).isDefined) InternalMemcached.start(address)
else ExternalMemcached.start(address)
}
}
trait TestMemcachedServer {
val address: InetSocketAddress
def stop(): Unit
}
private[memcached] object InternalMemcached {
def start(address: Option[InetSocketAddress]): Option[TestMemcachedServer] = {
try {
val server = new InProcessMemcached(
address.getOrElse(new InetSocketAddress(InetAddress.getLoopbackAddress, 0))
)
Some(new TestMemcachedServer {
val address = server.start().boundAddress.asInstanceOf[InetSocketAddress]
def stop() { server.stop(true) }
})
} catch {
case NonFatal(_) => None
}
}
}
private[memcached] object ExternalMemcached { self =>
class MemcachedBinaryNotFound extends Exception
private[this] var processes: List[Process] = List()
private[this] val forbiddenPorts = 11000.until(11900)
private[this] var takenPorts: Set[Int] = Set[Int]()
// prevent us from taking a port that is anything close to a real memcached port.
private[this] def findAddress() = {
var address : Option[InetSocketAddress] = None
var tries = 100
while (address == None && tries >= 0) {
address = Some(RandomSocket.nextAddress())
if (forbiddenPorts.contains(address.get.getPort) ||
takenPorts.contains(address.get.getPort)) {
address = None
tries -= 1
Thread.sleep(5)
}
}
if (address==None) sys.error("Couldn't get an address for the external memcached")
takenPorts += address.getOrElse(
new InetSocketAddress(InetAddress.getLoopbackAddress, 0)
).getPort
address
}
// Use overloads instead of default args to support java integration tests
def start(): Option[TestMemcachedServer] = start(None)
def start(address: Option[InetSocketAddress]): Option[TestMemcachedServer] = {
def exec(address: InetSocketAddress): Process = {
val cmd = Seq("memcached", "-l", address.getHostName,
"-p", address.getPort.toString)
val builder = new ProcessBuilder(cmd.toList)
builder.start()
}
(address orElse findAddress()) flatMap { addr =>
try {
val proc = exec(addr)
processes :+= proc
if (waitForPort(addr.getPort))
Some(new TestMemcachedServer {
val address = addr
def stop() {
proc.destroy()
proc.waitFor()
}
})
else
None
} catch {
case _: Throwable => None
}
}
}
def waitForPort(port: Int, timeout: Duration = 5.seconds): Boolean = {
val elapsed = Stopwatch.start()
def loop(): Boolean = {
if (! isPortAvailable(port))
true
else if (timeout < elapsed())
false
else {
Thread.sleep(100)
loop()
}
}
loop()
}
def isPortAvailable(port: Int): Boolean = {
var ss: ServerSocket = null
var result = false
try {
ss = new ServerSocket(port)
ss.setReuseAddress(true)
result = true
} catch { case ex: BindException =>
result = (ex.getMessage != "Address already in use")
} finally {
if (ss != null)
ss.close()
}
result
}
// Make sure the process is always killed eventually
Runtime.getRuntime().addShutdownHook(new Thread {
override def run() {
processes foreach { p =>
p.destroy()
p.waitFor()
}
}
})
}
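// Illustrative helper (editorial addition, not part of the upstream file): how a test would
// typically obtain and release a server through the factory above. start() yields None when
// neither an in-process nor an external memcached could be brought up, so callers should guard
// on the Option.
private[memcached] object TestMemcachedServerExample {
  def withServer(test: TestMemcachedServer => Unit): Unit =
    TestMemcachedServer.start() match {
      case Some(server) =>
        try test(server) finally server.stop()
      case None =>
        () // no server available; tests usually skip or cancel in this case
    }
}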
|
koshelev/finagle
|
finagle-memcached/src/test/scala/com/twitter/finagle/memcached/integration/ExternalMemcached.scala
|
Scala
|
apache-2.0
| 4,008
|
package se.gigurra.leavu3.datamodel
import se.gigurra.leavu3.interfaces.{Dlink, GameIn}
/**
* Created by kjolh on 3/30/2016.
*/
object self {
def dlinkCallsign: String = Dlink.config.callsign
def planeId: Int = GameIn.snapshot.metaData.planeId
def modelTime: Double = GameIn.snapshot.metaData.modelTime
def coalition: Int = GameIn.snapshot.selfData.coalitionId
def pitch: Float = GameIn.snapshot.selfData.pitch
def roll: Float = GameIn.snapshot.selfData.roll
def heading: Float = GameIn.snapshot.selfData.heading
def position: Vec3 = GameIn.snapshot.selfData.position
def velocity: Vec3 = GameIn.snapshot.flightModel.velocity
}
|
GiGurra/leavu3
|
src/main/scala/se/gigurra/leavu3/datamodel/self.scala
|
Scala
|
mit
| 653
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.dstream
import org.apache.spark.Partitioner
import org.apache.spark.rdd.RDD
import org.apache.spark.SparkContext._
import org.apache.spark.streaming.{Duration, Time}
import scala.reflect.ClassTag
private[streaming]
class ShuffledDStream[K: ClassTag, V: ClassTag, C: ClassTag](
parent: DStream[(K, V)],
createCombiner: V => C,
mergeValue: (C, V) => C,
mergeCombiner: (C, C) => C,
partitioner: Partitioner,
mapSideCombine: Boolean = true
) extends DStream[(K, C)] (parent.ssc) {
override def dependencies: List[DStream[_]] = List(parent)
override def slideDuration: Duration = parent.slideDuration
override def compute(validTime: Time): Option[RDD[(K, C)]] = {
parent.getOrCompute(validTime) match {
case Some(rdd) => Some(rdd.combineByKey[C](
createCombiner, mergeValue, mergeCombiner, partitioner, mapSideCombine))
case None => None
}
}
}
|
practice-vishnoi/dev-spark-1
|
streaming/src/main/scala/org/apache/spark/streaming/dstream/ShuffledDStream.scala
|
Scala
|
apache-2.0
| 1,745
|
package com.eevolution.context.dictionary.infrastructure.repository
import com.eevolution.context.dictionary.domain.model.PrintLabel
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: eduardo.moreno@e-evolution.com, http://www.e-evolution.com , http://github.com/e-Evolution
* Created by eduardo.moreno@e-evolution.com , www.e-evolution.com
*/
/**
* Print Label Mapping
*/
trait PrintLabelMapping {
val queryPrintLabel = quote {
querySchema[PrintLabel]("AD_PrintLabel",
_.printLabelId -> "AD_PrintLabel_ID",
_.tenantId -> "AD_Client_ID",
      _.organizationId -> "AD_Org_ID",
_.isActive -> "IsActive",
_.created -> "Created",
_.createdBy -> "CreatedBy",
_.updated -> "Updated",
_.updatedBy -> "UpdatedBy",
_.name -> "Name",
_.description -> "Description",
_.entityId -> "AD_Table_ID",
_.printerName -> "PrinterName",
_.isLandscape -> "IsLAndScape",
_.labelHeight -> "LabelHeight",
_.labelWidth -> "LabelWidth",
_.labelPrinterId -> "AD_LabelPrinter_ID",
_.uuid -> "UUID")
}
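  // A minimal usage sketch, not part of the original trait: with the Quill context members
  // imported from DbContext above, the mapping can back ordinary queries. The method name
  // and `id` parameter below are hypothetical.
  //
  //   def findById(id: Int) =
  //     run(queryPrintLabel.filter(_.printLabelId == lift(id)))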
}
|
adempiere/ADReactiveSystem
|
dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/PrintLabelMapping.scala
|
Scala
|
gpl-3.0
| 1,863
|
import sbt._
import Keys._
object ScorexBuild extends Build {
lazy val buildSettings = Seq(
organization := "org.consensusresearch",
version := "1.0.4-SNAPSHOT",
scalaVersion := "2.11.7"
)
def subModule(id: String): Project = Project(id = id, base = file(s"scorex-$id"))
lazy val root = Project(id = "scorex", base = file(".")).aggregate(crypto).dependsOn(crypto)
lazy val crypto = subModule("crypto")
}
|
pozharko/Scorex-Lagonaki
|
project/ScorexBuild.scala
|
Scala
|
cc0-1.0
| 432
|
/*
*
* Copyright (c) 2016 Sylvain Julmy
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to the
* Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
package klughdl.core.model
import spinal.core._
sealed trait Port {
  lazy val dotName: String = name.replaceAll("\\.", "")
val name: String
val hdlType: String
def isInput: Boolean = this match {
case InputPort(_, _) => true
case OutputPort(_, _) => false
}
def isOutput: Boolean = this match {
case InputPort(_, _) => false
case OutputPort(_, _) => true
}
def getType: String = this match {
case _: InputPort => "input"
case _: OutputPort => "output"
}
}
object Port {
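  // Turns a raw SpinalHDL port string into a short dotted name: whitespace is stripped,
  // everything after the first ':' (the type) and before the last '/' (the path) is
  // dropped, and the last two underscore-separated segments are joined with a dot.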
def parsePort(rawPort: String): String = {
val tmp = rawPort
.replaceAll(" ", "")
.split(":").head
.split("/").last
.split("_")
s"${tmp(tmp.length - 2)}.${tmp(tmp.length - 1)}"
}
def apply(bt: BaseType): Port = {
def nameIoAndType(baseType: BaseType): (String, String, String) = {
val full = baseType.toString().split("/").last
val name = full.split(":").head.replaceAll(" ", "").replaceAll("_", ".")
val ioType = full.split(":").last.replaceFirst(" ", "").split(" ")
(name, ioType(0), if (ioType(1) == "input") "output" else "input")
}
val (n, io, t) = nameIoAndType(bt)
Port(n, io, t)
}
def apply(name: String, io: String, hdlType: String): Port = io match {
case "input" | "in" => InputPort(name, hdlType)
case "output" | "out" => OutputPort(name, hdlType)
}
}
final case class InputPort(name: String, hdlType: String) extends Port
final case class OutputPort(name: String, hdlType: String) extends Port
|
SnipyJulmy/MSE_1617_PA
|
KlugHDL/src/main/scala/klughdl/core/model/Port.scala
|
Scala
|
gpl-2.0
| 2,294
|
package org.pinky.code.util.jdbc
import java.sql.DriverManager
import DriverManager.{getConnection => connect}
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.Spec
import org.pinky.util.ARM.using
import org.pinky.util.jdbc.ConnectionWrapper
/**
* Created by IntelliJ IDEA.
* User: phausel
* Date: Aug 4, 2009
* Time: 1:02:42 PM
* To change this template use File | Settings | File Templates.
*/
class DBSpec extends Spec with ShouldMatchers {
describe("a jdbc utility") {
it ("should create connection a table and query from table using a prepared statement") {
val setup = Array(
"""
drop table if exists person
""","""
create table person(
id identity,
tp int,
name varchar not null)
""")
val h2driver = Class.forName("org.h2.Driver")
val db = new ConnectionWrapper(connect("jdbc:h2:mem:", "sa", ""))
for (conn <- using (db.connection)) {
db execute setup
val insertPerson = db execute("insert into person(tp, name) values(?, ?)",1,"john")
val ret = db.query("SELECT * FROM PERSON WHERE ID=?",1)
ret.foreach( row => {row("name") should equal("john") } )
db execute("insert into person(tp, name) values(?, ?)",2,"peter")
val ret2 = db.query("SELECT * FROM PERSON WHERE ID=?", 2)
ret2.toList(0)("name") should equal("peter")
val people = db.queryFor[PersonDB]("SELECT * FROM PERSON")
people.size should be (2)
people(0).id should be (1)
assert(people(0).name=="john")
people(1).id should be (2)
}
}
}
}
case class PersonDB(id:Long,tp:Int,name:String)
|
pk11/pinky-util
|
src/test/scala/org/pinky/util/jdbc/DBSpec.scala
|
Scala
|
bsd-3-clause
| 1,860
|
package io.udash.generator.plugins.jetty
import java.io.File
import io.udash.generator.exceptions.InvalidConfiguration
import io.udash.generator.plugins._
import io.udash.generator.plugins.sbt.{SBTModulesPlugin, SBTProjectFiles}
import io.udash.generator.plugins.utils.{FrontendPaths, UtilPaths}
import io.udash.generator.utils._
import io.udash.generator.{FrontendOnlyProject, GeneratorPlugin, GeneratorSettings, StandardProject}
object JettyLauncherPlugin extends GeneratorPlugin with SBTProjectFiles with FrontendPaths with UtilPaths {
override val dependencies = Seq(SBTModulesPlugin)
override def run(settings: GeneratorSettings): GeneratorSettings = {
settings.projectType match {
case FrontendOnlyProject =>
throw InvalidConfiguration("You can not add Jetty launcher into frontend only project.")
case StandardProject(backend, _, frontend) =>
updateSBTConfig(settings, frontend)
createJettyServer(rootPackageInSrc(settings.rootDirectory.subFile(backend), settings), settings, backend)
}
settings
}
private def updateSBTConfig(settings: GeneratorSettings, frontendModuleName: String): Unit = {
val sbtConfigFile = buildSbt(settings)
val sbtDepsFile = dependenciesScala(settings)
val udashBuildFile = udashBuildScala(settings)
requireFilesExist(Seq(sbtConfigFile, sbtDepsFile, udashBuildFile))
appendOnPlaceholder(sbtConfigFile)(RootSettingsPlaceholder,
s""",
| mainClass in Compile := Some("${settings.rootPackage.mkPackage()}.Launcher")""".stripMargin)
appendOnPlaceholder(sbtConfigFile)(BackendSettingsPlaceholder,
s""",
|
| compile := (compile in Compile).value,
| (compile in Compile) := (compile in Compile).dependsOn(copyStatics).value,
| copyStatics := IO.copyDirectory((crossTarget in $frontendModuleName).value / StaticFilesDir, (target in Compile).value / StaticFilesDir),
| copyStatics := copyStatics.dependsOn(compileStatics in $frontendModuleName).value,
|
| mappings in (Compile, packageBin) ++= {
| copyStatics.value
| ((target in Compile).value / StaticFilesDir).***.get map { file =>
| file -> file.getAbsolutePath.stripPrefix((target in Compile).value.getAbsolutePath)
| }
| },
|
| watchSources ++= (sourceDirectory in $frontendModuleName).value.***.get""".stripMargin)
appendOnPlaceholder(sbtDepsFile)(DependenciesVariablesPlaceholder,
s"""
| val jettyVersion = "${settings.jettyVersion}"""".stripMargin)
appendOnPlaceholder(sbtDepsFile)(DependenciesBackendPlaceholder,
s""",
| "org.eclipse.jetty" % "jetty-server" % jettyVersion,
| "org.eclipse.jetty" % "jetty-servlet" % jettyVersion""".stripMargin)
appendOnPlaceholder(udashBuildFile)(UdashBuildPlaceholder,
s"""
| val copyStatics = taskKey[Unit]("Copy frontend static files into backend target.")""".stripMargin)
}
private def createJettyServer(rootPackage: File, settings: GeneratorSettings, backendModuleName: String): Unit = {
val resourcesDir = resources(settings.rootDirectory.subFile(backendModuleName))
val logbackXml = resourcesDir.subFile("logback.xml")
val jettyDir = "jetty"
val jettyPackage = rootPackage.subFile(jettyDir)
val appServerScala = jettyPackage.subFile("ApplicationServer.scala")
val launcherScala = rootPackage.subFile("Launcher.scala")
requireFilesExist(Seq(rootPackage))
createDirs(Seq(jettyPackage, resourcesDir))
createFiles(Seq(appServerScala, launcherScala, logbackXml))
writeFile(appServerScala)(
s"""package ${settings.rootPackage.mkPackage()}.$jettyDir
|
|import org.eclipse.jetty.server.Server
|import org.eclipse.jetty.server.handler.gzip.GzipHandler
|import org.eclipse.jetty.server.session.SessionHandler
|import org.eclipse.jetty.servlet.{DefaultServlet, ServletContextHandler, ServletHolder}
|
|class ApplicationServer(val port: Int, resourceBase: String) {
| private val server = new Server(port)
| private val contextHandler = new ServletContextHandler
|
| contextHandler.setSessionHandler(new SessionHandler)
| contextHandler.setGzipHandler(new GzipHandler)
| server.setHandler(contextHandler)
|
| def start() = server.start()
|
| def stop() = server.stop()
|
| private val appHolder = {
| val appHolder = new ServletHolder(new DefaultServlet)
| appHolder.setAsyncSupported(true)
| appHolder.setInitParameter("resourceBase", resourceBase)
| appHolder
| }
| contextHandler.addServlet(appHolder, "/*")$BackendAppServerPlaceholder
|}
|
""".stripMargin
)
writeFile(logbackXml)(
"""<configuration>
| <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
| <encoder>
| <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
| </encoder>
| </appender>
|
| <timestamp key="bySecond" datePattern="yyyyMMdd'T'HHmmss"/>
|
| <appender name="FILE" class="ch.qos.logback.core.FileAppender">
| <file>logs/udash-guide-${bySecond}.log</file>
| <append>true</append>
| <encoder>
| <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
| </encoder>
| </appender>
|
| <root level="info">
| <appender-ref ref="STDOUT" />
| <appender-ref ref="FILE" />
| </root>
|</configuration>""".stripMargin)
writeFile(launcherScala)(
s"""package ${settings.rootPackage.mkPackage()}
|
|import ${settings.rootPackage.mkPackage()}.$jettyDir.ApplicationServer
|
|object Launcher {
| def main(args: Array[String]): Unit = {
| val server = new ApplicationServer(8080, "$backendModuleName/target/UdashStatic/WebContent")
| server.start()
| }
|}
|
""".stripMargin
)
}
}
|
UdashFramework/udash-generator
|
core/src/main/scala/io/udash/generator/plugins/jetty/JettyLauncherPlugin.scala
|
Scala
|
apache-2.0
| 6,398
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.fuberlin.wiwiss.silk.hadoop.impl
import org.apache.hadoop.mapreduce.InputSplit
import java.io.{DataInput, DataOutput}
import org.apache.hadoop.io.Writable
class SilkInputSplit(var blockIndex : Int, var sourcePartition : Int, var targetPartition : Int, var size : Long, var hosts : Array[String]) extends InputSplit with Writable
{
def this() = this(0, 0, 0, 0, null)
/**
* Get the size of the split, so that the input splits can be sorted by size.
*/
override def getLength() : Long = size
/**
* Get the list of nodes where both partitions of this split would be local if any.
* If no host holds both partitions, returns the list of hosts which hold at least one partition.
*/
override def getLocations() : Array[String] = hosts
override def write(out : DataOutput) : Unit =
{
out.writeInt(blockIndex)
out.writeInt(sourcePartition)
out.writeInt(targetPartition)
out.writeLong(size)
out.writeInt(hosts.length)
for(host <- hosts)
{
out.writeUTF(host)
}
}
override def readFields(in : DataInput) : Unit =
{
blockIndex = in.readInt()
sourcePartition = in.readInt()
targetPartition = in.readInt()
size = in.readLong()
hosts = new Array[String](in.readInt())
for(i <- 0 until hosts.length)
{
hosts(i) = in.readUTF()
}
}
}
|
fusepoolP3/p3-silk
|
silk-mapreduce/src/main/scala/de/fuberlin/wiwiss/silk/hadoop/impl/SilkInputSplit.scala
|
Scala
|
apache-2.0
| 1,972
|
/*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.eval.stream
import akka.NotUsed
import akka.http.scaladsl.model.HttpMethods
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.model.MediaTypes
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model.headers._
import akka.stream.scaladsl.Compression
import akka.stream.scaladsl.Source
import akka.util.ByteString
import com.typesafe.scalalogging.StrictLogging
import scala.util.Failure
import scala.util.Success
/**
* Helper for creating a stream source for a given host.
*/
private[stream] object HostSource extends StrictLogging {
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
/**
* Create a new stream source for the response of `uri`. The URI should be a streaming
* source such as SSE that can have the messages framed by the new line and will be
* continuously emitting data. If the response ends for any reason, then the source
* will attempt to reconnect. Use a kill switch to shut it down.
*
* @param uri
* URI for the remote stream. Typically this should be an endpoint that returns an
* SSE stream.
* @param client
* Client to use for making the request. This is typically used in tests to provide
* responses without actually making network calls.
* @param delay
* How long to delay between attempts to connect to the host.
* @return
* Source that emits the response stream from the host.
*/
def apply(
uri: String,
client: SimpleClient,
delay: FiniteDuration = 1.second
): Source[ByteString, NotUsed] = {
EvaluationFlows.repeat(uri, delay).flatMapConcat(singleCall(client))
}
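  // A minimal usage sketch (not part of the original file), following the scaladoc advice
  // to use a kill switch for shutdown. The `system` and `client` values and the URI are
  // assumptions, and akka.stream.KillSwitches plus the scaladsl Keep/Sink are assumed to
  // be imported:
  //
  //   implicit val system: ActorSystem = ActorSystem("host-source-demo")
  //   val (killSwitch, done) = HostSource("http://localhost:7101/stream", client)
  //     .viaMat(KillSwitches.single)(Keep.right)
  //     .toMat(Sink.foreach(bs => println(bs.utf8String)))(Keep.both)
  //     .run()
  //   // later, to stop reconnect attempts and terminate the stream:
  //   killSwitch.shutdown()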
private def singleCall(client: SimpleClient)(uri: String): Source[ByteString, Any] = {
logger.info(s"subscribing to $uri")
val headers = List(Accept(MediaTypes.`text/event-stream`))
val request = HttpRequest(HttpMethods.GET, uri, headers)
Source
.single(request)
.via(client)
.flatMapConcat {
case Success(res: HttpResponse) if res.status == StatusCodes.OK =>
// Framing needs to take place on the byte stream before merging chunks
// with other hosts
unzipIfNeeded(res)
.via(EvaluationFlows.sseFraming)
.recover {
case t: Throwable =>
logger.warn(s"stream failed $uri", t)
ByteString.empty
}
.watchTermination() { (_, f) =>
f.onComplete {
case Success(_) =>
logger.info(s"lost connection to $uri")
case Failure(t) =>
logger.info(s"lost connection to $uri", t)
}
}
case Success(res: HttpResponse) =>
logger.warn(s"subscription attempt failed with status ${res.status}")
empty
case Failure(t) =>
logger.warn(s"subscription attempt failed with exception", t)
empty
}
}
private def empty: Source[ByteString, NotUsed] = {
Source.empty[ByteString]
}
private def unzipIfNeeded(res: HttpResponse): Source[ByteString, Any] = {
val isCompressed = res.headers.contains(`Content-Encoding`(HttpEncodings.gzip))
val dataBytes = res.entity.withoutSizeLimit().dataBytes
if (isCompressed) dataBytes.via(Compression.gunzip()) else dataBytes
}
}
|
Netflix/atlas
|
atlas-eval/src/main/scala/com/netflix/atlas/eval/stream/HostSource.scala
|
Scala
|
apache-2.0
| 4,069
|
// Copyright (C) 2014 Open Data ("Open Data" refers to
// one or more of the following companies: Open Data Partners LLC,
// Open Data Research LLC, or Open Data Capital LLC.)
//
// This file is part of Hadrian.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test.scala.lib.parse
import scala.collection.JavaConversions._
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner
import org.scalatest.Matchers
import com.opendatagroup.hadrian.jvmcompiler._
import com.opendatagroup.hadrian.errors._
import com.opendatagroup.hadrian.data._
import test.scala._
@RunWith(classOf[JUnitRunner])
class LibCastSuite extends FlatSpec with Matchers {
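  // The cast.signed / cast.unsigned expectations below are the two's-complement results of
  // truncating the input to the requested bit width: cast.signed reinterprets the low N bits
  // as a signed value, cast.unsigned as a non-negative value in [0, 2^N).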
"cast" must "do toSigned" taggedAs(Lib, LibCast) in {
val engine2 = PFAEngine.fromYaml("""
input: long
output: long
action: {cast.signed: [input, 2]}
""").head
engine2.action(java.lang.Long.valueOf(-2)).asInstanceOf[java.lang.Long].longValue should be (-2L)
engine2.action(java.lang.Long.valueOf(-1)).asInstanceOf[java.lang.Long].longValue should be (-1L)
engine2.action(java.lang.Long.valueOf(0)).asInstanceOf[java.lang.Long].longValue should be (0L)
engine2.action(java.lang.Long.valueOf(1)).asInstanceOf[java.lang.Long].longValue should be (1L)
engine2.action(java.lang.Long.valueOf(2)).asInstanceOf[java.lang.Long].longValue should be (-2L)
val engine8 = PFAEngine.fromYaml("""
input: long
output: long
action: {cast.signed: [input, 8]}
""").head
engine8.action(java.lang.Long.valueOf(-2 - 256)).asInstanceOf[java.lang.Long].longValue should be (-2L)
engine8.action(java.lang.Long.valueOf(-1 - 256)).asInstanceOf[java.lang.Long].longValue should be (-1L)
engine8.action(java.lang.Long.valueOf(0 - 256)).asInstanceOf[java.lang.Long].longValue should be (0L)
engine8.action(java.lang.Long.valueOf(1 - 256)).asInstanceOf[java.lang.Long].longValue should be (1L)
engine8.action(java.lang.Long.valueOf(2 - 256)).asInstanceOf[java.lang.Long].longValue should be (2L)
engine8.action(java.lang.Long.valueOf(-2 - 128)).asInstanceOf[java.lang.Long].longValue should be (126L)
engine8.action(java.lang.Long.valueOf(-1 - 128)).asInstanceOf[java.lang.Long].longValue should be (127L)
engine8.action(java.lang.Long.valueOf(0 - 128)).asInstanceOf[java.lang.Long].longValue should be (-128L)
engine8.action(java.lang.Long.valueOf(1 - 128)).asInstanceOf[java.lang.Long].longValue should be (-127L)
engine8.action(java.lang.Long.valueOf(2 - 128)).asInstanceOf[java.lang.Long].longValue should be (-126L)
engine8.action(java.lang.Long.valueOf(-2)).asInstanceOf[java.lang.Long].longValue should be (-2L)
engine8.action(java.lang.Long.valueOf(-1)).asInstanceOf[java.lang.Long].longValue should be (-1L)
engine8.action(java.lang.Long.valueOf(0)).asInstanceOf[java.lang.Long].longValue should be (0L)
engine8.action(java.lang.Long.valueOf(1)).asInstanceOf[java.lang.Long].longValue should be (1L)
engine8.action(java.lang.Long.valueOf(2)).asInstanceOf[java.lang.Long].longValue should be (2L)
engine8.action(java.lang.Long.valueOf(-2 + 128)).asInstanceOf[java.lang.Long].longValue should be (126L)
engine8.action(java.lang.Long.valueOf(-1 + 128)).asInstanceOf[java.lang.Long].longValue should be (127L)
engine8.action(java.lang.Long.valueOf(0 + 128)).asInstanceOf[java.lang.Long].longValue should be (-128L)
engine8.action(java.lang.Long.valueOf(1 + 128)).asInstanceOf[java.lang.Long].longValue should be (-127L)
engine8.action(java.lang.Long.valueOf(2 + 128)).asInstanceOf[java.lang.Long].longValue should be (-126L)
engine8.action(java.lang.Long.valueOf(-2 + 256)).asInstanceOf[java.lang.Long].longValue should be (-2L)
engine8.action(java.lang.Long.valueOf(-1 + 256)).asInstanceOf[java.lang.Long].longValue should be (-1L)
engine8.action(java.lang.Long.valueOf(0 + 256)).asInstanceOf[java.lang.Long].longValue should be (0L)
engine8.action(java.lang.Long.valueOf(1 + 256)).asInstanceOf[java.lang.Long].longValue should be (1L)
engine8.action(java.lang.Long.valueOf(2 + 256)).asInstanceOf[java.lang.Long].longValue should be (2L)
val engine64 = PFAEngine.fromYaml("""
input: long
output: long
action: {cast.signed: [input, 64]}
""").head
engine64.action(java.lang.Long.valueOf(java.lang.Long.MIN_VALUE)).asInstanceOf[java.lang.Long].longValue should be (java.lang.Long.MIN_VALUE)
engine64.action(java.lang.Long.valueOf(java.lang.Long.MIN_VALUE + 1L)).asInstanceOf[java.lang.Long].longValue should be (java.lang.Long.MIN_VALUE + 1L)
engine64.action(java.lang.Long.valueOf(java.lang.Long.MIN_VALUE + 2L)).asInstanceOf[java.lang.Long].longValue should be (java.lang.Long.MIN_VALUE + 2L)
engine64.action(java.lang.Long.valueOf(-1)).asInstanceOf[java.lang.Long].longValue should be (-1L)
engine64.action(java.lang.Long.valueOf(0)).asInstanceOf[java.lang.Long].longValue should be (0L)
engine64.action(java.lang.Long.valueOf(1)).asInstanceOf[java.lang.Long].longValue should be (1L)
engine64.action(java.lang.Long.valueOf(java.lang.Long.MAX_VALUE - 2L)).asInstanceOf[java.lang.Long].longValue should be (java.lang.Long.MAX_VALUE - 2L)
engine64.action(java.lang.Long.valueOf(java.lang.Long.MAX_VALUE - 1L)).asInstanceOf[java.lang.Long].longValue should be (java.lang.Long.MAX_VALUE - 1L)
engine64.action(java.lang.Long.valueOf(java.lang.Long.MAX_VALUE)).asInstanceOf[java.lang.Long].longValue should be (java.lang.Long.MAX_VALUE)
}
it must "do toUnsigned" taggedAs(Lib, LibCast) in {
val engine1 = PFAEngine.fromYaml("""
input: long
output: long
action: {cast.unsigned: [input, 1]}
""").head
engine1.action(java.lang.Long.valueOf(-2)).asInstanceOf[java.lang.Long].longValue should be (0L)
engine1.action(java.lang.Long.valueOf(-1)).asInstanceOf[java.lang.Long].longValue should be (1L)
engine1.action(java.lang.Long.valueOf(0)).asInstanceOf[java.lang.Long].longValue should be (0L)
engine1.action(java.lang.Long.valueOf(1)).asInstanceOf[java.lang.Long].longValue should be (1L)
engine1.action(java.lang.Long.valueOf(2)).asInstanceOf[java.lang.Long].longValue should be (0L)
val engine8 = PFAEngine.fromYaml("""
input: long
output: long
action: {cast.unsigned: [input, 8]}
""").head
engine8.action(java.lang.Long.valueOf(-2 - 2*256)).asInstanceOf[java.lang.Long].longValue should be (254L)
engine8.action(java.lang.Long.valueOf(-1 - 2*256)).asInstanceOf[java.lang.Long].longValue should be (255L)
engine8.action(java.lang.Long.valueOf(0 - 2*256)).asInstanceOf[java.lang.Long].longValue should be (0L)
engine8.action(java.lang.Long.valueOf(1 - 2*256)).asInstanceOf[java.lang.Long].longValue should be (1L)
engine8.action(java.lang.Long.valueOf(2 - 2*256)).asInstanceOf[java.lang.Long].longValue should be (2L)
engine8.action(java.lang.Long.valueOf(-2 - 256)).asInstanceOf[java.lang.Long].longValue should be (254L)
engine8.action(java.lang.Long.valueOf(-1 - 256)).asInstanceOf[java.lang.Long].longValue should be (255L)
engine8.action(java.lang.Long.valueOf(0 - 256)).asInstanceOf[java.lang.Long].longValue should be (0L)
engine8.action(java.lang.Long.valueOf(1 - 256)).asInstanceOf[java.lang.Long].longValue should be (1L)
engine8.action(java.lang.Long.valueOf(2 - 256)).asInstanceOf[java.lang.Long].longValue should be (2L)
engine8.action(java.lang.Long.valueOf(-2)).asInstanceOf[java.lang.Long].longValue should be (254L)
engine8.action(java.lang.Long.valueOf(-1)).asInstanceOf[java.lang.Long].longValue should be (255L)
engine8.action(java.lang.Long.valueOf(0)).asInstanceOf[java.lang.Long].longValue should be (0L)
engine8.action(java.lang.Long.valueOf(1)).asInstanceOf[java.lang.Long].longValue should be (1L)
engine8.action(java.lang.Long.valueOf(2)).asInstanceOf[java.lang.Long].longValue should be (2L)
engine8.action(java.lang.Long.valueOf(-2 + 256)).asInstanceOf[java.lang.Long].longValue should be (254L)
engine8.action(java.lang.Long.valueOf(-1 + 256)).asInstanceOf[java.lang.Long].longValue should be (255L)
engine8.action(java.lang.Long.valueOf(0 + 256)).asInstanceOf[java.lang.Long].longValue should be (0L)
engine8.action(java.lang.Long.valueOf(1 + 256)).asInstanceOf[java.lang.Long].longValue should be (1L)
engine8.action(java.lang.Long.valueOf(2 + 256)).asInstanceOf[java.lang.Long].longValue should be (2L)
engine8.action(java.lang.Long.valueOf(-2 + 2*256)).asInstanceOf[java.lang.Long].longValue should be (254L)
engine8.action(java.lang.Long.valueOf(-1 + 2*256)).asInstanceOf[java.lang.Long].longValue should be (255L)
engine8.action(java.lang.Long.valueOf(0 + 2*256)).asInstanceOf[java.lang.Long].longValue should be (0L)
engine8.action(java.lang.Long.valueOf(1 + 2*256)).asInstanceOf[java.lang.Long].longValue should be (1L)
engine8.action(java.lang.Long.valueOf(2 + 2*256)).asInstanceOf[java.lang.Long].longValue should be (2L)
val engine63 = PFAEngine.fromYaml("""
input: long
output: long
action: {cast.unsigned: [input, 63]}
""").head
engine63.action(java.lang.Long.valueOf(java.lang.Long.MIN_VALUE)).asInstanceOf[java.lang.Long].longValue should be (0L)
engine63.action(java.lang.Long.valueOf(java.lang.Long.MIN_VALUE + 1L)).asInstanceOf[java.lang.Long].longValue should be (1L)
engine63.action(java.lang.Long.valueOf(java.lang.Long.MIN_VALUE + 2L)).asInstanceOf[java.lang.Long].longValue should be (2L)
engine63.action(java.lang.Long.valueOf(-1)).asInstanceOf[java.lang.Long].longValue should be (java.lang.Long.MAX_VALUE)
engine63.action(java.lang.Long.valueOf(0)).asInstanceOf[java.lang.Long].longValue should be (0L)
engine63.action(java.lang.Long.valueOf(1)).asInstanceOf[java.lang.Long].longValue should be (1L)
engine63.action(java.lang.Long.valueOf(java.lang.Long.MAX_VALUE - 2L)).asInstanceOf[java.lang.Long].longValue should be (java.lang.Long.MAX_VALUE - 2L)
engine63.action(java.lang.Long.valueOf(java.lang.Long.MAX_VALUE - 1L)).asInstanceOf[java.lang.Long].longValue should be (java.lang.Long.MAX_VALUE - 1L)
engine63.action(java.lang.Long.valueOf(java.lang.Long.MAX_VALUE)).asInstanceOf[java.lang.Long].longValue should be (java.lang.Long.MAX_VALUE)
}
it must "do toInt" taggedAs(Lib, LibCast) in {
PFAEngine.fromYaml("""
input: int
output: int
action: {cast.int: input}
""").head.action(java.lang.Integer.valueOf(5)).asInstanceOf[java.lang.Integer] should be (5)
PFAEngine.fromYaml("""
input: long
output: int
action: {cast.int: input}
""").head.action(java.lang.Long.valueOf(5)).asInstanceOf[java.lang.Integer] should be (5)
PFAEngine.fromYaml("""
input: float
output: int
action: {cast.int: input}
""").head.action(java.lang.Float.valueOf(5.0F)).asInstanceOf[java.lang.Integer] should be (5)
PFAEngine.fromYaml("""
input: double
output: int
action: {cast.int: input}
""").head.action(java.lang.Double.valueOf(5.0)).asInstanceOf[java.lang.Integer] should be (5)
}
it must "do toLong" taggedAs(Lib, LibCast) in {
PFAEngine.fromYaml("""
input: int
output: long
action: {cast.long: input}
""").head.action(java.lang.Integer.valueOf(5)).asInstanceOf[java.lang.Long] should be (5L)
PFAEngine.fromYaml("""
input: long
output: long
action: {cast.long: input}
""").head.action(java.lang.Long.valueOf(5)).asInstanceOf[java.lang.Long] should be (5L)
PFAEngine.fromYaml("""
input: float
output: long
action: {cast.long: input}
""").head.action(java.lang.Float.valueOf(5.0F)).asInstanceOf[java.lang.Long] should be (5L)
PFAEngine.fromYaml("""
input: double
output: long
action: {cast.long: input}
""").head.action(java.lang.Double.valueOf(5.0)).asInstanceOf[java.lang.Long] should be (5L)
}
it must "do toFloat" taggedAs(Lib, LibCast) in {
PFAEngine.fromYaml("""
input: int
output: float
action: {cast.float: input}
""").head.action(java.lang.Integer.valueOf(5)).asInstanceOf[java.lang.Float] should be (5.0F)
PFAEngine.fromYaml("""
input: long
output: float
action: {cast.float: input}
""").head.action(java.lang.Long.valueOf(5)).asInstanceOf[java.lang.Float] should be (5.0F)
PFAEngine.fromYaml("""
input: float
output: float
action: {cast.float: input}
""").head.action(java.lang.Float.valueOf(5.0F)).asInstanceOf[java.lang.Float] should be (5.0F)
PFAEngine.fromYaml("""
input: double
output: float
action: {cast.float: input}
""").head.action(java.lang.Double.valueOf(5.0)).asInstanceOf[java.lang.Float] should be (5.0F)
}
it must "do toDouble" taggedAs(Lib, LibCast) in {
PFAEngine.fromYaml("""
input: int
output: double
action: {cast.double: input}
""").head.action(java.lang.Integer.valueOf(5)).asInstanceOf[java.lang.Double] should be (5.0)
PFAEngine.fromYaml("""
input: long
output: double
action: {cast.double: input}
""").head.action(java.lang.Long.valueOf(5)).asInstanceOf[java.lang.Double] should be (5.0)
PFAEngine.fromYaml("""
input: float
output: double
action: {cast.double: input}
""").head.action(java.lang.Float.valueOf(5.0F)).asInstanceOf[java.lang.Double] should be (5.0)
PFAEngine.fromYaml("""
input: double
output: double
action: {cast.double: input}
""").head.action(java.lang.Double.valueOf(5.0)).asInstanceOf[java.lang.Double] should be (5.0)
}
"array fanouts" must "do fanoutBoolean" taggedAs(Lib, LibCast) in {
val engine1 = PFAEngine.fromYaml("""
input:
type: enum
name: Something
symbols: [zero, one, two, three, four, five, six, seven, eight, nine]
output:
type: array
items: boolean
action:
cast.fanoutBoolean: input
""").head
engine1.action(engine1.jsonInput(""""three"""")).asInstanceOf[PFAArray[Boolean]].toVector should be (Vector(false, false, false, true, false, false, false, false, false, false))
val engine2 = PFAEngine.fromYaml("""
input: string
output:
type: array
items: boolean
cells:
dictionary:
type: {type: array, items: string}
init: [zero, one, two, three, four, five, six, seven, eight, nine]
action:
cast.fanoutBoolean: [input, {cell: dictionary}, false]
""").head
engine2.action("three").asInstanceOf[PFAArray[Boolean]].toVector should be (Vector(false, false, false, true, false, false, false, false, false, false))
engine2.action("sdfasdf").asInstanceOf[PFAArray[Boolean]].toVector should be (Vector(false, false, false, false, false, false, false, false, false, false))
val engine3 = PFAEngine.fromYaml("""
input: string
output:
type: array
items: boolean
cells:
dictionary:
type: {type: array, items: string}
init: [zero, one, two, three, four, five, six, seven, eight, nine]
action:
cast.fanoutBoolean: [input, {cell: dictionary}, true]
""").head
engine3.action("three").asInstanceOf[PFAArray[Boolean]].toVector should be (Vector(false, false, false, true, false, false, false, false, false, false, false))
engine3.action("adfadfadf").asInstanceOf[PFAArray[Boolean]].toVector should be (Vector(false, false, false, false, false, false, false, false, false, false, true))
val engine4 = PFAEngine.fromYaml("""
input: int
output:
type: array
items: boolean
action:
cast.fanoutBoolean: [input, 10, 20, false]
""").head
engine4.action(java.lang.Integer.valueOf(13)).asInstanceOf[PFAArray[Boolean]].toVector should be (Vector(false, false, false, true, false, false, false, false, false, false))
engine4.action(java.lang.Integer.valueOf(999)).asInstanceOf[PFAArray[Boolean]].toVector should be (Vector(false, false, false, false, false, false, false, false, false, false))
val engine5 = PFAEngine.fromYaml("""
input: int
output:
type: array
items: boolean
action:
cast.fanoutBoolean: [input, 10, 20, true]
""").head
engine5.action(java.lang.Integer.valueOf(13)).asInstanceOf[PFAArray[Boolean]].toVector should be (Vector(false, false, false, true, false, false, false, false, false, false, false))
engine5.action(java.lang.Integer.valueOf(999)).asInstanceOf[PFAArray[Boolean]].toVector should be (Vector(false, false, false, false, false, false, false, false, false, false, true))
}
it must "do fanoutInt" taggedAs(Lib, LibCast) in {
val engine1 = PFAEngine.fromYaml("""
input:
type: enum
name: Something
symbols: [zero, one, two, three, four, five, six, seven, eight, nine]
output:
type: array
items: int
action:
cast.fanoutInt: input
""").head
engine1.action(engine1.jsonInput(""""three"""")).asInstanceOf[PFAArray[Int]].toVector should be (Vector(0, 0, 0, 1, 0, 0, 0, 0, 0, 0))
val engine2 = PFAEngine.fromYaml("""
input: string
output:
type: array
items: int
cells:
dictionary:
type: {type: array, items: string}
init: [zero, one, two, three, four, five, six, seven, eight, nine]
action:
cast.fanoutInt: [input, {cell: dictionary}, false]
""").head
engine2.action("three").asInstanceOf[PFAArray[Int]].toVector should be (Vector(0, 0, 0, 1, 0, 0, 0, 0, 0, 0))
engine2.action("sdfasdf").asInstanceOf[PFAArray[Int]].toVector should be (Vector(0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
val engine3 = PFAEngine.fromYaml("""
input: string
output:
type: array
items: int
cells:
dictionary:
type: {type: array, items: string}
init: [zero, one, two, three, four, five, six, seven, eight, nine]
action:
cast.fanoutInt: [input, {cell: dictionary}, true]
""").head
engine3.action("three").asInstanceOf[PFAArray[Int]].toVector should be (Vector(0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0))
engine3.action("adfadfadf").asInstanceOf[PFAArray[Int]].toVector should be (Vector(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))
val engine4 = PFAEngine.fromYaml("""
input: int
output:
type: array
items: int
action:
cast.fanoutInt: [input, 10, 20, false]
""").head
engine4.action(java.lang.Integer.valueOf(13)).asInstanceOf[PFAArray[Int]].toVector should be (Vector(0, 0, 0, 1, 0, 0, 0, 0, 0, 0))
engine4.action(java.lang.Integer.valueOf(999)).asInstanceOf[PFAArray[Int]].toVector should be (Vector(0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
val engine5 = PFAEngine.fromYaml("""
input: int
output:
type: array
items: int
action:
cast.fanoutInt: [input, 10, 20, true]
""").head
engine5.action(java.lang.Integer.valueOf(13)).asInstanceOf[PFAArray[Int]].toVector should be (Vector(0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0))
engine5.action(java.lang.Integer.valueOf(999)).asInstanceOf[PFAArray[Int]].toVector should be (Vector(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))
}
it must "do fanoutLong" taggedAs(Lib, LibCast) in {
val engine1 = PFAEngine.fromYaml("""
input:
type: enum
name: Something
symbols: [zero, one, two, three, four, five, six, seven, eight, nine]
output:
type: array
items: long
action:
cast.fanoutLong: input
""").head
engine1.action(engine1.jsonInput(""""three"""")).asInstanceOf[PFAArray[Long]].toVector should be (Vector(0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L))
val engine2 = PFAEngine.fromYaml("""
input: string
output:
type: array
items: long
cells:
dictionary:
type: {type: array, items: string}
init: [zero, one, two, three, four, five, six, seven, eight, nine]
action:
cast.fanoutLong: [input, {cell: dictionary}, false]
""").head
engine2.action("three").asInstanceOf[PFAArray[Long]].toVector should be (Vector(0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L))
engine2.action("sdfasdf").asInstanceOf[PFAArray[Long]].toVector should be (Vector(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
val engine3 = PFAEngine.fromYaml("""
input: string
output:
type: array
items: long
cells:
dictionary:
type: {type: array, items: string}
init: [zero, one, two, three, four, five, six, seven, eight, nine]
action:
cast.fanoutLong: [input, {cell: dictionary}, true]
""").head
engine3.action("three").asInstanceOf[PFAArray[Long]].toVector should be (Vector(0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
engine3.action("adfadfadf").asInstanceOf[PFAArray[Long]].toVector should be (Vector(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L))
val engine4 = PFAEngine.fromYaml("""
input: int
output:
type: array
items: long
action:
cast.fanoutLong: [input, 10, 20, false]
""").head
engine4.action(java.lang.Integer.valueOf(13)).asInstanceOf[PFAArray[Long]].toVector should be (Vector(0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L))
engine4.action(java.lang.Integer.valueOf(999)).asInstanceOf[PFAArray[Long]].toVector should be (Vector(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
val engine5 = PFAEngine.fromYaml("""
input: int
output:
type: array
items: long
action:
cast.fanoutLong: [input, 10, 20, true]
""").head
engine5.action(java.lang.Integer.valueOf(13)).asInstanceOf[PFAArray[Long]].toVector should be (Vector(0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
engine5.action(java.lang.Integer.valueOf(999)).asInstanceOf[PFAArray[Long]].toVector should be (Vector(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L))
}
it must "do fanoutFloat" taggedAs(Lib, LibCast) in {
val engine1 = PFAEngine.fromYaml("""
input:
type: enum
name: Something
symbols: [zero, one, two, three, four, five, six, seven, eight, nine]
output:
type: array
items: float
action:
cast.fanoutFloat: input
""").head
engine1.action(engine1.jsonInput(""""three"""")).asInstanceOf[PFAArray[Float]].toVector should be (Vector(0.0F, 0.0F, 0.0F, 1.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F))
val engine2 = PFAEngine.fromYaml("""
input: string
output:
type: array
items: float
cells:
dictionary:
type: {type: array, items: string}
init: [zero, one, two, three, four, five, six, seven, eight, nine]
action:
cast.fanoutFloat: [input, {cell: dictionary}, false]
""").head
engine2.action("three").asInstanceOf[PFAArray[Float]].toVector should be (Vector(0.0F, 0.0F, 0.0F, 1.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F))
engine2.action("sdfasdf").asInstanceOf[PFAArray[Float]].toVector should be (Vector(0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F))
val engine3 = PFAEngine.fromYaml("""
input: string
output:
type: array
items: float
cells:
dictionary:
type: {type: array, items: string}
init: [zero, one, two, three, four, five, six, seven, eight, nine]
action:
cast.fanoutFloat: [input, {cell: dictionary}, true]
""").head
engine3.action("three").asInstanceOf[PFAArray[Float]].toVector should be (Vector(0.0F, 0.0F, 0.0F, 1.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F))
engine3.action("adfadfadf").asInstanceOf[PFAArray[Float]].toVector should be (Vector(0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 1.0F))
val engine4 = PFAEngine.fromYaml("""
input: int
output:
type: array
items: float
action:
cast.fanoutFloat: [input, 10, 20, false]
""").head
engine4.action(java.lang.Integer.valueOf(13)).asInstanceOf[PFAArray[Float]].toVector should be (Vector(0.0F, 0.0F, 0.0F, 1.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F))
engine4.action(java.lang.Integer.valueOf(999)).asInstanceOf[PFAArray[Float]].toVector should be (Vector(0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F))
val engine5 = PFAEngine.fromYaml("""
input: int
output:
type: array
items: float
action:
cast.fanoutFloat: [input, 10, 20, true]
""").head
engine5.action(java.lang.Integer.valueOf(13)).asInstanceOf[PFAArray[Float]].toVector should be (Vector(0.0F, 0.0F, 0.0F, 1.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F))
engine5.action(java.lang.Integer.valueOf(999)).asInstanceOf[PFAArray[Float]].toVector should be (Vector(0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 0.0F, 1.0F))
}
it must "do fanoutDouble" taggedAs(Lib, LibCast) in {
val engine1 = PFAEngine.fromYaml("""
input:
type: enum
name: Something
symbols: [zero, one, two, three, four, five, six, seven, eight, nine]
output:
type: array
items: double
action:
cast.fanoutDouble: input
""").head
engine1.action(engine1.jsonInput(""""three"""")).asInstanceOf[PFAArray[Double]].toVector should be (Vector(0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
val engine2 = PFAEngine.fromYaml("""
input: string
output:
type: array
items: double
cells:
dictionary:
type: {type: array, items: string}
init: [zero, one, two, three, four, five, six, seven, eight, nine]
action:
cast.fanoutDouble: [input, {cell: dictionary}, false]
""").head
engine2.action("three").asInstanceOf[PFAArray[Double]].toVector should be (Vector(0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
engine2.action("sdfasdf").asInstanceOf[PFAArray[Double]].toVector should be (Vector(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
val engine3 = PFAEngine.fromYaml("""
input: string
output:
type: array
items: double
cells:
dictionary:
type: {type: array, items: string}
init: [zero, one, two, three, four, five, six, seven, eight, nine]
action:
cast.fanoutDouble: [input, {cell: dictionary}, true]
""").head
engine3.action("three").asInstanceOf[PFAArray[Double]].toVector should be (Vector(0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
engine3.action("adfadfadf").asInstanceOf[PFAArray[Double]].toVector should be (Vector(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0))
val engine4 = PFAEngine.fromYaml("""
input: int
output:
type: array
items: double
action:
cast.fanoutDouble: [input, 10, 20, false]
""").head
engine4.action(java.lang.Integer.valueOf(13)).asInstanceOf[PFAArray[Double]].toVector should be (Vector(0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
engine4.action(java.lang.Integer.valueOf(999)).asInstanceOf[PFAArray[Double]].toVector should be (Vector(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
val engine5 = PFAEngine.fromYaml("""
input: int
output:
type: array
items: double
action:
cast.fanoutDouble: [input, 10, 20, true]
""").head
engine5.action(java.lang.Integer.valueOf(13)).asInstanceOf[PFAArray[Double]].toVector should be (Vector(0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
engine5.action(java.lang.Integer.valueOf(999)).asInstanceOf[PFAArray[Double]].toVector should be (Vector(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0))
}
it must "do cast.avro" taggedAs(Lib, LibCast) in {
PFAEngine.fromYaml("""
input: string
output: string
action: {bytes.toBase64: {cast.avro: input}}
""").head.action("hello") should be ("CmhlbGxv")
PFAEngine.fromYaml("""
input: int
output: string
action: {bytes.toBase64: {cast.avro: input}}
""").head.action(java.lang.Integer.valueOf(12)) should be ("GA==")
PFAEngine.fromYaml("""
input: double
output: string
action: {bytes.toBase64: {cast.avro: input}}
""").head.action(java.lang.Double.valueOf(3.14)) should be ("H4XrUbgeCUA=")
PFAEngine.fromYaml("""
input: [string, int]
output: string
action: {bytes.toBase64: {cast.avro: input}}
""").head.action("hello") should be ("AApoZWxsbw==")
PFAEngine.fromYaml("""
input: [string, int]
output: string
action: {bytes.toBase64: {cast.avro: input}}
""").head.action(java.lang.Integer.valueOf(12)) should be ("Ahg=")
val engine = PFAEngine.fromYaml("""
input:
type: record
name: Input
fields:
- {name: one, type: int}
- {name: two, type: double}
- {name: three, type: string}
output: string
action: {bytes.toBase64: {cast.avro: input}}
""").head
engine.action(engine.jsonInput("""{"one": 1, "two": 2.2, "three": "THREE"}""")) should be ("ApqZmZmZmQFAClRIUkVF")
}
it must "do cast.json" taggedAs(Lib, LibCast) in {
PFAEngine.fromYaml("""
input: string
output: string
action: {cast.json: input}
""").head.action("hello") should be (""""hello"""")
PFAEngine.fromYaml("""
input: int
output: string
action: {cast.json: input}
""").head.action(java.lang.Integer.valueOf(12)) should be ("12")
PFAEngine.fromYaml("""
input: double
output: string
action: {cast.json: input}
""").head.action(java.lang.Double.valueOf(3.14)) should be ("3.14")
PFAEngine.fromYaml("""
input: [string, int]
output: string
action: {cast.json: input}
""").head.action("hello") should be ("""{"string":"hello"}""")
PFAEngine.fromYaml("""
input: [string, int]
output: string
action: {cast.json: input}
""").head.action(java.lang.Integer.valueOf(12)) should be ("""{"int":12}""")
val engine = PFAEngine.fromYaml("""
input:
type: record
name: Input
fields:
- {name: one, type: int}
- {name: two, type: double}
- {name: three, type: string}
output: string
action: {cast.json: input}
""").head
engine.action(engine.jsonInput("""{"one": 1, "two": 2.2, "three": "THREE"}""")) should be ("""{"one":1,"two":2.2,"three":"THREE"}""")
}
}
|
opendatagroup/hadrian
|
hadrian/src/test/scala/lib/cast.scala
|
Scala
|
apache-2.0
| 28,961
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package system.basic
import common.rest.WskRest
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class WskRestBasicNode6Tests extends WskBasicNode6Tests {
override val wsk: common.rest.WskRest = new WskRest
}
|
paulcastro/openwhisk
|
tests/src/test/scala/system/basic/WskRestBasicNode6Tests.scala
|
Scala
|
apache-2.0
| 1,070
|
package com.thangiee.lolhangouts.ui.sidedrawer
import android.content.Context
import android.graphics.Typeface
import android.view.{View, ViewGroup}
import android.widget.{BaseAdapter, ImageView, TextView}
import at.markushi.ui.RevealColorView
import com.thangiee.lolhangouts.R
import com.thangiee.lolhangouts.ui.sidedrawer.DrawerItem._
import com.thangiee.lolhangouts.ui.utils._
class DrawerItemAdapter(implicit ctx: Context) extends BaseAdapter {
private val drawerItems = List(
DrawerItem(Chat, R.drawable.ic_drawer_chat, isSelected = true), // default selection
DrawerItem(Profile, R.drawable.ic_drawer_person),
DrawerItem(Search, R.drawable.ic_drawer_search),
DrawerItem(GameScouter, R.drawable.ic_drawer_tv),
DrawerItem(Settings, R.drawable.ic_drawer_settings),
DrawerItem(RemoveAds, R.drawable.ic_drawer_thumb_up),
DrawerItem(Logout, R.drawable.ic_drawer_exit))
private var currentDrawerItem = drawerItems.head
override def getCount: Int = drawerItems.size
override def getItemId(i: Int): Long = i
override def getView(i: Int, convertView: View, viewGroup: ViewGroup): View = {
val item = drawerItems(i)
val view = layoutInflater.inflate(R.layout.side_menu_item, viewGroup, false)
    // drawer item title and color
view.find[TextView](R.id.tv_menu_item_name)
.text(item.title)
.textColor(if (item.isSelected) R.color.primary_dark.r2Color else R.color.primary_text.r2Color)
.typeface(if (item.isSelected) Typeface.DEFAULT_BOLD else Typeface.DEFAULT)
    // drawer item icon
view.find[ImageView](R.id.img_drawer_item)
.imageResource(item.icon)
.colorFilter(if (item.isSelected) R.color.md_teal_600.r2Color else R.color.md_grey_500.r2Color)
    // drawer item background color
view.find[ImageView](R.id.bg_side_menu_item)
.backgroundColor(if (item.isSelected) R.color.md_grey_200.r2Color else R.color.md_grey_50.r2Color)
val revealColorView = view.find[RevealColorView](R.id.reveal)
val gestureDetector = GestureDetectorBuilder()
.onLongPress((e) => revealColorView.ripple(e.getX, e.getY, duration = 1000))
.onSingleTapUp((e) => revealColorView.ripple(e.getX, e.getY))
.build
view.onTouch((v, event) => gestureDetector.onTouchEvent(event))
view
}
def setCurrentDrawer(position: Int): Unit = {
currentDrawerItem.isSelected = false
currentDrawerItem = drawerItems(position)
currentDrawerItem.isSelected = true
}
def isDrawerSelected(position: Int): Boolean = {
drawerItems(position).isSelected
}
override def getItem(i: Int): DrawerItem = drawerItems(i)
}
|
Thangiee/LoL-Hangouts
|
src/com/thangiee/lolhangouts/ui/sidedrawer/DrawerItemAdapter.scala
|
Scala
|
apache-2.0
| 2,651
|
package com.gu.automation.api
import org.scalatest._
import scala.concurrent.Await
import scala.concurrent.duration._
/**
* Created by jduffell on 12/06/2014.
*/
class AuthApiTest extends FlatSpec with Matchers {
val apiRoot = "https://idapi.code.dev-theguardian.com"
"The auth api" should "let us log in as a valid user" in {
val future = AuthApi(apiRoot).authenticate("johnduffell@guardian.co.uk", "qwerty")
val accessToken = Await.result(future, 30.seconds) match {
case Right(token) => token.toMap
case Left(error) => fail(error.toString)
}
println(s"accessToken: $accessToken")
accessToken("GU_U") should not be empty
accessToken("SC_GU_U") should not be empty
}
"The auth api" should "return 403 for an invalid user" in {
val future = AuthApi(apiRoot).authenticate("johnduffell@guardian.co.uk", "qwersty")
val errorCode = Await.result(future, 30.seconds) match {
case Right(token) => fail(token.toString)
case Left(error) => error._1
}
println(s"errorCode: $errorCode")
errorCode should be (403)
}
}
|
guardian/scala-automation-api-client
|
src/test/scala/com/gu/support/api/AuthApiTest.scala
|
Scala
|
apache-2.0
| 1,095
|
package part2actors
import akka.actor.{Actor, ActorRef, ActorSystem, Props}
/**
* How to change actor behaviour programmatically.
*/
object ActorBehavior extends App {
object FussyKid {
case object KidAccept
case object KidReject
val HAPPY = "happy"
val SAD = "sad"
}
class FussyKid extends Actor {
import FussyKid._
import Mom._
// The internal state of the kid
private var state = HAPPY
override def receive: Receive = {
case Food(VEGETABLE) => state = SAD
case Food(CHOCOLATE) => state = HAPPY
case Ask(_) =>
if (state == HAPPY) sender() ! KidAccept
else sender() ! KidReject
}
}
/**
* context.become(method) changes the Actor's behavior
* to become the new 'Receive'.
*/
class StatelessFussyKid extends Actor {
import FussyKid._
import Mom._
override def receive: Receive = happyReceive
// This represents the "happy" state
def happyReceive: Receive = {
case Food(VEGETABLE) => context.become(sadReceive) // Forward to the other handler
case Food(CHOCOLATE) =>
case Ask(_) => sender() ! KidAccept
}
// This represents the "sad" state
def sadReceive: Receive = {
case Food(VEGETABLE) => context.become(sadReceive, false)
      case Food(CHOCOLATE) => context.unbecome()
case Ask(_) => sender() ! KidReject
}
}
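  /*
   * A sketch of how the handler stack of StatelessFussyKid evolves (the message sequence
   * is illustrative, not part of the original file); `receive`, i.e. happyReceive, is the
   * fallback once the stack is exhausted:
   *
   *   Food(veggies)   - happyReceive: become(sadReceive)        -> [sadReceive]
   *   Food(veggies)   - sadReceive: become(sadReceive, false)   -> [sadReceive, sadReceive]
   *   Food(chocolate) - sadReceive: unbecome()                  -> [sadReceive]
   *   Food(chocolate) - sadReceive: unbecome()                  -> back to happyReceive
   *
   * So the kid needs one chocolate for every vegetable it was fed before it accepts again.
   */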
object Mom {
case class MomStart(kidRef: ActorRef)
case class Food(food: String)
case class Ask(message: String)
val VEGETABLE = "veggies"
val CHOCOLATE = "chocolate"
}
class Mom extends Actor {
import Mom._
import FussyKid._
override def receive: Receive = {
case MomStart(kidRef) =>
kidRef ! Food(VEGETABLE)
kidRef ! Ask("Do you want to play?")
case KidAccept => println("Yay, my kid is happy!")
      case KidReject => println("My kid is sad, but at least he is healthy.")
}
}
import Mom._
import FussyKid._
val system = ActorSystem("actorBehaviour")
val kid = system.actorOf(Props[FussyKid])
val statelessKid = system.actorOf(Props[StatelessFussyKid])
val mom = system.actorOf(Props[Mom])
mom ! MomStart(statelessKid)
/**
* Exercises.
* 1 - Recreate the Counter Actor with context.become and no mutable state.
*/
object Counter {
case object Increment
case object Decrement
case object Print
}
class Counter extends Actor {
import Counter._
override def receive: Receive = countReceive(0)
// The handler becomes the same function but with a different argument.
def countReceive(currentCount: Int): Receive = {
case Increment =>
println(s"[counter] $currentCount incrementing")
context.become(countReceive(currentCount + 1))
case Decrement =>
println(s"[counter] $currentCount decrementing")
context.become(countReceive(currentCount - 1))
case Print => println(s"[counter] my current count is $currentCount")
}
}
import Counter._
val counter = system.actorOf(Props[Counter], "myCounter")
(1 to 5).foreach(_ => counter ! Increment)
(1 to 3).foreach(_ => counter ! Decrement)
counter ! Print
/**
* 2. Simplified voting system.
*/
case class Vote(candidate: String)
case object VoteStatusRequest
case class VoteStatusReply(candidate: Option[String])
class Citizen extends Actor {
override def receive: Receive = {
case Vote(c) => context.become(voted(c))
case VoteStatusRequest => sender() ! VoteStatusReply(None)
}
def voted(candidate: String): Receive = {
case VoteStatusRequest => sender() ! VoteStatusReply(Some(candidate))
}
}
case class AggregateVotes(citizens: Set[ActorRef])
class VoteAggregator extends Actor {
override def receive: Receive = awaiting
def awaiting: Receive = {
case AggregateVotes(citizens) =>
citizens.foreach(citizenRef => citizenRef ! VoteStatusRequest)
context.become(awaitingStatuses(citizens, Map()))
}
def awaitingStatuses(citizens: Set[ActorRef], currentStats: Map[String, Int]): Receive = {
case VoteStatusReply(None) =>
// Citizen has not voted yet
        sender() ! VoteStatusRequest // This may end up in an infinite loop if the citizen never votes
case VoteStatusReply(Some(candidate)) =>
val citizensLeftWaiting = citizens - sender()
val currentVotes = currentStats.getOrElse(candidate, 0)
val newStats = currentStats + (candidate -> (currentVotes + 1))
if (citizensLeftWaiting.isEmpty) {
println(s"[VotesAggregator] Pool stats: $newStats")
} else {
context.become(awaitingStatuses(citizensLeftWaiting, newStats))
}
}
}
val alice = system.actorOf(Props[Citizen])
val bob = system.actorOf(Props[Citizen])
val charlie = system.actorOf(Props[Citizen])
val daniel = system.actorOf(Props[Citizen])
alice ! Vote("Martin")
bob ! Vote("Jonas")
charlie ! Vote("Roland")
daniel ! Vote("Roland")
val voteAggregator = system.actorOf(Props[VoteAggregator])
voteAggregator ! AggregateVotes(Set(alice, bob, charlie, daniel))
/**
* Prints the status of the votes.
* Map("Martin" -> 1, "Jonas" -> 1, "Roland" -> 2)
*/
}
|
guhemama/moocs
|
RockAkka/src/main/scala/part2actors/ActorBehavior.scala
|
Scala
|
bsd-3-clause
| 5,254
|
package ch5
import scala.annotation.tailrec
import Stream._
object Exercise6 {
implicit class StreamExt[+A](val self: Stream[A]) extends AnyVal {
def foldRight[B](z: => B)(f: (A, => B) => B): B = self match {
case Cons(h, t) => f(h(), t().foldRight(z)(f))
case _ => z
}
def headOption: Option[A] = foldRight[Option[A]](None)((x, _) => Some(x))
def toList: List[A] = self match {
case Empty => List.empty
case Cons(h, t) => h() :: t().toList
}
}
}
import Exercise6._
/*
from repl you can test typing:
:load src/main/scala/fpinscala/ch5/Stream.scala
:load src/main/scala/fpinscala/ch5/Exercise6.scala
cons(1, cons(2, cons(3, empty))).headOption
empty.headOption
*/
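/*
 Expected results for the calls above (a sketch, assuming the Stream, cons and empty
 definitions loaded from Stream.scala):
 cons(1, cons(2, cons(3, empty))).headOption // Some(1), without forcing the tail
 empty.headOption // None
 */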
|
rucka/fpinscala
|
src/main/scala/fpinscala/ch5/Exercise6.scala
|
Scala
|
gpl-2.0
| 725
|
/*
* This file is part of AnyMime, a program to help you swap files
* wirelessly between mobile devices.
*
* Copyright (C) 2011 Timur Mehrvarz, timur.mehrvarz(a)gmail(.)com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.timur.anymime
import java.util.ArrayList
import java.io.File
import android.app.Activity
import android.app.AlertDialog
import android.app.AlertDialog.Builder
import android.net.Uri
import android.content.Context
import android.content.Intent
import android.content.DialogInterface
import android.content.SharedPreferences
import android.os.Bundle
import android.util.Log
import android.view.KeyEvent
import android.view.Window
import android.view.View
import android.view.MenuItem
import android.view.ContextMenu
import android.widget.Toast
import android.widget.ListView
import android.widget.TextView
import android.widget.EditText
import android.widget.AdapterView
import android.widget.AdapterView.OnItemClickListener
import android.webkit.MimeTypeMap
import org.timur.rfcomm.AndrTools
class ShowSelectedFilesActivity extends Activity {
private val TAG = "ShowSelectedFilesActivity"
private val D = Static.DBGLOG
private val REQUEST_SELECT_FILE = 1
private val REQUEST_SELECTED_SLOT = 2
private var context:Context = null
private var selectedFilesStringArrayList:ArrayList[String] = null
private var listView:ListView = null
private var fileListAdapter:FileListAdapter = null
private val PREFS_SETTINGS = "org.timur.anymime.settings"
private var prefSettings:SharedPreferences = null
private var prefSettingsEditor:SharedPreferences.Editor = null
private var mTitleLeftView:TextView = null
private var mTitleRightView:TextView = null
private var selectedSlot = 0
private var selectedSlotName = ""
override def onCreate(savedInstanceState:Bundle) {
super.onCreate(savedInstanceState)
if(D) Log.i(TAG, "onCreate")
context = this
var customTitleSupported = false
if(android.os.Build.VERSION.SDK_INT<11) {
// honeycomb issue (3.0 + 3.1)
customTitleSupported = requestWindowFeature(Window.FEATURE_CUSTOM_TITLE)
}
setContentView(R.layout.file_select)
if(customTitleSupported) {
getWindow.setFeatureInt(Window.FEATURE_CUSTOM_TITLE, R.layout.custom_title)
mTitleLeftView = findViewById(R.id.title_left_text).asInstanceOf[TextView]
mTitleRightView = findViewById(R.id.title_right_text).asInstanceOf[TextView]
if(mTitleLeftView!=null)
mTitleLeftView.setText("Files for delivery")
}
selectedFilesStringArrayList = null
val intent = getIntent
if(intent!=null) {
val bundle = intent.getExtras
if(bundle!=null) {
if(D) Log.i(TAG, "onCreate getting selectedFilesStringArrayList from getIntent.getExtras")
selectedFilesStringArrayList = bundle.getStringArrayList("selectedFilesStringArrayList")
}
}
if(selectedFilesStringArrayList==null) {
if(D) Log.i(TAG, "onCreate create empty selectedFilesStringArrayList")
selectedFilesStringArrayList = new ArrayList[String]()
}
if(D) Log.i(TAG, "onCreate selectedFilesStringArrayList.size="+selectedFilesStringArrayList.size)
if(selectedFilesStringArrayList.size<1) {
// todo: send a toast "no files yet selected"
}
listView = findViewById(R.id.selectedFilesList).asInstanceOf[ListView]
if(listView==null) {
// todo: bail out
}
// prepare access to prefSettings
if(prefSettings==null) {
prefSettings = getSharedPreferences(PREFS_SETTINGS, Context.MODE_WORLD_WRITEABLE)
if(prefSettings!=null)
prefSettingsEditor = prefSettings.edit
}
if(prefSettings!=null) {
getSelectedSlot
}
fileListAdapter = new FileListAdapter(this, R.layout.file_list_entry)
listView.setAdapter(fileListAdapter)
updateAdapter
listView.setOnItemClickListener(new OnItemClickListener() {
override def onItemClick(adapterView:AdapterView[_], view:View, position:Int, id:Long) {
// user has clicked an entry in the selected files list
var fileString = view.findViewById(R.id.invisibleText).asInstanceOf[TextView].getText.toString
if(D) Log.i(TAG, "onCreate listView onItemClick position="+position+" fileString="+fileString)
registerForContextMenu(view)
view.setLongClickable(false)
view.showContextMenu // -> onCreateContextMenu()
}
})
AndrTools.buttonCallback(this, R.id.buttonClearAll) { () =>
if(D) Log.i(TAG, "onClick buttonClearAll")
val dialogClickListener = new DialogInterface.OnClickListener() {
override def onClick(dialog:DialogInterface, whichButton:Int) {
whichButton match {
case DialogInterface.BUTTON_POSITIVE =>
fileListAdapter.clear
fileListAdapter.notifyDataSetChanged
selectedFilesStringArrayList.clear
persistArrayList(selectedFilesStringArrayList)
case DialogInterface.BUTTON_NEGATIVE =>
// do nothing
}
}
}
new AlertDialog.Builder(context).setTitle("Remove all files from this selection?")
.setPositiveButton("Yes",dialogClickListener)
.setNegativeButton("No", dialogClickListener)
.show
}
AndrTools.buttonCallback(this, R.id.buttonSelectSlot) { () =>
if(D) Log.i(TAG, "onClick buttonSelectSlot")
val intent = new Intent(context, classOf[ShowSelectedSlotActivity])
startActivityForResult(intent, REQUEST_SELECTED_SLOT) // -> onActivityResult()
}
AndrTools.buttonCallback(this, R.id.buttonRenameSlot) { () =>
if(D) Log.i(TAG, "onClick buttonRenameSlot")
val editText = new EditText(context)
editText.setText(selectedSlotName)
val dialogClickListener = new DialogInterface.OnClickListener() {
override def onClick(dialog:DialogInterface, whichButton:Int) {
whichButton match {
case DialogInterface.BUTTON_POSITIVE =>
if(D) Log.i(TAG, "onClick BUTTON_POSITIVE")
selectedSlotName = editText.getText.toString
if(mTitleRightView!=null)
mTitleRightView.setText("Slot "+(selectedSlot+1)+" "+selectedSlotName)
prefSettingsEditor.putString("fileSlotName"+selectedSlot, selectedSlotName)
prefSettingsEditor.commit
case DialogInterface.BUTTON_NEGATIVE =>
if(D) Log.i(TAG, "onClick BUTTON_NEGATIVE")
}
}
}
new AlertDialog.Builder(context)
.setTitle("Rename file slot")
.setView(editText)
.setMessage("Use a name that best describes the use-case for the files in this slot.")
.setIcon(android.R.drawable.ic_menu_edit)
.setPositiveButton("OK",dialogClickListener)
.setNegativeButton("Cancel",dialogClickListener)
.show
}
AndrTools.buttonCallback(this, R.id.buttonAdd) { () =>
if(D) Log.i(TAG, "onClick buttonAdd")
letUserPickAFile
}
AndrTools.buttonCallback(this, R.id.buttonDone) { () =>
if(D) Log.i(TAG, "onClick buttonDone")
setActivityResponse
finish
}
}
//////////////////////////////////////////////// context menu: open/view, remove, close
val CONTEXTMENU_VIEW = 1
val CONTEXTMENU_REMOVE = 2
val CONTEXTMENU_CLOSE = 3
var contextMenuFullPath:String = null
var contextMenuFileName:String = null
override def onCreateContextMenu(menu:ContextMenu, view:View, menuInfo:ContextMenu.ContextMenuInfo) :Unit = {
if(view==null)
return
contextMenuFullPath = view.findViewById(R.id.invisibleText).asInstanceOf[TextView].getText.toString
if(contextMenuFullPath==null)
return
contextMenuFileName = view.findViewById(R.id.visibleText).asInstanceOf[TextView].getText.toString
if(contextMenuFileName==null)
return
if(D) Log.i(TAG, "onCreateContextMenu contextMenuFileName="+contextMenuFileName)
menu.setHeaderTitle(contextMenuFileName)
menu.add(0, CONTEXTMENU_VIEW, 0, "View / Open")
menu.add(0, CONTEXTMENU_REMOVE, 0, "Remove from selection")
menu.add(0, CONTEXTMENU_CLOSE, 0, "Close")
}
override def onContextItemSelected(menuItem:MenuItem) :Boolean = {
val itemId = menuItem.getItemId
//Log.d(TAG, "onContextItemSelected menuItem.getItemId="+itemId)
itemId match {
case CONTEXTMENU_VIEW =>
// open contextMenuFileName
val processFileIntent = new Intent(Intent.ACTION_VIEW)
val selectedUri = Uri.fromFile(new File(contextMenuFullPath))
if(D) Log.i(TAG, "onContextItemSelected contextMenuFullPath="+contextMenuFullPath+" selectedUri="+selectedUri)
val contextMenuFileNameStringLower = contextMenuFileName.toLowerCase
val lastIdxOfDot = contextMenuFileNameStringLower.lastIndexOf(".")
val extension = if(lastIdxOfDot>=0) contextMenuFileNameStringLower.substring(lastIdxOfDot+1) else null
if(extension!=null) {
val mimeTypeMap = MimeTypeMap.getSingleton()
var mimeTypeFromExtension = mimeTypeMap.getMimeTypeFromExtension(extension)
if(extension=="asc") mimeTypeFromExtension="application/pgp"
// note: .html files may contain xhtml content (=> application/xhtml+xml)
if(D) Log.i(TAG, "onContextItemSelected extension="+extension+" mimeType="+mimeTypeFromExtension)
processFileIntent.setDataAndType(selectedUri,mimeTypeFromExtension)
} else {
if(D) Log.i(TAG, "onContextItemSelected extension=null mimeType=*/*")
processFileIntent.setDataAndType(selectedUri,"*/*")
}
if(D) Log.i(TAG, "onContextItemSelected startActivity processFileIntent="+processFileIntent)
startActivity(Intent.createChooser(processFileIntent,"Apply action..."))
return true
case CONTEXTMENU_REMOVE =>
val idxArrayList = selectedFilesStringArrayList.indexOf(contextMenuFullPath)
if(idxArrayList>=0) {
fileListAdapter.remove(contextMenuFullPath)
fileListAdapter.notifyDataSetChanged
selectedFilesStringArrayList.remove(idxArrayList)
persistArrayList(selectedFilesStringArrayList)
}
return true
case CONTEXTMENU_CLOSE =>
return true
case _ =>
return super.onContextItemSelected(menuItem)
}
return false
}
//////////////////////////////////////////////// adding a file to the list
private def letUserPickAFile() {
val intent = new Intent(Intent.ACTION_GET_CONTENT)
intent.setType("*/*")
intent.addCategory(Intent.CATEGORY_OPENABLE)
intent.addFlags(Intent.FLAG_ACTIVITY_EXCLUDE_FROM_RECENTS)
val title = "Select a file to send"
intent.putExtra(Intent.EXTRA_TITLE,title)
intent.putExtra("explorer_title", title)
try {
startActivityForResult(Intent.createChooser(intent, title), REQUEST_SELECT_FILE) // -> onActivityResult
} catch {
case ex:Exception =>
ex.printStackTrace()
val errMsg = ex.getMessage
Toast.makeText(context, errMsg, Toast.LENGTH_LONG).show
}
}
override def onActivityResult(requestCode:Int, resultCode:Int, intent:Intent) {
if(D) Log.i(TAG, "onActivityResult resultCode="+resultCode+" requestCode="+requestCode)
requestCode match {
case REQUEST_SELECT_FILE =>
if(D) Log.i(TAG, "REQUEST_SELECT_FILE intent="+intent)
if(resultCode!=Activity.RESULT_OK) {
Log.e(TAG, "REQUEST_SELECT_FILE resultCode!=Activity.RESULT_OK -> no files selected")
} else if(intent==null) {
Log.e(TAG, "REQUEST_SELECT_FILE intent==null -> no files selected")
} else {
if(intent.getData!=null) {
def getPath(uri:Uri) :String = {
val projection:Array[String] = Array("_data") //{ MediaStore.Images.Media.DATA }
val cursor = managedQuery(uri, projection, null, null, null)
if(cursor!=null) {
val column_index = cursor.getColumnIndexOrThrow("_data") // (MediaStore.Images.Media.DATA)
cursor.moveToFirst()
return cursor.getString(column_index)
}
return null
}
val selectFileUri = intent.getData
var selectedPath = getPath(selectFileUri) // MEDIA GALLERY
if(selectedPath==null)
selectedPath = selectFileUri.getPath // FILE Manager
// todo: don't add selectedPath if already in selectedFilesStringArrayList (?)
Log.e(TAG, "REQUEST_SELECT_FILE add="+selectedPath)
fileListAdapter add selectedPath
fileListAdapter.notifyDataSetChanged
selectedFilesStringArrayList add selectedPath
persistArrayList(selectedFilesStringArrayList)
Log.e(TAG, "REQUEST_SELECT_FILE selectedFilesStringArrayList="+selectedFilesStringArrayList)
} else {
if(D) Log.i(TAG, "REQUEST_SELECT_FILE no response")
}
}
case REQUEST_SELECTED_SLOT =>
if(D) Log.i(TAG, "REQUEST_SELECTED_SLOT resultCode="+resultCode)
if(resultCode==Activity.RESULT_OK) {
getArrayListSelectedFileStrings
updateAdapter
}
}
}
private def getSelectedSlot() {
val selectedSlotString = prefSettings.getString("selectedSlot", null)
selectedSlot = if(selectedSlotString!=null) selectedSlotString.toInt else 0
if(selectedSlot<0 || selectedSlot>ShowSelectedSlotActivity.MAX_SLOTS)
selectedSlot = 0
selectedSlotName = prefSettings.getString("fileSlotName"+selectedSlot, "")
if(D) Log.i(TAG, "onCreate getSelectedSlot selectedSlot="+selectedSlot)
if(mTitleRightView!=null)
mTitleRightView.setText("Slot "+(selectedSlot+1)+" "+selectedSlotName)
}
private def getArrayListSelectedFileStrings() {
if(prefSettings!=null) {
getSelectedSlot
selectedFilesStringArrayList.clear
// read the lists of selected files
var commaSeparatedString = prefSettings.getString("fileSlot"+selectedSlot, null)
if(D) Log.i(TAG, "getArrayListSelectedFileStrings commaSeparatedString="+commaSeparatedString)
if(commaSeparatedString!=null) {
commaSeparatedString = commaSeparatedString.trim
if(commaSeparatedString.size>0) {
val resultArray = commaSeparatedString split ","
if(resultArray!=null) {
if(D) Log.i(TAG,"getArrayListSelectedFileStrings prefSettings selectedFilesStringArrayList resultArray.size="+resultArray.size)
for(filePathString <- resultArray) {
if(filePathString!=null) {
selectedFilesStringArrayList add filePathString.trim
}
}
}
}
}
}
}
private def updateAdapter() {
if(D) Log.i(TAG, "updateAdapter selectedFilesStringArrayList.size="+selectedFilesStringArrayList.size)
fileListAdapter.clear
if(selectedFilesStringArrayList.size>0) {
val iterator = selectedFilesStringArrayList.iterator
while(iterator.hasNext)
fileListAdapter.add(iterator.next)
}
fileListAdapter.notifyDataSetChanged
}
private def persistArrayList(arrayList:ArrayList[String]) {
if(prefSettings!=null && prefSettingsEditor!=null) {
val iterator = arrayList.iterator
var stringBuilder = new StringBuilder()
while(iterator.hasNext) {
if(stringBuilder.size>0)
stringBuilder append ","
stringBuilder append iterator.next
}
if(D) Log.i(TAG, "persistArrayList stringBuilder="+stringBuilder.toString)
prefSettingsEditor.putString("fileSlot"+selectedSlot,stringBuilder.toString)
prefSettingsEditor.commit
}
}
//////////////////////////////////////////////// leaving this activity and handing back the list
override def onBackPressed() {
if(D) Log.i(TAG, "onBackPressed()")
setActivityResponse
super.onBackPressed
}
private def setActivityResponse() {
setResult(Activity.RESULT_OK)
}
}
|
mehrvarz/Anymime
|
src/org/timur/anymime/ShowSelectedFilesActivity.scala
|
Scala
|
gpl-3.0
| 16,807
|
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal.javadsl.broker.kafka
import java.io.Closeable
import java.io.File
import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.CountDownLatch
import java.util.concurrent.TimeUnit
import akka.japi.{ Pair => JPair }
import akka.persistence.query.NoOffset
import akka.persistence.query.Offset
import akka.persistence.query.Sequence
import akka.stream.OverflowStrategy
import akka.stream.javadsl.{ Source => JSource }
import akka.stream.scaladsl.Flow
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source
import akka.stream.scaladsl.SourceQueue
import akka.Done
import akka.NotUsed
import com.google.inject.AbstractModule
import com.lightbend.lagom.dev.MiniLogger
import com.lightbend.lagom.dev.Servers.KafkaServer
import com.lightbend.lagom.internal.javadsl.broker.kafka.JavadslKafkaApiSpec._
import com.lightbend.lagom.internal.javadsl.persistence.OffsetAdapter.dslOffsetToOffset
import com.lightbend.lagom.internal.javadsl.persistence.OffsetAdapter.offsetToDslOffset
import com.lightbend.lagom.javadsl.api.ScalaService._
import com.lightbend.lagom.javadsl.api.broker.kafka.KafkaProperties
import com.lightbend.lagom.javadsl.api.broker.kafka.PartitionKeyStrategy
import com.lightbend.lagom.javadsl.api.broker.Message
import com.lightbend.lagom.javadsl.api.broker.Topic
import com.lightbend.lagom.javadsl.api.Descriptor
import com.lightbend.lagom.javadsl.api.Service
import com.lightbend.lagom.javadsl.api.ServiceLocator
import com.lightbend.lagom.javadsl.broker.TopicProducer
import com.lightbend.lagom.javadsl.broker.kafka.KafkaMetadataKeys
import com.lightbend.lagom.javadsl.client.ConfigurationServiceLocator
import com.lightbend.lagom.javadsl.persistence.AggregateEvent
import com.lightbend.lagom.javadsl.persistence.AggregateEventTag
import com.lightbend.lagom.javadsl.persistence.PersistentEntityRef
import com.lightbend.lagom.javadsl.persistence.PersistentEntityRegistry
import com.lightbend.lagom.javadsl.persistence.{ Offset => JOffset }
import com.lightbend.lagom.javadsl.server.ServiceGuiceSupport
import com.lightbend.lagom.spi.persistence.InMemoryOffsetStore
import com.lightbend.lagom.spi.persistence.OffsetStore
import org.scalatest._
import org.scalatest.concurrent.ScalaFutures
import org.slf4j.LoggerFactory
import play.api.inject._
import play.api.inject.guice.GuiceApplicationBuilder
import scala.collection.mutable
import scala.compat.java8.FunctionConverters._
import scala.compat.java8.OptionConverters._
import scala.concurrent.duration._
import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.Promise
class JavadslKafkaApiSpec
extends WordSpecLike
with Matchers
with BeforeAndAfter
with BeforeAndAfterAll
with ScalaFutures
with OptionValues {
private val log = LoggerFactory.getLogger(getClass)
private val miniLogger = new MiniLogger {
def debug(message: => String): Unit = log.debug(message)
def info(message: => String): Unit = log.info(message)
}
private lazy val offsetStore = new InMemoryOffsetStore
private final val kafkaPort = 9092
private final val kafkaZooKeeperPort = 2181
private val application = {
new GuiceApplicationBuilder()
.bindings(
bind[OffsetStore].toInstance(offsetStore),
bind[PersistentEntityRegistry].toInstance(NullPersistentEntityRegistry),
JavadslKafkaApiSpec.testModule,
bind[ServiceLocator].to[ConfigurationServiceLocator]
)
.configure(
"akka.remote.artery.canonical.port" -> "0",
"akka.remote.artery.canonical.hostname" -> "127.0.0.1",
"akka.persistence.journal.plugin" -> "akka.persistence.journal.inmem",
"akka.persistence.snapshot-store.plugin" -> "akka.persistence.snapshot-store.local",
"lagom.cluster.join-self" -> "on",
"lagom.cluster.exit-jvm-when-system-terminated" -> "off",
"lagom.cluster.bootstrap.enabled" -> "off",
"lagom.services.kafka_native" -> s"tcp://localhost:$kafkaPort"
)
.build()
}
private val kafkaServerClasspath: Seq[File] = TestBuildInfo.fullClasspath.toIndexedSeq
private var kafkaServer: Option[Closeable] = None
override def beforeAll(): Unit = {
super.beforeAll()
kafkaServer = Some(
KafkaServer.start(
log = miniLogger,
cp = kafkaServerClasspath,
kafkaPort = kafkaPort,
zooKeeperPort = kafkaZooKeeperPort,
kafkaPropertiesFile = None,
jvmOptions = Nil,
targetDir = TestBuildInfo.target,
cleanOnStart = true,
)
)
}
before {
// Reset the messageTransformer in case a previous test failed after setting it
messageTransformer = identity
}
override def afterAll(): Unit = {
application.stop().futureValue
kafkaServer.foreach(_.close())
kafkaServer = None
super.afterAll()
}
implicit override val patienceConfig = PatienceConfig(30.seconds, 150.millis)
import application.materializer
"The Kafka message broker api" should {
val testService = application.injector.instanceOf(classOf[JavadslKafkaApiSpec.TestService])
"eagerly publish event stream registered in the service topic implementation" in {
val messageReceived = Promise[String]()
testService
.test1Topic()
.subscribe()
.withGroupId("testservice1")
.atLeastOnce {
VarianceCompat.asJava {
Flow[String].map { message =>
messageReceived.trySuccess(message)
Done
}
}
}
val messageToPublish = "msg"
test1EventJournal.append(messageToPublish)
messageReceived.future.futureValue shouldBe messageToPublish
}
"self-heal if failure occurs while running the publishing stream" in {
// Create a subscriber that tracks the first two messages it receives
val firstTimeReceived = Promise[String]()
val secondTimeReceived = Promise[String]()
testService
.test2Topic()
.subscribe()
.withGroupId("testservice2")
.atLeastOnce {
VarianceCompat.asJava {
Flow[String].map { message =>
if (!firstTimeReceived.isCompleted) {
firstTimeReceived.trySuccess(message)
} else if (!secondTimeReceived.isCompleted)
secondTimeReceived.trySuccess(message)
else ()
Done
}
}
}
// Insert a mapping function into the producer flow that transforms each message
val firstMessagePublishedSuccessfully = new CountDownLatch(1)
messageTransformer = { message =>
firstMessagePublishedSuccessfully.countDown()
s"$message-transformed"
}
val firstMessageToPublish = "firstMessage"
test2EventJournal.append(firstMessageToPublish)
// Wait until first message is seen by the publisher
assert(firstMessagePublishedSuccessfully.await(10, TimeUnit.SECONDS))
// Ensure the transformed message is visible to the subscriber
firstTimeReceived.future.futureValue shouldBe s"$firstMessageToPublish-transformed"
// Now simulate a failure: this will result in an exception being
// thrown before committing the offset of the next processed message.
// It should retry automatically, which means it should throw the error
// continuously until successful.
val secondMessageTriggeredErrorTwice = new CountDownLatch(2)
messageTransformer = { message =>
secondMessageTriggeredErrorTwice.countDown()
println(s"Expect to see an error below: Error processing message: [$message]")
throw new RuntimeException(s"Error processing message: [$message]")
}
// Publish a second message.
val secondMessageToPublish = "secondMessage"
test2EventJournal.append(secondMessageToPublish)
// Since the count-down happens before the error is thrown, trying
// twice ensures that the first error was handled completely.
assert(secondMessageTriggeredErrorTwice.await(30, TimeUnit.SECONDS))
// After the exception was handled, remove the cause
// of the failure and check that production resumes.
val secondMessagePublishedSuccessfully = new CountDownLatch(1)
messageTransformer = { message =>
secondMessagePublishedSuccessfully.countDown()
s"$message-transformed"
}
assert(secondMessagePublishedSuccessfully.await(60, TimeUnit.SECONDS))
// The subscriber flow should be unaffected,
// hence it will process the second message
secondTimeReceived.future.futureValue shouldBe s"$secondMessageToPublish-transformed"
}
"keep track of the read-side offset when publishing events" in {
implicit val ec = application.injector.instanceOf(classOf[ExecutionContext])
def reloadOffset() =
offsetStore.prepare("topicProducer-" + testService.test3Topic().topicId().value(), "singleton").futureValue
// No message was consumed from this topic, hence we expect the last stored offset to be NoOffset
val offsetDao = reloadOffset()
val initialOffset = offsetDao.loadedOffset
initialOffset shouldBe NoOffset
// Put some messages in the stream
test3EventJournal.append("firstMessage")
test3EventJournal.append("secondMessage")
test3EventJournal.append("thirdMessage")
// Wait for a subscriber to consume them all (which ensures they've all been published)
val allMessagesReceived = new CountDownLatch(3)
testService
.test3Topic()
.subscribe()
.withGroupId("testservice3")
.atLeastOnce {
VarianceCompat.asJava {
Flow[String].map { _ =>
allMessagesReceived.countDown()
Done
}
}
}
assert(allMessagesReceived.await(10, TimeUnit.SECONDS))
// After publishing all of the messages we expect the offset store
// to have been updated with the offset of the last consumed message
val updatedOffset = reloadOffset().loadedOffset
updatedOffset shouldBe Sequence(2)
}
"self-heal at-least-once consumer stream if a failure occurs" in {
val materialized = new CountDownLatch(2)
@volatile var failOnMessageReceived = true
testService
.test4Topic()
.subscribe()
.withGroupId("testservice4")
.atLeastOnce {
VarianceCompat.asJava {
Flow[String]
.map { _ =>
if (failOnMessageReceived) {
failOnMessageReceived = false
println("Expect to see an error below: Simulate consumer failure")
throw new IllegalStateException("Simulate consumer failure")
} else Done
}
.mapMaterializedValue { _ =>
materialized.countDown()
}
}
}
test4EventJournal.append("message")
// After throwing the error, the flow should be rematerialized, so consumption resumes
assert(materialized.await(10, TimeUnit.SECONDS))
}
"self-heal at-most-once consumer stream if a failure occurs" in {
case object SimulateFailure extends RuntimeException
// Let's publish a message to the topic
test5EventJournal.append("message")
// Now we register a consumer that will fail while processing a message. Because we are using at-most-once
// delivery, the message that caused the failure won't be re-processed.
@volatile var countProcessedMessages = 0
val expectFailure = testService
.test5Topic()
.subscribe()
.withGroupId("testservice5")
.atMostOnceSource()
.asScala
.via {
Flow[String].map { _ =>
countProcessedMessages += 1
throw SimulateFailure
}
}
.runWith(Sink.ignore)
expectFailure.failed.futureValue shouldBe an[SimulateFailure.type]
countProcessedMessages shouldBe 1
}
"allow the consumer to batch" in {
val batchSize = 4
val latch = new CountDownLatch(batchSize)
testService
.test6Topic()
.subscribe()
.withGroupId("testservice6")
.atLeastOnce {
VarianceCompat.asJava {
Flow[String].grouped(batchSize).mapConcat { messages =>
messages.map { _ =>
latch.countDown()
Done
}
}
}
}
for (i <- 1 to batchSize) test6EventJournal.append(i.toString)
assert(latch.await(10, TimeUnit.SECONDS))
}
"attach metadata to the message" in {
test7EventJournal.append("A1")
test7EventJournal.append("A2")
test7EventJournal.append("A3")
val messages = Await.result(
testService.test7Topic().subscribe.withMetadata.atMostOnceSource.asScala.take(3).runWith(Sink.seq),
10.seconds
)
messages.size shouldBe 3
def runAssertions(msg: Message[String]): Unit = {
msg.messageKeyAsString shouldBe "A"
msg.get(KafkaMetadataKeys.TOPIC).asScala.value shouldBe "test7"
msg.get(KafkaMetadataKeys.HEADERS).asScala should not be None
msg.get(KafkaMetadataKeys.TIMESTAMP).asScala should not be None
msg.get(KafkaMetadataKeys.TIMESTAMP_TYPE).asScala should not be None
msg.get(KafkaMetadataKeys.PARTITION).asScala.value shouldBe
messages.head.get(KafkaMetadataKeys.PARTITION).asScala.value
}
messages.foreach(runAssertions)
messages.head.getPayload shouldBe "A1"
val offset = messages.head.get(KafkaMetadataKeys.OFFSET).asScala.value
messages(1).getPayload shouldBe "A2"
messages(1).get(KafkaMetadataKeys.OFFSET).asScala.value shouldBe (offset + 1)
messages(2).getPayload shouldBe "A3"
messages(2).get(KafkaMetadataKeys.OFFSET).asScala.value shouldBe (offset + 2)
}
}
}
object VarianceCompat {
import akka.stream.javadsl
import akka.stream.scaladsl
// Akka 2.5.21 removed the variance of the return types, as a consequence the compiler
// no longer accepts an xx.asJava that returns a supertype. This implementation of asJava
// helps the compiler.
def asJava[T, Q, R](in: scaladsl.Flow[T, Q, R]): javadsl.Flow[T, Q, R] = {
in.asJava
}
}
object JavadslKafkaApiSpec {
private val test1EventJournal = new EventJournal[String]
private val test2EventJournal = new EventJournal[String]
private val test3EventJournal = new EventJournal[String]
private val test4EventJournal = new EventJournal[String]
private val test5EventJournal = new EventJournal[String]
private val test6EventJournal = new EventJournal[String]
private val test7EventJournal = new EventJournal[String]
// Allows tests to insert logic into the producer stream
@volatile var messageTransformer: String => String = identity
trait TestService extends Service {
def test1Topic(): Topic[String]
def test2Topic(): Topic[String]
def test3Topic(): Topic[String]
def test4Topic(): Topic[String]
def test5Topic(): Topic[String]
def test6Topic(): Topic[String]
def test7Topic(): Topic[String]
override def descriptor(): Descriptor =
named("testservice")
.withTopics(
topic("test1", test1Topic _),
topic("test2", test2Topic _),
topic("test3", test3Topic _),
topic("test4", test4Topic _),
topic("test5", test5Topic _),
topic("test6", test6Topic _),
topic("test7", test7Topic _)
.withProperty(KafkaProperties.partitionKeyStrategy(), new PartitionKeyStrategy[String] {
override def computePartitionKey(message: String) = message.take(1)
})
)
}
trait TestEvent extends AggregateEvent[TestEvent]
class TestServiceImpl extends TestService {
override def test1Topic(): Topic[String] = createTopicProducer(test1EventJournal)
override def test2Topic(): Topic[String] = createTopicProducer(test2EventJournal)
override def test3Topic(): Topic[String] = createTopicProducer(test3EventJournal)
override def test4Topic(): Topic[String] = createTopicProducer(test4EventJournal)
override def test5Topic(): Topic[String] = createTopicProducer(test5EventJournal)
override def test6Topic(): Topic[String] = createTopicProducer(test6EventJournal)
override def test7Topic(): Topic[String] = createTopicProducer(test7EventJournal)
private def createTopicProducer(eventJournal: EventJournal[String]): Topic[String] =
TopicProducer.singleStreamWithOffset[String]({ fromOffset: JOffset =>
eventJournal
.eventStream(dslOffsetToOffset(fromOffset))
.map(element => new JPair(messageTransformer(element._1), offsetToDslOffset(element._2)))
.asJava
}.asJava)
}
val testModule = new AbstractModule with ServiceGuiceSupport {
override def configure(): Unit = {
bindService(classOf[TestService], classOf[TestServiceImpl])
}
}
class EventJournal[Event] {
private type Element = (Event, Sequence)
private val offset = new AtomicLong()
private val storedEvents = mutable.ListBuffer.empty[Element]
private val subscribers = mutable.ListBuffer.empty[SourceQueue[Element]]
def eventStream(fromOffset: Offset): Source[(Event, Offset), _] = {
val minOffset: Long = fromOffset match {
case Sequence(value) => value
case NoOffset => -1
case _ => throw new IllegalArgumentException(s"Sequence offset required, but got $fromOffset")
}
Source
.queue[Element](8, OverflowStrategy.fail)
.mapMaterializedValue { queue =>
synchronized {
storedEvents.foreach(queue.offer)
subscribers += queue
}
NotUsed
}
// Skip everything up to and including the fromOffset provided
.dropWhile(_._2.value <= minOffset)
}
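// Illustrative note (added for clarity, not from the original source): append assigns
// offsets 0, 1, 2, ..., so eventStream(Sequence(0)) drops the element at offset 0 and
// starts emitting from offset 1, while eventStream(NoOffset) (minOffset = -1) replays everything.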
def append(event: Event): Unit = {
val element = (event, Sequence(offset.getAndIncrement()))
synchronized {
storedEvents += element
subscribers.foreach(_.offer(element))
}
}
}
object NullPersistentEntityRegistry extends PersistentEntityRegistry {
override def eventStream[Event <: AggregateEvent[Event]](
aggregateTag: AggregateEventTag[Event],
fromOffset: JOffset
): JSource[JPair[Event, JOffset], NotUsed] =
JSource.empty()
override def refFor[C](
entityClass: Class[_ <: com.lightbend.lagom.javadsl.persistence.PersistentEntity[C, _, _]],
entityId: String
): PersistentEntityRef[C] =
???
override def register[C, E, S](
entityClass: Class[_ <: com.lightbend.lagom.javadsl.persistence.PersistentEntity[C, E, S]]
): Unit = ()
}
}
|
ignasi35/lagom
|
service/javadsl/kafka/server/src/test/scala/com/lightbend/lagom/internal/javadsl/broker/kafka/JavadslKafkaApiSpec.scala
|
Scala
|
apache-2.0
| 19,108
|
package com.goibibo.sqlshift.commons
import java.util.Properties
import java.util.regex._
import com.goibibo.sqlshift.models.Configurations.{DBConfiguration, S3Config}
import com.goibibo.sqlshift.models.InternalConfs.{IncrementalSettings, InternalConfig, TableDetails, DBField}
import org.apache.spark.sql._
import org.slf4j.{Logger, LoggerFactory}
import scala.collection.immutable.Seq
/*
--packages "org.apache.hadoop:hadoop-aws:2.7.2,com.databricks:spark-redshift_2.10:1.1.0,com.amazonaws:aws-java-sdk:1.7.4,mysql:mysql-connector-java:5.1.39"
--jars=<Some-location>/RedshiftJDBC4-1.1.17.1017.jar
*/
object MySQLToRedshiftMigrator {
private val logger: Logger = LoggerFactory.getLogger(MySQLToRedshiftMigrator.getClass)
private def getWhereCondition(incrementalSettings: IncrementalSettings): Option[String] = {
val deltaTime = incrementalSettings.deltaTime
val column = incrementalSettings.incrementalColumn
val fromOffset = incrementalSettings.fromOffset
val toOffset = incrementalSettings.toOffset
logger.info(s"Found incremental condition. Column: ${column.orNull}, fromOffset: " +
s"${fromOffset.orNull}, toOffset: ${toOffset.orNull}, deltaTime : ${deltaTime.orNull}")
if (column.isDefined && fromOffset.isDefined && toOffset.isDefined) {
Some(s"${column.get} BETWEEN date_sub('${fromOffset.get}' , INTERVAL '${deltaTime.get}' MINUTE ) AND '${toOffset.get}'")
} else if (column.isDefined && fromOffset.isDefined) {
Some(s"${column.get} >= date_sub('${fromOffset.get}' , INTERVAL '${deltaTime.get}' MINUTE )")
} else if (column.isDefined && toOffset.isDefined) {
Some(s"${column.get} <= date_sub('${fromOffset.get}' , INTERVAL '${deltaTime.get}' MINUTE )")
} else {
logger.info("Either of column or (fromOffset/toOffset) is not provided")
None
}
}
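/* Illustrative example (hypothetical values, not taken from any configuration in this project):
* with incrementalColumn = Some("updated_at"), fromOffset = Some("2020-01-01 00:00:00"),
* toOffset = Some("2020-01-02 00:00:00") and deltaTime = Some("15"), the first branch yields:
* updated_at BETWEEN date_sub('2020-01-01 00:00:00' , INTERVAL '15' MINUTE ) AND '2020-01-02 00:00:00'
*/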
/**
* Load table in spark.
*
* @param mysqlConfig Mysql connection configuration
* @param sqlContext Spark SQLContext
* @param crashOnInvalidType ToDo: What is this?
* @return
*/
def loadToSpark(mysqlConfig: DBConfiguration, sqlContext: SQLContext, internalConfig: InternalConfig = new InternalConfig)
(implicit crashOnInvalidType: Boolean): (DataFrame, TableDetails) = {
logger.info("Loading table to Spark from MySQL")
logger.info("MySQL details: \\n{}", mysqlConfig.toString)
val tableDetails: TableDetails = RedshiftUtil.getValidFieldNames(mysqlConfig, internalConfig)
logger.info("Table details: \\n{}", tableDetails.toString)
SqlShiftMySQLDialect.registerDialect()
val partitionDetails: Option[Seq[String]] = internalConfig.shallSplit match {
case Some(false) =>
logger.info("shallSplit is false")
None
case _ =>
logger.info("shallSplit either not set or true")
tableDetails.distributionKey match {
case Some(primaryKey) =>
val typeOfPrimaryKey = tableDetails.validFields.filter(_.fieldName == primaryKey).head.fieldType
// Spark supports only Long columns for splitting the table into multiple partitions
//https://github.com/apache/spark/blob/branch-1.6/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRelation.scala#L33
if (typeOfPrimaryKey.startsWith("INT")) {
val whereCondition = internalConfig.incrementalSettings match {
case Some(incrementalSettings) =>
getWhereCondition(incrementalSettings)
case None =>
logger.info("No incremental condition found")
None
}
val minMaxTmp: (String, String) = Util.getMinMax(mysqlConfig, primaryKey, whereCondition)
val minMax: (Long, Long) = (minMaxTmp._1.toLong, minMaxTmp._2.toLong)
val nr: Long = minMax._2 - minMax._1 + 1
val mapPartitions = internalConfig.mapPartitions match {
case Some(partitions) => partitions
case None => Util.getPartitions(sqlContext, mysqlConfig, minMax)
}
if (mapPartitions == 0) {
None
} else {
val inc: Long = Math.ceil(nr.toDouble / mapPartitions).toLong
val predicates = (0 until mapPartitions).toList.
map { n =>
s"$primaryKey BETWEEN ${minMax._1 + n * inc} AND ${minMax._1 - 1 + (n + 1) * inc} "
}.
map(c => if (whereCondition.isDefined) c + s"AND (${whereCondition.get})" else c)
Some(predicates)
}
} else {
logger.warn(s"primary keys is non INT $typeOfPrimaryKey")
None
}
case None =>
logger.warn("No Distribution key found!!!")
None
}
}
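// Illustrative example (hypothetical numbers): for primaryKey = "id", minMax = (1, 100) and
// mapPartitions = 4, inc = ceil(100 / 4) = 25, so the generated predicates are
// "id BETWEEN 1 AND 25", "id BETWEEN 26 AND 50", "id BETWEEN 51 AND 75", "id BETWEEN 76 AND 100",
// each ANDed with the incremental WHERE condition when one is defined.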
val partitionedReader: DataFrame = partitionDetails match {
case Some(predicates) =>
logger.info("Using partitionedRead {}", predicates)
val properties = new Properties()
properties.setProperty("user", mysqlConfig.userName)
properties.setProperty("password", mysqlConfig.password)
sqlContext.read.
option("driver", "com.mysql.jdbc.Driver").
option("fetchSize", Integer.MIN_VALUE.toString).
option("fetchsize", Integer.MIN_VALUE.toString).
option("user", mysqlConfig.userName).
option("password", mysqlConfig.password).
jdbc(RedshiftUtil.getJDBCUrl(mysqlConfig), mysqlConfig.tableName, predicates.toArray, properties)
case None =>
val tableQuery = internalConfig.incrementalSettings match {
case Some(incrementalSettings) =>
val whereCondition = getWhereCondition(incrementalSettings)
s"""(SELECT * from ${mysqlConfig.tableName}${if (whereCondition.isDefined) " WHERE " + whereCondition.get else ""}) AS A"""
case None => mysqlConfig.tableName
}
logger.info("Using single partition read query = {}", tableQuery)
val dataReader = RedshiftUtil.getDataFrameReader(mysqlConfig, tableQuery, sqlContext)
dataReader.load
}
val data = partitionedReader.selectExpr(tableDetails.validFields.map(_.fieldName): _*)
val dataWithTypesFixed = tableDetails.validFields.filter(_.javaType.isDefined).foldLeft(data) {
(df, dbField) => {
val modifiedCol = df.col(dbField.fieldName).cast(dbField.javaType.get)
df.withColumn(dbField.fieldName, modifiedCol)
}
}
logger.info("Table load in spark is finished!!!")
(dataWithTypesFixed, tableDetails)
}
/**
* Queries for identifying changed records and updating the endtime of corresponding records in original table
*
* @param redshiftTableName Original table
* @param redshiftStagingTableName Staging table containing new records
* @param mergeKey Key on which table will be merged
* @param fieldsToDeduplicateOn A change in any of these fields should create a new record in original table
* @param incrementalColumn Column containing the timestamp on which it is updated
* @param tableDetails Contains all the table details like fieldnames, distkey and sortkey.
*
*/
def getSnapshotCreationSql(redshiftTableName: String, redshiftStagingTableName:String, mergeKey:String,
fieldsToDeduplicateOn:Seq[String], incrementalColumn:String, tableDetails: TableDetails, snapshotOptimizingFilter: String): String = {
val tableColumns = "\\"" + tableDetails.validFields.map(_.fieldName).mkString("\\", \\"") + "\\""
val deDuplicateFieldNames = "\\"" + fieldsToDeduplicateOn.mkString("\\", \\"") + "\\""
val deDuplicateCondition = fieldsToDeduplicateOn.map(x => "nvl(s.\\""+ x +"\\"::varchar,'') = nvl(o.\\""+ x +"\\"::varchar,'')").mkString(" and ")
val changedFilter = snapshotOptimizingFilter.replace("{{o}}",redshiftTableName).replace("{{s}}",redshiftStagingTableName)
val optimiserCondition = if (snapshotOptimizingFilter.nonEmpty) s" and $changedFilter " else ""
s"""create temp table changed_records
|diststyle key
|distkey("$mergeKey")
|sortkey("$mergeKey",$deDuplicateFieldNames) as
|(
| select s.* from $redshiftStagingTableName s
| left join (select * from $redshiftTableName where endtime is null $optimiserCondition) o
| on (s."$mergeKey" = o."$mergeKey" and $deDuplicateCondition)
| where o."$mergeKey" is null
|);
|update $redshiftTableName set endtime = c."$incrementalColumn"
|from changed_records c
|where $redshiftTableName."$mergeKey" = c."$mergeKey" and $redshiftTableName.endtime is null $optimiserCondition;
|insert into $redshiftTableName($tableColumns)
| select *, "$incrementalColumn" as starttime, null::timestamp as endtime
| from changed_records;""".stripMargin
}
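// Summary of the generated SQL above (added for clarity): (1) build a temp table changed_records
// holding staging rows whose deduplication fields differ from the currently open rows
// (endtime is null), (2) close the superseded rows by setting endtime from the incremental
// column, (3) insert the changed rows with starttime set and endtime null.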
/**
* Store Dataframe to Redshift table. Drop table if exists.
* If the table doesn't exist, it will be created.
*
* @param df dataframe
* @param tableDetails valid and not valid field details
* @param redshiftConf redshift configuration
* @param s3Conf s3 configuration
* @param sqlContext spark SQL context
* @param internalConfig Information about incremental and partitions
*/
def storeToRedshift(df: DataFrame, tableDetails: TableDetails, redshiftConf: DBConfiguration, s3Conf: S3Config,
sqlContext: SQLContext, internalConfig: InternalConfig = new InternalConfig): Unit = {
logger.info("Storing to Redshift")
logger.info("Redshift Details: \\n{}", redshiftConf.toString)
if (s3Conf.accessKey.isDefined && s3Conf.secretKey.isDefined) {
sqlContext.sparkContext.hadoopConfiguration.set("fs.s3a.access.key", s3Conf.accessKey.get)
sqlContext.sparkContext.hadoopConfiguration.set("fs.s3a.secret.key", s3Conf.secretKey.get)
}
val redshiftTableName = RedshiftUtil.getTableNameWithSchema(redshiftConf)
val stagingPrepend = "_staging" + {
val r = scala.util.Random
r.nextInt(10000)
}
val redshiftStagingTableName = redshiftTableName + stagingPrepend
val dropTableString = RedshiftUtil.getDropCommand(redshiftConf)
logger.info("dropTableString {}", dropTableString)
val shallOverwrite = internalConfig.shallOverwrite match {
case None =>
internalConfig.incrementalSettings.get.incrementalColumn match {
case None =>
logger.info("internalConfig.shallOverwrite is None and internalConfig.incrementalSettings is None")
true
case Some(_) =>
logger.info("internalConfig.shallOverwrite is None and internalConfig.incrementalSettings is Some")
false
}
case Some(sow) =>
logger.info("internalConfig.shallOverwrite is {}", sow)
sow
}
val (dropStagingTableString: String, mergeKey: String, shallVacuumAfterLoad: Boolean,
customFields: Seq[String],incrementalColumn: String, isSnapshot: Boolean, fieldsToDeduplicateOn: Option[Seq[String]], snapshotOptimizingFilter: Option[String]) = {
internalConfig.incrementalSettings match {
case None =>
logger.info("No dropStagingTableString and No vacuum, internalConfig.incrementalSettings is None")
("", "", false, Seq[String](),"", false, None, None)
case Some(IncrementalSettings(shallMerge, stagingTableMergeKey, vaccumAfterLoad, cs, true, incrementalColumn, fromOffset, toOffset, isSnapshot, fieldsToDeduplicateOn, snapshotOptimizingFilter, deltaTime, _)) =>
logger.info("Incremental update is append only")
("", "", false, Seq[String](),incrementalColumn, false, None, None)
case Some(IncrementalSettings(shallMerge, stagingTableMergeKey, vaccumAfterLoad, cs, false, incrementalColumn, fromOffset, toOffset, isSnapshot, fieldsToDeduplicateOn, snapshotOptimizingFilter, deltaTime, _)) =>
val dropStagingTableStr = if (shallMerge || isSnapshot) s"DROP TABLE IF EXISTS $redshiftStagingTableName;" else ""
logger.info("dropStagingTableStr = {}", dropStagingTableStr)
val mKey: String = {
if (shallMerge || isSnapshot) {
stagingTableMergeKey match {
case None =>
logger.info("mergeKey is also not provided, we use primary key in this case {}",
tableDetails.distributionKey.get)
//If primaryKey is not available and mergeKey is also not provided
//This means wrong input, get will crash if tableDetails.distributionKey is None
tableDetails.distributionKey.get
case Some(mk) =>
logger.info(s"Found mergeKey = {}", mk)
mk
}
} else {
logger.info(s"Shall merge is not specified passing mergeKey as empty")
""
}
}
val customFieldsI = cs match {
case Some(customSelect) =>
val pattern = Pattern.compile("(?:AS|as)\\\\s*(\\\\w+)\\\\s*(?:,|$)")
val matcher = pattern.matcher(customSelect)
val cf = scala.collection.mutable.ListBuffer.empty[String]
while (matcher.find()) {
val matched = matcher.group(1)
cf += matched
logger.info("matched => {}", matched)
}
cf.toSeq
case None => Seq[String]()
}
(dropStagingTableStr, mKey, vaccumAfterLoad, customFieldsI, incrementalColumn.getOrElse(""),
isSnapshot, fieldsToDeduplicateOn, snapshotOptimizingFilter)
}
}
val extraFields = tableDetails.validFields ++ Seq(DBField("starttime","timestamp"), DBField("endtime","timestamp"))
val extraSortKeys = Seq(mergeKey, "starttime", "endtime")
val tableDetailsExtra = tableDetails.copy(validFields = extraFields, sortKeys = extraSortKeys)
val createTableString: String = if(isSnapshot){
RedshiftUtil.getCreateTableString(tableDetailsExtra, redshiftConf, None, true)
}
else {
RedshiftUtil.getCreateTableString(tableDetails, redshiftConf)
}
//val redshiftStagingConf = redshiftConf.copy(tableName = redshiftConf.tableName + stagingPrepend
val createStagingTableString = RedshiftUtil.getCreateTableString(tableDetails, redshiftConf, Some(redshiftStagingTableName))
logger.info("createTableString {}", createTableString)
val dropAndCreateTableString = if (shallOverwrite) dropTableString + "\\n" + createTableString else createTableString
val preActions: String = {
redshiftConf.preLoadCmd.map(_.trim).map{ cmd =>
cmd + (if (cmd.endsWith(";")) "" else ";") + "\\n"
}.getOrElse("") +
dropAndCreateTableString + {
if (dropStagingTableString.nonEmpty && !isSnapshot) {
dropStagingTableString +
alterTableQuery(tableDetails, redshiftConf, customFields) +
"\\n" + createStagingTableString
} else if (dropStagingTableString.nonEmpty && isSnapshot) {
dropStagingTableString + alterTableQuery(tableDetailsExtra, redshiftConf, customFields) +
"\\n" + createStagingTableString
} else {
""
}
}
}
val stagingTablePostActions = if (dropStagingTableString.nonEmpty && !isSnapshot) {
val tableColumns = "\\"" + tableDetails.validFields.map(_.fieldName).mkString("\\", \\"") + "\\""
s"""DELETE FROM $redshiftTableName USING $redshiftStagingTableName
| WHERE $redshiftTableName.$mergeKey = $redshiftStagingTableName.$mergeKey; """.stripMargin +
"\\n" + {
if (customFields.isEmpty) {
// Handling columns order mismatch
s"""INSERT INTO $redshiftTableName ($tableColumns)
|SELECT $tableColumns FROM $redshiftStagingTableName;""".stripMargin
} else {
val customFieldsStr = "\\"" + customFields.mkString("\\", \\"") + "\\""
val allColumnsPlusCustomOnes = s"( $tableColumns, $customFieldsStr )"
logger.info("allColumnsPlusCustomOnes => {}", allColumnsPlusCustomOnes)
val customSelect: String = internalConfig.incrementalSettings.get.customSelectFromStaging.get
s"""INSERT INTO $redshiftTableName $allColumnsPlusCustomOnes
|SELECT *, $customSelect FROM $redshiftStagingTableName;""".stripMargin
}
}
} else if (dropStagingTableString.nonEmpty && isSnapshot){
if (fieldsToDeduplicateOn.isEmpty)
throw new RequiredFieldNotPresentException("fieldsToDeduplicateOn is not present")
getSnapshotCreationSql(redshiftTableName, redshiftStagingTableName, mergeKey, fieldsToDeduplicateOn.get, incrementalColumn, tableDetailsExtra, snapshotOptimizingFilter.getOrElse(""))
} else {
""
}
val postActions: String = Seq[String](stagingTablePostActions,
redshiftConf.postLoadCmd.map(_.trim).map { cmd =>
val changedCmd = cmd.replace("{{s}}", redshiftStagingTableName)
changedCmd + (if (changedCmd.endsWith(";")) "" else ";") + "\\n"
}.getOrElse(""),
dropStagingTableString
).filter(_.trim != "").mkString("\\n")
logger.info("Redshift PreActions = {}", preActions)
logger.info("Redshift PostActions = {}", postActions)
val redshiftWriteMode = if (dropStagingTableString == "") "append" else "overwrite"
logger.info("Redshift write mode: {}", redshiftWriteMode)
val redshiftTableNameForIngestion = if (dropStagingTableString != "") redshiftStagingTableName
else redshiftTableName
logger.info("redshiftTableNameForIngestion: {}", redshiftTableNameForIngestion)
val redshiftWriterPartitioned: DataFrame = internalConfig.reducePartitions match {
case Some(reducePartitions) =>
if (df.rdd.getNumPartitions == reducePartitions)
df.repartition(reducePartitions)
else
df
case None => df
}
val extracopyoptions = if (dropStagingTableString != "") {
"TRUNCATECOLUMNS COMPUPDATE OFF STATUPDATE OFF"
} else "TRUNCATECOLUMNS COMPUPDATE OFF "
val redshiftWriter = redshiftWriterPartitioned.write.
format("com.databricks.spark.redshift").
option("url", RedshiftUtil.getJDBCUrl(redshiftConf)).
option("user", redshiftConf.userName).
option("password", redshiftConf.password).
option("jdbcdriver", "com.amazon.redshift.jdbc4.Driver").
option("dbtable", redshiftTableNameForIngestion).
option("tempdir", s3Conf.s3Location).
option("extracopyoptions", extracopyoptions).
mode(redshiftWriteMode)
val redshiftWriterWithPreActions = {
if (preActions != "") redshiftWriter.option("preactions", preActions)
else redshiftWriter
}
val redshiftWriterWithPostActions = {
if (postActions != "") redshiftWriterWithPreActions.option("postactions", postActions)
else redshiftWriterWithPreActions
}
redshiftWriterWithPostActions.save()
try {
if (shallVacuumAfterLoad) {
RedshiftUtil.performVacuum(redshiftConf)
} else {
logger.info("Not opting for Vacuum, shallVacuumAfterLoad is false")
}
} catch {
case e: Exception => logger.warn("Vacuum failed for reason", e)
}
}
/**
* Alter table to add or delete columns in redshift table if any changes occurs in sql table
*
* @param tableDetails sql table details
* @param redshiftConf redshift configuration
* @return Query of add and delete columns from redshift table
*/
private def alterTableQuery(tableDetails: TableDetails, redshiftConf: DBConfiguration, customFields: Seq[String]): String = {
val redshiftTableName: String = RedshiftUtil.getTableNameWithSchema(redshiftConf)
try {
val mainTableColumnNames: Set[String] = RedshiftUtil.getColumnNamesAndTypes(redshiftConf).keys.toSet
// All column names must be distinct, otherwise the Redshift load will fail
val stagingTableColumnAndTypes: Map[String, String] = tableDetails
.validFields
.map { td => td.fieldName.toLowerCase -> td.fieldType }
.toMap
val stagingTableColumnNames: Set[String] = (stagingTableColumnAndTypes.keys ++ customFields).toSet
val addedColumns: Set[String] = stagingTableColumnNames -- mainTableColumnNames
val deletedColumns: Set[String] = mainTableColumnNames -- stagingTableColumnNames
val addColumnsQuery = addedColumns.foldLeft("\\n") { (query, columnName) =>
query + s"""ALTER TABLE $redshiftTableName ADD COLUMN "$columnName" """ +
stagingTableColumnAndTypes.getOrElse(columnName, "") + ";\\n"
}
val deleteColumnQuery = deletedColumns.foldLeft("\\n") { (query, columnName) =>
query + s"""ALTER TABLE $redshiftTableName DROP COLUMN "$columnName" ;\\n"""
}
addColumnsQuery + deleteColumnQuery
} catch {
case e: Exception =>
logger.warn("Error occurred while altering table", e)
""
}
}
}
|
goibibo/SqlShift
|
src/main/scala/com/goibibo/sqlshift/commons/MySQLToRedshiftMigrator.scala
|
Scala
|
mit
| 23,986
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
import java.util.Properties
import java.util.concurrent.atomic._
import kafka.common._
import kafka.message._
import kafka.log._
import kafka.utils._
/**
* A stress test that instantiates a log, runs continual appends against it from one thread and continual reads
* from another, and checks a few basic assertions until the user kills the process.
*/
object StressTestLog {
val running = new AtomicBoolean(true)
def main(args: Array[String]) {
val dir = TestUtils.tempDir()
val time = new MockTime
val logProperties = new Properties()
logProperties.put(LogConfig.SegmentBytesProp, 64*1024*1024: java.lang.Integer)
logProperties.put(LogConfig.MaxMessageBytesProp, Int.MaxValue: java.lang.Integer)
logProperties.put(LogConfig.SegmentIndexBytesProp, 1024*1024: java.lang.Integer)
val log = new Log(dir = dir,
config = LogConfig(logProperties),
recoveryPoint = 0L,
scheduler = time.scheduler,
time = time)
val writer = new WriterThread(log)
writer.start()
val reader = new ReaderThread(log)
reader.start()
Runtime.getRuntime().addShutdownHook(new Thread() {
override def run() = {
running.set(false)
writer.join()
reader.join()
CoreUtils.rm(dir)
}
})
while(running.get) {
println("Reader offset = %d, writer offset = %d".format(reader.offset, writer.offset))
Thread.sleep(1000)
}
}
abstract class WorkerThread extends Thread {
override def run() {
try {
var offset = 0
while(running.get)
work()
} catch {
case e: Exception =>
e.printStackTrace()
running.set(false)
}
println(getClass.getName + " exiting...")
}
def work()
}
class WriterThread(val log: Log) extends WorkerThread {
@volatile var offset = 0
override def work() {
val logAppendInfo = log.append(TestUtils.singleMessageSet(offset.toString.getBytes))
require(logAppendInfo.firstOffset == offset && logAppendInfo.lastOffset == offset)
offset += 1
if(offset % 1000 == 0)
Thread.sleep(500)
}
}
class ReaderThread(val log: Log) extends WorkerThread {
@volatile var offset = 0
override def work() {
try {
log.read(offset, 1024, Some(offset+1)).messageSet match {
case read: FileMessageSet if read.sizeInBytes > 0 => {
val first = read.head
require(first.offset == offset, "We should either read nothing or the message we asked for.")
require(MessageSet.entrySize(first.message) == read.sizeInBytes, "Expected %d but got %d.".format(MessageSet.entrySize(first.message), read.sizeInBytes))
offset += 1
}
case _ =>
}
} catch {
case e: OffsetOutOfRangeException => // this is okay
}
}
}
}
|
usakey/kafka
|
core/src/test/scala/other/kafka/StressTestLog.scala
|
Scala
|
apache-2.0
| 3,792
|
package com.twitter.jvm
import com.twitter.conversions.StorageUnitOps._
import com.twitter.util.Time
object NilJvm extends Jvm {
val opts: Opts = new Opts {
def compileThresh: Option[Int] = None
}
def forceGc(): Unit = System.gc()
val snapCounters: Map[String, String] = Map()
val snap: Snapshot = Snapshot(Time.epoch, Heap(0, 0, Seq()), Seq())
val edenPool: Pool = new Pool { def state() = PoolState(0, 0.bytes, 0.bytes) }
val metaspaceUsage: Option[Jvm.MetaspaceUsage] = None
val safepoint: Safepoint = Safepoint(0, 0, 0)
val applicationTime: Long = 0L
val tenuringThreshold: Long = 0L
}
|
twitter/util
|
util-jvm/src/main/scala/com/twitter/jvm/NilJvm.scala
|
Scala
|
apache-2.0
| 622
|
/*
* Part of NDLA article-api.
* Copyright (C) 2016 NDLA
*
* See LICENSE
*
*/
package no.ndla.articleapi.model.domain
import com.sksamuel.elastic4s.analyzers._
import no.ndla.articleapi.ArticleApiProperties.DefaultLanguage
import no.ndla.language.model.LanguageTag
object Language {
val UnknownLanguage: LanguageTag = LanguageTag("und")
val NoLanguage = ""
val AllLanguages = "*"
val languageAnalyzers = Seq(
LanguageAnalyzer(LanguageTag("nb"), NorwegianLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("nn"), NorwegianLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("sma"), StandardAnalyzer), // Southern sami
LanguageAnalyzer(LanguageTag("se"), StandardAnalyzer), // Northern Sami
LanguageAnalyzer(LanguageTag("en"), EnglishLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("ar"), ArabicLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("hy"), ArmenianLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("eu"), BasqueLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("pt-br"), BrazilianLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("bg"), BulgarianLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("ca"), CatalanLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("ja"), CjkLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("ko"), CjkLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("zh"), CjkLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("cs"), CzechLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("da"), DanishLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("nl"), DutchLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("fi"), FinnishLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("fr"), FrenchLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("gl"), GalicianLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("de"), GermanLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("el"), GreekLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("hi"), HindiLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("hu"), HungarianLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("id"), IndonesianLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("ga"), IrishLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("it"), ItalianLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("lt"), LithuanianLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("lv"), LatvianLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("fa"), PersianLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("pt"), PortugueseLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("ro"), RomanianLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("ru"), RussianLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("srb"), SoraniLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("es"), SpanishLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("sv"), SwedishLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("th"), ThaiLanguageAnalyzer),
LanguageAnalyzer(LanguageTag("tr"), TurkishLanguageAnalyzer),
LanguageAnalyzer(UnknownLanguage, StandardAnalyzer)
)
def findByLanguageOrBestEffort[P <: LanguageField[_]](sequence: Seq[P], language: String): Option[P] = {
sequence
.find(_.language == language)
.orElse(
sequence
.sortBy(lf => languageAnalyzers.map(la => la.languageTag.toString).reverse.indexOf(lf.language))
.lastOption)
}
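// Illustrative example (hypothetical data): given fields with languages Seq("en", "nb") and
// language = "de", there is no exact match, so the fallback sorts by position in the reversed
// languageAnalyzers list and lastOption picks "nb", the highest-priority analyzer.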
def languageOrUnknown(language: Option[String]): LanguageTag = {
language.filter(_.nonEmpty) match {
case Some(x) if x == "unknown" => UnknownLanguage
case Some(x) => LanguageTag(x)
case None => UnknownLanguage
}
}
def getSupportedLanguages(sequences: Seq[LanguageField[_]]*): Seq[String] = {
sequences.flatMap(_.map(_.language)).distinct.sortBy { lang =>
languageAnalyzers.map(la => la.languageTag).indexOf(LanguageTag(lang))
}
}
def getSearchLanguage(languageParam: String, supportedLanguages: Seq[String]): String = {
val l = if (languageParam == AllLanguages) DefaultLanguage else languageParam
if (supportedLanguages.contains(l))
l
else
supportedLanguages.head
}
}
case class LanguageAnalyzer(languageTag: LanguageTag, analyzer: Analyzer)
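// ---------------------------------------------------------------------------
// Hedged usage sketch (added for illustration; not part of the original file).
// It only exercises the helpers defined above; the concrete language values
// are assumptions chosen for the example.
// ---------------------------------------------------------------------------
object LanguageUsageSketch {
  def demo(): Unit = {
    // "unknown" and a missing language both collapse to the "und" tag
    assert(Language.languageOrUnknown(Some("unknown")) == Language.UnknownLanguage)
    assert(Language.languageOrUnknown(None) == Language.UnknownLanguage)
    // "*" is rewritten to DefaultLanguage when DefaultLanguage is supported
    assert(Language.getSearchLanguage(Language.AllLanguages, Seq(DefaultLanguage, "en")) == DefaultLanguage)
    // an unsupported language falls back to the first supported one
    assert(Language.getSearchLanguage("de", Seq("en", "nn")) == "en")
  }
}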
|
NDLANO/article-api
|
src/main/scala/no/ndla/articleapi/model/domain/Language.scala
|
Scala
|
gpl-3.0
| 4,198
|
/* sbt -- Simple Build Tool
* Copyright 2008, 2009 Mark Harrah
*/
package xsbt
import scala.tools.nsc.{io, plugins, symtab, Global, Phase}
import io.{AbstractFile, PlainFile, ZipArchive}
import plugins.{Plugin, PluginComponent}
import symtab.Flags
import scala.collection.mutable.{HashMap, HashSet, Map, Set}
import java.io.File
import java.util.zip.ZipFile
import xsbti.AnalysisCallback
object Analyzer
{
def name = "xsbt-analyzer"
}
final class Analyzer(val global: CallbackGlobal) extends Compat
{
import global._
def newPhase(prev: Phase): Phase = new AnalyzerPhase(prev)
private class AnalyzerPhase(prev: Phase) extends Phase(prev)
{
override def description = "Extracts dependency information, finds concrete instances of provided superclasses, and application entry points."
def name = Analyzer.name
def run
{
for(unit <- currentRun.units if !unit.isJava)
{
// build dependencies structure
val sourceFile = unit.source.file.file
callback.beginSource(sourceFile)
for(on <- unit.depends)
{
def binaryDependency(file: File, className: String) = callback.binaryDependency(file, className, sourceFile)
val onSource = on.sourceFile
if(onSource == null)
{
classFile(on) match
{
case Some((f,className,inOutDir)) =>
if(inOutDir && on.isJavaDefined) registerTopLevelSym(on)
f match
{
case ze: ZipArchive#Entry => for(zip <- ze.underlyingSource; zipFile <- Option(zip.file) ) binaryDependency(zipFile, className)
case pf: PlainFile => binaryDependency(pf.file, className)
case _ => ()
}
case None => ()
}
}
else
callback.sourceDependency(onSource.file, sourceFile)
}
// build list of generated classes
for(iclass <- unit.icode)
{
val sym = iclass.symbol
def addGenerated(separatorRequired: Boolean)
{
for(classFile <- outputDirs map (fileForClass(_, sym, separatorRequired)) find (_.exists))
callback.generatedClass(sourceFile, classFile, className(sym, '.', separatorRequired))
}
if(sym.isModuleClass && !sym.isImplClass)
{
if(isTopLevelModule(sym) && sym.companionClass == NoSymbol)
addGenerated(false)
addGenerated(true)
}
else
addGenerated(false)
}
callback.endSource(sourceFile)
}
}
}
private[this] final val classSeparator = '.'
private[this] def classFile(sym: Symbol): Option[(AbstractFile, String, Boolean)] =
// package can never have a corresponding class file; this test does not
// catch package objects (that do not have this flag set)
if (sym hasFlag scala.tools.nsc.symtab.Flags.PACKAGE) None else
{
import scala.tools.nsc.symtab.Flags
val name = flatname(sym, classSeparator) + moduleSuffix(sym)
findClass(name).map { case (file,inOut) => (file, name,inOut) } orElse {
if(isTopLevelModule(sym))
{
val linked = sym.companionClass
if(linked == NoSymbol)
None
else
classFile(linked)
}
else
None
}
}
// doesn't seem to be in 2.7.7, so copied from GenJVM to here
private def moduleSuffix(sym: Symbol) =
if (sym.hasFlag(Flags.MODULE) && !sym.isMethod && !sym.isImplClass && !sym.hasFlag(Flags.JAVA)) "$" else "";
private def flatname(s: Symbol, separator: Char) =
atPhase(currentRun.flattenPhase.next) { s fullName separator }
private def isTopLevelModule(sym: Symbol): Boolean =
atPhase (currentRun.picklerPhase.next) {
sym.isModuleClass && !sym.isImplClass && !sym.isNestedClass
}
private def className(s: Symbol, sep: Char, dollarRequired: Boolean): String =
flatname(s, sep) + (if(dollarRequired) "$" else "")
private def fileForClass(outputDirectory: File, s: Symbol, separatorRequired: Boolean): File =
new File(outputDirectory, className(s, File.separatorChar, separatorRequired) + ".class")
}
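// ---------------------------------------------------------------------------
// Hedged illustration (added; not part of the original sbt sources). Rough
// sketch of the naming scheme the helpers above implement; the package and
// symbol names are made up for the example:
//   for `package a.b { object Foo { class Bar } }` (after the flatten phase)
//     flatname(Foo, '.')           ~> "a.b.Foo"
//     moduleSuffix(Foo)            ~> "$"        (objects get a trailing '$')
//     className(Bar, '/', false)   ~> "a/b/Foo$Bar"
//     fileForClass(out, Foo, true) ~> new File(out, "a/b/Foo$.class")
// ---------------------------------------------------------------------------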
abstract class Compat
{
val global: Global
import global._
val LocalChild = global.tpnme.LOCAL_CHILD
val Nullary = global.NullaryMethodType
val ScalaObjectClass = definitions.ScalaObjectClass
private[this] final class MiscCompat
{
// in 2.9, nme.LOCALCHILD was renamed to tpnme.LOCAL_CHILD
def tpnme = nme
def LOCAL_CHILD = nme.LOCALCHILD
def LOCALCHILD = sourceCompatibilityOnly
// in 2.10, ScalaObject was removed
def ScalaObjectClass = definitions.ObjectClass
def NullaryMethodType = NullaryMethodTpe
def MACRO = DummyValue
}
// in 2.9, NullaryMethodType was added to Type
object NullaryMethodTpe {
def unapply(t: Type): Option[Type] = None
}
val DummyValue = 0
def hasMacro(s: Symbol): Boolean =
{
val MACRO = Flags.MACRO // will be DummyValue for versions before 2.10
MACRO != DummyValue && s.hasFlag(MACRO)
}
private[this] def sourceCompatibilityOnly: Nothing = throw new RuntimeException("For source compatibility only: should not get here.")
private[this] final implicit def miscCompat(n: AnyRef): MiscCompat = new MiscCompat
}
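// ---------------------------------------------------------------------------
// Hedged illustration (added; not part of the original sbt sources). The
// Compat trait above relies on a low-priority implicit conversion: member
// selections such as `global.tpnme.LOCAL_CHILD` resolve against the real
// compiler API when it exists and only fall back to MiscCompat otherwise.
// A self-contained sketch of the same pattern with made-up names:
// ---------------------------------------------------------------------------
private object SourceCompatSketch
{
	class NewApi { def feature: String = "real feature" }
	class OldApi                                   // has no `feature` member
	final class Fallback(o: AnyRef) { def feature: String = "fallback" }
	implicit def toFallback(o: AnyRef): Fallback = new Fallback(o)
	// (new NewApi).feature resolves to the real member    -> "real feature"
	// (new OldApi).feature only compiles via `toFallback` -> "fallback"
	def demo: (String, String) = ((new NewApi).feature, (new OldApi).feature)
}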
|
harrah/xsbt
|
compile/interface/src/main/scala/xsbt/Analyzer.scala
|
Scala
|
bsd-3-clause
| 4,950
|
package android
import java.io.File
import android.Dependencies.{AarLibrary, ApkLibrary, LibraryDependency}
import com.android.builder.core.{AaptPackageProcessBuilder, AndroidBuilder, VariantType}
import com.android.builder.model.AaptOptions
import com.android.builder.dependency.{LibraryDependency => AndroidLibrary}
import com.android.builder.png.VectorDrawableRenderer
import com.android.ide.common.res2._
import com.android.resources.Density
import com.android.utils.ILogger
import sbt.Keys.TaskStreams
import sbt._
import collection.JavaConverters._
import language.postfixOps
import Dependencies.LibrarySeqOps
import sbt.classpath.ClasspathUtilities
import scala.util.Try
import scala.xml.XML
object Resources {
val ANDROID_NS = "http://schemas.android.com/apk/res/android"
def resourceUrl =
Resources.getClass.getClassLoader.getResource _
val reservedWords = Set(
"def",
"forSome",
"implicit",
"lazy",
"match",
"object",
"override",
"sealed",
"trait",
"type",
"val",
"var",
"with",
"yield"
)
def doCollectResources( bldr: AndroidBuilder
, minSdk: Int
, noTestApk: Boolean
, isLib: Boolean
, libs: Seq[LibraryDependency]
, layout: ProjectLayout
, extraAssets: Seq[File]
, extraRes: Seq[File]
, renderVectors: Boolean
, logger: ILogger
, cache: File
, s: TaskStreams
)(implicit m: BuildOutput.Converter): (File,File) = {
val assetBin = layout.mergedAssets
val assets = layout.assets
val resTarget = layout.mergedRes
val rsResources = layout.rsRes
resTarget.mkdirs()
assetBin.mkdirs
val depassets = collectdeps(libs) collect {
case m: ApkLibrary => m
case n: AarLibrary => n
} collect { case n if n.getAssetsFolder.isDirectory => n.getAssetsFolder }
// copy assets to single location
depassets ++ (libs collect {
case r if r.layout.assets.isDirectory => r.layout.assets
}) foreach { a => IO.copyDirectory(a, assetBin, false, true) }
extraAssets foreach { a =>
if (a.isDirectory) IO.copyDirectory(a, assetBin, false, true)
}
if (assets.exists) IO.copyDirectory(assets, assetBin, false, true)
if (noTestApk && layout.testAssets.exists)
IO.copyDirectory(layout.testAssets, assetBin, false, true)
// prepare resource sets for merge
val res = extraRes ++ Seq(layout.res, rsResources) ++
(libs map { _.layout.res } filter { _.isDirectory })
s.log.debug("Local/library-project resources: " + res)
// this needs to wait for other projects to at least finish their
// apklibs tasks--handled if androidBuild() is called properly
val depres = collectdeps(libs) collect {
case m: ApkLibrary => m
case n: AarLibrary => n
} collect { case n if n.getResFolder.isDirectory => n.getResFolder }
s.log.debug("apklib/aar resources: " + depres)
    val respaths = depres ++ res.reverse ++
      (if (layout.res.isDirectory) Seq(layout.res) else Seq.empty) ++
      (if (noTestApk && layout.testRes.isDirectory)
        Seq(layout.testRes) else Seq.empty)
val vectorprocessor = new VectorDrawableRenderer(
if (renderVectors) minSdk else math.max(minSdk,21),
layout.generatedVectors, Set(Density.MEDIUM,
Density.HIGH,
Density.XHIGH,
Density.XXHIGH).asJava,
logger)
val sets = respaths.distinct flatMap { r =>
val set = new ResourceSet(r.getAbsolutePath)
set.addSource(r)
set.setPreprocessor(vectorprocessor)
val generated = new GeneratedResourceSet(set)
set.setGeneratedSet(generated)
s.log.debug("Adding resource path: " + r)
List(generated, set)
}
val inputs = (respaths flatMap { r => (r ***) get }) filter (n =>
!n.getName.startsWith(".") && !n.getName.startsWith("_"))
var needsFullResourceMerge = false
FileFunction.cached(cache / "nuke-res-if-changed", FilesInfo.lastModified) { in =>
needsFullResourceMerge = true
IO.delete(resTarget)
in
}(depres.toSet)
FileFunction.cached(cache / "collect-resources")(
FilesInfo.lastModified, FilesInfo.exists) { (inChanges,outChanges) =>
s.log.info("Collecting resources")
incrResourceMerge(layout, minSdk, resTarget, isLib, libs, cache / "collect-resources",
logger, bldr, sets, vectorprocessor, inChanges, needsFullResourceMerge, s.log)
((resTarget ** FileOnlyFilter).get ++ (layout.generatedVectors ** FileOnlyFilter).get).toSet
}(inputs.toSet)
(assetBin, resTarget)
}
def incrResourceMerge(
layout: ProjectLayout,
minSdk: Int,
resTarget: File,
isLib: Boolean,
libs: Seq[LibraryDependency],
blobDir: File,
logger: ILogger,
bldr: AndroidBuilder,
resources: Seq[ResourceSet],
preprocessor: ResourcePreprocessor,
changes: ChangeReport[File],
needsFullResourceMerge: Boolean,
slog: Logger
)(implicit m: BuildOutput.Converter) {
def merge() = fullResourceMerge(layout, minSdk, resTarget, isLib, libs, blobDir,
logger, bldr, resources, preprocessor, slog)
val merger = new ResourceMerger(minSdk)
if (!merger.loadFromBlob(blobDir, true)) {
slog.debug("Could not load merge blob (no full merge yet?)")
merge()
} else if (!merger.checkValidUpdate(resources.asJava)) {
slog.debug("requesting full merge: !checkValidUpdate")
merge()
} else if (needsFullResourceMerge) {
slog.debug("requesting full merge: dependency resources have changed!")
merge()
} else {
val fileValidity = new FileValidity[ResourceSet]
val exists = changes.added ++ changes.removed ++ changes.modified exists {
file =>
val status = if (changes.added contains file)
FileStatus.NEW
else if (changes.removed contains file)
FileStatus.REMOVED
else if (changes.modified contains file)
FileStatus.CHANGED
else
sys.error("Unknown file status: " + file)
merger.findDataSetContaining(file, fileValidity)
val vstatus = fileValidity.getStatus
if (vstatus == FileValidity.FileStatus.UNKNOWN_FILE) {
merge()
slog.debug("Incremental merge aborted, unknown file: " + file)
true
} else if (vstatus == FileValidity.FileStatus.VALID_FILE) {
// begin workaround
// resource merger doesn't seem to actually copy changed files over...
// values.xml gets merged, but if files are changed...
val targetFile = resTarget / (
file relativeTo fileValidity.getSourceFile).get.getPath
val copy = Seq((file, targetFile))
status match {
case FileStatus.NEW =>
case FileStatus.CHANGED =>
if (targetFile.exists) IO.copy(copy, false, true)
case FileStatus.REMOVED => targetFile.delete()
}
// end workaround
try {
if (!fileValidity.getDataSet.updateWith(
fileValidity.getSourceFile, file, status, logger)) {
slog.debug("Unable to handle changed file: " + file)
merge()
true
} else
false
} catch {
case e: RuntimeException =>
slog.warn("Unable to handle changed file: " + file + ": " + e)
merge()
true
}
} else
false
}
if (!exists) {
slog.info("Performing incremental resource merge")
val writer = new MergedResourceWriter(resTarget,
bldr.getAaptCruncher(SbtProcessOutputHandler(slog)),
true, true, layout.publicTxt, layout.mergeBlame,
preprocessor)
merger.mergeData(writer, true)
merger.writeBlobTo(blobDir, writer)
}
}
}
def fullResourceMerge(layout: ProjectLayout, minSdk: Int, resTarget: File, isLib: Boolean,
libs: Seq[LibraryDependency], blobDir: File, logger: ILogger,
bldr: AndroidBuilder, resources: Seq[ResourceSet],
preprocessor: ResourcePreprocessor, slog: Logger)(implicit m: BuildOutput.Converter) {
slog.info("Performing full resource merge")
val merger = new ResourceMerger(minSdk)
resTarget.mkdirs()
resources foreach { r =>
r.loadFromFiles(logger)
merger.addDataSet(r)
}
val writer = new MergedResourceWriter(resTarget,
bldr.getAaptCruncher(SbtProcessOutputHandler(slog)),
true, true, layout.publicTxt, layout.mergeBlame, preprocessor)
merger.mergeData(writer, false)
merger.writeBlobTo(blobDir, writer)
}
def aapt(bldr: AndroidBuilder, manifest: File, pkg: String,
extraParams: Seq[String],
libs: Seq[LibraryDependency], lib: Boolean, debug: Boolean,
res: File, assets: File, resApk: String, gen: File, proguardTxt: String,
logger: Logger) = synchronized {
gen.mkdirs()
val options = new AaptOptions {
override def getIgnoreAssets = null
override def getNoCompress = null
override def getFailOnMissingConfigEntry = false
override def getAdditionalParameters = extraParams.asJava
}
val genPath = gen.getAbsolutePath
val all = collectdeps(libs)
logger.debug("All libs: " + all)
logger.debug("packageForR: " + pkg)
logger.debug("proguard.txt: " + proguardTxt)
val aaptCommand = new AaptPackageProcessBuilder(manifest, options)
if (res.isDirectory)
aaptCommand.setResFolder(res)
if (assets.isDirectory)
aaptCommand.setAssetsFolder(assets)
aaptCommand.setLibraries(all.asJava)
aaptCommand.setPackageForR(pkg)
aaptCommand.setResPackageOutput(resApk)
aaptCommand.setSourceOutputDir(if (resApk == null) genPath else null)
aaptCommand.setSymbolOutputDir(if (resApk == null) genPath else null)
aaptCommand.setProguardOutput(proguardTxt)
aaptCommand.setType(if (lib) VariantType.LIBRARY else VariantType.DEFAULT)
aaptCommand.setDebuggable(debug)
try {
bldr.processResources(aaptCommand, true, SbtProcessOutputHandler(logger))
} catch {
case e: com.android.ide.common.process.ProcessException =>
PluginFail(e.getMessage)
}
}
def collectdeps(libs: Seq[AndroidLibrary]): Seq[AndroidLibrary] = {
libs
.map(_.getDependencies.asScala)
.flatMap(collectdeps)
.++(libs)
.distinctLibs
}
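  // --- Hedged illustration (added; not part of the original plugin source) ---
  // collectdeps walks the transitive library graph and then de-duplicates it.
  // A minimal sketch of the same traversal over a made-up node type (the real
  // code works on AndroidLibrary and uses `distinctLibs`):
  private object CollectDepsSketch {
    final case class Node(name: String, deps: Seq[Node])
    def collect(libs: Seq[Node]): Seq[Node] =
      (libs.flatMap(l => collect(l.deps)) ++ libs).distinct
    // collect(Seq(Node("app", Seq(Node("support", Nil))))) ==
    //   Seq(Node("support", Nil), Node("app", Seq(Node("support", Nil))))
    // i.e. dependencies come before their dependents.
  }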
def generateTR(t: Boolean, a: Seq[File], p: String, layout: ProjectLayout,
platformApi: Int, platform: (String,Seq[String]), sv: String,
l: Seq[LibraryDependency], f: Boolean, i: Seq[String], s: TaskStreams): Seq[File] = {
val j = platform._1
val r = layout.res
val g = layout.gen
val ignores = i.toSet
val tr = p.split("\\.").foldLeft (g) { _ / _ } / "TR.scala"
if (!t)
Seq.empty[File]
else
FileFunction.cached(s.cacheDirectory / "typed-resources-generator", FilesInfo.hash) { in =>
if (in.nonEmpty) {
s.log.info("Regenerating TR.scala because R.java has changed")
val androidjar = ClasspathUtilities.toLoader(file(j))
val layouts = (r ** "layout*" ** "*.xml" get) ++
(for {
lib <- l filterNot {
case p: Dependencies.Pkg => ignores(p.pkg)
case _ => false
}
xml <- lib.getResFolder ** "layout*" ** "*.xml" get
} yield xml)
s.log.debug("Layouts: " + layouts)
// XXX handle package references? @id/android:ID or @id:android/ID
val re = "@\\+id/(.*)".r
def classForLabel(l: String) = {
if (l contains ".") Some(l)
else {
Seq("android.widget."
, "android.view."
, "android.webkit.").flatMap {
pkg => Try(androidjar.loadClass(pkg + l).getName).toOption
}.headOption
}
}
def warn(res: Seq[(String,String)]) = {
// nice to have:
// merge to a common ancestor, this is possible for androidJar
// but to do so is perilous/impossible for project code...
// instead:
// reduce to ViewGroup for *Layout, and View for everything else
val overrides = res.groupBy(r => r._1) filter (
_._2.toSet.size > 1) collect {
case (k,v) =>
s.log.warn("%s was reassigned: %s" format (k,
v map (_._2) mkString " => "))
              // reduce to ViewGroup when any of the clashing classes is a *Layout
              k -> (if (v exists (_._2 endsWith "Layout"))
                "android.view.ViewGroup" else "android.view.View")
}
(res ++ overrides).toMap
}
val layoutTypes = warn(for {
file <- layouts
layout = XML loadFile file
l <- classForLabel(layout.label)
} yield file.getName.stripSuffix(".xml") -> l)
val resources = warn(for {
b <- layouts
layout = XML loadFile b
n <- layout.descendant_or_self
re(id) <- n.attribute(ANDROID_NS, "id") map { _.head.text }
l <- classForLabel(n.label)
} yield id -> l)
val trTemplate = IO.readLinesURL(
resourceUrl("tr.scala.template")) mkString "\n"
tr.delete()
val resdirs = if (f) {
r +: (for {
lib <- l filterNot {
case p: Dependencies.Pkg => ignores(p.pkg)
case _ => false
}
} yield lib.getResFolder)
} else Nil
val rms1 = processValuesXml(resdirs, s)
val rms2 = processResourceTypeDirs(resdirs, s)
val combined = reduceResourceMap(Seq(rms1, rms2)).filter(_._2.nonEmpty)
val combined1 = combined.map { case (k, xs) =>
val k2 = if (k endsWith "-array") "array" else k
val trt = trTypes(k)
val ys = xs.toSet[String].map { x =>
val y = x.replace('.', '_')
s" final val ${wrap(y)} = TypedRes[TypedResource.$trt](R.$k2.${wrap(y)})"
}
k -> ys
}
val combined2 = combined1.foldLeft(emptyResourceMap) { case (acc, (k, xs)) =>
val k2 = if (k endsWith "-array") "array" else k
acc + ((k2, acc(k2) ++ xs))
}
val trs = combined2.foldLeft(List.empty[String]) { case (acc, (k, xs)) =>
val k2 = if (k endsWith "-array") "array" else k
s"""
| object $k2 {
|${xs.mkString("\n")}
| }""".stripMargin :: acc
}
val deprForward = {
if (platformApi < 21) ""
else {
val color =
"""
| @TargetApi(23)
| @inline def getColor(c: Context, resid: Int): Int = {
| if (Build.VERSION.SDK_INT >= 23)
| c.getColor(resid)
| else
| c.getResources.getColor(resid)
| }""".stripMargin
val drawable =
"""
| @TargetApi(21)
| @inline def getDrawable(c: Context, resid: Int): Drawable = {
| if (Build.VERSION.SDK_INT >= 21)
| c.getDrawable(resid)
| else
| c.getResources.getDrawable(resid)
| }""".stripMargin
val methods = if (platformApi >= 23) color + "\n\n" + drawable else drawable
s"""
| // Helper object to suppress deprecation warnings as discussed in
| // https://issues.scala-lang.org/browse/SI-7934
| @deprecated("", "")
| private trait compat {
|${methods}
| }
| private object compat extends compat""".stripMargin
}
}
val getColor = " " + (if (platformApi >= 23) {
"compat.getColor(c,resid)"
} else {
"c.getResources.getColor(resid)"
})
val getDrawable = " " + (if (platformApi >= 21) {
"compat.getDrawable(c,resid)"
} else {
"c.getResources.getDrawable(resid)"
})
IO.write(tr, trTemplate format (p,
resources map { case (k,v) =>
" final val %s = TypedResource[%s](R.id.%s)" format (wrap(k),v,wrap(k))
} mkString "\n",
layoutTypes map { case (k,v) =>
" final val %s = TypedLayout[%s](R.layout.%s)" format (wrap(k),v,wrap(k))
} mkString "\n", trs.mkString, getColor, getDrawable, getDrawable, deprForward))
Set(tr)
} else Set.empty
}(a.toSet).toSeq
}
def wrap(s: String) = if (reservedWords(s)) s"`$s`" else s
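  // Hedged note (added for clarity): wrap backtick-escapes generated identifiers
  // that collide with Scala keywords so TR.scala still compiles, e.g.
  //   wrap("match")    == "`match`"
  //   wrap("app_name") == "app_name"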
val trTypes = Map(
"anim" -> "ResAnim",
"animator" -> "ResAnimator",
"array" -> "ResArray",
"string-array" -> "ResStringArray",
"integer-array" -> "ResIntegerArray",
"attr" -> "ResAttr",
"bool" -> "ResBool",
"color" -> "ResColor",
"dimen" -> "ResDimen",
"drawable" -> "ResDrawable",
"fraction" -> "ResFraction",
"integer" -> "ResInteger",
"interpolator" -> "ResInterpolator",
"menu" -> "ResMenu",
"mipmap" -> "ResMipMap",
"plurals" -> "ResPlurals",
"raw" -> "ResRaw",
"string" -> "ResString",
"style" -> "ResStyle",
"transition" -> "ResTransition",
"xml" -> "ResXml"
)
val itemTypes = Set(
"anim",
"animator",
"array",
"bool",
"color",
"dimen",
"drawable",
"fraction",
"integer",
"interpolator",
"menu",
"mipmap",
"plurals",
"raw",
"string",
"style",
"transition",
"xml"
)
val formatTypes = List(
"boolean" -> "bool",
"color" -> "color",
"dimension" -> "dimen",
"fraction" -> "fraction",
"integer" -> "integer",
"string" -> "string"
).toMap
type ResourceMap = Map[String,List[String]]
val emptyResourceMap = Map.empty[String,List[String]].withDefaultValue(Nil)
def reduceResourceMap(rms: Seq[ResourceMap]): ResourceMap =
rms.foldLeft(emptyResourceMap) { (m, n) =>
n.keys.foldLeft(m)((m2, k) => m2 + (k -> (m2(k) ++ n(k))))
}
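  // --- Hedged illustration (added; not part of the original plugin source) ---
  // Worked example of reduceResourceMap: per-key lists are concatenated in the
  // order the maps are given. The entries below are made up for the example.
  private def reduceResourceMapExample: ResourceMap = {
    val fromValuesXml: ResourceMap = emptyResourceMap + ("string" -> List("app_name"))
    val fromResDirs: ResourceMap = emptyResourceMap + ("string" -> List("title")) + ("color" -> List("accent"))
    // result == Map("string" -> List("app_name", "title"), "color" -> List("accent"))
    reduceResourceMap(Seq(fromValuesXml, fromResDirs))
  }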
def attributeText(n: xml.Node, attr: String): Option[String] =
n.attribute(attr).flatMap(_.headOption).map(_.text)
def processValuesXml(resdirs: Seq[File], s: TaskStreams): ResourceMap = {
val valuesxmls = resdirs flatMap { d => d * "values*" * "*.xml" get }
val rms = valuesxmls.map { xml =>
val values = XML.loadFile(xml)
val items = values \ "item"
val itemEntries = items.flatMap { node =>
(for {
name <- attributeText(node, "name")
typ <- attributeText(node, "type").filter(itemTypes).orElse(
attributeText(node, "format").flatMap(formatTypes.get))
} yield (typ, name)).toSeq
}
val itemMap = itemEntries.foldLeft(emptyResourceMap) { case (m, (t,n)) =>
m + ((t,n :: m(t)))
}
def foldKey(key: String): (ResourceMap,scala.xml.Node) => ResourceMap = (m,node) => {
node.attribute("name").flatMap(_.headOption).fold(m)(n => m + ((key,n.text :: m(key))))
}
def foldNodes(in: ResourceMap, key: String): ResourceMap = {
(values \ key).foldLeft(in)(foldKey(key))
}
List("string", "string-array", "array", "plurals", "integer",
"integer-array", "bool", "attr", "color", "dimen", "style"
).foldLeft(itemMap)(foldNodes)
}
reduceResourceMap(rms)
}
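  // --- Hedged illustration (added; not part of the original plugin source) ---
  // Example of how processValuesXml is expected to bucket one values.xml file
  // (the resource names are made up):
  //   <resources>
  //     <string name="app_name">Demo</string>
  //     <item name="half_margin" type="dimen">8dp</item>
  //   </resources>
  // contributes "string" -> List("app_name") and "dimen" -> List("half_margin")
  // to the resulting ResourceMap.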
val resdirTypes = List(
"anim",
"animator",
"color",
"drawable",
"interpolator",
"menu",
"mipmap",
"raw",
"transition",
"xml"
)
def processResourceTypeDirs(resdirs: Seq[File], s: TaskStreams): ResourceMap = {
val rms2 = for {
res <- resdirs
restype <- resdirTypes
} yield restype ->
(res * s"$restype*" * "*").get.map(_.getName.takeWhile(_ != '.')).toList.filter(_.nonEmpty)
rms2.foldLeft(emptyResourceMap) { case (m, (t, xs)) => m + (t -> (m(t) ++ xs)) }
}
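  // --- Hedged illustration (added; not part of the original plugin source) ---
  // processResourceTypeDirs buckets file-based resources by their type
  // directory, e.g. (paths are made up):
  //   res/drawable-hdpi/icon.png  -> "drawable" -> List("icon")
  //   res/menu/main.xml           -> "menu"     -> List("main")
  // File extensions are stripped via takeWhile(_ != '.').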
}
|
dant3/android-sdk-plugin
|
src/resources.scala
|
Scala
|
bsd-3-clause
| 20,902
|
/*
* Copyright 2016 Dennis Vriend
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package akka.persistence.jdbc.query.journal
import akka.actor.ExtendedActorSystem
import akka.persistence.query.ReadJournalProvider
import com.typesafe.config.Config
class JdbcReadJournalProvider(system: ExtendedActorSystem, config: Config) extends ReadJournalProvider {
override val scaladslReadJournal = new scaladsl.JdbcReadJournal(config)(system)
override val javadslReadJournal = new javadsl.JdbcReadJournal(scaladslReadJournal)
}
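// --- Hedged usage sketch (added; not part of the original source) ---
// A read journal provider like this is normally looked up through Akka's
// PersistenceQuery extension. The config path "jdbc-read-journal" and the
// available queries are assumptions based on the plugin's typical setup:
//
//   import akka.actor.ActorSystem
//   import akka.persistence.query.PersistenceQuery
//
//   val system = ActorSystem("example")
//   val readJournal = PersistenceQuery(system)
//     .readJournalFor[scaladsl.JdbcReadJournal]("jdbc-read-journal")
//   // e.g. readJournal.currentPersistenceIds() -- exact query set depends on the version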
|
wwwiiilll/akka-persistence-jdbc
|
src/main/scala/akka/persistence/jdbc/query/journal/JdbcReadJournalProvider.scala
|
Scala
|
apache-2.0
| 1,041
|
package com.twitter.util
import com.twitter.conversions.DurationOps._
import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.ExecutionException
import java.util.concurrent.{Future => JFuture}
import java.util.concurrent.atomic.AtomicInteger
import org.mockito.ArgumentMatchers.any
import org.mockito.Mockito.never
import org.mockito.Mockito.times
import org.mockito.Mockito.verify
import org.mockito.Mockito.when
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.scalacheck.Arbitrary
import org.scalacheck.Gen
import org.scalatestplus.mockito.MockitoSugar
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
import scala.jdk.CollectionConverters._
import scala.runtime.NonLocalReturnControl
import scala.util.Random
import scala.util.control.ControlThrowable
import scala.util.control.NonFatal
import org.scalatest.wordspec.AnyWordSpec
private object FutureTest {
def await[A](f: Future[A], timeout: Duration = 5.seconds): A =
Await.result(f, timeout)
sealed trait Result
case class Ret(num: Int) extends Result
case class Thr(exn: Exception) extends Result
case object Nvr extends Result
def satisfy(p: Promise[Int], result: Result): Unit = result match {
case Nvr =>
case Thr(exn) => p() = Throw(exn)
case Ret(value) => p() = Return(value)
}
def explain(left: Result, right: Result, leftFirst: Boolean): String = {
val leftOrRight = if (leftFirst) "left" else "right"
s": $left.join($right) where $leftOrRight is satisfied first has an unexpected result"
}
}
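// --- Hedged illustration (added; not part of the original twitter/util test) ---
// satisfy() is how the join tests below drive each side of a join into a
// terminal (or deliberately unsatisfied) state, roughly:
//   val p1, p2 = new Promise[Int]
//   FutureTest.satisfy(p1, FutureTest.Ret(1))  // p1 now holds Return(1)
//   FutureTest.satisfy(p2, FutureTest.Nvr)     // p2 is deliberately left unsatisfied
// explain() just renders a readable failure message for a given combination.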
class FutureTest extends AnyWordSpec with MockitoSugar with ScalaCheckDrivenPropertyChecks {
import FutureTest._
implicit class FutureMatcher[A](future: Future[A]) {
def mustProduce(expected: Try[A]): Unit = {
expected match {
case Throw(ex) =>
val t = intercept[Throwable] {
await(future)
}
assert(t == ex)
case Return(v) =>
assert(await(future) == v)
}
}
}
private object FailingTimer extends Timer {
def scheduleOnce(when: Time)(f: => Unit): TimerTask =
throw new Exception("schedule called")
def schedulePeriodically(when: Time, period: Duration)(f: => Unit): TimerTask =
throw new Exception("schedule called")
def stop(): Unit = ()
}
class HandledMonitor extends Monitor {
var handled: Throwable = _
def handle(exc: Throwable): Boolean = {
handled = exc
true
}
}
class FatalException extends ControlThrowable
trait MkConst {
def apply[A](result: Try[A]): Future[A]
def value[A](a: A): Future[A] = this(Return(a))
def exception[A](exc: Throwable): Future[A] = this(Throw(exc))
}
def test(name: String, const: MkConst): Unit = {
s"object Future ($name)" when {
"times" should {
trait TimesHelper {
val queue = new ConcurrentLinkedQueue[Promise[Unit]]
var complete = false
var failure = false
var ninterrupt = 0
val iteration: Future[Unit] = Future.times(3) {
val promise = new Promise[Unit]
promise.setInterruptHandler { case _ => ninterrupt += 1 }
queue add promise
promise
}
iteration
.onSuccess { _ => complete = true }
.onFailure { _ => failure = true }
assert(!complete)
assert(!failure)
}
"when everything succeeds" in {
new TimesHelper {
queue.poll().setDone()
assert(!complete)
assert(!failure)
queue.poll().setDone()
assert(!complete)
assert(!failure)
queue.poll().setDone()
assert(complete)
assert(!failure)
}
}
"when some succeed and some fail" in {
new TimesHelper {
queue.poll().setDone()
assert(!complete)
assert(!failure)
queue.poll().setException(new Exception(""))
assert(!complete)
assert(failure)
}
}
"when interrupted" in {
new TimesHelper {
assert(ninterrupt == 0)
iteration.raise(new Exception)
for (i <- 1 to 3) {
assert(ninterrupt == i)
queue.poll().setDone()
}
}
}
}
"when" in {
var i = 0
await {
Future.when(false) {
Future { i += 1 }
}
}
assert(i == 0)
await {
Future.when(true) {
Future { i += 1 }
}
}
assert(i == 1)
}
"whileDo" should {
trait WhileDoHelper {
var i = 0
val queue = new ConcurrentLinkedQueue[HandledPromise[Unit]]
var complete = false
var failure = false
val iteration: Future[Unit] = Future.whileDo(i < 3) {
i += 1
val promise = new HandledPromise[Unit]
queue.add(promise)
promise
}
iteration
.onSuccess { _ => complete = true }
.onFailure { _ => failure = true }
assert(!complete)
assert(!failure)
}
"when everything succeeds" in {
new WhileDoHelper {
queue.poll().setDone()
assert(!complete)
assert(!failure)
queue.poll().setDone()
assert(!complete)
assert(!failure)
queue.poll().setDone()
assert(complete)
assert(!failure)
}
}
"when some succeed and some fail" in {
new WhileDoHelper {
queue.poll().setDone()
assert(!complete)
assert(!failure)
queue.poll().setException(new Exception(""))
assert(!complete)
assert(failure)
}
}
"when interrupted" in {
new WhileDoHelper {
assert(!queue.asScala.exists(_.handled.isDefined))
iteration.raise(new Exception)
assert(queue.asScala.forall(_.handled.isDefined))
}
}
}
"proxyTo" should {
"reject satisfied promises" in {
val str = "um um excuse me um"
val p1 = new Promise[String]()
p1.update(Return(str))
val p2 = new Promise[String]()
val ex = intercept[IllegalStateException] { p2.proxyTo(p1) }
assert(ex.getMessage.contains(str))
}
"proxies success" in {
val p1 = new Promise[Int]()
val p2 = new Promise[Int]()
p2.proxyTo(p1)
p2.update(Return(5))
assert(5 == await(p1))
assert(5 == await(p2))
}
"proxies failure" in {
val p1 = new Promise[Int]()
val p2 = new Promise[Int]()
p2.proxyTo(p1)
val t = new RuntimeException("wurmp")
p2.update(Throw(t))
val ex1 = intercept[RuntimeException] { await(p1) }
assert(ex1.getMessage == t.getMessage)
val ex2 = intercept[RuntimeException] { await(p2) }
assert(ex2.getMessage == t.getMessage)
}
}
"batched" should {
implicit val timer: MockTimer = new MockTimer
val result = Seq(4, 5, 6)
"execute after threshold is reached" in {
val m = mock[Any => Future[Seq[Int]]]
val f = mock[scala.collection.Seq[Int] => Future[Seq[Int]]]
when(f.compose(any())).thenReturn(m)
val batcher = Future.batched(3)(f)
when(m.apply(Iterable(1, 2, 3))).thenReturn(Future.value(result))
batcher(1)
verify(m, never()).apply(any[Seq[Int]])
batcher(2)
verify(m, never()).apply(any[Seq[Int]])
batcher(3)
verify(m).apply(Iterable(1, 2, 3))
}
"execute after bufSizeFraction threshold is reached" in {
val m = mock[Any => Future[Seq[Int]]]
val f = mock[scala.collection.Seq[Int] => Future[Seq[Int]]]
when(f.compose(any())).thenReturn(m)
val batcher = Future.batched(3, sizePercentile = 0.67f)(f)
when(m.apply(Iterable(1, 2, 3))).thenReturn(Future.value(result))
batcher(1)
verify(m, never()).apply(any[Seq[Int]])
batcher(2)
verify(m).apply(Iterable(1, 2))
}
"treat bufSizeFraction return value < 0.0f as 1" in {
val m = mock[Any => Future[Seq[Int]]]
val f = mock[scala.collection.Seq[Int] => Future[Seq[Int]]]
when(f.compose(any())).thenReturn(m)
val batcher = Future.batched(3, sizePercentile = 0.4f)(f)
when(m.apply(Iterable(1, 2, 3))).thenReturn(Future.value(result))
batcher(1)
verify(m).apply(Iterable(1))
}
"treat bufSizeFraction return value > 1.0f should return maxSizeThreshold" in {
val m = mock[Any => Future[Seq[Int]]]
val f = mock[scala.collection.Seq[Int] => Future[Seq[Int]]]
when(f.compose(any())).thenReturn(m)
val batcher = Future.batched(3, sizePercentile = 1.3f)(f)
when(m.apply(Iterable(1, 2, 3))).thenReturn(Future.value(result))
batcher(1)
verify(m, never()).apply(any[Seq[Int]])
batcher(2)
verify(m, never()).apply(any[Seq[Int]])
batcher(3)
verify(m).apply(Iterable(1, 2, 3))
}
"execute after time threshold" in {
val m = mock[Any => Future[Seq[Int]]]
val f = mock[scala.collection.Seq[Int] => Future[Seq[Int]]]
when(f.compose(any())).thenReturn(m)
val batcher = Future.batched(3, 3.seconds)(f)
Time.withCurrentTimeFrozen { control =>
when(m(Iterable(1))).thenReturn(Future.value(Seq(4)))
batcher(1)
verify(m, never()).apply(any[Seq[Int]])
control.advance(1.second)
timer.tick()
verify(m, never()).apply(any[Seq[Int]])
control.advance(1.second)
timer.tick()
verify(m, never()).apply(any[Seq[Int]])
control.advance(1.second)
timer.tick()
verify(m).apply(Iterable(1))
}
}
"only execute once if both are reached" in {
val m = mock[Any => Future[Seq[Int]]]
val f = mock[scala.collection.Seq[Int] => Future[Seq[Int]]]
when(f.compose(any())).thenReturn(m)
val batcher = Future.batched(3)(f)
Time.withCurrentTimeFrozen { control =>
when(m(Iterable(1, 2, 3))).thenReturn(Future.value(result))
batcher(1)
batcher(2)
batcher(3)
control.advance(10.seconds)
timer.tick()
verify(m).apply(Iterable(1, 2, 3))
}
}
"execute when flushBatch is called" in {
val m = mock[Any => Future[Seq[Int]]]
val f = mock[scala.collection.Seq[Int] => Future[Seq[Int]]]
when(f.compose(any())).thenReturn(m)
val batcher = Future.batched(4)(f)
batcher(1)
batcher(2)
batcher(3)
batcher.flushBatch()
verify(m).apply(Iterable(1, 2, 3))
}
"only execute for remaining items when flushBatch is called after size threshold is reached" in {
val m = mock[Any => Future[Seq[Int]]]
val f = mock[scala.collection.Seq[Int] => Future[Seq[Int]]]
when(f.compose(any())).thenReturn(m)
val batcher = Future.batched(4)(f)
batcher(1)
batcher(2)
batcher(3)
batcher(4)
batcher(5)
verify(m, times(1)).apply(Iterable(1, 2, 3, 4))
batcher.flushBatch()
verify(m, times(1)).apply(Iterable(5))
}
"only execute once when time threshold is reached after flushBatch is called" in {
val m = mock[Any => Future[Seq[Int]]]
val f = mock[scala.collection.Seq[Int] => Future[Seq[Int]]]
when(f.compose(any())).thenReturn(m)
val batcher = Future.batched(4, 3.seconds)(f)
Time.withCurrentTimeFrozen { control =>
batcher(1)
batcher(2)
batcher(3)
batcher.flushBatch()
control.advance(10.seconds)
timer.tick()
verify(m, times(1)).apply(Iterable(1, 2, 3))
}
}
"only execute once when time threshold is reached before flushBatch is called" in {
val m = mock[Any => Future[Seq[Int]]]
val f = mock[scala.collection.Seq[Int] => Future[Seq[Int]]]
when(f.compose(any())).thenReturn(m)
val batcher = Future.batched(4, 3.seconds)(f)
Time.withCurrentTimeFrozen { control =>
batcher(1)
batcher(2)
batcher(3)
control.advance(10.seconds)
timer.tick()
batcher.flushBatch()
verify(m, times(1)).apply(Iterable(1, 2, 3))
}
}
"propagates results" in {
val m = mock[Any => Future[Seq[Int]]]
val f = mock[scala.collection.Seq[Int] => Future[Seq[Int]]]
when(f.compose(any())).thenReturn(m)
val batcher = Future.batched(3)(f)
Time.withCurrentTimeFrozen { _ =>
when(m(Iterable(1, 2, 3))).thenReturn(Future.value(result))
val res1 = batcher(1)
assert(!res1.isDefined)
val res2 = batcher(2)
assert(!res2.isDefined)
val res3 = batcher(3)
assert(res1.isDefined)
assert(res2.isDefined)
assert(res3.isDefined)
assert(await(res1) == 4)
assert(await(res2) == 5)
assert(await(res3) == 6)
verify(m).apply(Iterable(1, 2, 3))
}
}
"not block other batches" in {
val m = mock[Any => Future[Seq[Int]]]
val f = mock[scala.collection.Seq[Int] => Future[Seq[Int]]]
when(f.compose(any())).thenReturn(m)
val batcher = Future.batched(3)(f)
Time.withCurrentTimeFrozen { _ =>
val blocker = new Promise[Unit]
val thread = new Thread {
override def run(): Unit = {
when(m(result)).thenReturn(Future.value(Seq(7, 8, 9)))
batcher(4)
batcher(5)
batcher(6)
verify(m).apply(result)
blocker.setValue(())
}
}
when(m(Seq(1, 2, 3))).thenAnswer {
new Answer[Future[Seq[Int]]] {
def answer(invocation: InvocationOnMock): Future[Seq[Int]] = {
thread.start()
await(blocker)
Future.value(result)
}
}
}
batcher(1)
batcher(2)
batcher(3)
verify(m).apply(Iterable(1, 2, 3))
}
}
"swallow exceptions" in {
val m = mock[Any => Future[Seq[Int]]]
val f = mock[scala.collection.Seq[Int] => Future[Seq[Int]]]
when(f.compose(any())).thenReturn(m)
val batcher = Future.batched(3)(f)
when(m(Iterable(1, 2, 3))).thenAnswer {
new Answer[Unit] {
def answer(invocation: InvocationOnMock): Unit = {
throw new Exception
}
}
}
batcher(1)
batcher(2)
batcher(3) // Success here implies no exception was thrown.
}
}
"interruptible" should {
"properly ignore the underlying future on interruption" in {
val p = Promise[Unit]()
val i = p.interruptible()
val e = new Exception
i.raise(e)
p.setDone()
assert(p.poll.contains(Return(())))
assert(i.poll.contains(Throw(e)))
}
"respect the underlying future" in {
val p = Promise[Unit]()
val i = p.interruptible()
p.setDone()
assert(p.poll.contains(Return(())))
assert(i.poll.contains(Return(())))
}
"do nothing for const" in {
val f = const.value(())
val i = f.interruptible()
i.raise(new Exception())
assert(f.poll.contains(Return(())))
assert(i.poll.contains(Return(())))
}
}
"traverseSequentially" should {
class TraverseTestSpy() {
var goWasCalled = false
var promise: Promise[Int] = Promise[Int]()
val go: () => Promise[Int] = () => {
goWasCalled = true
promise
}
}
"execute futures in order" in {
val first = new TraverseTestSpy()
val second = new TraverseTestSpy()
val events = Seq(first.go, second.go)
val results = Future.traverseSequentially(events)(f => f())
        // At this point, none of the promises
        // have been fulfilled, so only the first function
        // should have been called
assert(first.goWasCalled)
assert(!second.goWasCalled)
// once the first promise completes, the next
// function in the sequence should be executed
first.promise.setValue(1)
assert(second.goWasCalled)
        // finally, the second promise is fulfilled so
        // we can Await on and check the results
second.promise.setValue(2)
assert(await(results) == Seq(1, 2))
}
"return with exception when the first future throws" in {
val first = new TraverseTestSpy()
val second = new TraverseTestSpy()
val events = Seq(first.go, second.go)
val results = Future.traverseSequentially(events)(f => f())
first.promise.setException(new Exception)
intercept[Exception] { await(results) }
// Since first returned an exception, second should
// never have been called
assert(!second.goWasCalled)
}
}
"collect" should {
trait CollectHelper {
val p0, p1 = new HandledPromise[Int]
val f: Future[Seq[Int]] = Future.collect(Seq(p0, p1))
assert(!f.isDefined)
}
"only return when both futures complete" in {
new CollectHelper {
p0() = Return(1)
assert(!f.isDefined)
p1() = Return(2)
assert(f.isDefined)
assert(await(f) == Seq(1, 2))
}
}
"return with exception if the first future throws" in {
new CollectHelper {
p0() = Throw(new Exception)
intercept[Exception] { await(f) }
}
}
"return with exception if the second future throws" in {
new CollectHelper {
p0() = Return(1)
assert(!f.isDefined)
p1() = Throw(new Exception)
intercept[Exception] { await(f) }
}
}
"propagate interrupts" in {
new CollectHelper {
val ps = Seq(p0, p1)
assert(ps.count(_.handled.isDefined) == 0)
f.raise(new Exception)
assert(ps.count(_.handled.isDefined) == 2)
}
}
"accept maps of futures" in {
val map = Map(
"1" -> Future.value("1"),
"2" -> Future.value("2")
)
assert(await(Future.collect(map)) == Map("1" -> "1", "2" -> "2"))
}
"work correctly if the given map is empty" in {
val map = Map.empty[String, Future[String]]
assert(await(Future.collect(map)).isEmpty)
}
"return future exception if one of the map values is future exception" in {
val map = Map(
"1" -> Future.value("1"),
"2" -> Future.exception(new Exception)
)
intercept[Exception] {
await(Future.collect(map))
}
}
}
"collectToTry" should {
trait CollectToTryHelper {
val p0, p1 = new HandledPromise[Int]
val f: Future[Seq[Try[Int]]] = Future.collectToTry(Seq(p0, p1))
assert(!f.isDefined)
}
"only return when both futures complete" in {
new CollectToTryHelper {
p0() = Return(1)
assert(!f.isDefined)
p1() = Return(2)
assert(f.isDefined)
assert(await(f) == Seq(Return(1), Return(2)))
}
}
"be undefined if the first future throws and the second is undefined" in {
new CollectToTryHelper {
p0() = Throw(new Exception)
assert(!f.isDefined)
}
}
"return both results if the first is defined second future throws" in {
new CollectToTryHelper {
val ex = new Exception
p0() = Return(1)
assert(!f.isDefined)
p1() = Throw(ex)
assert(await(f) == Seq(Return(1), Throw(ex)))
}
}
"propagate interrupts" in {
new CollectToTryHelper {
val ps = Seq(p0, p1)
assert(ps.count(_.handled.isDefined) == 0)
f.raise(new Exception)
assert(ps.count(_.handled.isDefined) == 2)
}
}
}
"propagate locals, restoring original context" in {
val local = new Local[Int]
val f = const.value(111)
var ran = 0
local() = 1010
f.ensure {
assert(local().contains(1010))
local() = 1212
f.ensure {
assert(local().contains(1212))
local() = 1313
ran += 1
}
assert(local().contains(1212))
ran += 1
}
assert(local().contains(1010))
assert(ran == 2)
}
"delay execution" in {
val f = const.value(111)
var count = 0
f.onSuccess { _ =>
assert(count == 0)
f.ensure {
assert(count == 1)
count += 1
}
assert(count == 0)
count += 1
}
assert(count == 2)
}
"are monitored" in {
val inner = const.value(123)
val exc = new Exception("a raw exception")
val f = Future.monitored {
inner.ensure { throw exc }
}
assert(f.poll.contains(Throw(exc)))
}
}
s"Future ($name)" should {
"select" which {
trait SelectHelper {
var nhandled = 0
val p0, p1 = new HandledPromise[Int]
val f: Future[Int] = p0.select(p1)
assert(!f.isDefined)
}
"select the first [result] to complete" in {
new SelectHelper {
p0() = Return(1)
p1() = Return(2)
assert(await(f) == 1)
}
}
"select the first [exception] to complete" in {
new SelectHelper {
p0() = Throw(new Exception)
p1() = Return(2)
intercept[Exception] { await(f) }
}
}
"propagate interrupts" in {
new SelectHelper {
val ps = Seq(p0, p1)
assert(!ps.exists(_.handled.isDefined))
f.raise(new Exception)
assert(ps.forall(_.handled.isDefined))
}
}
}
def testJoin(
label: String,
joiner: ((Future[Int], Future[Int]) => Future[(Int, Int)])
): Unit = {
s"join($label)" should {
trait JoinHelper {
val p0 = new HandledPromise[Int]
val p1 = new HandledPromise[Int]
val f = joiner(p0, p1)
assert(!f.isDefined)
}
for {
left <- Seq(Ret(1), Thr(new Exception), Nvr)
right <- Seq(Ret(2), Thr(new Exception), Nvr)
leftFirst <- Seq(true, false)
} {
new JoinHelper {
if (leftFirst) {
satisfy(p0, left)
satisfy(p1, right)
} else {
satisfy(p1, right)
satisfy(p0, left)
}
(left, right) match {
// Two Throws are special because leftFirst determines the
// exception (e0 or e1). Otherwise, every other case with a
// Throw will result in just one exception.
case (Thr(e0), Thr(e1)) =>
val actual = intercept[Exception] { await(f) }
assert(actual == (if (leftFirst) e0 else e1), explain(left, right, leftFirst))
case (_, Thr(exn)) =>
val actual = intercept[Exception] { await(f) }
assert(actual == exn, explain(left, right, leftFirst))
case (Thr(exn), _) =>
val actual = intercept[Exception] { await(f) }
assert(actual == exn, explain(left, right, leftFirst))
case (Nvr, Ret(_)) | (Ret(_), Nvr) | (Nvr, Nvr) => assert(!f.isDefined)
case (Ret(a), Ret(b)) =>
val expected: (Int, Int) = (a, b)
val result = await(f)
val isEqual = result == expected
assert(isEqual, explain(left, right, leftFirst))
}
}
}
"propagate interrupts" in {
new JoinHelper {
assert(p0.handled.isEmpty)
assert(p1.handled.isEmpty)
val exc = new Exception
f.raise(exc)
assert(p0.handled.contains(exc))
assert(p1.handled.contains(exc))
}
}
}
}
testJoin("f join g", _ join _)
testJoin("Future.join(f, g)", Future.join(_, _))
def testJavaFuture(methodName: String, fn: Future[Int] => JFuture[_ <: Int]): Unit = {
methodName should {
"return the same thing as our Future when initialized" which {
val f = const.value(1)
val jf = fn(f)
assert(await(f) == jf.get())
"must both be done" in {
assert(f.isDefined)
assert(jf.isDone)
assert(!jf.isCancelled)
}
}
"return the same thing as our Future when set later" which {
val f = new Promise[Int]
val jf = fn(f)
f.setValue(1)
assert(await(f) == jf.get())
"must both be done" in {
assert(f.isDefined)
assert(jf.isDone)
assert(!jf.isCancelled)
}
}
"java future should throw an exception when failed later" in {
val f = new Promise[Int]
val jf = fn(f)
val e = new RuntimeException()
f.setException(e)
val actual = intercept[ExecutionException] { jf.get() }
val cause = intercept[RuntimeException] { throw actual.getCause }
assert(cause == e)
}
"java future should throw an exception when failed early" in {
val f = new Promise[Int]
val e = new RuntimeException()
f.setException(e)
val jf = fn(f)
val actual = intercept[ExecutionException] { jf.get() }
val cause = intercept[RuntimeException] { throw actual.getCause }
assert(cause == e)
}
"interrupt Future when cancelled" in {
val f = new HandledPromise[Int]
val jf = fn(f)
assert(f.handled.isEmpty)
jf.cancel(true)
intercept[java.util.concurrent.CancellationException] {
throw f.handled.get
}
}
}
}
testJavaFuture("toJavaFuture", { (f: Future[Int]) => f.toJavaFuture })
testJavaFuture("toCompletableFuture", { (f: Future[Int]) => f.toCompletableFuture })
"monitored" should {
trait MonitoredHelper {
val inner = new HandledPromise[Int]
val exc = new Exception("some exception")
}
"catch raw exceptions (direct)" in {
new MonitoredHelper {
val f = Future.monitored {
throw exc
inner
}
assert(f.poll.contains(Throw(exc)))
}
}
"catch raw exceptions (indirect), interrupting computation" in {
new MonitoredHelper {
val inner1 = new Promise[Int]
var ran = false
val f: Future[Int] = Future.monitored {
inner1
.ensure {
// Note that these are sequenced so that interrupts
// will be delivered before inner's handler is cleared.
ran = true
try {
inner.update(Return(1))
} catch {
case _: Throwable => fail()
}
}
.ensure {
throw exc
}
inner
}
assert(!ran)
assert(f.poll.isEmpty)
assert(inner.handled.isEmpty)
inner1.update(Return(1))
assert(ran)
assert(inner.isDefined)
assert(f.poll.contains(Throw(exc)))
assert(inner.handled.contains(exc))
}
}
"link" in {
new MonitoredHelper {
val f: Future[Int] = Future.monitored { inner }
assert(inner.handled.isEmpty)
f.raise(exc)
assert(inner.handled.contains(exc))
}
}
// we only know this works as expected when running with JDK 8
if (System.getProperty("java.version").startsWith("1.8"))
"doesn't leak the underlying promise after completion" in {
new MonitoredHelper {
val inner1 = new Promise[String]
val inner2 = new Promise[String]
val f: Future[String] = Future.monitored { inner2.ensure(()); inner1 }
val s: String = "." * 1024
val sSize: Long = ObjectSizeCalculator.getObjectSize(s)
inner1.setValue(s)
val inner2Size: Long = ObjectSizeCalculator.getObjectSize(inner2)
assert(inner2Size < sSize)
}
}
}
}
s"Promise ($name)" should {
"apply" which {
"when we're inside of a respond block (without deadlocking)" in {
val f = Future(1)
var didRun = false
f.foreach { _ =>
f mustProduce Return(1)
didRun = true
}
assert(didRun)
}
}
"map" which {
"when it's all chill" in {
val f = Future(1).map { x => x + 1 }
assert(await(f) == 2)
}
"when there's a problem in the passed in function" in {
val e = new Exception
val f = Future(1).map { x =>
throw e
x + 1
}
val actual = intercept[Exception] {
await(f)
}
assert(actual == e)
}
}
"transform" should {
val e = new Exception("rdrr")
"values" in {
const
.value(1)
.transform {
case Return(v) => const.value(v + 1)
case Throw(_) => const.value(0)
}
.mustProduce(Return(2))
}
"exceptions" in {
const
.exception(e)
.transform {
case Return(_) => const.value(1)
case Throw(_) => const.value(0)
}
.mustProduce(Return(0))
}
"exceptions thrown during transformation" in {
const
.value(1)
.transform {
case Return(_) => const.value(throw e)
case Throw(_) => const.value(0)
}
.mustProduce(Throw(e))
}
"non local returns executed during transformation" in {
def ret(): String = {
val f = const.value(1).transform {
case Return(_) =>
val fn = { () => return "OK" }
fn()
Future.value(ret())
case Throw(_) => const.value(0)
}
assert(f.poll.isDefined)
val e = intercept[FutureNonLocalReturnControl] {
f.poll.get.get()
}
val g = e.getCause match {
case t: NonLocalReturnControl[_] => t.asInstanceOf[NonLocalReturnControl[String]]
case _ =>
fail()
}
assert(g.value == "OK")
"bleh"
}
ret()
}
"fatal exceptions thrown during transformation" in {
val e = new FatalException()
val actual = intercept[FatalException] {
const.value(1).transform {
case Return(_) => const.value(throw e)
case Throw(_) => const.value(0)
}
}
assert(actual == e)
}
"monitors fatal exceptions" in {
val m = new HandledMonitor()
val exc = new FatalException()
assert(m.handled == null)
val actual = intercept[FatalException] {
Monitor.using(m) {
const.value(1).transform { _ => throw exc }
}
}
assert(actual == exc)
assert(m.handled == exc)
}
}
"transformedBy" should {
val e = new Exception("rdrr")
"flatMap" in {
const
.value(1)
.transformedBy(new FutureTransformer[Int, Int] {
override def flatMap(value: Int): Future[Int] = const.value(value + 1)
override def rescue(t: Throwable): Future[Int] = const.value(0)
})
.mustProduce(Return(2))
}
"rescue" in {
const
.exception(e)
.transformedBy(new FutureTransformer[Int, Int] {
override def flatMap(value: Int): Future[Int] = const.value(value + 1)
override def rescue(t: Throwable): Future[Int] = const.value(0)
})
.mustProduce(Return(0))
}
"exceptions thrown during transformation" in {
const
.value(1)
.transformedBy(new FutureTransformer[Int, Int] {
override def flatMap(value: Int): Future[Int] = throw e
override def rescue(t: Throwable): Future[Int] = const.value(0)
})
.mustProduce(Throw(e))
}
"map" in {
const
.value(1)
.transformedBy(new FutureTransformer[Int, Int] {
override def map(value: Int): Int = value + 1
override def handle(t: Throwable): Int = 0
})
.mustProduce(Return(2))
}
"handle" in {
const
.exception(e)
.transformedBy(new FutureTransformer[Int, Int] {
override def map(value: Int): Int = value + 1
override def handle(t: Throwable): Int = 0
})
.mustProduce(Return(0))
}
}
def testSequence(
which: String,
seqop: (Future[Unit], () => Future[Unit]) => Future[Unit]
): Unit = {
which when {
"successes" should {
"interruption of the produced future" which {
"before the antecedent Future completes, propagates back to the antecedent" in {
val f1, f2 = new HandledPromise[Unit]
val f3 = seqop(f1, () => f2)
assert(f1.handled.isEmpty)
assert(f2.handled.isEmpty)
f3.raise(new Exception)
assert(f1.handled.isDefined)
assert(f2.handled.isEmpty)
f1() = Return.Unit
assert(f2.handled.isDefined)
}
"after the antecedent Future completes, does not propagate back to the antecedent" in {
val f1, f2 = new HandledPromise[Unit]
val f3 = seqop(f1, () => f2)
assert(f1.handled.isEmpty)
assert(f2.handled.isEmpty)
f1() = Return.Unit
f3.raise(new Exception)
assert(f1.handled.isEmpty)
assert(f2.handled.isDefined)
}
"forward through chains" in {
val f1, f2 = new Promise[Unit]
val exc = new Exception
val f3 = new Promise[Unit]
var didInterrupt = false
f3.setInterruptHandler {
case `exc` => didInterrupt = true
}
val f4 = seqop(f1, () => seqop(f2, () => f3))
f4.raise(exc)
assert(!didInterrupt)
f1.setDone()
assert(!didInterrupt)
f2.setDone()
assert(didInterrupt)
}
}
}
"failures" should {
val e = new Exception
val g = seqop(Future[Unit](throw e), () => Future.Done)
"apply" in {
val actual = intercept[Exception] { await(g) }
assert(actual == e)
}
"respond" in {
g mustProduce Throw(e)
}
"when there is an exception in the passed in function" in {
val e = new Exception
val f = seqop(Future.Done, () => throw e)
val actual = intercept[Exception] { await(f) }
assert(actual == e)
}
}
}
}
testSequence(
"flatMap",
(a, next) => a.flatMap { _ => next() }
)
testSequence("before", (a, next) => a.before { next() })
"flatMap (values)" should {
val f = Future(1).flatMap { x => Future(x + 1) }
"apply" in {
assert(await(f) == 2)
}
"respond" in {
f mustProduce Return(2)
}
}
"flatten" should {
"successes" in {
val f = Future(Future(1))
f.flatten mustProduce Return(1)
}
"shallow failures" in {
val e = new Exception
val f: Future[Future[Int]] = const.exception(e)
f.flatten mustProduce Throw(e)
}
"deep failures" in {
val e = new Exception
val f: Future[Future[Int]] = const.value(const.exception(e))
f.flatten mustProduce Throw(e)
}
"interruption" in {
val f1 = new HandledPromise[Future[Int]]
val f2 = new HandledPromise[Int]
val f = f1.flatten
assert(f1.handled.isEmpty)
assert(f2.handled.isEmpty)
f.raise(new Exception)
f1.handled match {
case Some(_) =>
case None => fail()
}
assert(f2.handled.isEmpty)
f1() = Return(f2)
f2.handled match {
case Some(_) =>
case None => fail()
}
}
}
"rescue" should {
val e = new Exception
"successes" which {
val f = Future(1).rescue { case _ => Future(2) }
"apply" in {
assert(await(f) == 1)
}
"respond" in {
f mustProduce Return(1)
}
}
"failures" which {
val g = Future[Int](throw e).rescue { case _ => Future(2) }
"apply" in {
assert(await(g) == 2)
}
"respond" in {
g mustProduce Return(2)
}
"when the error handler errors" in {
val g = Future[Int](throw e).rescue { case x => throw x; Future(2) }
val actual = intercept[Exception] { await(g) }
assert(actual == e)
}
}
"interruption of the produced future" which {
"before the antecedent Future completes, propagates back to the antecedent" in {
val f1, f2 = new HandledPromise[Int]
val f = f1.rescue { case _ => f2 }
assert(f1.handled.isEmpty)
assert(f2.handled.isEmpty)
f.raise(new Exception)
f1.handled match {
case Some(_) =>
case None => fail()
}
assert(f2.handled.isEmpty)
f1() = Throw(new Exception)
f2.handled match {
case Some(_) =>
case None => fail()
}
}
"after the antecedent Future completes, does not propagate back to the antecedent" in {
val f1, f2 = new HandledPromise[Int]
val f = f1.rescue { case _ => f2 }
assert(f1.handled.isEmpty)
assert(f2.handled.isEmpty)
f1() = Throw(new Exception)
f.raise(new Exception)
assert(f1.handled.isEmpty)
f2.handled match {
case Some(_) =>
case None => fail()
}
}
}
}
"foreach" in {
var wasCalledWith: Option[Int] = None
val f = Future(1)
f.foreach { i => wasCalledWith = Some(i) }
assert(wasCalledWith.contains(1))
}
"respond" should {
"when the result has arrived" in {
var wasCalledWith: Option[Int] = None
val f = Future(1)
f.respond {
case Return(i) => wasCalledWith = Some(i)
case Throw(e) => fail(e.toString)
}
assert(wasCalledWith.contains(1))
}
"when the result has not yet arrived it buffers computations" in {
var wasCalledWith: Option[Int] = None
val f = new Promise[Int]
f.foreach { i => wasCalledWith = Some(i) }
assert(wasCalledWith.isEmpty)
f() = Return(1)
assert(wasCalledWith.contains(1))
}
"runs callbacks just once and in order (lifo)" in {
var i, j, k, h = 0
val p = new Promise[Int]
p.ensure {
i = i + j + k + h + 1
}
.ensure {
j = i + j + k + h + 1
}
.ensure {
k = i + j + k + h + 1
}
.ensure {
h = i + j + k + h + 1
}
assert(i == 0)
assert(j == 0)
assert(k == 0)
assert(h == 0)
p.setValue(1)
assert(i == 8)
assert(j == 4)
assert(k == 2)
assert(h == 1)
}
"monitor exceptions" in {
val m = new HandledMonitor()
val exc = new Exception
assert(m.handled == null)
Monitor.using(m) {
const.value(1).ensure { throw exc }
}
assert(m.handled == exc)
}
}
"willEqual" in {
assert(await(const.value(1).willEqual(const.value(1)), 1.second))
}
"Future() handles exceptions" in {
val e = new Exception
val f = Future[Int] { throw e }
val actual = intercept[Exception] { await(f) }
assert(actual == e)
}
"propagate locals" in {
val local = new Local[Int]
val promise0 = new Promise[Unit]
val promise1 = new Promise[Unit]
local() = 1010
val both = promise0.flatMap { _ =>
val local0 = local()
promise1.map { _ =>
val local1 = local()
(local0, local1)
}
}
local() = 123
promise0() = Return.Unit
local() = 321
promise1() = Return.Unit
assert(both.isDefined)
assert(await(both) == ((Some(1010), Some(1010))))
}
"propagate locals across threads" in {
val local = new Local[Int]
val promise = new Promise[Option[Int]]
local() = 123
val done = promise.map { otherValue => (otherValue, local()) }
val t = new Thread {
override def run(): Unit = {
local() = 1010
promise() = Return(local())
}
}
t.run()
t.join()
assert(done.isDefined)
assert(await(done) == ((Some(1010), Some(123))))
}
"poll" should {
trait PollHelper {
val p = new Promise[Int]
}
"when waiting" in {
new PollHelper {
assert(p.poll.isEmpty)
}
}
"when succeeding" in {
new PollHelper {
p.setValue(1)
assert(p.poll.contains(Return(1)))
}
}
"when failing" in {
new PollHelper {
val e = new Exception
p.setException(e)
assert(p.poll.contains(Throw(e)))
}
}
}
val uw = (p: Promise[Int], d: Duration, t: Timer) => {
p.within(d)(t)
}
val ub = (p: Promise[Int], d: Duration, t: Timer) => {
p.by(d.fromNow)(t)
}
Seq(("within", uw), ("by", ub)).foreach {
case (label, use) =>
label should {
"when we run out of time" in {
implicit val timer: Timer = new JavaTimer
val p = new HandledPromise[Int]
intercept[TimeoutException] { await(use(p, 50.milliseconds, timer)) }
timer.stop()
assert(p.handled.isEmpty)
}
"when everything is chill" in {
implicit val timer: Timer = new JavaTimer
val p = new Promise[Int]
p.setValue(1)
assert(await(use(p, 50.milliseconds, timer)) == 1)
timer.stop()
}
"when timeout is forever" in {
// We manage to throw an exception inside
// the scala compiler if we use MockTimer
// here. Sigh.
implicit val timer: Timer = FailingTimer
val p = new Promise[Int]
assert(use(p, Duration.Top, timer) == p)
}
"when future already satisfied" in {
implicit val timer: Timer = new NullTimer
val p = new Promise[Int]
p.setValue(3)
assert(use(p, 1.minute, timer) == p)
}
"interruption" in Time.withCurrentTimeFrozen { _ =>
implicit val timer: Timer = new MockTimer
val p = new HandledPromise[Int]
val f = use(p, 50.milliseconds, timer)
assert(p.handled.isEmpty)
f.raise(new Exception)
p.handled match {
case Some(_) =>
case None => fail()
}
}
}
}
"raiseWithin" should {
"when we run out of time" in {
implicit val timer: Timer = new JavaTimer
val p = new HandledPromise[Int]
intercept[TimeoutException] {
await(p.raiseWithin(50.milliseconds))
}
timer.stop()
p.handled match {
case Some(_) =>
case None => fail()
}
}
"when we run out of time, throw our stuff" in {
implicit val timer: Timer = new JavaTimer
class SkyFallException extends Exception("let the skyfall")
val skyFall = new SkyFallException
val p = new HandledPromise[Int]
intercept[SkyFallException] {
await(p.raiseWithin(50.milliseconds, skyFall))
}
timer.stop()
p.handled match {
case Some(_) =>
case None => fail()
}
assert(p.handled.contains(skyFall))
}
"when we are within timeout, but inner throws TimeoutException, we don't raise" in {
implicit val timer: Timer = new JavaTimer
class SkyFallException extends Exception("let the skyfall")
val skyFall = new SkyFallException
val p = new HandledPromise[Int]
intercept[TimeoutException] {
await(
p.within(20.milliseconds).raiseWithin(50.milliseconds, skyFall)
)
}
timer.stop()
assert(p.handled.isEmpty)
}
"when everything is chill" in {
implicit val timer: Timer = new JavaTimer
val p = new Promise[Int]
p.setValue(1)
assert(await(p.raiseWithin(50.milliseconds)) == 1)
timer.stop()
}
"when timeout is forever" in {
// We manage to throw an exception inside
// the scala compiler if we use MockTimer
// here. Sigh.
implicit val timer: Timer = FailingTimer
val p = new Promise[Int]
assert(p.raiseWithin(Duration.Top) == p)
}
"when future already satisfied" in {
implicit val timer: Timer = new NullTimer
val p = new Promise[Int]
p.setValue(3)
assert(p.raiseWithin(1.minute) == p)
}
"interruption" in Time.withCurrentTimeFrozen { _ =>
implicit val timer: Timer = new MockTimer
val p = new HandledPromise[Int]
val f = p.raiseWithin(50.milliseconds)
assert(p.handled.isEmpty)
f.raise(new Exception)
p.handled match {
case Some(_) =>
case None => fail()
}
}
}
"masked" should {
"do unconditional interruption" in {
val p = new HandledPromise[Unit]
val f = p.masked
f.raise(new Exception())
assert(p.handled.isEmpty)
}
"do conditional interruption" in {
val p = new HandledPromise[Unit]
val f1 = p.mask {
case _: TimeoutException => true
}
val f2 = p.mask {
case _: TimeoutException => true
}
f1.raise(new TimeoutException("bang!"))
assert(p.handled.isEmpty)
f2.raise(new Exception())
assert(p.handled.isDefined)
}
}
"liftToTry" should {
"success" in {
val p = const(Return(3))
assert(await(p.liftToTry) == Return(3))
}
"failure" in {
val ex = new Exception()
val p = const(Throw(ex))
assert(await(p.liftToTry) == Throw(ex))
}
"propagates interrupt" in {
val p = new HandledPromise[Unit]
p.liftToTry.raise(new Exception())
assert(p.handled.isDefined)
}
}
"lowerFromTry" should {
"success" in {
val f = const(Return(Return(3)))
assert(await(f.lowerFromTry) == 3)
}
"failure" in {
val ex = new Exception()
val p = const(Return(Throw(ex)))
val ex1 = intercept[Exception] { await(p.lowerFromTry) }
assert(ex == ex1)
}
"propagates interrupt" in {
val p = new HandledPromise[Try[Unit]]
p.lowerFromTry.raise(new Exception())
assert(p.handled.isDefined)
}
}
}
s"FutureTask ($name)" should {
"return result" in {
val task = new FutureTask("hello")
task.run()
assert(await(task) == "hello")
}
"throw result" in {
val task = new FutureTask[String](throw new IllegalStateException)
task.run()
intercept[IllegalStateException] {
await(task)
}
}
}
}
test(
"ConstFuture",
new MkConst {
def apply[A](r: Try[A]): Future[A] = Future.const(r)
})
test(
"Promise",
new MkConst {
def apply[A](r: Try[A]): Future[A] = new Promise(r)
})
"Future.apply" should {
"fail on NLRC" in {
def ok(): String = {
val f = Future(return "OK")
val t = intercept[FutureNonLocalReturnControl] {
f.poll.get.get()
}
val nlrc = intercept[NonLocalReturnControl[String]] {
throw t.getCause
}
assert(nlrc.value == "OK")
"NOK"
}
assert(ok() == "NOK")
}
}
"Future.None" should {
"always be defined" in {
assert(Future.None.isDefined)
}
"but still None" in {
assert(await(Future.None).isEmpty)
}
}
"Future.True" should {
"always be defined" in {
assert(Future.True.isDefined)
}
"but still True" in {
assert(await(Future.True))
}
}
"Future.False" should {
"always be defined" in {
assert(Future.False.isDefined)
}
"but still False" in {
assert(!await(Future.False))
}
}
"Future.never" should {
"must be undefined" in {
assert(!Future.never.isDefined)
assert(Future.never.poll.isEmpty)
}
"always time out" in {
intercept[TimeoutException] { Await.ready(Future.never, 0.milliseconds) }
}
}
"Future.onFailure" should {
val nonfatal = Future.exception(new RuntimeException())
val fatal = Future.exception(new FatalException())
"with Function1" in {
val counter = new AtomicInteger()
val f: Throwable => Unit = _ => counter.incrementAndGet()
nonfatal.onFailure(f)
assert(counter.get() == 1)
fatal.onFailure(f)
assert(counter.get() == 2)
}
"with PartialFunction" in {
val monitor = new HandledMonitor()
Monitor.using(monitor) {
val counter = new AtomicInteger()
nonfatal.onFailure { case NonFatal(_) => counter.incrementAndGet() }
assert(counter.get() == 1)
assert(monitor.handled == null)
        // this will throw a MatchError that is propagated to the monitor
fatal.onFailure { case NonFatal(_) => counter.incrementAndGet() }
assert(counter.get() == 1)
assert(monitor.handled.getClass == classOf[MatchError])
}
}
}
"Future.sleep" should {
"Satisfy after the given amount of time" in Time.withCurrentTimeFrozen { tc =>
implicit val timer: MockTimer = new MockTimer
val f = Future.sleep(10.seconds)
assert(!f.isDefined)
tc.advance(5.seconds)
timer.tick()
assert(!f.isDefined)
tc.advance(5.seconds)
timer.tick()
assert(f.isDefined)
await(f)
}
"Be interruptible" in {
implicit val timer: MockTimer = new MockTimer
// sleep and grab the task that's created
val f = Future.sleep(1.second)(timer)
val task = timer.tasks(0)
// then raise a known exception
val e = new Exception("expected")
f.raise(e)
// we were immediately satisfied with the exception and the task was canceled
f mustProduce Throw(e)
assert(task.isCancelled)
}
"Return Future.Done for durations <= 0" in {
implicit val timer: MockTimer = new MockTimer
assert(Future.sleep(Duration.Zero) eq Future.Done)
assert(Future.sleep((-10).seconds) eq Future.Done)
assert(timer.tasks.isEmpty)
}
"Return Future.never for Duration.Top" in {
implicit val timer: MockTimer = new MockTimer
assert(Future.sleep(Duration.Top) eq Future.never)
assert(timer.tasks.isEmpty)
}
}
"Future.select" should {
import Arbitrary.arbitrary
val genLen = Gen.choose(1, 10)
"return the first result" in {
forAll(genLen, arbitrary[Boolean]) { (n, fail) =>
val ps = List.fill(n)(new Promise[Int]())
assert(ps.map(_.waitqLength).sum == 0)
val f = Future.select(ps)
val i = Random.nextInt(ps.length)
val e = new Exception("sad panda")
val t = if (fail) Throw(e) else Return(i)
ps(i).update(t)
assert(f.isDefined)
val (ft, fps) = await(f)
assert(ft == t)
assert(fps.toSet == (ps.toSet - ps(i)))
}
}
"not accumulate listeners when losing or" in {
val p = new Promise[Unit]
val q = new Promise[Unit]
p.or(q)
assert(p.waitqLength == 1)
q.setDone()
assert(p.waitqLength == 0)
}
"not accumulate listeners when losing select" in {
val p = new Promise[Unit]
val q = new Promise[Unit]
Future.select(Seq(p, q))
assert(p.waitqLength == 1)
q.setDone()
assert(p.waitqLength == 0)
}
"not accumulate listeners if not selected" in {
forAll(genLen, arbitrary[Boolean]) { (n, fail) =>
val ps = List.fill(n)(new Promise[Int]())
assert(ps.map(_.waitqLength).sum == 0)
val f = Future.select(ps)
assert(ps.map(_.waitqLength).sum == n)
val i = Random.nextInt(ps.length)
val e = new Exception("sad panda")
val t = if (fail) Throw(e) else Return(i)
f.respond { _ => () }
assert(ps.map(_.waitqLength).sum == n)
ps(i).update(t)
assert(ps.map(_.waitqLength).sum == 0)
}
}
"fail if we attempt to select an empty future sequence" in {
val f = Future.select(Nil)
assert(f.isDefined)
val e = new IllegalArgumentException("empty future list")
val actual = intercept[IllegalArgumentException] { await(f) }
assert(actual.getMessage == e.getMessage)
}
"propagate interrupts" in {
val fs = (0 until 10).map(_ => new HandledPromise[Int])
Future.select(fs).raise(new Exception)
assert(fs.forall(_.handled.isDefined))
}
}
  // These tests are almost a carbon copy of the "Future.select" tests; they
  // should evolve in sync.
"Future.selectIndex" should {
import Arbitrary.arbitrary
val genLen = Gen.choose(1, 10)
"return the first result" in {
forAll(genLen, arbitrary[Boolean]) { (n, fail) =>
val ps = IndexedSeq.fill(n)(new Promise[Int]())
assert(ps.map(_.waitqLength).sum == 0)
val f = Future.selectIndex(ps)
val i = Random.nextInt(ps.length)
val e = new Exception("sad panda")
val t = if (fail) Throw(e) else Return(i)
ps(i).update(t)
assert(f.isDefined)
assert(await(f) == i)
}
}
"not accumulate listeners when losing select" in {
val p = new Promise[Unit]
val q = new Promise[Unit]
Future.selectIndex(IndexedSeq(p, q))
assert(p.waitqLength == 1)
q.setDone()
assert(p.waitqLength == 0)
}
"not accumulate listeners if not selected" in {
forAll(genLen, arbitrary[Boolean]) { (n, fail) =>
val ps = IndexedSeq.fill(n)(new Promise[Int]())
assert(ps.map(_.waitqLength).sum == 0)
val f = Future.selectIndex(ps)
assert(ps.map(_.waitqLength).sum == n)
val i = Random.nextInt(ps.length)
val e = new Exception("sad panda")
val t = if (fail) Throw(e) else Return(i)
f.respond { _ => () }
assert(ps.map(_.waitqLength).sum == n)
ps(i).update(t)
assert(ps.map(_.waitqLength).sum == 0)
}
}
"fail if we attempt to select an empty future sequence" in {
val f = Future.selectIndex(IndexedSeq.empty)
assert(f.isDefined)
val e = new IllegalArgumentException("empty future list")
val actual = intercept[IllegalArgumentException] { await(f) }
assert(actual.getMessage == e.getMessage)
}
"propagate interrupts" in {
val fs = IndexedSeq.fill(10)(new HandledPromise[Int]())
Future.selectIndex(fs).raise(new Exception)
assert(fs.forall(_.handled.isDefined))
}
}
"Future.each" should {
"iterate until an exception is thrown" in {
val exc = new Exception("done")
var next: Future[Int] = Future.value(10)
val done = Future.each(next) {
case 0 => next = Future.exception(exc)
case n => next = Future.value(n - 1)
}
assert(done.poll.contains(Throw(exc)))
}
"evaluate next one time per iteration" in {
var i, j = 0
def next(): Future[Int] =
if (i == 10) Future.exception(new Exception)
else {
i += 1
Future.value(i)
}
Future.each(next()) { i =>
j += 1
assert(i == j)
}
}
"terminate if the body throws an exception" in {
val exc = new Exception("body exception")
var i = 0
def next(): Future[Int] = Future.value({ i += 1; i })
val done = Future.each(next()) {
case 10 => throw exc
case _ =>
}
assert(done.poll.contains(Throw(exc)))
assert(i == 10)
}
"terminate when 'next' throws" in {
val exc = new Exception
def next(): Future[Int] = throw exc
val done = Future.each(next()) { _ => throw exc }
assert(done.poll.contains(Throw(Future.NextThrewException(exc))))
}
}
}
|
twitter/util
|
util-core/src/test/scala/com/twitter/util/FutureTest.scala
|
Scala
|
apache-2.0
| 60,883
|
package model
import com.google.gdata.client.calendar.CalendarService
import com.google.gdata.client.calendar.CalendarQuery
import org.joda.time.LocalDate
import java.text.SimpleDateFormat
import java.net.URL
import org.joda.time.DateTime
import com.google.gdata.data.calendar.CalendarEventFeed
import play.api.libs.json.Json
import scala.collection.JavaConversions._
import com.google.gdata.data.calendar.CalendarFeed
import org.apache.commons.lang3.StringUtils
import com.google.gdata.data.calendar.CalendarEventFeed
import com.google.gdata.data.calendar.CalendarFeed
import play.api.libs.json.Json.toJsFieldJsValueWrapper
object GoogleCalendar {
def getGoogleCalendarEntries(userName: String, userPassword: String, days: Int) = {
val APP_ID = "myhomepage-getGoogleCalendarEntries"
val METAFEED_URL_BASE = "https://www.google.com/calendar/feeds/"
val EVENT_FEED_URL_SUFFIX = "/private/full"
val service = new CalendarService(APP_ID);
service.setUserCredentials(userName, userPassword);
val start = LocalDate.now();
val until = start.plusDays(days);
val dayf = new SimpleDateFormat("EE");
val df = new SimpleDateFormat("dd.MM");
val tf = new SimpleDateFormat("HH:mm");
val feedUrl = new URL("https://www.google.com/calendar/feeds/default/allcalendars/full")
val feeds = service.getFeed(feedUrl, classOf[CalendarFeed]).getEntries().flatMap(entry => {
val id = StringUtils.substringAfterLast(entry.getId(), "/")
val eventFeedUrl = new URL("http://www.google.com/calendar/feeds/" + id + "/private/full")
val myQuery = new CalendarQuery(eventFeedUrl)
myQuery.setMinimumStartTime(new com.google.gdata.data.DateTime(start.toDate()))
myQuery.setMaximumStartTime(new com.google.gdata.data.DateTime(until.toDate()))
val resultFeed = service.query(myQuery, classOf[CalendarEventFeed])
resultFeed.getEntries()
})
case class Entry(val timestamp: DateTime, val text: String)
implicit def dateTimeOrdering: Ordering[DateTime] = Ordering.fromLessThan(_ isBefore _)
Json.arr(
feeds.map(entry => Entry(
new DateTime(entry.getTimes().get(0).getStartTime().getValue()),
entry.getTitle().getPlainText()))
.filter(e => e.timestamp.isAfter(start.toDateMidnight()))
.sortBy(e => e.timestamp)
.map(e => Json.obj(
"day" -> dayf.format(e.timestamp.toDate()),
"date" -> df.format(e.timestamp.toDate()),
"time" -> tf.format(e.timestamp.toDate()),
"text" -> e.text)))
}
}
|
Sorokan/LeosStartpage
|
app/model/GoogleCalendar.scala
|
Scala
|
gpl-3.0
| 2,548
|
package beyond.engine.javascript.lib.database
import reactivemongo.bson.BSONObjectID
case class ObjectId(bson: BSONObjectID) {
def this(id: String) = this(BSONObjectID(id))
override val toString: String = bson.stringify
def toJSON(key: String): String = s"ObjectId($toString)"
}
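// Hedged usage sketch (not part of the original file). BSONObjectID.generate and the hex
// literal below are illustrative; the exact factory name depends on the ReactiveMongo version.
// val fresh = ObjectId(BSONObjectID.generate)
// val parsed = new ObjectId("507f1f77bcf86cd799439011") // via the auxiliary constructor
// parsed.toJSON("_id")                                   // => "ObjectId(507f1f77bcf86cd799439011)"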
|
SollmoStudio/beyond
|
core/app/beyond/engine/javascript/lib/database/ObjectId.scala
|
Scala
|
apache-2.0
| 288
|
/*
* Receive.scala
* (Cord)
*
* Copyright (c) 2015-2020 Hanns Holger Rutz.
*
* This software is published under the GNU Lesser General Public License v2.1+
*
*
* For further information, please contact Hanns Holger Rutz at
* contact@sciss.de
*/
package de.sciss.cord
package objects
import de.sciss.cord.impl.{NoInlets, NodeImplOps, ObjNodeImpl, SingleOutlet}
class Receive(val parent: Patcher, val args: List[Any])
extends ObjNodeImpl("receive") with NoInlets with SingleOutlet {
private val portName = args match {
case (n: String) :: Nil => n
case _ => throw new IllegalArgumentException(s"'receive' requires one string argument, the port name")
}
val outlet = this.messageOutlet
private val inlet = Registry.addReceive(portName, { m: M => outlet(m) })
override def dispose(): Unit = {
super.dispose()
Registry.removeReceive(portName, inlet)
}
}
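// Hedged usage sketch (not part of the original file): a 'receive' node takes exactly one
// string argument, the port name; `somePatcher` is a hypothetical existing Patcher instance.
// val rcv = new Receive(somePatcher, List("ping"))
// rcv.dispose() // unregisters the receive callback from the Registry again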
|
Sciss/Cord
|
src/main/scala/de/sciss/cord/objects/Receive.scala
|
Scala
|
lgpl-2.1
| 900
|
object Test extends App {
class Bar[T]
implicit def barInt: Bar[Int] = {
println("barInt")
new Bar[Int]
}
implicit def bar[T]: Bar[T] = {
println("bar")
new Bar[T]
}
implicitly[Bar[Int]]
locally {
def barInt: Unit = ???
implicitly[Bar[Int]]
// used to resolve to bar, but
// resolves to barInt now, since shadowing is no longer tested
}
}
|
som-snytt/dotty
|
tests/run/i5224.scala
|
Scala
|
apache-2.0
| 394
|
/*
* Copyright 2010-2020 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package mongodb
import net.liftweb.common.Box
import net.liftweb.json._
import net.liftweb.util.{ConnectionIdentifier, DefaultConnectionIdentifier}
import net.liftweb.util.Helpers.tryo
import scala.collection.JavaConverters._
import java.util.UUID
import org.bson.{BsonDocument, Document, UuidRepresentation}
import org.bson.codecs.{PatternCodec, UuidCodecProvider}
import org.bson.codecs.configuration.{CodecRegistries, CodecRegistry}
import org.bson.conversions.Bson
import org.bson.types.ObjectId
import com.mongodb._
import com.mongodb.client.{MongoCollection, MongoDatabase}
import com.mongodb.client.model.{DeleteOptions, IndexOptions, InsertOneOptions, ReplaceOptions, UpdateOptions}
import com.mongodb.client.model.Filters.{eq => eqs}
import com.mongodb.client.result.{DeleteResult, UpdateResult}
/**
* extend case class with this trait
*/
trait MongoDocument[BaseDocument] extends JsonObject[BaseDocument] {
self: BaseDocument =>
def _id: Any
def meta: MongoDocumentMeta[BaseDocument]
def delete: Box[DeleteResult] = {
meta.deleteOne("_id", _id)
}
def save: UpdateResult = meta.save(this)
def getRef: Option[MongoRef] = _id match {
case oid: ObjectId => Some(MongoRef(meta.collectionName, oid))
case _ => None
}
}
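// Hedged usage sketch (not part of the original file): a hypothetical document class mixing
// in MongoDocument. `Person` and its companion object (which would extend
// MongoDocumentMeta[Person], defined below) are assumptions for illustration only.
// case class Person(_id: ObjectId, name: String) extends MongoDocument[Person] {
//   def meta = Person
// }
// object Person extends MongoDocumentMeta[Person]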
/**
* extend case class companion objects with this trait
*/
trait MongoDocumentMeta[BaseDocument] extends JsonObjectMeta[BaseDocument] with MongoMeta[BaseDocument, BsonDocument] {
private val bsonDocumentClass = classOf[BsonDocument]
def codecRegistry: CodecRegistry = CodecRegistries.fromRegistries(
MongoClientSettings.getDefaultCodecRegistry(),
CodecRegistries.fromProviders(new UuidCodecProvider(UuidRepresentation.JAVA_LEGACY)),
CodecRegistries.fromCodecs(new PatternCodec())
)
/**
* Override this to specify a ConnectionIdentifier.
*/
def connectionIdentifier: ConnectionIdentifier = DefaultConnectionIdentifier
/**
* Use the collection associated with this Meta.
*/
def useCollection[T](f: MongoCollection[BsonDocument] => T): T =
MongoDB.useMongoCollection(connectionIdentifier, collectionName, bsonDocumentClass) { mc =>
f(mc.withCodecRegistry(codecRegistry).withWriteConcern(writeConcern))
}
def useCollection[T](db: MongoDatabase)(f: MongoCollection[BsonDocument] => T): T = {
val mc = db.getCollection(collectionName, bsonDocumentClass)
f(mc.withCodecRegistry(codecRegistry).withWriteConcern(writeConcern))
}
@deprecated("Use useCollection instead", "3.4.3")
def useColl[T](f: DBCollection => T): T =
MongoDB.useCollection(connectionIdentifier, collectionName)(f)
/**
* Use the db associated with this Meta.
*/
def useDatabase[T](f: MongoDatabase => T): T =
MongoDB.useDatabase(connectionIdentifier) { md =>
f(md.withCodecRegistry(codecRegistry).withWriteConcern(writeConcern))
}
@deprecated("Use useDatabase instead", "3.4.3")
def useDb[T](f: DB => T): T = MongoDB.use(connectionIdentifier)(f)
def create(dbo: Bson): BaseDocument = {
val jv = BsonParser.serialize(dbo)
create(jv.asInstanceOf[JObject])
}
/**
* Find a single row by a qry, using a Bson.
*/
def find(qry: Bson): Option[BaseDocument] = {
useCollection { coll =>
coll.find(qry).limit(1).first match {
case null => None
case dbo => {
Some(create(dbo))
}
}
}
}
/**
* Find a single document by _id using a String.
*/
def find(s: String): Option[BaseDocument] =
if (ObjectId.isValid(s))
find(eqs("_id", new ObjectId(s)))
else
find(eqs("_id", s))
/**
* Find a single document by _id using an ObjectId.
*/
def find(oid: ObjectId): Option[BaseDocument] = find(eqs("_id", oid))
/**
* Find a single document by _id using a UUID.
*/
def find(uuid: UUID): Option[BaseDocument] = find(eqs("_id", uuid))
/**
* Find a single document by a qry using String, Any inputs
*/
def find(k: String, v: Any): Option[BaseDocument] = find(eqs(k, v))
/**
* Find a single document by a qry using a json query
*/
def find(json: JObject): Option[BaseDocument] = find(BsonParser.parse(json))
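  // Hedged examples (not in the original file), assuming the hypothetical `Person` meta object
  // sketched above and `import net.liftweb.json.JsonDSL._` for the JObject literal:
  // Person.find("507f1f77bcf86cd799439011")   // by _id, as a String (or an ObjectId / UUID)
  // Person.find("email", "user@example.com")  // by a key/value pair
  // Person.find(("age" -> 21): JObject)       // by a json query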
/**
* Find all documents in this collection
*/
def findAll: List[BaseDocument] = {
useCollection { coll =>
/** Mongo Cursors are both Iterable and Iterator,
* so we need to reduce ambiguity for implicits
*/
coll.find.iterator.asScala.map(create).toList
}
}
/**
* Find all documents using a Bson query.
*/
def findAll(qry: Bson, sort: Option[Bson], opts: FindOption*): List[BaseDocument] = {
val findOpts = opts.toList
useCollection { coll =>
val cur = coll.find(qry).limit(
findOpts.find(_.isInstanceOf[Limit]).map(x => x.value).getOrElse(0)
).skip(
findOpts.find(_.isInstanceOf[Skip]).map(x => x.value).getOrElse(0)
)
sort.foreach(s => cur.sort(s))
/** Mongo Cursors are both Iterable and Iterator,
* so we need to reduce ambiguity for implicits
*/
cur.iterator.asScala.map(create).toList
}
}
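  // Hedged example (not in the original file): `Limit` and `Skip` are the lift-mongodb
  // FindOptions; `Person` and the query values are assumptions for illustration.
  // Person.findAll(eqs("active", true), Limit(10), Skip(20))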
/**
* Find all documents using a Bson query.
*/
def findAll(qry: Bson, opts: FindOption*): List[BaseDocument] =
findAll(qry, None, opts :_*)
/**
* Find all documents using a Bson query with sort
*/
def findAll(qry: Bson, sort: Bson, opts: FindOption*): List[BaseDocument] =
findAll(qry, Some(sort), opts :_*)
/**
* Find all documents using a JObject query
*/
def findAll(qry: JObject, opts: FindOption*): List[BaseDocument] =
findAll(BsonParser.parse(qry), None, opts :_*)
/**
* Find all documents using a JObject query with sort
*/
def findAll(qry: JObject, sort: JObject, opts: FindOption*): List[BaseDocument] =
findAll(BsonParser.parse(qry), Some(BsonParser.parse(sort)), opts :_*)
/**
* Find all documents using a k, v query
*/
def findAll(k: String, o: Any, opts: FindOption*): List[BaseDocument] =
findAll(eqs(k, o), None, opts :_*)
/**
* Find all documents using a k, v query with JObject sort
*/
def findAll(k: String, o: Any, sort: JObject, opts: FindOption*): List[BaseDocument] =
findAll(eqs(k, o), Some(BsonParser.parse(sort)), opts :_*)
def insertOne(inst: BaseDocument, opts: InsertOneOptions = new InsertOneOptions): Box[BaseDocument] = tryo {
useCollection { coll =>
val bson = BsonParser.parse(toJObject(inst))
coll.insertOne(bson, opts)
inst
}
}
def replaceOne(inst: BaseDocument, opts: ReplaceOptions = new ReplaceOptions): Box[UpdateResult] = tryo {
useCollection { coll =>
val bson = BsonParser.parse(toJObject(inst))
val id = bson.get("_id")
coll.replaceOne(eqs("_id", id), bson, opts)
}
}
def replaceOne(qry: Bson, inst: BaseDocument, opts: ReplaceOptions): Box[UpdateResult] = tryo {
useCollection { coll =>
val bson = BsonParser.parse(toJObject(inst))
coll.replaceOne(qry, bson, opts)
}
}
def replaceOne(qry: Bson, inst: BaseDocument): Box[UpdateResult] =
replaceOne(qry, inst, new ReplaceOptions)
def replaceOne(qry: JObject, inst: BaseDocument, opts: ReplaceOptions): Box[UpdateResult] = tryo {
useCollection { coll =>
val bson = BsonParser.parse(toJObject(inst))
coll.replaceOne(BsonParser.parse(qry), bson, opts)
}
}
def replaceOne(qry: JObject, inst: BaseDocument): Box[UpdateResult] =
replaceOne(qry, inst, new ReplaceOptions)
/**
* Save a document to the db
*/
def save(inst: BaseDocument): UpdateResult = {
val opts = new ReplaceOptions().upsert(true)
useCollection { coll =>
val bson = BsonParser.parse(toJObject(inst))
val id = bson.get("_id")
coll.replaceOne(eqs("_id", id), bson, opts)
}
}
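  // Hedged example (not in the original file): `save` upserts by _id, so the same call works
  // both for a first insert and for a later update of the hypothetical `person` instance.
  // val result: UpdateResult = Person.save(person)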
@deprecated("Use save instead", "3.4.3")
def save(in: BaseDocument, db: DB) {
db.getCollection(collectionName).save(JObjectParser.parse(toJObject(in)))
}
@deprecated("Use updateOne, updateMany, or replaceOne instead", "3.4.3")
def update(qry: JObject, newbd: BaseDocument, db: DB, opts: UpdateOption*) {
update(qry, toJObject(newbd), db, opts :_*)
}
@deprecated("Use updateOne, updateMany, or replaceOne instead", "3.4.3")
def update(qry: JObject, newbd: BaseDocument, opts: UpdateOption*) {
MongoDB.use(connectionIdentifier) ( db => {
update(qry, newbd, db, opts :_*)
})
}
}
|
lift/framework
|
persistence/mongodb/src/main/scala/net/liftweb/mongodb/MongoDocument.scala
|
Scala
|
apache-2.0
| 9,047
|
package simulations
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class EpidemySuite extends FunSuite {
test("population"){
val es = new EpidemySimulator
assert(es.persons.size == es.SimConfig.population)
}
test("prevalence rate"){
val prevalenceRate = 0.01
val es = new EpidemySimulator
val numInfected = es.persons.count(_.infected)
assert(numInfected == es.SimConfig.population * prevalenceRate,
"prevalence rate should be 0.01"
)
}
test("dead person stays dead"){
val es = new EpidemySimulator
val chosenOne = es.persons.head
chosenOne.infected = true
chosenOne.sick = true
chosenOne.dead = true
chosenOne.immune = false
val(row, col) = (chosenOne.row, chosenOne.col)
val testDays = 100
while(!es.agenda.isEmpty && es.agenda.head.time < testDays){
es.next
assert(chosenOne.dead == true, "Dead person should keep dead state")
        assert(chosenOne.infected == true, "Dead person stays infected")
        assert(chosenOne.immune == false, "Dead person cannot become immune")
        assert(chosenOne.sick == true, "Dead person stays sick")
assert(chosenOne.col == col && chosenOne.row == row, "Dead person cannot move")
}
}
test("nobody moves into a room with dead person"){
val es = new EpidemySimulator
val chosenOne = es.persons.head
chosenOne.infected = true
chosenOne.sick = true
chosenOne.dead = true
chosenOne.immune = false
val originalPersons = es.personsInRoom(chosenOne.room)
val testDays = 100
while(!es.agenda.isEmpty && es.agenda.head.time < testDays){
es.next
        assert(es.personsInRoom(chosenOne.room).forall(originalPersons.contains(_)), "all persons in the room were there at the beginning")
}
}
test("adjacentRooms") {
val es = new EpidemySimulator
assert(es.adjacentRooms(1,2).size == 4, es.adjacentRooms(1,2).toString)
assert(es.adjacentRooms(1,2).contains(1,3))
assert(es.adjacentRooms(1,2).contains(1,1))
assert(es.adjacentRooms(1,2).contains(0,2))
assert(es.adjacentRooms(1,2).contains(2,2))
assert(es.adjacentRooms(0,2).contains(7,2))
assert(es.adjacentRooms(7,5).contains(0,5))
assert(es.adjacentRooms(3,7).contains(3,0))
assert(es.adjacentRooms(6,0).contains(6,7))
assert(es.adjacentRooms(0,0).contains(7,0))
assert(es.adjacentRooms(7,7).contains(0,7))
}
test("life cycle"){
val es = new EpidemySimulator
val incubationTime = 6
val dieTime = 14
val immuneTime = 16
val healTime = 18
val prevalenceRate = 0.01
val transRate = 0.4
val dieRate = 0.25
val infectedPerson = (es.persons.find{_.infected}).get
//before incubation time
while(es.agenda.head.time < incubationTime){
assert(infectedPerson.infected == true, "Infected person keeps infected in 6 days")
assert(infectedPerson.sick == false, "Infected person does not get sick in 6 days")
assert(infectedPerson.immune == false, "Infected person cannot become immune in 6 days")
assert(infectedPerson.dead == false, "Infected person does not die in 6 days")
es.next
}
//incubation time has passed, there should be an event for getting sick
assert(es.agenda.head.time == incubationTime, "You should set a 'sick' event after incubation time")
while(es.agenda.head.time == incubationTime) es.next
assert(infectedPerson.sick == true, "Infected person should become sick after 6 days")
//wait for dieTime
while(es.agenda.head.time < dieTime){
assert(infectedPerson.infected == true, "Sick person keeps infected")
assert(infectedPerson.sick == true, "Sick person keeps sick before turning immune")
assert(infectedPerson.immune == false, "Sick person is not immune")
assert(infectedPerson.dead == false, "Sick person does not die before 14 infected days")
es.next
}
assert(es.agenda.head.time == dieTime, "You should set a 'die' event (decides with a probability 25% whether the person dies) after 14 days")
while(es.agenda.head.time == dieTime) es.next
}
test("transmissibility rate"){
var infectedTimes = 0
for(i <- 0 to 100){
val es = new EpidemySimulator
val healthyPerson = (es.persons find {p => !p.infected}).get
es.persons.filter(p => p != healthyPerson) foreach {_.infected = true}
while(es.agenda.head.time < 6) es.next
infectedTimes = infectedTimes + (if(healthyPerson.infected) 1 else 0)
}
assert(infectedTimes > 0, "A person should get infected according to the transmissibility rate when he moves into a room with an infectious person")
}
}
|
kailuowang/PrinciplesOfReactiveProgramming
|
simulations/src/test/scala/simulations/EpidemySuite.scala
|
Scala
|
mit
| 4,881
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util.concurrent.TimeUnit
import com.yammer.metrics.core.Meter
import kafka.metrics.KafkaMetricsGroup
import kafka.utils.Pool
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.requests.DeleteRecordsResponse
import scala.collection._
case class DeleteRecordsPartitionStatus(requiredOffset: Long,
responseStatus: DeleteRecordsResponse.PartitionResponse) {
@volatile var acksPending = false
override def toString = "[acksPending: %b, error: %s, lowWatermark: %d, requiredOffset: %d]"
.format(acksPending, responseStatus.error.toString, responseStatus.lowWatermark, requiredOffset)
}
/**
* A delayed delete records operation that can be created by the replica manager and watched
* in the delete records operation purgatory
*/
class DelayedDeleteRecords(delayMs: Long,
deleteRecordsStatus: Map[TopicPartition, DeleteRecordsPartitionStatus],
replicaManager: ReplicaManager,
responseCallback: Map[TopicPartition, DeleteRecordsResponse.PartitionResponse] => Unit)
extends DelayedOperation(delayMs) {
// first update the acks pending variable according to the error code
deleteRecordsStatus.foreach { case (topicPartition, status) =>
if (status.responseStatus.error == Errors.NONE) {
// Timeout error state will be cleared when required acks are received
status.acksPending = true
status.responseStatus.error = Errors.REQUEST_TIMED_OUT
} else {
status.acksPending = false
}
trace("Initial partition status for %s is %s".format(topicPartition, status))
}
/**
   * The delayed delete records operation can be completed if every partition specified in the request satisfies one of the following:
   *
   * 1) There was an error while checking if all replicas have caught up to the deleteRecordsOffset: set an error in the response
   * 2) The low watermark of the partition has caught up to the deleteRecordsOffset: set the low watermark in the response
*
*/
override def tryComplete(): Boolean = {
// check for each partition if it still has pending acks
deleteRecordsStatus.foreach { case (topicPartition, status) =>
trace(s"Checking delete records satisfaction for ${topicPartition}, current status $status")
// skip those partitions that have already been satisfied
if (status.acksPending) {
val (lowWatermarkReached, error, lw) = replicaManager.getPartition(topicPartition) match {
case Some(partition) =>
partition.leaderReplicaIfLocal match {
case Some(_) =>
val leaderLW = partition.lowWatermarkIfLeader
(leaderLW >= status.requiredOffset, Errors.NONE, leaderLW)
case None =>
(false, Errors.NOT_LEADER_FOR_PARTITION, DeleteRecordsResponse.INVALID_LOW_WATERMARK)
}
case None =>
(false, Errors.UNKNOWN_TOPIC_OR_PARTITION, DeleteRecordsResponse.INVALID_LOW_WATERMARK)
}
if (error != Errors.NONE || lowWatermarkReached) {
status.acksPending = false
status.responseStatus.error = error
status.responseStatus.lowWatermark = lw
}
}
}
    // check if every partition has satisfied at least one of conditions (1) or (2) above
if (!deleteRecordsStatus.values.exists(_.acksPending))
forceComplete()
else
false
}
override def onExpiration() {
deleteRecordsStatus.foreach { case (topicPartition, status) =>
if (status.acksPending) {
DelayedDeleteRecordsMetrics.recordExpiration(topicPartition)
}
}
}
/**
* Upon completion, return the current response status along with the error code per partition
*/
override def onComplete() {
val responseStatus = deleteRecordsStatus.mapValues(status => status.responseStatus)
responseCallback(responseStatus)
}
}
object DelayedDeleteRecordsMetrics extends KafkaMetricsGroup {
private val aggregateExpirationMeter = newMeter("ExpiresPerSec", "requests", TimeUnit.SECONDS)
def recordExpiration(partition: TopicPartition) {
aggregateExpirationMeter.mark()
}
}
|
wangcy6/storm_app
|
frame/kafka-0.11.0/kafka-0.11.0.1-src/core/src/main/scala/kafka/server/DelayedDeleteRecords.scala
|
Scala
|
apache-2.0
| 5,089
|
/* Title: Pure/Concurrent/event_timer.scala
Author: Makarius
Initiate event after given point in time.
Note: events are run as a synchronized action within a dedicated thread
and should finish quickly without further ado.
*/
package isabelle
import java.util.{Timer, TimerTask, Date => JDate}
object Event_Timer
{
private lazy val event_timer = new Timer("event_timer", true)
final class Request private[Event_Timer](val time: Time, task: TimerTask)
{
def cancel: Boolean = task.cancel
}
def request(time: Time)(event: => Unit): Request =
{
val task = new TimerTask { def run { event } }
event_timer.schedule(task, new JDate(time.ms))
new Request(time, task)
}
}
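// Hedged usage sketch (not part of the original file): schedule a one-off event roughly one
// second from now and cancel it if it is no longer needed. Time.now() and Time.seconds(...)
// are assumed helpers of Isabelle's Time wrapper.
// val req = Event_Timer.request(Time.now() + Time.seconds(1.0)) { println("tick") }
// req.cancel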
|
larsrh/libisabelle
|
modules/pide/2017/src/main/scala/Concurrent/event_timer.scala
|
Scala
|
apache-2.0
| 715
|
package io.vamp.operation.controller
import java.time.OffsetDateTime
import java.time.temporal.ChronoUnit
import akka.actor.{ ActorRef, Props }
import akka.pattern.ask
import akka.stream.actor.ActorPublisher
import akka.stream.actor.ActorPublisherMessage.{ Cancel, Request }
import akka.stream.scaladsl.Source
import akka.util.Timeout
import akka.http.scaladsl.model.sse.ServerSentEvent
import com.typesafe.scalalogging.LazyLogging
import io.vamp.common.Namespace
import io.vamp.common.akka.IoC._
import io.vamp.common.akka._
import io.vamp.common.json.{ OffsetDateTimeSerializer, SerializationFormat }
import io.vamp.common.notification.NotificationProvider
import io.vamp.model.event.{ Event, EventQuery, TimeRange }
import io.vamp.model.reader._
import io.vamp.pulse.Percolator.{ RegisterPercolator, UnregisterPercolator }
import io.vamp.pulse.PulseActor.{ Publish, Query }
import io.vamp.pulse.{ EventRequestEnvelope, EventResponseEnvelope, PulseActor }
import org.json4s.native.Serialization._
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
trait EventApiController extends AbstractController with LazyLogging {
private val tagParameter = "tag"
private val typeParameter = "type"
def sourceEvents(parameters: Map[String, List[String]], request: String, keepAlivePeriod: FiniteDuration)(implicit namespace: Namespace, timeout: Timeout) = {
Source.actorPublisher[ServerSentEvent](Props(new ActorPublisher[ServerSentEvent] {
def receive = {
case Request(_) ⇒ openEventStream(self, parameters, request)
case Cancel ⇒ closeEventStream(self)
case (None, event: Event) ⇒
if (totalDemand > 0) filterSse(event).map {
case true ⇒ onNext(ServerSentEvent(write(event)(SerializationFormat(OffsetDateTimeSerializer)), event.`type`))
case _ ⇒ logger.info("Filter sse returned false")
}
case _ ⇒ logger.info("Unmatched case in source events")
}
})).keepAlive(keepAlivePeriod, () ⇒ ServerSentEvent.heartbeat)
}
def publishEvent(request: String)(implicit namespace: Namespace, timeout: Timeout) = {
val event = EventReader.read(request)
actorFor[PulseActor] ? Publish(event)
}
def queryEvents(parameters: Map[String, List[String]], request: String)(page: Int, perPage: Int)(implicit namespace: Namespace, timeout: Timeout): Future[Any] = {
logger.debug("Events are queried with request {}", request)
val query = parseQuery(parameters, request)
actorFor[PulseActor] ? Query(EventRequestEnvelope(query, page, perPage))
}
def openEventStream(to: ActorRef, parameters: Map[String, List[String]], request: String, message: Any = None)(implicit namespace: Namespace) = {
val (tags, kind) = {
if (request.isEmpty) parameters.getOrElse(tagParameter, Nil).toSet → parameters.get(typeParameter).map(_.head)
else {
val query = EventQueryReader.read(request)
query.tags → query.`type`
}
}
actorFor[PulseActor].tell(RegisterPercolator(percolator(to), tags, kind, message), to)
}
def closeEventStream(to: ActorRef)(implicit namespace: Namespace) = actorFor[PulseActor].tell(UnregisterPercolator(percolator(to)), to)
protected def parseQuery(parameters: Map[String, List[String]], request: String) = {
if (request.trim.isEmpty) {
// logger.info("event query type {}", parameters.get(typeParameter).map(_.head))
EventQuery(parameters.getOrElse(tagParameter, Nil).toSet, parameters.get(typeParameter).map(_.head), None)
}
else EventQueryReader.read(request)
}
protected def filterSse(event: Event)(implicit namespace: Namespace, timeout: Timeout): Future[Boolean] = Future.successful(true)
private def percolator(channel: ActorRef) = s"stream://${channel.path.elements.mkString("/")}"
}
trait EventValue {
this: ActorSystemProvider with ExecutionContextProvider with NotificationProvider ⇒
def last(tags: Set[String], window: FiniteDuration, `type`: Option[String] = None)(implicit namespace: Namespace, timeout: Timeout): Future[Option[AnyRef]] = {
val eventQuery = EventQuery(tags, `type`, Option(timeRange(window)), None)
actorFor[PulseActor] ? PulseActor.Query(EventRequestEnvelope(eventQuery, 1, 1)) map {
case EventResponseEnvelope(Event(_, _, _, value, _, _, _) :: _, _, _, _) ⇒ Option(value)
case _ ⇒ None
}
}
protected def timeRange(window: FiniteDuration) = {
val now = OffsetDateTime.now()
val from = now.minus(window.toSeconds, ChronoUnit.SECONDS)
TimeRange(Some(from), Some(now), includeLower = true, includeUpper = true)
}
}
|
magneticio/vamp
|
operation/src/main/scala/io/vamp/operation/controller/EventApiController.scala
|
Scala
|
apache-2.0
| 4,654
|
package pl.pholda.malpompaaligxilo.dsl.expr.logical
import pl.pholda.malpompaaligxilo.dsl.DslFormExpr
import pl.pholda.malpompaaligxilo.form.FormInstance
case class Negation(expr: DslFormExpr[_]) extends DslFormExpr[Any] {
override def apply(formInstance: FormInstance[_]): Any = {
expr(formInstance) match {
case b: Boolean => !b
case x => x
}
}
}
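// Hedged sketch (not part of the original file): Negation flips a Boolean result and passes
// any non-Boolean value through unchanged. `isAdult` (a DslFormExpr[Boolean]) and `form`
// (a FormInstance) are hypothetical.
// val notAdult = Negation(isAdult)
// notAdult(form) // == !isAdult(form) whenever isAdult(form) evaluates to a Boolean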
|
pholda/MalpompaAligxilo
|
dsl/shared/src/main/scala/pl/pholda/malpompaaligxilo/dsl/expr/logical/Negation.scala
|
Scala
|
gpl-3.0
| 375
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.SparkConf
import org.apache.spark.sql.{Dataset, QueryTest, Row, SaveMode}
import org.apache.spark.sql.catalyst.expressions.codegen.{ByteCodeStats, CodeAndComment, CodeGenerator}
import org.apache.spark.sql.execution.adaptive.DisableAdaptiveExecutionSuite
import org.apache.spark.sql.execution.aggregate.HashAggregateExec
import org.apache.spark.sql.execution.columnar.InMemoryTableScanExec
import org.apache.spark.sql.execution.joins.BroadcastHashJoinExec
import org.apache.spark.sql.execution.joins.SortMergeJoinExec
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.{IntegerType, StringType, StructType}
// Disable AQE because the WholeStageCodegenExec is added when running QueryStageExec
class WholeStageCodegenSuite extends QueryTest with SharedSparkSession
with DisableAdaptiveExecutionSuite {
import testImplicits._
override def sparkConf: SparkConf =
super.sparkConf
.setAppName("test")
.set("spark.sql.parquet.columnarReaderBatchSize", "4096")
.set("spark.sql.sources.useV1SourceList", "avro")
.set("spark.sql.extensions", "com.intel.oap.ColumnarPlugin")
.set("spark.sql.execution.arrow.maxRecordsPerBatch", "4096")
//.set("spark.shuffle.manager", "org.apache.spark.shuffle.sort.ColumnarShuffleManager")
.set("spark.memory.offHeap.enabled", "true")
.set("spark.memory.offHeap.size", "50m")
.set("spark.sql.join.preferSortMergeJoin", "false")
.set("spark.sql.columnar.codegen.hashAggregate", "false")
.set("spark.oap.sql.columnar.wholestagecodegen", "false")
.set("spark.sql.columnar.window", "false")
.set("spark.unsafe.exceptionOnMemoryLeak", "false")
//.set("spark.sql.columnar.tmp_dir", "/codegen/nativesql/")
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
test("range/filter should be combined") {
val df = spark.range(10).filter("id = 1").selectExpr("id + 1")
val plan = df.queryExecution.executedPlan
assert(plan.find(_.isInstanceOf[WholeStageCodegenExec]).isDefined)
assert(df.collect() === Array(Row(2)))
}
ignore("Aggregate should be included in WholeStageCodegen") {
val df = spark.range(10).groupBy().agg(max(col("id")), avg(col("id")))
val plan = df.queryExecution.executedPlan
assert(plan.find(p =>
p.isInstanceOf[WholeStageCodegenExec] &&
p.asInstanceOf[WholeStageCodegenExec].child.isInstanceOf[HashAggregateExec]).isDefined)
assert(df.collect() === Array(Row(9, 4.5)))
}
ignore("Aggregate with grouping keys should be included in WholeStageCodegen") {
val df = spark.range(3).groupBy(col("id") * 2).count().orderBy(col("id") * 2)
val plan = df.queryExecution.executedPlan
assert(plan.find(p =>
p.isInstanceOf[WholeStageCodegenExec] &&
p.asInstanceOf[WholeStageCodegenExec].child.isInstanceOf[HashAggregateExec]).isDefined)
assert(df.collect() === Array(Row(0, 1), Row(2, 1), Row(4, 1)))
}
ignore("BroadcastHashJoin should be included in WholeStageCodegen") {
val rdd = spark.sparkContext.makeRDD(Seq(Row(1, "1"), Row(1, "1"), Row(2, "2")))
val schema = new StructType().add("k", IntegerType).add("v", StringType)
val smallDF = spark.createDataFrame(rdd, schema)
val df = spark.range(10).join(broadcast(smallDF), col("k") === col("id"))
assert(df.queryExecution.executedPlan.find(p =>
p.isInstanceOf[WholeStageCodegenExec] &&
p.asInstanceOf[WholeStageCodegenExec].child.isInstanceOf[BroadcastHashJoinExec]).isDefined)
assert(df.collect() === Array(Row(1, 1, "1"), Row(1, 1, "1"), Row(2, 2, "2")))
}
test("Sort should be included in WholeStageCodegen") {
val df = spark.range(3, 0, -1).toDF().sort(col("id"))
val plan = df.queryExecution.executedPlan
assert(plan.find(p =>
p.isInstanceOf[WholeStageCodegenExec] &&
p.asInstanceOf[WholeStageCodegenExec].child.isInstanceOf[SortExec]).isDefined)
assert(df.collect() === Array(Row(1), Row(2), Row(3)))
}
test("MapElements should be included in WholeStageCodegen") {
import testImplicits._
val ds = spark.range(10).map(_.toString)
val plan = ds.queryExecution.executedPlan
assert(plan.find(p =>
p.isInstanceOf[WholeStageCodegenExec] &&
p.asInstanceOf[WholeStageCodegenExec].child.isInstanceOf[SerializeFromObjectExec]).isDefined)
assert(ds.collect() === 0.until(10).map(_.toString).toArray)
}
ignore("typed filter should be included in WholeStageCodegen") {
val ds = spark.range(10).filter(_ % 2 == 0)
val plan = ds.queryExecution.executedPlan
assert(plan.find(p =>
p.isInstanceOf[WholeStageCodegenExec] &&
p.asInstanceOf[WholeStageCodegenExec].child.isInstanceOf[FilterExec]).isDefined)
assert(ds.collect() === Array(0, 2, 4, 6, 8))
}
ignore("back-to-back typed filter should be included in WholeStageCodegen") {
val ds = spark.range(10).filter(_ % 2 == 0).filter(_ % 3 == 0)
val plan = ds.queryExecution.executedPlan
assert(plan.find(p =>
p.isInstanceOf[WholeStageCodegenExec] &&
p.asInstanceOf[WholeStageCodegenExec].child.isInstanceOf[FilterExec]).isDefined)
assert(ds.collect() === Array(0, 6))
}
ignore("cache for primitive type should be in WholeStageCodegen with InMemoryTableScanExec") {
import testImplicits._
val dsInt = spark.range(3).cache()
dsInt.count()
val dsIntFilter = dsInt.filter(_ > 0)
val planInt = dsIntFilter.queryExecution.executedPlan
assert(planInt.collect {
case WholeStageCodegenExec(FilterExec(_,
ColumnarToRowExec(InputAdapter(_: InMemoryTableScanExec)))) => ()
}.length == 1)
assert(dsIntFilter.collect() === Array(1, 2))
// cache for string type is not supported for InMemoryTableScanExec
val dsString = spark.range(3).map(_.toString).cache()
dsString.count()
val dsStringFilter = dsString.filter(_ == "1")
val planString = dsStringFilter.queryExecution.executedPlan
assert(planString.collect {
case _: ColumnarToRowExec => ()
}.isEmpty)
assert(dsStringFilter.collect() === Array("1"))
}
ignore("SPARK-19512 codegen for comparing structs is incorrect") {
// this would raise CompileException before the fix
spark.range(10)
.selectExpr("named_struct('a', id) as col1", "named_struct('a', id+2) as col2")
.filter("col1 = col2").count()
// this would raise java.lang.IndexOutOfBoundsException before the fix
spark.range(10)
.selectExpr("named_struct('a', id, 'b', id) as col1",
"named_struct('a',id+2, 'b',id+2) as col2")
.filter("col1 = col2").count()
}
ignore("SPARK-21441 SortMergeJoin codegen with CodegenFallback expressions should be disabled") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "1") {
import testImplicits._
val df1 = Seq((1, 1), (2, 2), (3, 3)).toDF("key", "int")
val df2 = Seq((1, "1"), (2, "2"), (3, "3")).toDF("key", "str")
val df = df1.join(df2, df1("key") === df2("key"))
.filter("int = 2 or reflect('java.lang.Integer', 'valueOf', str) = 1")
.select("int")
val plan = df.queryExecution.executedPlan
assert(plan.find(p =>
p.isInstanceOf[WholeStageCodegenExec] &&
p.asInstanceOf[WholeStageCodegenExec].child.children(0)
.isInstanceOf[SortMergeJoinExec]).isEmpty)
assert(df.collect() === Array(Row(1), Row(2)))
}
}
def genGroupByCode(caseNum: Int): CodeAndComment = {
val caseExp = (1 to caseNum).map { i =>
s"case when id > $i and id <= ${i + 1} then 1 else 0 end as v$i"
}.toList
val keyExp = List(
"id",
"(id & 1023) as k1",
"cast(id & 1023 as double) as k2",
"cast(id & 1023 as int) as k3")
val ds = spark.range(10)
.selectExpr(keyExp:::caseExp: _*)
.groupBy("k1", "k2", "k3")
.sum()
val plan = ds.queryExecution.executedPlan
val wholeStageCodeGenExec = plan.find(p => p match {
case wp: WholeStageCodegenExec => wp.child match {
case hp: HashAggregateExec if (hp.child.isInstanceOf[ProjectExec]) => true
case _ => false
}
case _ => false
})
assert(wholeStageCodeGenExec.isDefined)
wholeStageCodeGenExec.get.asInstanceOf[WholeStageCodegenExec].doCodeGen()._2
}
def genCode(ds: Dataset[_]): Seq[CodeAndComment] = {
val plan = ds.queryExecution.executedPlan
val wholeStageCodeGenExecs = plan.collect { case p: WholeStageCodegenExec => p }
assert(wholeStageCodeGenExecs.nonEmpty, "WholeStageCodegenExec is expected")
wholeStageCodeGenExecs.map(_.doCodeGen()._2)
}
ignore("SPARK-21871 check if we can get large code size when compiling too long functions") {
val codeWithShortFunctions = genGroupByCode(3)
val (_, ByteCodeStats(maxCodeSize1, _, _)) = CodeGenerator.compile(codeWithShortFunctions)
assert(maxCodeSize1 < SQLConf.WHOLESTAGE_HUGE_METHOD_LIMIT.defaultValue.get)
val codeWithLongFunctions = genGroupByCode(50)
val (_, ByteCodeStats(maxCodeSize2, _, _)) = CodeGenerator.compile(codeWithLongFunctions)
assert(maxCodeSize2 > SQLConf.WHOLESTAGE_HUGE_METHOD_LIMIT.defaultValue.get)
}
ignore("bytecode of batch file scan exceeds the limit of WHOLESTAGE_HUGE_METHOD_LIMIT") {
import testImplicits._
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark.range(10).select(Seq.tabulate(201) {i => ('id + i).as(s"c$i")} : _*)
df.write.mode(SaveMode.Overwrite).parquet(path)
withSQLConf(SQLConf.WHOLESTAGE_MAX_NUM_FIELDS.key -> "202",
SQLConf.WHOLESTAGE_HUGE_METHOD_LIMIT.key -> "2000") {
        // wide table batch scan causes the byte code of codegen to exceed the limit of
        // WHOLESTAGE_HUGE_METHOD_LIMIT
val df2 = spark.read.parquet(path)
val fileScan2 = df2.queryExecution.sparkPlan.find(_.isInstanceOf[FileSourceScanExec]).get
assert(fileScan2.asInstanceOf[FileSourceScanExec].supportsColumnar)
checkAnswer(df2, df)
}
}
}
ignore("Control splitting consume function by operators with config") {
import testImplicits._
val df = spark.range(10).select(Seq.tabulate(2) {i => ('id + i).as(s"c$i")} : _*)
Seq(true, false).foreach { config =>
withSQLConf(SQLConf.WHOLESTAGE_SPLIT_CONSUME_FUNC_BY_OPERATOR.key -> s"$config") {
val plan = df.queryExecution.executedPlan
val wholeStageCodeGenExec = plan.find(p => p match {
case wp: WholeStageCodegenExec => true
case _ => false
})
assert(wholeStageCodeGenExec.isDefined)
val code = wholeStageCodeGenExec.get.asInstanceOf[WholeStageCodegenExec].doCodeGen()._2
assert(code.body.contains("project_doConsume") == config)
}
}
}
ignore("Skip splitting consume function when parameter number exceeds JVM limit") {
// since every field is nullable we have 2 params for each input column (one for the value
// and one for the isNull variable)
Seq((128, false), (127, true)).foreach { case (columnNum, hasSplit) =>
withTempPath { dir =>
val path = dir.getCanonicalPath
spark.range(10).select(Seq.tabulate(columnNum) {i => lit(i).as(s"c$i")} : _*)
.write.mode(SaveMode.Overwrite).parquet(path)
withSQLConf(SQLConf.WHOLESTAGE_MAX_NUM_FIELDS.key -> "255",
SQLConf.WHOLESTAGE_SPLIT_CONSUME_FUNC_BY_OPERATOR.key -> "true") {
val projection = Seq.tabulate(columnNum)(i => s"c$i + c$i as newC$i")
val df = spark.read.parquet(path).selectExpr(projection: _*)
val plan = df.queryExecution.executedPlan
val wholeStageCodeGenExec = plan.find {
case _: WholeStageCodegenExec => true
case _ => false
}
assert(wholeStageCodeGenExec.isDefined)
val code = wholeStageCodeGenExec.get.asInstanceOf[WholeStageCodegenExec].doCodeGen()._2
assert(code.body.contains("project_doConsume") == hasSplit)
}
}
}
}
test("codegen stage IDs should be preserved in transformations after CollapseCodegenStages") {
// test case adapted from DataFrameSuite to trigger ReuseExchange
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "2") {
val df = spark.range(100)
val join = df.join(df, "id")
val plan = join.queryExecution.executedPlan
assert(plan.find(p =>
p.isInstanceOf[WholeStageCodegenExec] &&
p.asInstanceOf[WholeStageCodegenExec].codegenStageId == 0).isEmpty,
"codegen stage IDs should be preserved through ReuseExchange")
checkAnswer(join, df.toDF)
}
}
test("including codegen stage ID in generated class name should not regress codegen caching") {
import testImplicits._
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_USE_ID_IN_CLASS_NAME.key -> "true") {
// the same query run twice should produce identical code, which would imply a hit in
// the generated code cache.
val ds1 = spark.range(3).select('id + 2)
val code1 = genCode(ds1)
val ds2 = spark.range(3).select('id + 2)
val code2 = genCode(ds2) // same query shape as above, deliberately
assert(code1 == code2, "Should produce same code")
}
}
ignore("SPARK-23598: Codegen working for lots of aggregation operations without runtime errors") {
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
var df = Seq((8, "bat"), (15, "mouse"), (5, "horse")).toDF("age", "name")
for (i <- 0 until 70) {
df = df.groupBy("name").agg(avg("age").alias("age"))
}
assert(df.limit(1).collect() === Array(Row("bat", 8.0)))
}
}
ignore("SPARK-25767: Lazy evaluated stream of expressions handled correctly") {
val a = Seq(1).toDF("key")
val b = Seq((1, "a")).toDF("key", "value")
val c = Seq(1).toDF("key")
val ab = a.join(b, Stream("key"), "left")
val abc = ab.join(c, Seq("key"), "left")
checkAnswer(abc, Row(1, "a"))
}
test("SPARK-26680: Stream in groupBy does not cause StackOverflowError") {
val groupByCols = Stream(col("key"))
val df = Seq((1, 2), (2, 3), (1, 3)).toDF("key", "value")
.groupBy(groupByCols: _*)
.max("value")
checkAnswer(df, Seq(Row(1, 3), Row(2, 3)))
}
ignore("SPARK-26572: evaluate non-deterministic expressions for aggregate results") {
withSQLConf(
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> Long.MaxValue.toString,
SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
val baseTable = Seq(1, 1).toDF("idx")
// BroadcastHashJoinExec with a HashAggregateExec child containing no aggregate expressions
val distinctWithId = baseTable.distinct().withColumn("id", monotonically_increasing_id())
.join(baseTable, "idx")
assert(distinctWithId.queryExecution.executedPlan.collectFirst {
case WholeStageCodegenExec(
ProjectExec(_, BroadcastHashJoinExec(_, _, _, _, _, _: HashAggregateExec, _))) => true
}.isDefined)
checkAnswer(distinctWithId, Seq(Row(1, 0), Row(1, 0)))
// BroadcastHashJoinExec with a HashAggregateExec child containing a Final mode aggregate
// expression
val groupByWithId =
baseTable.groupBy("idx").sum().withColumn("id", monotonically_increasing_id())
.join(baseTable, "idx")
assert(groupByWithId.queryExecution.executedPlan.collectFirst {
case WholeStageCodegenExec(
ProjectExec(_, BroadcastHashJoinExec(_, _, _, _, _, _: HashAggregateExec, _))) => true
}.isDefined)
checkAnswer(groupByWithId, Seq(Row(1, 2, 0), Row(1, 2, 0)))
}
}
ignore("SPARK-28520: WholeStageCodegen does not work properly for LocalTableScanExec") {
// Case1: LocalTableScanExec is the root of a query plan tree.
// In this case, WholeStageCodegenExec should not be inserted
// as the direct parent of LocalTableScanExec.
val df = Seq(1, 2, 3).toDF
val rootOfExecutedPlan = df.queryExecution.executedPlan
// Ensure WholeStageCodegenExec is not inserted and
// LocalTableScanExec is still the root.
assert(rootOfExecutedPlan.isInstanceOf[LocalTableScanExec],
"LocalTableScanExec should be still the root.")
// Case2: The parent of a LocalTableScanExec supports WholeStageCodegen.
// In this case, the LocalTableScanExec should be within a WholeStageCodegen domain
// and no more InputAdapter is inserted as the direct parent of the LocalTableScanExec.
val aggedDF = Seq(1, 2, 3).toDF.groupBy("value").sum()
val executedPlan = aggedDF.queryExecution.executedPlan
// HashAggregateExec supports WholeStageCodegen and it's the parent of
// LocalTableScanExec so LocalTableScanExec should be within a WholeStageCodegen domain.
assert(
executedPlan.find {
case WholeStageCodegenExec(
HashAggregateExec(_, _, _, _, _, _, _: LocalTableScanExec)) => true
case _ => false
}.isDefined,
"LocalTableScanExec should be within a WholeStageCodegen domain.")
}
ignore("Give up splitting aggregate code if a parameter length goes over the limit") {
withSQLConf(
SQLConf.CODEGEN_SPLIT_AGGREGATE_FUNC.key -> "true",
SQLConf.CODEGEN_METHOD_SPLIT_THRESHOLD.key -> "1",
"spark.sql.CodeGenerator.validParamLength" -> "0") {
withTable("t") {
val expectedErrMsg = "Failed to split aggregate code into small functions"
Seq(
// Test case without keys
"SELECT AVG(v) FROM VALUES(1) t(v)",
          // Test case with keys
"SELECT k, AVG(v) FROM VALUES((1, 1)) t(k, v) GROUP BY k").foreach { query =>
val errMsg = intercept[IllegalStateException] {
sql(query).collect
}.getMessage
assert(errMsg.contains(expectedErrMsg))
}
}
}
}
ignore("Give up splitting subexpression code if a parameter length goes over the limit") {
withSQLConf(
SQLConf.CODEGEN_SPLIT_AGGREGATE_FUNC.key -> "false",
SQLConf.CODEGEN_METHOD_SPLIT_THRESHOLD.key -> "1",
"spark.sql.CodeGenerator.validParamLength" -> "0") {
withTable("t") {
val expectedErrMsg = "Failed to split subexpression code into small functions"
Seq(
// Test case without keys
"SELECT AVG(a + b), SUM(a + b + c) FROM VALUES((1, 1, 1)) t(a, b, c)",
          // Test case with keys
"SELECT k, AVG(a + b), SUM(a + b + c) FROM VALUES((1, 1, 1, 1)) t(k, a, b, c) " +
"GROUP BY k").foreach { query =>
val e = intercept[Exception] {
sql(query).collect
}.getCause
assert(e.isInstanceOf[IllegalStateException])
assert(e.getMessage.contains(expectedErrMsg))
}
}
}
}
}
|
Intel-bigdata/OAP
|
oap-native-sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala
|
Scala
|
apache-2.0
| 19,739
|
import sbt._
import Keys._
object BuildSettings {
val paradiseVersion = "2.0.1"
val buildSettings = Defaults.defaultSettings ++ Seq(
version := "1.0.0",
scalacOptions ++= Seq(""),
scalaVersion := "2.11.4",
resolvers += Resolver.sonatypeRepo("snapshots"),
resolvers += Resolver.sonatypeRepo("releases"),
addCompilerPlugin("org.scalamacros" % "paradise" % paradiseVersion cross CrossVersion.full),
incOptions := incOptions.value.withNameHashing(true)
)
}
object MyBuild extends Build {
import BuildSettings._
lazy val root: Project = Project(
"root",
file("."),
settings = buildSettings ++ Seq(
run <<= run in Compile in core
)
) aggregate(macros, core)
lazy val macros: Project = Project(
"macros",
file("macros"),
settings = buildSettings ++ Seq(
libraryDependencies <+= (scalaVersion)("org.scala-lang" % "scala-reflect" % _),
libraryDependencies ++= (
if (scalaVersion.value.startsWith("2.10")) List("org.scalamacros" %% "quasiquotes" % paradiseVersion)
else Nil
)
)
)
lazy val core: Project = Project(
"core",
file("core"),
settings = buildSettings
) dependsOn(macros)
}
|
twitter-forks/sbt
|
sbt/src/sbt-test/source-dependencies/macro-annotation/project/Build.scala
|
Scala
|
bsd-3-clause
| 1,209
|
/*
* Copyright 2009-2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb {
package builtin {
package snippet {
import _root_.scala.xml._
import _root_.net.liftweb.http._
object Loc extends DispatchSnippet {
def dispatch : DispatchIt = {
case _ => render _
}
def render(kids: NodeSeq) : NodeSeq =
S.attr.~("locid").map(_.text) match {
case Some(id) => S.loc(id, kids)
case _ => S.loc(kids.text, kids)
}
}
}
}
}
|
jeppenejsum/liftweb
|
framework/lift-base/lift-webkit/src/main/scala/net/liftweb/builtin/snippet/Loc.scala
|
Scala
|
apache-2.0
| 1,009
|
abstract class Foo {
def bar(): Unit = this match {
case Foo_1() => //do something
case Foo_2() => //do something
// Works fine
}
def baz(that: Foo): Unit = (this, that) match {
case (Foo_1(), _) => //do something
case (Foo_2(), _) => //do something
// match may not be exhaustive
}
}
case class Foo_1() extends Foo
case class Foo_2() extends Foo
|
dotty-staging/dotty
|
tests/pos-special/fatal-warnings/t10373.scala
|
Scala
|
apache-2.0
| 417
|
package chess
import Pos._
import format.Visual.addNewLines
class GameTest extends ChessTest {
"capture a piece" should {
"add it to the dead pieces" in {
val game = Game().playMoves(
E2 -> E4,
D7 -> D5,
E4 -> D5)
game must beSuccess.like {
case g ⇒ g.deads must haveTheSameElementsAs(List(D5 -> Black.pawn))
}
}
}
"recapture a piece" should {
"add both to the dead pieces" in {
val game = Game("""
bq
R""").playMoves(
A1 -> A2,
B2 -> A2)
game must beSuccess.like {
case g ⇒ g.deads must haveTheSameElementsAs(List(
A2 -> Black.bishop,
A2 -> White.rook))
}
}
}
"prevent castle by capturing a rook" should {
val game = Game("""
b
R K""", Black)
"can castle queenside" in {
game.board.history canCastle White on QueenSide must_== true
}
"can still castle queenside" in {
game.playMoves(B2 -> A3) must beSuccess.like {
case g ⇒ g.board.history canCastle White on QueenSide must_== true
}
}
"can not castle queenside anymore" in {
game.playMoves(B2 -> A1) must beSuccess.like {
case g ⇒ g.board.history canCastle White on QueenSide must_== false
}
}
}
}
|
cxd4/scalachess
|
src/test/scala/GameTest.scala
|
Scala
|
mit
| 1,278
|
package org.jetbrains.plugins.scala
package lang
package refactoring
package namesSuggester
import java.util.regex.{Matcher, Pattern}
import com.intellij.openapi.project.Project
import com.intellij.psi.search.GlobalSearchScope
import com.intellij.psi.{JavaPsiFacade, PsiClass, PsiNamedElement}
import org.atteo.evo.inflector.English
import org.jetbrains.plugins.scala.decompiler.DecompilerUtil
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScLiteral, ScReferenceElement}
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScTypeAliasDefinition
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api._
import org.jetbrains.plugins.scala.lang.psi.types.result.{Success, TypingContext}
import org.jetbrains.plugins.scala.lang.refactoring.util.{NameValidator, ScalaNamesUtil}
import org.jetbrains.plugins.scala.project.ProjectExt
import org.jetbrains.plugins.scala.util.ScEquivalenceUtil
import scala.annotation.tailrec
import scala.collection.mutable.ArrayBuffer
/**
* @author Alexander Podkhalyuzin
* @since 26.06.2008
*/
object NameSuggester {
private def emptyValidator(project: Project) = new NameValidator {
def getProject(): Project = project
def validateName(name: String, increaseNumber: Boolean): String = name
}
def suggestNames(expr: ScExpression): Array[String] = suggestNames(expr, emptyValidator(expr.getProject))
def suggestNames(expr: ScExpression, validator: NameValidator): Array[String] = {
val names = new ArrayBuffer[String]
val types = new ArrayBuffer[ScType]()
val typez = expr.getType(TypingContext.empty).getOrElse(null)
if (typez != null && typez != Unit) types += typez
expr.getTypeWithoutImplicits().foreach(types += _)
expr.getTypeIgnoreBaseType(TypingContext.empty).foreach(types += _)
if (typez != null && typez == Unit) types += typez
for (tpe <- types.reverse) {generateNamesByType(tpe)(names, validator)}
generateNamesByExpr(expr)(names, validator)
val result = (for (name <- names if name != "" && ScalaNamesUtil.isIdentifier(name) || name == "class") yield {
if (name != "class") name else "clazz"
}).toList.reverse.toArray
if (result.length > 0) result
else Array(validator.validateName("value", increaseNumber = true))
}
def suggestNamesByType(typez: ScType): Array[String] = {
val names = new ArrayBuffer[String]
generateNamesByType(typez)(names, emptyValidator(DecompilerUtil.obtainProject))
val result = names.map {
case "class" => "clazz"
case s => s
}.filter(name => name != "" && ScalaNamesUtil.isIdentifier(name))
if (result.isEmpty) {
Array("value")
} else result.reverse.toArray
}
private def add(s: String)(implicit validator: NameValidator, names: ArrayBuffer[String]) {
val name = validator.validateName(s, increaseNumber = true)
if (!names.contains(name))
names += name
}
private def namesByType(tpe: ScType, withPlurals: Boolean = true, shortVersion: Boolean = true)
(implicit validator: NameValidator): ArrayBuffer[String] = {
val names = ArrayBuffer[String]()
generateNamesByType(tpe, shortVersion)(names, validator, withPlurals)
names
}
private def generateNamesByType(typez: ScType, shortVersion: Boolean = true)
(implicit names: ArrayBuffer[String],
validator: NameValidator,
withPlurals: Boolean = true) {
val project = validator.getProject()
implicit val typeSystem = project.typeSystem
def addPlurals(arg: ScType) {
def addPlural(s: String) {
if (!withPlurals) add(s)
else {
s match {
case "x" => add("xs")
case "index" => add("indices")
case _ => add(English.plural(s))
}
}
}
arg match {
case valType: ValType => addPlural(valType.name.toLowerCase)
case TupleType(_) => addPlural("tuple")
case FunctionType(_, _) => addPlural("function")
case ScDesignatorType(e) =>
val camelNames = getCamelNames(e.name)
camelNames.foreach(addPlural)
case _ =>
namesByType(arg, withPlurals = false, shortVersion = false).foreach(addPlural)
}
}
def addFromTwoTypes(tp1: ScType, tp2: ScType, separator: String) {
for {
leftName <- namesByType(tp1, shortVersion = false)
rightName <- namesByType(tp2, shortVersion = false)
} {
add(s"$leftName$separator${rightName.capitalize}")
}
}
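    // Editor's note (illustrative only, not in the original file): addFromTwoTypes joins the
    // operand names with the given separator, e.g. Map[String, Int] suggests "stringToInt"
    // and Either[Boolean, Long] suggests "booleanOrLong".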
def addForFunctionType(ret: ScType, params: Seq[ScType]) = {
add("function")
params match {
case Seq() =>
generateNamesByType(ret)
case Seq(param) =>
addFromTwoTypes(param, ret, "To")
case _ =>
}
}
def addForParameterizedType(baseType: ScType, args: Seq[ScType]) {
baseType match {
case ScProjectionType(p, ta: ScTypeAliasDefinition, _) =>
ta.aliasedType match {
case Success(ExtractClass(c), _) =>
generateNamesByType(baseType)
inner(c)
case _ => generateNamesByType(baseType)
}
case ScDesignatorType(c: PsiClass) =>
generateNamesByType(baseType)
inner(c)
case _ => generateNamesByType(baseType)
}
def inner(classOfBaseType: PsiClass) {
val arrayClassName = "scala.Array"
val baseCollectionClassName = "scala.collection.GenTraversableOnce"
val baseJavaCollectionClassName = "java.lang.Iterable"
val baseMapClassName = "scala.collection.GenMap"
val baseJavaMapClassName = "java.util.Map"
val eitherClassName = "scala.util.Either"
def isInheritor(c: PsiClass, baseFqn: String) = {
val baseClass = JavaPsiFacade.getInstance(project).findClass(baseFqn, GlobalSearchScope.allScope(project))
baseClass != null && (c.isInheritor(baseClass, true) || ScEquivalenceUtil.areClassesEquivalent(c, baseClass))
}
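      // Editor's note (illustrative only, not in the original file): the prefix table below
      // produces suggestions such as "maybeUser" for Option[User], "eventualResponse" for
      // Future[Response], and "triedConfig" for Try[Config].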
val needPrefix = Map(
"scala.Option" -> "maybe",
"scala.Some" -> "some",
"scala.concurrent.Future" -> "eventual",
"scala.concurrent.Promise" -> "promised",
"scala.util.Try" -> "tried")
classOfBaseType match {
case c if c.qualifiedName == arrayClassName && args.nonEmpty =>
addPlurals(args.head)
case c if needPrefix.keySet.contains(c.qualifiedName) && args.nonEmpty =>
for {
s <- namesByType(args.head, shortVersion = false)
prefix = needPrefix(c.qualifiedName)
} {
add(prefix + s.capitalize)
}
case c if c.qualifiedName == eitherClassName && args.size == 2 =>
addFromTwoTypes(args.head, args(1), "Or")
case c if (isInheritor(c, baseMapClassName) || isInheritor(c, baseJavaMapClassName))
&& args.size == 2 =>
addFromTwoTypes(args.head, args(1), "To")
case c if (isInheritor(c, baseCollectionClassName) || isInheritor(c, baseJavaCollectionClassName))
&& args.size == 1 =>
addPlurals(args.head)
case _ =>
}
}
}
def addLowerCase(name: String, length: Int = 1) = {
val lowerCaseName = name.toLowerCase
add(if (shortVersion) lowerCaseName.substring(0, length) else lowerCaseName)
}
def addForNamedElementString(name: String) = if (name != null && name.toUpperCase == name) {
add(deleteNonLetterFromString(name).toLowerCase)
} else if (name == "String") {
addLowerCase(name)
} else {
generateCamelNames(name)
}
def addForNamedElement(named: PsiNamedElement) = addForNamedElementString(named.name)
def addValTypeName(valType: ValType, length: Int = 1) = addLowerCase(valType.name, length)
typez match {
case Int => addValTypeName(Int)
case Unit => add(Unit.name)
case Byte => add(Byte.name)
case Long => addValTypeName(Long)
case Float => addValTypeName(Float, 2)
case Double => addValTypeName(Double)
case Short => addValTypeName(Short, 2)
case Boolean => addValTypeName(Boolean)
case Char => addValTypeName(Char)
case TupleType(comps) => add("tuple")
case FunctionType(ret, params) => addForFunctionType(ret, params)
case ScDesignatorType(e) => addForNamedElement(e)
case TypeParameterType(name, typeParams, lowerType, upperType, ptp) => addForNamedElementString(name)
case ScProjectionType(_, e, _) => addForNamedElement(e)
case ParameterizedType(tp, args) =>
addForParameterizedType(tp, args)
case JavaArrayType(argument) => addPlurals(argument)
case ScCompoundType(comps, _, _) =>
if (comps.nonEmpty) generateNamesByType(comps.head)
case _ =>
}
}
@tailrec
private def generateNamesByExpr(expr: ScExpression)(implicit names: ArrayBuffer[String], validator: NameValidator) {
expr match {
case _: ScThisReference => add("thisInstance")
case _: ScSuperReference => add("superInstance")
//TODO: probably replace
case x: ScReferenceElement if x.refName.inName != null =>
val name = x.refName.inName //TODO: probably replace
if (name != null && name.toUpperCase == name) {
add(name.toLowerCase)
} else {
generateCamelNames(name)
}
case x: ScMethodCall =>
generateNamesByExpr(x.getEffectiveInvokedExpr)
case l: ScLiteral if l.isString =>
l.getValue match {
case s: String if ScalaNamesUtil.isIdentifier(s.toLowerCase) => add(s.toLowerCase)
case _ =>
}
case _ => expr.getContext match {
case x: ScAssignStmt => x.assignName.foreach(add)
case x: ScArgumentExprList => x.matchedParameters.find(_._1 == expr) match {
case Some((_, parameter)) => add(parameter.name)
case _ =>
}
case _ =>
}
}
}
private def generateCamelNames(name: String)(implicit names: ArrayBuffer[String], validator: NameValidator) {
if (name == "") return
val s = if (Array("get", "set", "is").exists(name.startsWith))
name.charAt(0) match {
case 'g' | 's' => name.substring(3, name.length)
case _ => name.substring(2, name.length)
}
else name
for (i <- 0 until s.length) {
if (i == 0) {
val candidate = s.substring(0, 1).toLowerCase + s.substring(1)
add(deleteNonLetterFromStringFromTheEnd(candidate))
}
else if (s(i) >= 'A' && s(i) <= 'Z') {
val candidate = s.substring(i, i + 1).toLowerCase + s.substring(i + 1)
add(deleteNonLetterFromStringFromTheEnd(candidate))
}
}
}
private def getCamelNames(name: String): Seq[String] = {
if (name == "") return Seq.empty
val names = new ArrayBuffer[String]
val s = if (Array("get", "set", "is").exists(name.startsWith))
name.charAt(0) match {
case 'g' | 's' => name.substring(3, name.length)
case _ => name.substring(2, name.length)
}
else name
for (i <- 0 until s.length) {
if (i == 0) {
val candidate = s.substring(0, 1).toLowerCase + s.substring(1)
names += deleteNonLetterFromStringFromTheEnd(candidate)
}
else if (s(i) >= 'A' && s(i) <= 'Z') {
val candidate = s.substring(i, i + 1).toLowerCase + s.substring(i + 1)
names += deleteNonLetterFromStringFromTheEnd(candidate)
}
}
names
}
private def deleteNonLetterFromString(s: String): String = {
val pattern: Pattern = Pattern.compile("[^a-zA-Z]")
val matcher: Matcher = pattern.matcher(s)
matcher.replaceAll("")
}
private def deleteNonLetterFromStringFromTheEnd(s: String): String = {
s.reverse.dropWhile(!_.isLetter).reverse
}
}
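// Editor's note: a minimal, standalone sketch (not part of the original file) of the
// camel-case splitting idea behind generateCamelNames/getCamelNames above. The object name
// and the simplified prefix handling are illustrative assumptions, not plugin API.
object CamelNameSketch extends App {
  def camelNames(name: String): Seq[String] = {
    if (name.isEmpty) return Seq.empty
    // Drop a leading "get"/"set"/"is" accessor prefix, as the suggester does.
    val s =
      if (Seq("get", "set", "is").exists(name.startsWith)) name.dropWhile(_.isLower)
      else name
    // The start of the name and every upper-case boundary yield a candidate.
    (0 until s.length).collect {
      case 0                 => s"${s.head.toLower}${s.tail}"
      case i if s(i).isUpper => s"${s(i).toLower}${s.substring(i + 1)}"
    }
  }

  println(camelNames("getUserName")) // Vector(userName, name)
  println(camelNames("isValid"))     // Vector(valid)
}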
|
katejim/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/refactoring/namesSuggester/NameSuggester.scala
|
Scala
|
apache-2.0
| 12,068
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import scala.collection.JavaConverters._
import org.apache.hadoop.hive.ql.udf.UDAFPercentile
import org.apache.hadoop.hive.ql.udf.generic.{AbstractGenericUDAFResolver, GenericUDAFEvaluator, GenericUDAFMax}
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.{AggregationBuffer, Mode}
import org.apache.hadoop.hive.ql.util.JavaDataModel
import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspector, ObjectInspectorFactory}
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo
import test.org.apache.spark.sql.MyDoubleAvg
import org.apache.spark.sql.{AnalysisException, QueryTest, Row}
import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
import org.apache.spark.sql.execution.aggregate.ObjectHashAggregateExec
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SQLTestUtils
class HiveUDAFSuite extends QueryTest
with TestHiveSingleton with SQLTestUtils with AdaptiveSparkPlanHelper {
import testImplicits._
protected override def beforeAll(): Unit = {
super.beforeAll()
sql(s"CREATE TEMPORARY FUNCTION mock AS '${classOf[MockUDAF].getName}'")
sql(s"CREATE TEMPORARY FUNCTION hive_max AS '${classOf[GenericUDAFMax].getName}'")
sql(s"CREATE TEMPORARY FUNCTION mock2 AS '${classOf[MockUDAF2].getName}'")
Seq(
(0: Integer) -> "val_0",
(1: Integer) -> "val_1",
(2: Integer) -> null,
(3: Integer) -> null
).toDF("key", "value").repartition(2).createOrReplaceTempView("t")
}
protected override def afterAll(): Unit = {
try {
sql(s"DROP TEMPORARY FUNCTION IF EXISTS mock")
sql(s"DROP TEMPORARY FUNCTION IF EXISTS hive_max")
} finally {
super.afterAll()
}
}
test("built-in Hive UDAF") {
val df = sql("SELECT key % 2, hive_max(key) FROM t GROUP BY key % 2")
val aggs = collect(df.queryExecution.executedPlan) {
case agg: ObjectHashAggregateExec => agg
}
// There should be two aggregate operators, one for partial aggregation, and the other for
// global aggregation.
assert(aggs.length == 2)
checkAnswer(df, Seq(
Row(0, 2),
Row(1, 3)
))
}
test("customized Hive UDAF") {
val df = sql("SELECT key % 2, mock(value) FROM t GROUP BY key % 2")
val aggs = collect(df.queryExecution.executedPlan) {
case agg: ObjectHashAggregateExec => agg
}
// There should be two aggregate operators, one for partial aggregation, and the other for
// global aggregation.
assert(aggs.length == 2)
checkAnswer(df, Seq(
Row(0, Row(1, 1)),
Row(1, Row(1, 1))
))
}
test("SPARK-24935: customized Hive UDAF with two aggregation buffers") {
withTempView("v") {
spark.range(100).createTempView("v")
val df = sql("SELECT id % 2, mock2(id) FROM v GROUP BY id % 2")
val aggs = collect(df.queryExecution.executedPlan) {
case agg: ObjectHashAggregateExec => agg
}
// There should be two aggregate operators, one for partial aggregation, and the other for
// global aggregation.
assert(aggs.length == 2)
withSQLConf(SQLConf.OBJECT_AGG_SORT_BASED_FALLBACK_THRESHOLD.key -> "1") {
checkAnswer(df, Seq(
Row(0, Row(50, 0)),
Row(1, Row(50, 0))
))
}
withSQLConf(SQLConf.OBJECT_AGG_SORT_BASED_FALLBACK_THRESHOLD.key -> "100") {
checkAnswer(df, Seq(
Row(0, Row(50, 0)),
Row(1, Row(50, 0))
))
}
}
}
test("call JAVA UDAF") {
withTempView("temp") {
withUserDefinedFunction("myDoubleAvg" -> false) {
spark.range(1, 10).toDF("value").createOrReplaceTempView("temp")
sql(s"CREATE FUNCTION myDoubleAvg AS '${classOf[MyDoubleAvg].getName}'")
checkAnswer(
spark.sql("SELECT default.myDoubleAvg(value) as my_avg from temp"),
Row(105.0))
}
}
}
test("non-deterministic children expressions of UDAF") {
withTempView("view1") {
spark.range(1).selectExpr("id as x", "id as y").createTempView("view1")
withUserDefinedFunction("testUDAFPercentile" -> true) {
// non-deterministic children of Hive UDAF
sql(s"CREATE TEMPORARY FUNCTION testUDAFPercentile AS '${classOf[UDAFPercentile].getName}'")
val e1 = intercept[AnalysisException] {
sql("SELECT testUDAFPercentile(x, rand()) from view1 group by y")
}.getMessage
assert(Seq("nondeterministic expression",
"should not appear in the arguments of an aggregate function").forall(e1.contains))
}
}
}
test("SPARK-27907 HiveUDAF with 0 rows throws NPE") {
withTable("abc") {
sql("create table abc(a int)")
checkAnswer(sql("select histogram_numeric(a,2) from abc"), Row(null))
sql("insert into abc values (1)")
checkAnswer(sql("select histogram_numeric(a,2) from abc"), Row(Row(1.0, 1.0) :: Nil))
checkAnswer(sql("select histogram_numeric(a,2) from abc where a=3"), Row(null))
}
}
}
/**
* A testing Hive UDAF that computes the counts of both non-null values and nulls of a given column.
*/
class MockUDAF extends AbstractGenericUDAFResolver {
override def getEvaluator(info: Array[TypeInfo]): GenericUDAFEvaluator = new MockUDAFEvaluator
}
class MockUDAF2 extends AbstractGenericUDAFResolver {
override def getEvaluator(info: Array[TypeInfo]): GenericUDAFEvaluator = new MockUDAFEvaluator2
}
class MockUDAFBuffer(var nonNullCount: Long, var nullCount: Long)
extends GenericUDAFEvaluator.AbstractAggregationBuffer {
override def estimate(): Int = JavaDataModel.PRIMITIVES2 * 2
}
class MockUDAFBuffer2(var nonNullCount: Long, var nullCount: Long)
extends GenericUDAFEvaluator.AbstractAggregationBuffer {
override def estimate(): Int = JavaDataModel.PRIMITIVES2 * 2
}
class MockUDAFEvaluator extends GenericUDAFEvaluator {
private val nonNullCountOI = PrimitiveObjectInspectorFactory.javaLongObjectInspector
private val nullCountOI = PrimitiveObjectInspectorFactory.javaLongObjectInspector
private val bufferOI = {
val fieldNames = Seq("nonNullCount", "nullCount").asJava
val fieldOIs = Seq(nonNullCountOI: ObjectInspector, nullCountOI: ObjectInspector).asJava
ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, fieldOIs)
}
private val nonNullCountField = bufferOI.getStructFieldRef("nonNullCount")
private val nullCountField = bufferOI.getStructFieldRef("nullCount")
override def getNewAggregationBuffer: AggregationBuffer = new MockUDAFBuffer(0L, 0L)
override def reset(agg: AggregationBuffer): Unit = {
val buffer = agg.asInstanceOf[MockUDAFBuffer]
buffer.nonNullCount = 0L
buffer.nullCount = 0L
}
override def init(mode: Mode, parameters: Array[ObjectInspector]): ObjectInspector = bufferOI
override def iterate(agg: AggregationBuffer, parameters: Array[AnyRef]): Unit = {
val buffer = agg.asInstanceOf[MockUDAFBuffer]
if (parameters.head eq null) {
buffer.nullCount += 1L
} else {
buffer.nonNullCount += 1L
}
}
override def merge(agg: AggregationBuffer, partial: Object): Unit = {
if (partial ne null) {
val nonNullCount = nonNullCountOI.get(bufferOI.getStructFieldData(partial, nonNullCountField))
val nullCount = nullCountOI.get(bufferOI.getStructFieldData(partial, nullCountField))
val buffer = agg.asInstanceOf[MockUDAFBuffer]
buffer.nonNullCount += nonNullCount
buffer.nullCount += nullCount
}
}
override def terminatePartial(agg: AggregationBuffer): AnyRef = {
val buffer = agg.asInstanceOf[MockUDAFBuffer]
Array[Object](buffer.nonNullCount: java.lang.Long, buffer.nullCount: java.lang.Long)
}
override def terminate(agg: AggregationBuffer): AnyRef = terminatePartial(agg)
}
// Same as MockUDAFEvaluator but using two aggregation buffers, one for PARTIAL1 and the other
// for PARTIAL2.
class MockUDAFEvaluator2 extends GenericUDAFEvaluator {
private val nonNullCountOI = PrimitiveObjectInspectorFactory.javaLongObjectInspector
private val nullCountOI = PrimitiveObjectInspectorFactory.javaLongObjectInspector
private var aggMode: Mode = null
private val bufferOI = {
val fieldNames = Seq("nonNullCount", "nullCount").asJava
val fieldOIs = Seq(nonNullCountOI: ObjectInspector, nullCountOI: ObjectInspector).asJava
ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, fieldOIs)
}
private val nonNullCountField = bufferOI.getStructFieldRef("nonNullCount")
private val nullCountField = bufferOI.getStructFieldRef("nullCount")
override def getNewAggregationBuffer: AggregationBuffer = {
// These 2 modes consume original data.
if (aggMode == Mode.PARTIAL1 || aggMode == Mode.COMPLETE) {
new MockUDAFBuffer(0L, 0L)
} else {
new MockUDAFBuffer2(0L, 0L)
}
}
override def reset(agg: AggregationBuffer): Unit = {
val buffer = agg.asInstanceOf[MockUDAFBuffer]
buffer.nonNullCount = 0L
buffer.nullCount = 0L
}
override def init(mode: Mode, parameters: Array[ObjectInspector]): ObjectInspector = {
aggMode = mode
bufferOI
}
override def iterate(agg: AggregationBuffer, parameters: Array[AnyRef]): Unit = {
val buffer = agg.asInstanceOf[MockUDAFBuffer]
if (parameters.head eq null) {
buffer.nullCount += 1L
} else {
buffer.nonNullCount += 1L
}
}
override def merge(agg: AggregationBuffer, partial: Object): Unit = {
if (partial ne null) {
val nonNullCount = nonNullCountOI.get(bufferOI.getStructFieldData(partial, nonNullCountField))
val nullCount = nullCountOI.get(bufferOI.getStructFieldData(partial, nullCountField))
val buffer = agg.asInstanceOf[MockUDAFBuffer2]
buffer.nonNullCount += nonNullCount
buffer.nullCount += nullCount
}
}
  // Because this method is called for both the PARTIAL1 and PARTIAL2 states, it has to
  // check the concrete class of the aggregation buffer before casting.
override def terminatePartial(agg: AggregationBuffer): AnyRef = {
var result: AnyRef = null
if (agg.getClass.toString.contains("MockUDAFBuffer2")) {
val buffer = agg.asInstanceOf[MockUDAFBuffer2]
result = Array[Object](buffer.nonNullCount: java.lang.Long, buffer.nullCount: java.lang.Long)
} else {
val buffer = agg.asInstanceOf[MockUDAFBuffer]
result = Array[Object](buffer.nonNullCount: java.lang.Long, buffer.nullCount: java.lang.Long)
}
result
}
override def terminate(agg: AggregationBuffer): AnyRef = {
val buffer = agg.asInstanceOf[MockUDAFBuffer2]
Array[Object](buffer.nonNullCount: java.lang.Long, buffer.nullCount: java.lang.Long)
}
}
|
goldmedal/spark
|
sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDAFSuite.scala
|
Scala
|
apache-2.0
| 11,674
|
package com.twitter.finatra.json.internal.caseclass.validation.validators
import com.twitter.finatra.json.internal.caseclass.validation.validators.TimeGranularityValidator._
import com.twitter.finatra.validation.{ErrorCode, TimeGranularity, ValidationMessageResolver, ValidationResult, Validator}
import java.util.concurrent.TimeUnit
import java.util.concurrent.TimeUnit._
import org.joda.time.{DateTime, DateTimeZone}
object TimeGranularityValidator {
def errorMessage(
resolver: ValidationMessageResolver,
timeGranularity: TimeUnit,
value: DateTime) = {
resolver.resolve(
classOf[TimeGranularity],
value,
singularize(timeGranularity))
}
private def singularize(timeUnit: TimeUnit): String = {
val timeUnitStr = timeUnit.toString.toLowerCase
timeUnitStr.substring(0, timeUnitStr.length - 1)
}
}
/**
 * Validates that a given value is aligned to a given time granularity (e.g., days, hours, seconds).
*/
class TimeGranularityValidator(
validationMessageResolver: ValidationMessageResolver,
annotation: TimeGranularity)
extends Validator[TimeGranularity, DateTime](
validationMessageResolver,
annotation) {
private val timeGranularity = annotation.value()
/* Public */
override def isValid(value: DateTime) = {
ValidationResult(
isGranularity(value),
errorMessage(
validationMessageResolver,
timeGranularity,
value),
ErrorCode.InvalidTimeGranularity(value, timeGranularity))
}
/* Private */
private def toNanos(value: Long, timeUnit: TimeUnit): Long = {
NANOSECONDS.convert(value, timeUnit)
}
private def isGranularity(value: DateTime): Boolean = {
val utcDateTime = value.toDateTime(DateTimeZone.UTC)
toNanos(utcDateTime.getMillis, MILLISECONDS) % toNanos(1, timeGranularity) == 0
}
}
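// Editor's note: a standalone sketch (not part of the original file) of the granularity
// check above -- a value matches a granularity when its UTC instant divides evenly by one
// unit of that granularity. Object and value names here are illustrative.
object TimeGranularitySketch extends App {
  import java.util.concurrent.TimeUnit
  import java.util.concurrent.TimeUnit._
  import org.joda.time.{DateTime, DateTimeZone}

  def isGranularity(value: DateTime, granularity: TimeUnit): Boolean = {
    val utcMillis = value.toDateTime(DateTimeZone.UTC).getMillis
    NANOSECONDS.convert(utcMillis, MILLISECONDS) % NANOSECONDS.convert(1, granularity) == 0
  }

  val midnight = new DateTime(2016, 1, 10, 0, 0, DateTimeZone.UTC)
  println(isGranularity(midnight, DAYS))              // true: aligned to a whole day
  println(isGranularity(midnight.plusHours(3), DAYS)) // false: 3-hour remainder
}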
|
deanh/finatra
|
jackson/src/main/scala/com/twitter/finatra/json/internal/caseclass/validation/validators/TimeGranularityValidator.scala
|
Scala
|
apache-2.0
| 1,827
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.master.ui
import akka.util.Duration
import javax.servlet.http.HttpServletRequest
import org.eclipse.jetty.server.{Handler, Server}
import org.apache.spark.{Logging}
import org.apache.spark.deploy.master.Master
import org.apache.spark.ui.JettyUtils
import org.apache.spark.ui.JettyUtils._
import org.apache.spark.util.Utils
/**
* Web UI server for the standalone master.
*/
private[spark]
class MasterWebUI(val master: Master, requestedPort: Int) extends Logging {
implicit val timeout = Duration.create(
System.getProperty("spark.akka.askTimeout", "10").toLong, "seconds")
val host = Utils.localHostName()
val port = requestedPort
val masterActorRef = master.self
var server: Option[Server] = None
var boundPort: Option[Int] = None
val applicationPage = new ApplicationPage(this)
val indexPage = new IndexPage(this)
def start() {
try {
val (srv, bPort) = JettyUtils.startJettyServer("0.0.0.0", port, handlers)
server = Some(srv)
boundPort = Some(bPort)
logInfo("Started Master web UI at http://%s:%d".format(host, boundPort.get))
} catch {
case e: Exception =>
logError("Failed to create Master JettyUtils", e)
System.exit(1)
}
}
val metricsHandlers = master.masterMetricsSystem.getServletHandlers ++
master.applicationMetricsSystem.getServletHandlers
val handlers = metricsHandlers ++ Array[(String, Handler)](
("/static", createStaticHandler(MasterWebUI.STATIC_RESOURCE_DIR)),
("/app/json", (request: HttpServletRequest) => applicationPage.renderJson(request)),
("/app", (request: HttpServletRequest) => applicationPage.render(request)),
("/json", (request: HttpServletRequest) => indexPage.renderJson(request)),
("*", (request: HttpServletRequest) => indexPage.render(request))
)
def stop() {
server.foreach(_.stop())
}
}
private[spark] object MasterWebUI {
val STATIC_RESOURCE_DIR = "org/apache/spark/ui/static"
}
|
windeye/spark
|
core/src/main/scala/org/apache/spark/deploy/master/ui/MasterWebUI.scala
|
Scala
|
apache-2.0
| 2,789
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.io._
import java.net.URI
import java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}
import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}
import javax.ws.rs.core.UriBuilder
import scala.collection.JavaConverters._
import scala.collection.Map
import scala.collection.immutable
import scala.collection.mutable.HashMap
import scala.language.implicitConversions
import scala.reflect.{classTag, ClassTag}
import scala.util.control.NonFatal
import com.google.common.collect.MapMaker
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}
import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}
import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}
import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
import org.apache.logging.log4j.Level
import org.apache.spark.annotation.{DeveloperApi, Experimental}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}
import org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}
import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Tests._
import org.apache.spark.internal.config.UI._
import org.apache.spark.internal.plugin.PluginContainer
import org.apache.spark.io.CompressionCodec
import org.apache.spark.launcher.JavaModuleOptions
import org.apache.spark.metrics.source.JVMCPUSource
import org.apache.spark.partial.{ApproximateEvaluator, PartialResult}
import org.apache.spark.rdd._
import org.apache.spark.resource._
import org.apache.spark.resource.ResourceUtils._
import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend
import org.apache.spark.scheduler.local.LocalSchedulerBackend
import org.apache.spark.shuffle.ShuffleDataIOUtils
import org.apache.spark.shuffle.api.ShuffleDriverComponents
import org.apache.spark.status.{AppStatusSource, AppStatusStore}
import org.apache.spark.status.api.v1.ThreadStackTrace
import org.apache.spark.storage._
import org.apache.spark.storage.BlockManagerMessages.TriggerThreadDump
import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}
import org.apache.spark.util._
import org.apache.spark.util.logging.DriverLogger
/**
* Main entry point for Spark functionality. A SparkContext represents the connection to a Spark
* cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster.
*
* @note Only one `SparkContext` should be active per JVM. You must `stop()` the
* active `SparkContext` before creating a new one.
* @param config a Spark Config object describing the application configuration. Any settings in
 * this config override the default configs as well as system properties.
*/
class SparkContext(config: SparkConf) extends Logging {
// The call site where this SparkContext was constructed.
private val creationSite: CallSite = Utils.getCallSite()
if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) {
// In order to prevent SparkContext from being created in executors.
SparkContext.assertOnDriver()
}
// In order to prevent multiple SparkContexts from being active at the same time, mark this
// context as having started construction.
// NOTE: this must be placed at the beginning of the SparkContext constructor.
SparkContext.markPartiallyConstructed(this)
val startTime = System.currentTimeMillis()
private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false)
private[spark] def assertNotStopped(): Unit = {
if (stopped.get()) {
val activeContext = SparkContext.activeContext.get()
val activeCreationSite =
if (activeContext == null) {
"(No active SparkContext.)"
} else {
activeContext.creationSite.longForm
}
throw new IllegalStateException(
s"""Cannot call methods on a stopped SparkContext.
|This stopped SparkContext was created at:
|
|${creationSite.longForm}
|
|The currently active SparkContext was created at:
|
|$activeCreationSite
""".stripMargin)
}
}
/**
* Create a SparkContext that loads settings from system properties (for instance, when
* launching with ./bin/spark-submit).
*/
def this() = this(new SparkConf())
/**
* Alternative constructor that allows setting common Spark properties directly
*
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI
* @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters
*/
def this(master: String, appName: String, conf: SparkConf) =
this(SparkContext.updatedConf(conf, master, appName))
/**
* Alternative constructor that allows setting common Spark properties directly
*
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI.
* @param sparkHome Location where Spark is installed on cluster nodes.
* @param jars Collection of JARs to send to the cluster. These can be paths on the local file
* system or HDFS, HTTP, HTTPS, or FTP URLs.
* @param environment Environment variables to set on worker nodes.
*/
def this(
master: String,
appName: String,
sparkHome: String = null,
jars: Seq[String] = Nil,
environment: Map[String, String] = Map()) = {
this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment))
}
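  // Editor's note (illustrative sketch, not part of Spark): typical driver-side construction
  // via the primary constructor; the master URL and app name below are arbitrary examples.
  //
  //   val conf = new SparkConf().setAppName("example-app").setMaster("local[2]")
  //   val sc   = new SparkContext(conf)
  //   try println(sc.parallelize(1 to 100).sum()) finally sc.stop()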
// The following constructors are required when Java code accesses SparkContext directly.
// Please see SI-4278
/**
* Alternative constructor that allows setting common Spark properties directly
*
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI.
*/
private[spark] def this(master: String, appName: String) =
this(master, appName, null, Nil, Map())
/**
* Alternative constructor that allows setting common Spark properties directly
*
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI.
* @param sparkHome Location where Spark is installed on cluster nodes.
*/
private[spark] def this(master: String, appName: String, sparkHome: String) =
this(master, appName, sparkHome, Nil, Map())
/**
* Alternative constructor that allows setting common Spark properties directly
*
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI.
* @param sparkHome Location where Spark is installed on cluster nodes.
* @param jars Collection of JARs to send to the cluster. These can be paths on the local file
* system or HDFS, HTTP, HTTPS, or FTP URLs.
*/
private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) =
this(master, appName, sparkHome, jars, Map())
// log out Spark Version in Spark driver log
logInfo(s"Running Spark version $SPARK_VERSION")
/* ------------------------------------------------------------------------------------- *
| Private variables. These variables keep the internal state of the context, and are |
| not accessible by the outside world. They're mutable since we want to initialize all |
| of them to some neutral value ahead of time, so that calling "stop()" while the |
| constructor is still running is safe. |
* ------------------------------------------------------------------------------------- */
private var _conf: SparkConf = _
private var _eventLogDir: Option[URI] = None
private var _eventLogCodec: Option[String] = None
private var _listenerBus: LiveListenerBus = _
private var _env: SparkEnv = _
private var _statusTracker: SparkStatusTracker = _
private var _progressBar: Option[ConsoleProgressBar] = None
private var _ui: Option[SparkUI] = None
private var _hadoopConfiguration: Configuration = _
private var _executorMemory: Int = _
private var _schedulerBackend: SchedulerBackend = _
private var _taskScheduler: TaskScheduler = _
private var _heartbeatReceiver: RpcEndpointRef = _
@volatile private var _dagScheduler: DAGScheduler = _
private var _applicationId: String = _
private var _applicationAttemptId: Option[String] = None
private var _eventLogger: Option[EventLoggingListener] = None
private var _driverLogger: Option[DriverLogger] = None
private var _executorAllocationManager: Option[ExecutorAllocationManager] = None
private var _cleaner: Option[ContextCleaner] = None
private var _listenerBusStarted: Boolean = false
private var _jars: Seq[String] = _
private var _files: Seq[String] = _
private var _archives: Seq[String] = _
private var _shutdownHookRef: AnyRef = _
private var _statusStore: AppStatusStore = _
private var _heartbeater: Heartbeater = _
private var _resources: immutable.Map[String, ResourceInformation] = _
private var _shuffleDriverComponents: ShuffleDriverComponents = _
private var _plugins: Option[PluginContainer] = None
private var _resourceProfileManager: ResourceProfileManager = _
/* ------------------------------------------------------------------------------------- *
| Accessors and public fields. These provide access to the internal state of the |
| context. |
* ------------------------------------------------------------------------------------- */
private[spark] def conf: SparkConf = _conf
/**
* Return a copy of this SparkContext's configuration. The configuration ''cannot'' be
* changed at runtime.
*/
def getConf: SparkConf = conf.clone()
def resources: Map[String, ResourceInformation] = _resources
def jars: Seq[String] = _jars
def files: Seq[String] = _files
def archives: Seq[String] = _archives
def master: String = _conf.get("spark.master")
def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE)
def appName: String = _conf.get("spark.app.name")
private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED)
private[spark] def eventLogDir: Option[URI] = _eventLogDir
private[spark] def eventLogCodec: Option[String] = _eventLogCodec
def isLocal: Boolean = Utils.isLocalMaster(_conf)
/**
* @return true if context is stopped or in the midst of stopping.
*/
def isStopped: Boolean = stopped.get()
private[spark] def statusStore: AppStatusStore = _statusStore
// An asynchronous listener bus for Spark events
private[spark] def listenerBus: LiveListenerBus = _listenerBus
// This function allows components created by SparkEnv to be mocked in unit tests:
private[spark] def createSparkEnv(
conf: SparkConf,
isLocal: Boolean,
listenerBus: LiveListenerBus): SparkEnv = {
SparkEnv.createDriverEnv(conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf))
}
private[spark] def env: SparkEnv = _env
// Used to store a URL for each static file/jar together with the file's local timestamp
private[spark] val addedFiles = new ConcurrentHashMap[String, Long]().asScala
private[spark] val addedArchives = new ConcurrentHashMap[String, Long]().asScala
private[spark] val addedJars = new ConcurrentHashMap[String, Long]().asScala
// Keeps track of all persisted RDDs
private[spark] val persistentRdds = {
val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]()
map.asScala
}
def statusTracker: SparkStatusTracker = _statusTracker
private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar
private[spark] def ui: Option[SparkUI] = _ui
def uiWebUrl: Option[String] = _ui.map(_.webUrl)
/**
* A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse.
*
* @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you
* plan to set some global configurations for all Hadoop RDDs.
*/
def hadoopConfiguration: Configuration = _hadoopConfiguration
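  // Editor's note (illustrative only): global Hadoop settings are applied through this shared
  // Configuration; the key and value below are placeholders, not real settings.
  //
  //   sc.hadoopConfiguration.set("some.hadoop.key", "some-value")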
private[spark] def executorMemory: Int = _executorMemory
// Environment variables to pass to our executors.
private[spark] val executorEnvs = HashMap[String, String]()
// Set SPARK_USER for user who is running SparkContext.
val sparkUser = Utils.getCurrentUserName()
private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend
private[spark] def taskScheduler: TaskScheduler = _taskScheduler
private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = {
_taskScheduler = ts
}
private[spark] def dagScheduler: DAGScheduler = _dagScheduler
private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = {
_dagScheduler = ds
}
private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents
/**
* A unique identifier for the Spark application.
* Its format depends on the scheduler implementation.
* (i.e.
* in case of local spark app something like 'local-1433865536131'
* in case of YARN something like 'application_1433865536131_34483'
* in case of MESOS something like 'driver-20170926223339-0001'
* )
*/
def applicationId: String = _applicationId
def applicationAttemptId: Option[String] = _applicationAttemptId
private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger
private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] =
_executorAllocationManager
private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager
private[spark] def cleaner: Option[ContextCleaner] = _cleaner
private[spark] var checkpointDir: Option[String] = None
// Thread Local variable that can be used by users to pass information down the stack
protected[spark] val localProperties = new InheritableThreadLocal[Properties] {
override def childValue(parent: Properties): Properties = {
// Note: make a clone such that changes in the parent properties aren't reflected in
      // those of the children threads, which has confusing semantics (SPARK-10563).
Utils.cloneProperties(parent)
}
override protected def initialValue(): Properties = new Properties()
}
/* ------------------------------------------------------------------------------------- *
| Initialization. This code initializes the context in a manner that is exception-safe. |
| All internal fields holding state are initialized here, and any error prompts the |
| stop() method to be called. |
* ------------------------------------------------------------------------------------- */
private def warnSparkMem(value: String): String = {
logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " +
"deprecated, please use spark.executor.memory instead.")
value
}
/** Control our logLevel. This overrides any user-defined log settings.
* @param logLevel The desired log level as a string.
* Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
*/
def setLogLevel(logLevel: String): Unit = {
// let's allow lowercase or mixed case too
val upperCased = logLevel.toUpperCase(Locale.ROOT)
require(SparkContext.VALID_LOG_LEVELS.contains(upperCased),
s"Supplied level $logLevel did not match one of:" +
s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}")
Utils.setLogLevel(Level.toLevel(upperCased))
}
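  // Editor's note (illustrative only): e.g. sc.setLogLevel("WARN") keeps only WARN and above
  // in the driver logs; the level string is case-insensitive per the check above.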
try {
_conf = config.clone()
_conf.validateSettings()
_conf.set("spark.app.startTime", startTime.toString)
if (!_conf.contains("spark.master")) {
throw new SparkException("A master URL must be set in your configuration")
}
if (!_conf.contains("spark.app.name")) {
throw new SparkException("An application name must be set in your configuration")
}
// This should be set as early as possible.
SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf)
SparkContext.supplementJavaModuleOptions(_conf)
_driverLogger = DriverLogger(_conf)
val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE)
_resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt)
logResourceInfo(SPARK_DRIVER_PREFIX, _resources)
// log out spark.app.name in the Spark driver logs
logInfo(s"Submitted application: $appName")
// System property spark.yarn.app.id must be set if user code ran by AM on a YARN cluster
if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) {
throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " +
"Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.")
}
if (_conf.getBoolean("spark.logConf", false)) {
      logInfo("Spark configuration:\n" + _conf.toDebugString)
}
// Set Spark driver host and port system properties. This explicitly sets the configuration
// instead of relying on the default value of the config constant.
_conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS))
_conf.setIfMissing(DRIVER_PORT, 0)
_conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER)
_jars = Utils.getUserJars(_conf)
_files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty))
.toSeq.flatten
_archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten
_eventLogDir =
if (isEventLogEnabled) {
val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/")
Some(Utils.resolveURI(unresolvedDir))
} else {
None
}
_eventLogCodec = {
val compress = _conf.get(EVENT_LOG_COMPRESS)
if (compress && isEventLogEnabled) {
Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName)
} else {
None
}
}
_listenerBus = new LiveListenerBus(_conf)
_resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus)
// Initialize the app status store and listener before SparkEnv is created so that it gets
// all events.
val appStatusSource = AppStatusSource.createSource(conf)
_statusStore = AppStatusStore.createLiveStore(conf, appStatusSource)
listenerBus.addToStatusQueue(_statusStore.listener.get)
// Create the Spark execution environment (cache, map output tracker, etc)
_env = createSparkEnv(_conf, isLocal, listenerBus)
SparkEnv.set(_env)
// If running the REPL, register the repl's output dir with the file server.
_conf.getOption("spark.repl.class.outputDir").foreach { path =>
val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path))
_conf.set("spark.repl.class.uri", replUri)
}
_statusTracker = new SparkStatusTracker(this, _statusStore)
_progressBar =
if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) {
Some(new ConsoleProgressBar(this))
} else {
None
}
_ui =
if (conf.get(UI_ENABLED)) {
Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "",
startTime))
} else {
// For tests, do not enable the UI
None
}
// Bind the UI before starting the task scheduler to communicate
// the bound port to the cluster manager properly
_ui.foreach(_.bind())
_hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf)
// Performance optimization: this dummy call to .size() triggers eager evaluation of
// Configuration's internal `properties` field, guaranteeing that it will be computed and
// cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create
// a new per-session Configuration. If `properties` has not been computed by that time
// then each newly-created Configuration will perform its own expensive IO and XML
// parsing to load configuration defaults and populate its own properties. By ensuring
// that we've pre-computed the parent's properties, the child Configuration will simply
// clone the parent's properties.
_hadoopConfiguration.size()
// Add each JAR given through the constructor
if (jars != null) {
jars.foreach(jar => addJar(jar, true))
if (addedJars.nonEmpty) {
_conf.set("spark.app.initial.jar.urls", addedJars.keys.toSeq.mkString(","))
}
}
if (files != null) {
files.foreach(file => addFile(file, false, true))
if (addedFiles.nonEmpty) {
_conf.set("spark.app.initial.file.urls", addedFiles.keys.toSeq.mkString(","))
}
}
if (archives != null) {
archives.foreach(file => addFile(file, false, true, isArchive = true))
if (addedArchives.nonEmpty) {
_conf.set("spark.app.initial.archive.urls", addedArchives.keys.toSeq.mkString(","))
}
}
_executorMemory = _conf.getOption(EXECUTOR_MEMORY.key)
.orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY")))
.orElse(Option(System.getenv("SPARK_MEM"))
.map(warnSparkMem))
.map(Utils.memoryStringToMb)
.getOrElse(1024)
    // Convert java options to env vars as a workaround
// since we can't set env vars directly in sbt.
for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key))
value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} {
executorEnvs(envKey) = value
}
Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v =>
executorEnvs("SPARK_PREPEND_CLASSES") = v
}
// The Mesos scheduler backend relies on this environment variable to set executor memory.
// TODO: Set this only in the Mesos scheduler.
executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m"
executorEnvs ++= _conf.getExecutorEnv
executorEnvs("SPARK_USER") = sparkUser
_shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(config).driver()
_shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) =>
_conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v)
}
// We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will
// retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640)
_heartbeatReceiver = env.rpcEnv.setupEndpoint(
HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this))
// Initialize any plugins before the task scheduler is initialized.
_plugins = PluginContainer(this, _resources.asJava)
// Create and start the scheduler
val (sched, ts) = SparkContext.createTaskScheduler(this, master)
_schedulerBackend = sched
_taskScheduler = ts
_dagScheduler = new DAGScheduler(this)
_heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet)
val _executorMetricsSource =
if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) {
Some(new ExecutorMetricsSource)
} else {
None
}
// create and start the heartbeater for collecting memory metrics
_heartbeater = new Heartbeater(
() => SparkContext.this.reportHeartBeat(_executorMetricsSource),
"driver-heartbeater",
conf.get(EXECUTOR_HEARTBEAT_INTERVAL))
_heartbeater.start()
// start TaskScheduler after taskScheduler sets DAGScheduler reference in DAGScheduler's
// constructor
_taskScheduler.start()
_applicationId = _taskScheduler.applicationId()
_applicationAttemptId = _taskScheduler.applicationAttemptId()
_conf.set("spark.app.id", _applicationId)
_applicationAttemptId.foreach { attemptId =>
_conf.set(APP_ATTEMPT_ID, attemptId)
_env.blockManager.blockStoreClient.setAppAttemptId(attemptId)
}
if (_conf.get(UI_REVERSE_PROXY)) {
val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL.key, "").stripSuffix("/") +
"/proxy/" + _applicationId
System.setProperty("spark.ui.proxyBase", proxyUrl)
}
_ui.foreach(_.setAppId(_applicationId))
_env.blockManager.initialize(_applicationId)
FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf)
    // The driver's metrics system needs spark.app.id to be set to the application ID,
    // so it should start after we get the app ID from the task scheduler and set spark.app.id.
_env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED))
_eventLogger =
if (isEventLogEnabled) {
val logger =
new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get,
_conf, _hadoopConfiguration)
logger.start()
listenerBus.addToEventLogQueue(logger)
Some(logger)
} else {
None
}
_cleaner =
if (_conf.get(CLEANER_REFERENCE_TRACKING)) {
Some(new ContextCleaner(this, _shuffleDriverComponents))
} else {
None
}
_cleaner.foreach(_.start())
val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf)
_executorAllocationManager =
if (dynamicAllocationEnabled) {
schedulerBackend match {
case b: ExecutorAllocationClient =>
Some(new ExecutorAllocationManager(
schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf,
cleaner = cleaner, resourceProfileManager = resourceProfileManager))
case _ =>
None
}
} else {
None
}
_executorAllocationManager.foreach(_.start())
setupAndStartListenerBus()
postEnvironmentUpdate()
postApplicationStart()
// After application started, attach handlers to started server and start handler.
_ui.foreach(_.attachAllHandler())
// Attach the driver metrics servlet handler to the web ui after the metrics system is started.
_env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler)))
// Make sure the context is stopped if the user forgets about it. This avoids leaving
// unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM
// is killed, though.
logDebug("Adding shutdown hook") // force eager creation of logger
_shutdownHookRef = ShutdownHookManager.addShutdownHook(
ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () =>
logInfo("Invoking stop() from shutdown hook")
try {
stop()
} catch {
case e: Throwable =>
logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e)
}
}
// Post init
_taskScheduler.postStartHook()
if (isLocal) {
_env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly)
}
_env.metricsSystem.registerSource(_dagScheduler.metricsSource)
_env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager))
_env.metricsSystem.registerSource(new JVMCPUSource())
_executorMetricsSource.foreach(_.register(_env.metricsSystem))
_executorAllocationManager.foreach { e =>
_env.metricsSystem.registerSource(e.executorAllocationManagerSource)
}
appStatusSource.foreach(_env.metricsSystem.registerSource(_))
_plugins.foreach(_.registerMetrics(applicationId))
} catch {
case NonFatal(e) =>
logError("Error initializing SparkContext.", e)
try {
stop()
} catch {
case NonFatal(inner) =>
logError("Error stopping SparkContext after init error.", inner)
} finally {
throw e
}
}
/**
* Called by the web UI to obtain executor thread dumps. This method may be expensive.
* Logs an error and returns None if we failed to obtain a thread dump, which could occur due
* to an executor being dead or unresponsive or due to network issues while sending the thread
* dump message back to the driver.
*/
private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = {
try {
if (executorId == SparkContext.DRIVER_IDENTIFIER) {
Some(Utils.getThreadDump())
} else {
env.blockManager.master.getExecutorEndpointRef(executorId) match {
case Some(endpointRef) =>
Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump))
case None =>
            logWarning(s"Executor $executorId might already have stopped; " +
              "cannot request a thread dump from it.")
None
}
}
} catch {
case e: Exception =>
logError(s"Exception getting thread dump from executor $executorId", e)
None
}
}
private[spark] def getLocalProperties: Properties = localProperties.get()
private[spark] def setLocalProperties(props: Properties): Unit = {
localProperties.set(props)
}
/**
* Set a local property that affects jobs submitted from this thread, such as the Spark fair
* scheduler pool. User-defined properties may also be set here. These properties are propagated
* through to worker tasks and can be accessed there via
* [[org.apache.spark.TaskContext#getLocalProperty]].
*
* These properties are inherited by child threads spawned from this thread. This
* may have unexpected consequences when working with thread pools. The standard java
* implementation of thread pools have worker threads spawn other worker threads.
* As a result, local properties may propagate unpredictably.
*/
def setLocalProperty(key: String, value: String): Unit = {
if (value == null) {
localProperties.get.remove(key)
} else {
localProperties.get.setProperty(key, value)
}
}
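  // Editor's note (illustrative only): a set/get/clear round trip using a made-up key.
  //
  //   sc.setLocalProperty("example.tag", "etl-step-3")          // visible to jobs from this thread
  //   assert(sc.getLocalProperty("example.tag") == "etl-step-3")
  //   sc.setLocalProperty("example.tag", null)                  // removes the key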
/**
* Get a local property set in this thread, or null if it is missing. See
* `org.apache.spark.SparkContext.setLocalProperty`.
*/
def getLocalProperty(key: String): String =
Option(localProperties.get).map(_.getProperty(key)).orNull
/** Set a human readable description of the current job. */
def setJobDescription(value: String): Unit = {
setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value)
}
/**
* Assigns a group ID to all the jobs started by this thread until the group ID is set to a
* different value or cleared.
*
* Often, a unit of execution in an application consists of multiple Spark actions or jobs.
* Application programmers can use this method to group all those jobs together and give a
* group description. Once set, the Spark web UI will associate such jobs with this group.
*
* The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all
* running jobs in this group. For example,
* {{{
* // In the main thread:
* sc.setJobGroup("some_job_to_cancel", "some job description")
* sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count()
*
* // In a separate thread:
* sc.cancelJobGroup("some_job_to_cancel")
* }}}
*
* @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()`
* being called on the job's executor threads. This is useful to help ensure that the tasks
* are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS
* may respond to Thread.interrupt() by marking nodes as dead.
*/
def setJobGroup(groupId: String,
description: String, interruptOnCancel: Boolean = false): Unit = {
setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description)
setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId)
// Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids
// changing several public APIs and allows Spark cancellations outside of the cancelJobGroup
// APIs to also take advantage of this property (e.g., internal job failures or canceling from
// JobProgressTab UI) on a per-job basis.
setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString)
}
/** Clear the current thread's job group ID and its description. */
def clearJobGroup(): Unit = {
setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null)
setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null)
setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null)
}
/**
* Execute a block of code in a scope such that all new RDDs created in this body will
   * be part of the same scope. For more detail, see `org.apache.spark.rdd.RDDOperationScope`.
*
* @note Return statements are NOT allowed in the given body.
*/
private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body)
// Methods for creating RDDs
/** Distribute a local Scala collection to form an RDD.
*
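   * For example, a minimal sketch (assuming `sc` is this `SparkContext`):
   * {{{
   *   val rdd = sc.parallelize(Seq(1, 2, 3, 4, 5), numSlices = 2)
   *   rdd.count()  // 5
   * }}}
   *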
* @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call
* to parallelize and before the first action on the RDD, the resultant RDD will reflect the
* modified collection. Pass a copy of the argument to avoid this.
   * @note Avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an
   * RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions.
* @param seq Scala collection to distribute
* @param numSlices number of partitions to divide the collection into
* @return RDD representing distributed collection
*/
def parallelize[T: ClassTag](
seq: Seq[T],
numSlices: Int = defaultParallelism): RDD[T] = withScope {
assertNotStopped()
new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]())
}
/**
   * Creates a new RDD[Long] containing elements from `start` to `end` (exclusive), increasing by
   * `step` with each element.
*
   * @note If this RDD needs to be cached, make sure each partition does not exceed the size limit.
*
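   * For example (a sketch; `sc` denotes this `SparkContext`):
   * {{{
   *   sc.range(0L, 10L, step = 2).collect()  // Array(0, 2, 4, 6, 8)
   * }}}
   *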
* @param start the start value.
* @param end the end value.
* @param step the incremental step
* @param numSlices number of partitions to divide the collection into
* @return RDD representing distributed range
*/
def range(
start: Long,
end: Long,
step: Long = 1,
numSlices: Int = defaultParallelism): RDD[Long] = withScope {
assertNotStopped()
// when step is 0, range will run infinitely
require(step != 0, "step cannot be 0")
val numElements: BigInt = {
val safeStart = BigInt(start)
val safeEnd = BigInt(end)
if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) {
(safeEnd - safeStart) / step
} else {
        // the remainder has the same sign as the range, so add one more element
(safeEnd - safeStart) / step + 1
}
}
parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) =>
val partitionStart = (i * numElements) / numSlices * step + start
val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start
def getSafeMargin(bi: BigInt): Long =
if (bi.isValidLong) {
bi.toLong
} else if (bi > 0) {
Long.MaxValue
} else {
Long.MinValue
}
val safePartitionStart = getSafeMargin(partitionStart)
val safePartitionEnd = getSafeMargin(partitionEnd)
new Iterator[Long] {
private[this] var number: Long = safePartitionStart
private[this] var overflow: Boolean = false
override def hasNext =
if (!overflow) {
if (step > 0) {
number < safePartitionEnd
} else {
number > safePartitionEnd
}
} else false
override def next() = {
val ret = number
number += step
if (number < ret ^ step < 0) {
            // Long.MaxValue + Long.MaxValue < Long.MaxValue and
            // Long.MinValue + Long.MinValue > Long.MinValue, so if adding the step moved the
            // value in the wrong direction, we know an overflow occurred.
overflow = true
}
ret
}
}
}
}
/** Distribute a local Scala collection to form an RDD.
*
* This method is identical to `parallelize`.
* @param seq Scala collection to distribute
* @param numSlices number of partitions to divide the collection into
* @return RDD representing distributed collection
*/
def makeRDD[T: ClassTag](
seq: Seq[T],
numSlices: Int = defaultParallelism): RDD[T] = withScope {
parallelize(seq, numSlices)
}
/**
* Distribute a local Scala collection to form an RDD, with one or more
* location preferences (hostnames of Spark nodes) for each object.
* Create a new partition for each collection item.
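   *
   * For example, a sketch with hypothetical hostnames (`sc` denotes this `SparkContext`):
   * {{{
   *   val data = Seq(
   *     ("record-1", Seq("host1.example.com")),
   *     ("record-2", Seq("host2.example.com")))
   *   val rdd = sc.makeRDD(data)  // one partition per element, placed near its preferred host
   * }}}
   *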
* @param seq list of tuples of data and location preferences (hostnames of Spark nodes)
* @return RDD representing data partitioned according to location preferences
*/
def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope {
assertNotStopped()
val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap
new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs)
}
/**
* Read a text file from HDFS, a local file system (available on all nodes), or any
* Hadoop-supported file system URI, and return it as an RDD of Strings.
* The text files must be encoded as UTF-8.
*
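   * For example (a sketch; the path is hypothetical and `sc` denotes this `SparkContext`):
   * {{{
   *   val lines = sc.textFile("hdfs://some-path/data.txt")
   *   val nonEmptyLines = lines.filter(_.nonEmpty).count()
   * }}}
   *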
* @param path path to the text file on a supported file system
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of lines of the text file
*/
def textFile(
path: String,
minPartitions: Int = defaultMinPartitions): RDD[String] = withScope {
assertNotStopped()
hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text],
minPartitions).map(pair => pair._2.toString).setName(path)
}
/**
* Read a directory of text files from HDFS, a local file system (available on all nodes), or any
* Hadoop-supported file system URI. Each file is read as a single record and returned in a
* key-value pair, where the key is the path of each file, the value is the content of each file.
* The text files must be encoded as UTF-8.
*
* <p> For example, if you have the following files:
* {{{
* hdfs://a-hdfs-path/part-00000
* hdfs://a-hdfs-path/part-00001
* ...
* hdfs://a-hdfs-path/part-nnnnn
* }}}
*
   * Do `val rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")`,
*
* <p> then `rdd` contains
* {{{
* (a-hdfs-path/part-00000, its content)
* (a-hdfs-path/part-00001, its content)
* ...
* (a-hdfs-path/part-nnnnn, its content)
* }}}
*
   * @note Small files are preferred; large files are also allowed, but may cause poor performance.
* @note On some filesystems, `.../path/*` can be a more efficient way to read all files
* in a directory rather than `.../path/` or `.../path`
* @note Partitioning is determined by data locality. This may result in too few partitions
* by default.
*
* @param path Directory to the input data files, the path can be comma separated paths as the
* list of inputs.
   * @param minPartitions A suggested minimum number of splits for the input data.
* @return RDD representing tuples of file path and the corresponding file content
*/
def wholeTextFiles(
path: String,
minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope {
assertNotStopped()
val job = NewHadoopJob.getInstance(hadoopConfiguration)
// Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking
// comma separated files as input. (see SPARK-7155)
NewFileInputFormat.setInputPaths(job, path)
val updateConf = job.getConfiguration
new WholeTextFileRDD(
this,
classOf[WholeTextFileInputFormat],
classOf[Text],
classOf[Text],
updateConf,
minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path)
}
/**
* Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file
* (useful for binary data)
*
* For example, if you have the following files:
* {{{
* hdfs://a-hdfs-path/part-00000
* hdfs://a-hdfs-path/part-00001
* ...
* hdfs://a-hdfs-path/part-nnnnn
* }}}
*
* Do
* `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`,
*
* then `rdd` contains
* {{{
* (a-hdfs-path/part-00000, its content)
* (a-hdfs-path/part-00001, its content)
* ...
* (a-hdfs-path/part-nnnnn, its content)
* }}}
*
* @note Small files are preferred; very large files may cause bad performance.
* @note On some filesystems, `.../path/*` can be a more efficient way to read all files
* in a directory rather than `.../path/` or `.../path`
* @note Partitioning is determined by data locality. This may result in too few partitions
* by default.
*
* @param path Directory to the input data files, the path can be comma separated paths as the
* list of inputs.
   * @param minPartitions A suggested minimum number of splits for the input data.
* @return RDD representing tuples of file path and corresponding file content
*/
def binaryFiles(
path: String,
minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope {
assertNotStopped()
val job = NewHadoopJob.getInstance(hadoopConfiguration)
// Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking
// comma separated files as input. (see SPARK-7155)
NewFileInputFormat.setInputPaths(job, path)
val updateConf = job.getConfiguration
new BinaryFileRDD(
this,
classOf[StreamInputFormat],
classOf[String],
classOf[PortableDataStream],
updateConf,
minPartitions).setName(path)
}
/**
* Load data from a flat binary file, assuming the length of each record is constant.
*
* @note We ensure that the byte array for each record in the resulting RDD
* has the provided record length.
*
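   * For example (a sketch; the path and record length are hypothetical):
   * {{{
   *   // each record is assumed to be an 8-byte big-endian long
   *   val records = sc.binaryRecords("hdfs://some-path/fixed.bin", recordLength = 8)
   *   val longs = records.map(bytes => java.nio.ByteBuffer.wrap(bytes).getLong)
   * }}}
   *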
* @param path Directory to the input data files, the path can be comma separated paths as the
* list of inputs.
* @param recordLength The length at which to split the records
* @param conf Configuration for setting up the dataset.
*
* @return An RDD of data with values, represented as byte arrays
*/
def binaryRecords(
path: String,
recordLength: Int,
conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope {
assertNotStopped()
conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength)
val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path,
classOf[FixedLengthBinaryInputFormat],
classOf[LongWritable],
classOf[BytesWritable],
conf = conf)
br.map { case (k, v) =>
val bytes = v.copyBytes()
assert(bytes.length == recordLength, "Byte array does not have correct length")
bytes
}
}
/**
* Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other
* necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable),
* using the older MapReduce API (`org.apache.hadoop.mapred`).
*
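   * For example, a sketch using the old `org.apache.hadoop.mapred` API (the input path is
   * hypothetical and `sc` denotes this `SparkContext`):
   * {{{
   *   import org.apache.hadoop.io.{LongWritable, Text}
   *   import org.apache.hadoop.mapred.{FileInputFormat, JobConf, TextInputFormat}
   *
   *   val jobConf = new JobConf()
   *   FileInputFormat.setInputPaths(jobConf, "hdfs://some-path/input")
   *   val lines = sc.hadoopRDD(jobConf, classOf[TextInputFormat],
   *     classOf[LongWritable], classOf[Text]).map { case (_, text) => text.toString }
   * }}}
   *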
* @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast.
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make
* sure you won't modify the conf. A safe approach is always creating a new conf for
* a new RDD.
* @param inputFormatClass storage format of the data to be read
* @param keyClass `Class` of the key associated with the `inputFormatClass` parameter
* @param valueClass `Class` of the value associated with the `inputFormatClass` parameter
* @param minPartitions Minimum number of Hadoop Splits to generate.
* @return RDD of tuples of key and corresponding value
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
*/
def hadoopRDD[K, V](
conf: JobConf,
inputFormatClass: Class[_ <: InputFormat[K, V]],
keyClass: Class[K],
valueClass: Class[V],
minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope {
assertNotStopped()
// This is a hack to enforce loading hdfs-site.xml.
// See SPARK-11227 for details.
FileSystem.getLocal(conf)
// Add necessary security credentials to the JobConf before broadcasting it.
SparkHadoopUtil.get.addCredentials(conf)
new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions)
}
/** Get an RDD for a Hadoop file with an arbitrary InputFormat
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param inputFormatClass storage format of the data to be read
* @param keyClass `Class` of the key associated with the `inputFormatClass` parameter
* @param valueClass `Class` of the value associated with the `inputFormatClass` parameter
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of tuples of key and corresponding value
*/
def hadoopFile[K, V](
path: String,
inputFormatClass: Class[_ <: InputFormat[K, V]],
keyClass: Class[K],
valueClass: Class[V],
minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope {
assertNotStopped()
// This is a hack to enforce loading hdfs-site.xml.
// See SPARK-11227 for details.
FileSystem.getLocal(hadoopConfiguration)
// A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it.
val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration))
val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path)
new HadoopRDD(
this,
confBroadcast,
Some(setInputPathsFunc),
inputFormatClass,
keyClass,
valueClass,
minPartitions).setName(path)
}
/**
* Smarter version of hadoopFile() that uses class tags to figure out the classes of keys,
* values and the InputFormat so that users don't need to pass them directly. Instead, callers
* can just write, for example,
* {{{
* val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions)
* }}}
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of tuples of key and corresponding value
*/
def hadoopFile[K, V, F <: InputFormat[K, V]]
(path: String, minPartitions: Int)
(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
hadoopFile(path,
fm.runtimeClass.asInstanceOf[Class[F]],
km.runtimeClass.asInstanceOf[Class[K]],
vm.runtimeClass.asInstanceOf[Class[V]],
minPartitions)
}
/**
* Smarter version of hadoopFile() that uses class tags to figure out the classes of keys,
* values and the InputFormat so that users don't need to pass them directly. Instead, callers
* can just write, for example,
* {{{
* val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path)
* }}}
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths as
* a list of inputs
* @return RDD of tuples of key and corresponding value
*/
def hadoopFile[K, V, F <: InputFormat[K, V]](path: String)
(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
hadoopFile[K, V, F](path, defaultMinPartitions)
}
/**
   * Smarter version of `newAPIHadoopFile` that uses class tags to figure out the classes of keys,
   * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that users
   * don't need to pass them directly. Instead, callers can just write, for example:
   * {{{
   *   val file = sparkContext.newAPIHadoopFile[LongWritable, Text, TextInputFormat](path)
   * }}}
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @return RDD of tuples of key and corresponding value
*/
def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]
(path: String)
(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
newAPIHadoopFile(
path,
fm.runtimeClass.asInstanceOf[Class[F]],
km.runtimeClass.asInstanceOf[Class[K]],
vm.runtimeClass.asInstanceOf[Class[V]])
}
/**
* Get an RDD for a given Hadoop file with an arbitrary new API InputFormat
* and extra configuration options to pass to the input format.
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param fClass storage format of the data to be read
* @param kClass `Class` of the key associated with the `fClass` parameter
* @param vClass `Class` of the value associated with the `fClass` parameter
* @param conf Hadoop configuration
* @return RDD of tuples of key and corresponding value
*/
def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]](
path: String,
fClass: Class[F],
kClass: Class[K],
vClass: Class[V],
conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope {
assertNotStopped()
// This is a hack to enforce loading hdfs-site.xml.
// See SPARK-11227 for details.
FileSystem.getLocal(hadoopConfiguration)
// The call to NewHadoopJob automatically adds security credentials to conf,
// so we don't need to explicitly add them ourselves
val job = NewHadoopJob.getInstance(conf)
// Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking
// comma separated files as input. (see SPARK-7155)
NewFileInputFormat.setInputPaths(job, path)
val updatedConf = job.getConfiguration
new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path)
}
/**
* Get an RDD for a given Hadoop file with an arbitrary new API InputFormat
* and extra configuration options to pass to the input format.
*
* @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast.
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make
* sure you won't modify the conf. A safe approach is always creating a new conf for
* a new RDD.
* @param fClass storage format of the data to be read
* @param kClass `Class` of the key associated with the `fClass` parameter
* @param vClass `Class` of the value associated with the `fClass` parameter
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
*/
def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]](
conf: Configuration = hadoopConfiguration,
fClass: Class[F],
kClass: Class[K],
vClass: Class[V]): RDD[(K, V)] = withScope {
assertNotStopped()
// This is a hack to enforce loading hdfs-site.xml.
// See SPARK-11227 for details.
FileSystem.getLocal(conf)
// Add necessary security credentials to the JobConf. Required to access secure HDFS.
val jconf = new JobConf(conf)
SparkHadoopUtil.get.addCredentials(jconf)
new NewHadoopRDD(this, fClass, kClass, vClass, jconf)
}
/**
* Get an RDD for a Hadoop SequenceFile with given key and value types.
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param keyClass `Class` of the key associated with `SequenceFileInputFormat`
* @param valueClass `Class` of the value associated with `SequenceFileInputFormat`
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of tuples of key and corresponding value
*/
def sequenceFile[K, V](path: String,
keyClass: Class[K],
valueClass: Class[V],
minPartitions: Int
): RDD[(K, V)] = withScope {
assertNotStopped()
val inputFormatClass = classOf[SequenceFileInputFormat[K, V]]
hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions)
}
/**
* Get an RDD for a Hadoop SequenceFile with given key and value types.
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param keyClass `Class` of the key associated with `SequenceFileInputFormat`
* @param valueClass `Class` of the value associated with `SequenceFileInputFormat`
* @return RDD of tuples of key and corresponding value
*/
def sequenceFile[K, V](
path: String,
keyClass: Class[K],
valueClass: Class[V]): RDD[(K, V)] = withScope {
assertNotStopped()
sequenceFile(path, keyClass, valueClass, defaultMinPartitions)
}
/**
* Version of sequenceFile() for types implicitly convertible to Writables through a
* WritableConverter. For example, to access a SequenceFile where the keys are Text and the
* values are IntWritable, you could simply write
* {{{
* sparkContext.sequenceFile[String, Int](path, ...)
* }}}
*
* WritableConverters are provided in a somewhat strange way (by an implicit function) to support
* both subclasses of Writable and types for which we define a converter (e.g. Int to
* IntWritable). The most natural thing would've been to have implicit objects for the
* converters, but then we couldn't have an object for every subclass of Writable (you can't
* have a parameterized singleton object). We use functions instead to create a new converter
* for the appropriate type. In addition, we pass the converter a ClassTag of its type to
* allow it to figure out the Writable class to use in the subclass case.
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of tuples of key and corresponding value
*/
def sequenceFile[K, V]
(path: String, minPartitions: Int = defaultMinPartitions)
(implicit km: ClassTag[K], vm: ClassTag[V],
kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = {
withScope {
assertNotStopped()
val kc = clean(kcf)()
val vc = clean(vcf)()
val format = classOf[SequenceFileInputFormat[Writable, Writable]]
val writables = hadoopFile(path, format,
kc.writableClass(km).asInstanceOf[Class[Writable]],
vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions)
writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) }
}
}
/**
* Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and
* BytesWritable values that contain a serialized partition. This is still an experimental
* storage format and may not be supported exactly as is in future Spark releases. It will also
* be pretty slow if you use the default serializer (Java serialization),
* though the nice thing about it is that there's very little effort required to save arbitrary
* objects.
*
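   * For example, a round-trip sketch (the path is hypothetical and `sc` denotes this
   * `SparkContext`):
   * {{{
   *   sc.parallelize(Seq(1, 2, 3)).saveAsObjectFile("hdfs://some-path/ints")
   *   val restored = sc.objectFile[Int]("hdfs://some-path/ints")
   * }}}
   *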
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD representing deserialized data from the file(s)
*/
def objectFile[T: ClassTag](
path: String,
minPartitions: Int = defaultMinPartitions): RDD[T] = withScope {
assertNotStopped()
sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions)
.flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader))
}
protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope {
new ReliableCheckpointRDD[T](this, path)
}
/** Build the union of a list of RDDs. */
def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope {
val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty)
val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet
if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) {
new PartitionerAwareUnionRDD(this, nonEmptyRdds)
} else {
new UnionRDD(this, nonEmptyRdds)
}
}
/** Build the union of a list of RDDs passed as variable-length arguments. */
def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope {
union(Seq(first) ++ rest)
}
/** Get an RDD that has no partitions or elements. */
def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this)
// Methods for creating shared variables
/**
* Register the given accumulator.
*
   * @note Accumulators must be registered before use, or it will throw an exception.
*/
def register(acc: AccumulatorV2[_, _]): Unit = {
acc.register(this)
}
/**
* Register the given accumulator with given name.
*
   * @note Accumulators must be registered before use, or it will throw an exception.
*/
def register(acc: AccumulatorV2[_, _], name: String): Unit = {
acc.register(this, name = Option(name))
}
/**
* Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`.
*/
def longAccumulator: LongAccumulator = {
val acc = new LongAccumulator
register(acc)
acc
}
/**
* Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`.
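   *
   * For example (a sketch; `sc` denotes this `SparkContext` and the accumulator name is
   * arbitrary):
   * {{{
   *   val errorCount = sc.longAccumulator("errors")
   *   sc.parallelize(1 to 100).foreach { i => if (i % 10 == 0) errorCount.add(1) }
   *   errorCount.value  // 10
   * }}}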
*/
def longAccumulator(name: String): LongAccumulator = {
val acc = new LongAccumulator
register(acc, name)
acc
}
/**
* Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`.
*/
def doubleAccumulator: DoubleAccumulator = {
val acc = new DoubleAccumulator
register(acc)
acc
}
/**
* Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`.
*/
def doubleAccumulator(name: String): DoubleAccumulator = {
val acc = new DoubleAccumulator
register(acc, name)
acc
}
/**
* Create and register a `CollectionAccumulator`, which starts with empty list and accumulates
* inputs by adding them into the list.
*/
def collectionAccumulator[T]: CollectionAccumulator[T] = {
val acc = new CollectionAccumulator[T]
register(acc)
acc
}
/**
* Create and register a `CollectionAccumulator`, which starts with empty list and accumulates
* inputs by adding them into the list.
*/
def collectionAccumulator[T](name: String): CollectionAccumulator[T] = {
val acc = new CollectionAccumulator[T]
register(acc, name)
acc
}
/**
* Broadcast a read-only variable to the cluster, returning a
* [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions.
   * The variable will be sent to each executor only once.
*
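   * For example, a minimal sketch (assuming `sc` is this `SparkContext`):
   * {{{
   *   val lookup = sc.broadcast(Map("a" -> 1, "b" -> 2))
   *   val total = sc.parallelize(Seq("a", "b", "a"))
   *     .map(key => lookup.value.getOrElse(key, 0))
   *     .sum()  // 4.0
   * }}}
   *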
* @param value value to broadcast to the Spark nodes
* @return `Broadcast` object, a read-only variable cached on each machine
*/
def broadcast[T: ClassTag](value: T): Broadcast[T] = {
assertNotStopped()
require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass),
"Can not directly broadcast RDDs; instead, call collect() and broadcast the result.")
val bc = env.broadcastManager.newBroadcast[T](value, isLocal)
val callSite = getCallSite
logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm)
cleaner.foreach(_.registerBroadcastForCleanup(bc))
bc
}
/**
* Add a file to be downloaded with this Spark job on every node.
*
* If a file is added during execution, it will not be available until the next TaskSet starts.
*
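   * For example (a sketch; the file path is hypothetical and `sc` denotes this `SparkContext`):
   * {{{
   *   sc.addFile("hdfs://some-path/lookup.txt")
   *   sc.parallelize(1 to 4).map { _ =>
   *     // resolve the locally downloaded copy on each executor
   *     org.apache.spark.SparkFiles.get("lookup.txt")
   *   }.collect()
   * }}}
   *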
* @param path can be either a local file, a file in HDFS (or other Hadoop-supported
* filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
* use `SparkFiles.get(fileName)` to find its download location.
*
* @note A path can be added only once. Subsequent additions of the same path are ignored.
*/
def addFile(path: String): Unit = {
addFile(path, false, false)
}
/**
* Returns a list of file paths that are added to resources.
*/
def listFiles(): Seq[String] = addedFiles.keySet.toSeq
/**
* :: Experimental ::
* Add an archive to be downloaded and unpacked with this Spark job on every node.
*
* If an archive is added during execution, it will not be available until the next TaskSet
* starts.
*
* @param path can be either a local file, a file in HDFS (or other Hadoop-supported
* filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
* use `SparkFiles.get(paths-to-files)` to find its download/unpacked location.
* The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar.
*
* @note A path can be added only once. Subsequent additions of the same path are ignored.
*
* @since 3.1.0
*/
@Experimental
def addArchive(path: String): Unit = {
addFile(path, false, false, isArchive = true)
}
/**
* :: Experimental ::
* Returns a list of archive paths that are added to resources.
*
* @since 3.1.0
*/
@Experimental
def listArchives(): Seq[String] = addedArchives.keySet.toSeq
/**
* Add a file to be downloaded with this Spark job on every node.
*
* If a file is added during execution, it will not be available until the next TaskSet starts.
*
* @param path can be either a local file, a file in HDFS (or other Hadoop-supported
* filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
* use `SparkFiles.get(fileName)` to find its download location.
* @param recursive if true, a directory can be given in `path`. Currently directories are
* only supported for Hadoop-supported filesystems.
*
* @note A path can be added only once. Subsequent additions of the same path are ignored.
*/
def addFile(path: String, recursive: Boolean): Unit = {
addFile(path, recursive, false)
}
private def addFile(
path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false
): Unit = {
val uri = Utils.resolveURI(path)
val schemeCorrectedURI = uri.getScheme match {
case null => new File(path).getCanonicalFile.toURI
case "local" =>
logWarning(s"File with 'local' scheme $path is not supported to add to file server, " +
s"since it is already available on every node.")
return
case _ => uri
}
val hadoopPath = new Path(schemeCorrectedURI)
val scheme = schemeCorrectedURI.getScheme
if (!Array("http", "https", "ftp").contains(scheme) && !isArchive) {
val fs = hadoopPath.getFileSystem(hadoopConfiguration)
val isDir = fs.getFileStatus(hadoopPath).isDirectory
if (!isLocal && scheme == "file" && isDir) {
throw new SparkException(s"addFile does not support local directories when not running " +
"local mode.")
}
if (!recursive && isDir) {
throw new SparkException(s"Added file $hadoopPath is a directory and recursive is not " +
"turned on.")
}
} else {
// SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies
Utils.validateURL(uri)
}
val key = if (!isLocal && scheme == "file") {
env.rpcEnv.fileServer.addFile(new File(uri.getPath))
} else if (uri.getScheme == null) {
schemeCorrectedURI.toString
} else {
uri.toString
}
val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis
if (!isArchive && addedFiles.putIfAbsent(key, timestamp).isEmpty) {
logInfo(s"Added file $path at $key with timestamp $timestamp")
// Fetch the file locally so that closures which are run on the driver can still use the
// SparkFiles API to access files.
Utils.fetchFile(uri.toString, new File(SparkFiles.getRootDirectory()), conf,
hadoopConfiguration, timestamp, useCache = false)
postEnvironmentUpdate()
} else if (
isArchive &&
addedArchives.putIfAbsent(
UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString,
timestamp).isEmpty) {
logInfo(s"Added archive $path at $key with timestamp $timestamp")
// If the scheme is file, use URI to simply copy instead of downloading.
val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key)
val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build()
val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf,
hadoopConfiguration, timestamp, useCache = false, shouldUntar = false)
val dest = new File(
SparkFiles.getRootDirectory(),
if (uri.getFragment != null) uri.getFragment else source.getName)
logInfo(
s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}")
Utils.deleteRecursively(dest)
Utils.unpack(source, dest)
postEnvironmentUpdate()
} else {
logWarning(s"The path $path has been added already. Overwriting of added paths " +
"is not supported in the current version.")
}
}
/**
* :: DeveloperApi ::
* Register a listener to receive up-calls from events that happen during execution.
*/
@DeveloperApi
def addSparkListener(listener: SparkListenerInterface): Unit = {
listenerBus.addToSharedQueue(listener)
}
/**
* :: DeveloperApi ::
* Deregister the listener from Spark's listener bus.
*/
@DeveloperApi
def removeSparkListener(listener: SparkListenerInterface): Unit = {
listenerBus.removeListener(listener)
}
private[spark] def getExecutorIds(): Seq[String] = {
schedulerBackend match {
case b: ExecutorAllocationClient =>
b.getExecutorIds()
case _ =>
logWarning("Requesting executors is not supported by current scheduler.")
Nil
}
}
/**
   * Get the maximum number of tasks that can be launched concurrently based on the given
   * ResourceProfile, counting all task slots even if some of them are currently in use.
   * Note: do not cache the value returned by this method, because the number can change as
   * executors are added or removed.
*
* @param rp ResourceProfile which to use to calculate max concurrent tasks.
* @return The max number of tasks that can be concurrent launched currently.
*/
private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = {
schedulerBackend.maxNumConcurrentTasks(rp)
}
/**
* Update the cluster manager on our scheduling needs. Three bits of information are included
* to help it make decisions. This applies to the default ResourceProfile.
* @param numExecutors The total number of executors we'd like to have. The cluster manager
* shouldn't kill any running executor to reach this number, but,
* if all existing executors were to die, this is the number of executors
* we'd want to be allocated.
   * @param localityAwareTasks The number of tasks in all active stages that have locality
   *                           preferences. This includes running, pending, and completed tasks.
   * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages
   *                             that would like to run on that host.
   *                             This includes running, pending, and completed tasks.
* @return whether the request is acknowledged by the cluster manager.
*/
@DeveloperApi
def requestTotalExecutors(
numExecutors: Int,
localityAwareTasks: Int,
hostToLocalTaskCount: immutable.Map[String, Int]
): Boolean = {
schedulerBackend match {
case b: ExecutorAllocationClient =>
// this is being applied to the default resource profile, would need to add api to support
// others
val defaultProfId = resourceProfileManager.defaultResourceProfile.id
        b.requestTotalExecutors(immutable.Map(defaultProfId -> numExecutors),
          immutable.Map(defaultProfId -> localityAwareTasks),
          immutable.Map(defaultProfId -> hostToLocalTaskCount))
case _ =>
logWarning("Requesting executors is not supported by current scheduler.")
false
}
}
/**
* :: DeveloperApi ::
* Request an additional number of executors from the cluster manager.
* @return whether the request is received.
*/
@DeveloperApi
def requestExecutors(numAdditionalExecutors: Int): Boolean = {
schedulerBackend match {
case b: ExecutorAllocationClient =>
b.requestExecutors(numAdditionalExecutors)
case _ =>
logWarning("Requesting executors is not supported by current scheduler.")
false
}
}
/**
* :: DeveloperApi ::
* Request that the cluster manager kill the specified executors.
*
* This is not supported when dynamic allocation is turned on.
*
* @note This is an indication to the cluster manager that the application wishes to adjust
* its resource usage downwards. If the application wishes to replace the executors it kills
* through this method with new ones, it should follow up explicitly with a call to
   * `SparkContext#requestExecutors`.
*
* @return whether the request is received.
*/
@DeveloperApi
def killExecutors(executorIds: Seq[String]): Boolean = {
schedulerBackend match {
case b: ExecutorAllocationClient =>
require(executorAllocationManager.isEmpty,
"killExecutors() unsupported with Dynamic Allocation turned on")
b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false,
force = true).nonEmpty
case _ =>
logWarning("Killing executors is not supported by current scheduler.")
false
}
}
/**
* :: DeveloperApi ::
* Request that the cluster manager kill the specified executor.
*
* @note This is an indication to the cluster manager that the application wishes to adjust
* its resource usage downwards. If the application wishes to replace the executor it kills
* through this method with a new one, it should follow up explicitly with a call to
   * `SparkContext#requestExecutors`.
*
* @return whether the request is received.
*/
@DeveloperApi
def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId))
/**
* Request that the cluster manager kill the specified executor without adjusting the
* application resource requirements.
*
* The effect is that a new executor will be launched in place of the one killed by
* this request. This assumes the cluster manager will automatically and eventually
* fulfill all missing application resource requests.
*
* @note The replace is by no means guaranteed; another application on the same cluster
* can steal the window of opportunity and acquire this application's resources in the
   * meantime.
*
* @return whether the request is received.
*/
private[spark] def killAndReplaceExecutor(executorId: String): Boolean = {
schedulerBackend match {
case b: ExecutorAllocationClient =>
b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true,
force = true).nonEmpty
case _ =>
logWarning("Killing executors is not supported by current scheduler.")
false
}
}
/** The version of Spark on which this application is running. */
def version: String = SPARK_VERSION
/**
   * Return a map from each block manager (keyed by "host:port") to the max memory available for
   * caching and the remaining memory available for caching.
*/
def getExecutorMemoryStatus: Map[String, (Long, Long)] = {
assertNotStopped()
env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) =>
(blockManagerId.host + ":" + blockManagerId.port, mem)
}
}
/**
* :: DeveloperApi ::
* Return information about what RDDs are cached, if they are in mem or on disk, how much space
* they take, etc.
*/
@DeveloperApi
def getRDDStorageInfo: Array[RDDInfo] = {
getRDDStorageInfo(_ => true)
}
private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = {
assertNotStopped()
val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray
rddInfos.foreach { rddInfo =>
val rddId = rddInfo.id
val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId))
rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0)
rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L)
rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L)
}
rddInfos.filter(_.isCached)
}
/**
* Returns an immutable map of RDDs that have marked themselves as persistent via cache() call.
*
* @note This does not necessarily mean the caching or computation was successful.
*/
def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap
/**
* :: DeveloperApi ::
* Return pools for fair scheduler
*/
@DeveloperApi
def getAllPools: Seq[Schedulable] = {
assertNotStopped()
// TODO(xiajunluan): We should take nested pools into account
taskScheduler.rootPool.schedulableQueue.asScala.toSeq
}
/**
* :: DeveloperApi ::
* Return the pool associated with the given name, if one exists
*/
@DeveloperApi
def getPoolForName(pool: String): Option[Schedulable] = {
assertNotStopped()
Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool))
}
/**
* Return current scheduling mode
*/
def getSchedulingMode: SchedulingMode.SchedulingMode = {
assertNotStopped()
taskScheduler.schedulingMode
}
/**
* Gets the locality information associated with the partition in a particular rdd
* @param rdd of interest
* @param partition to be looked up for locality
* @return list of preferred locations for the partition
*/
private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = {
dagScheduler.getPreferredLocs(rdd, partition)
}
/**
* Register an RDD to be persisted in memory and/or disk storage
*/
private[spark] def persistRDD(rdd: RDD[_]): Unit = {
persistentRdds(rdd.id) = rdd
}
/**
* Unpersist an RDD from memory and/or disk storage
*/
private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = {
env.blockManager.master.removeRdd(rddId, blocking)
persistentRdds.remove(rddId)
listenerBus.post(SparkListenerUnpersistRDD(rddId))
}
/**
* Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future.
*
* If a jar is added during execution, it will not be available until the next TaskSet starts.
*
* @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems),
* an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node.
*
* @note A path can be added only once. Subsequent additions of the same path are ignored.
*/
def addJar(path: String): Unit = {
addJar(path, false)
}
private def addJar(path: String, addedOnSubmit: Boolean): Unit = {
def addLocalJarFile(file: File): Seq[String] = {
try {
if (!file.exists()) {
throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found")
}
if (file.isDirectory) {
throw new IllegalArgumentException(
s"Directory ${file.getAbsoluteFile} is not allowed for addJar")
}
Seq(env.rpcEnv.fileServer.addJar(file))
} catch {
case NonFatal(e) =>
logError(s"Failed to add $path to Spark environment", e)
Nil
}
}
def checkRemoteJarFile(path: String): Seq[String] = {
val hadoopPath = new Path(path)
val scheme = hadoopPath.toUri.getScheme
if (!Array("http", "https", "ftp").contains(scheme)) {
try {
val fs = hadoopPath.getFileSystem(hadoopConfiguration)
if (!fs.exists(hadoopPath)) {
throw new FileNotFoundException(s"Jar ${path} not found")
}
if (fs.getFileStatus(hadoopPath).isDirectory) {
throw new IllegalArgumentException(
s"Directory ${path} is not allowed for addJar")
}
Seq(path)
} catch {
case NonFatal(e) =>
logError(s"Failed to add $path to Spark environment", e)
Nil
}
} else {
Seq(path)
}
}
if (path == null || path.isEmpty) {
logWarning("null or empty path specified as parameter to addJar")
} else {
      val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) {
// For local paths with backslashes on Windows, URI throws an exception
(addLocalJarFile(new File(path)), "local")
} else {
val uri = Utils.resolveURI(path)
// SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies
Utils.validateURL(uri)
val uriScheme = uri.getScheme
val jarPaths = uriScheme match {
// A JAR file which exists only on the driver node
case null =>
// SPARK-22585 path without schema is not url encoded
addLocalJarFile(new File(uri.getPath))
// A JAR file which exists only on the driver node
case "file" => addLocalJarFile(new File(uri.getPath))
// A JAR file which exists locally on every worker node
case "local" => Seq("file:" + uri.getPath)
case "ivy" =>
// Since `new Path(path).toUri` will lose query information,
// so here we use `URI.create(path)`
DependencyUtils.resolveMavenDependencies(URI.create(path))
.flatMap(jar => addLocalJarFile(new File(jar)))
case _ => checkRemoteJarFile(path)
}
(jarPaths, uriScheme)
}
if (keys.nonEmpty) {
val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis
val (added, existed) = keys.partition(addedJars.putIfAbsent(_, timestamp).isEmpty)
if (added.nonEmpty) {
val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI"
logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp")
postEnvironmentUpdate()
}
if (existed.nonEmpty) {
val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI"
logInfo(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." +
" Overwriting of added jar is not supported in the current version.")
}
}
}
}
/**
* Returns a list of jar files that are added to resources.
*/
def listJars(): Seq[String] = addedJars.keySet.toSeq
/**
   * When stopping SparkContext inside Spark components, it's easy to cause a deadlock, since Spark
* may wait for some internal threads to finish. It's better to use this method to stop
* SparkContext instead.
*/
private[spark] def stopInNewThread(): Unit = {
new Thread("stop-spark-context") {
setDaemon(true)
override def run(): Unit = {
try {
SparkContext.this.stop()
} catch {
case e: Throwable =>
logError(e.getMessage, e)
throw e
}
}
}.start()
}
/**
* Shut down the SparkContext.
*/
def stop(): Unit = {
if (LiveListenerBus.withinListenerThread.value) {
throw new SparkException(s"Cannot stop SparkContext within listener bus thread.")
}
// Use the stopping variable to ensure no contention for the stop scenario.
// Still track the stopped variable for use elsewhere in the code.
if (!stopped.compareAndSet(false, true)) {
logInfo("SparkContext already stopped.")
return
}
if (_shutdownHookRef != null) {
ShutdownHookManager.removeShutdownHook(_shutdownHookRef)
}
if (listenerBus != null) {
Utils.tryLogNonFatalError {
postApplicationEnd()
}
}
Utils.tryLogNonFatalError {
_driverLogger.foreach(_.stop())
}
Utils.tryLogNonFatalError {
_ui.foreach(_.stop())
}
Utils.tryLogNonFatalError {
_cleaner.foreach(_.stop())
}
Utils.tryLogNonFatalError {
_executorAllocationManager.foreach(_.stop())
}
if (_dagScheduler != null) {
Utils.tryLogNonFatalError {
_dagScheduler.stop()
}
_dagScheduler = null
}
if (_listenerBusStarted) {
Utils.tryLogNonFatalError {
listenerBus.stop()
_listenerBusStarted = false
}
}
if (env != null) {
Utils.tryLogNonFatalError {
env.metricsSystem.report()
}
}
Utils.tryLogNonFatalError {
_plugins.foreach(_.shutdown())
}
FallbackStorage.cleanUp(_conf, _hadoopConfiguration)
Utils.tryLogNonFatalError {
_eventLogger.foreach(_.stop())
}
if (_heartbeater != null) {
Utils.tryLogNonFatalError {
_heartbeater.stop()
}
_heartbeater = null
}
if (_shuffleDriverComponents != null) {
Utils.tryLogNonFatalError {
_shuffleDriverComponents.cleanupApplication()
}
}
if (env != null && _heartbeatReceiver != null) {
Utils.tryLogNonFatalError {
env.rpcEnv.stop(_heartbeatReceiver)
}
}
Utils.tryLogNonFatalError {
_progressBar.foreach(_.stop())
}
_taskScheduler = null
// TODO: Cache.stop()?
if (_env != null) {
Utils.tryLogNonFatalError {
_env.stop()
}
SparkEnv.set(null)
}
if (_statusStore != null) {
_statusStore.close()
}
    // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even
    // after this `SparkContext` is stopped.
localProperties.remove()
ResourceProfile.clearDefaultProfile()
    // Clear the active context so that a new SparkContext can be created later.
SparkContext.clearActiveContext()
logInfo("Successfully stopped SparkContext")
}
/**
* Get Spark's home location from either a value set through the constructor,
* or the spark.home Java property, or the SPARK_HOME environment variable
   * (in that order of preference). If none of these is set, return None.
*/
private[spark] def getSparkHome(): Option[String] = {
conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME")))
}
/**
* Set the thread-local property for overriding the call sites
* of actions and RDDs.
*/
def setCallSite(shortCallSite: String): Unit = {
setLocalProperty(CallSite.SHORT_FORM, shortCallSite)
}
/**
* Set the thread-local property for overriding the call sites
* of actions and RDDs.
*/
private[spark] def setCallSite(callSite: CallSite): Unit = {
setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm)
setLocalProperty(CallSite.LONG_FORM, callSite.longForm)
}
/**
* Clear the thread-local property for overriding the call sites
* of actions and RDDs.
*/
def clearCallSite(): Unit = {
setLocalProperty(CallSite.SHORT_FORM, null)
setLocalProperty(CallSite.LONG_FORM, null)
}
/**
* Capture the current user callsite and return a formatted version for printing. If the user
* has overridden the call site using `setCallSite()`, this will return the user's version.
*/
private[spark] def getCallSite(): CallSite = {
lazy val callSite = Utils.getCallSite()
CallSite(
Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm),
Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm)
)
}
/**
* Run a function on a given set of partitions in an RDD and pass the results to the given
* handler function. This is the main entry point for all actions in Spark.
*
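   * For example, a minimal sketch (assuming `sc` is this `SparkContext`):
   * {{{
   *   val rdd = sc.parallelize(1 to 100, 4)
   *   sc.runJob(rdd,
   *     (_: TaskContext, iter: Iterator[Int]) => iter.sum,
   *     Seq(0, 1),
   *     (partitionId: Int, partialSum: Int) => println(s"$partitionId -> $partialSum"))
   * }}}
   *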
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like `first()`
* @param resultHandler callback to pass each result to
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
partitions: Seq[Int],
resultHandler: (Int, U) => Unit): Unit = {
if (stopped.get()) {
throw new IllegalStateException("SparkContext has been shutdown")
}
val callSite = getCallSite
val cleanedFunc = clean(func)
logInfo("Starting job: " + callSite.shortForm)
if (conf.getBoolean("spark.logLineage", false)) {
logInfo("RDD's recursive dependencies:\\n" + rdd.toDebugString)
}
dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get)
progressBar.foreach(_.finishAll())
rdd.doCheckpoint()
}
/**
* Run a function on a given set of partitions in an RDD and return the results as an array.
* The function that is run against each partition additionally takes `TaskContext` argument.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like `first()`
* @return in-memory collection with a result of the job (each collection element will contain
* a result from one partition)
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
partitions: Seq[Int]): Array[U] = {
val results = new Array[U](partitions.size)
runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res)
results
}
/**
* Run a function on a given set of partitions in an RDD and return the results as an array.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like `first()`
* @return in-memory collection with a result of the job (each collection element will contain
* a result from one partition)
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
func: Iterator[T] => U,
partitions: Seq[Int]): Array[U] = {
val cleanedFunc = clean(func)
runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions)
}
/**
* Run a job on all partitions in an RDD and return the results in an array. The function
* that is run against each partition additionally takes `TaskContext` argument.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @return in-memory collection with a result of the job (each collection element will contain
* a result from one partition)
*/
def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = {
runJob(rdd, func, 0 until rdd.partitions.length)
}
/**
* Run a job on all partitions in an RDD and return the results in an array.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @return in-memory collection with a result of the job (each collection element will contain
* a result from one partition)
*/
def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = {
runJob(rdd, func, 0 until rdd.partitions.length)
}
/**
* Run a job on all partitions in an RDD and pass the results to a handler function. The function
* that is run against each partition additionally takes `TaskContext` argument.
*
* @param rdd target RDD to run tasks on
* @param processPartition a function to run on each partition of the RDD
* @param resultHandler callback to pass each result to
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
processPartition: (TaskContext, Iterator[T]) => U,
resultHandler: (Int, U) => Unit): Unit = {
runJob[T, U](rdd, processPartition, 0 until rdd.partitions.length, resultHandler)
}
/**
* Run a job on all partitions in an RDD and pass the results to a handler function.
*
* @param rdd target RDD to run tasks on
* @param processPartition a function to run on each partition of the RDD
* @param resultHandler callback to pass each result to
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
processPartition: Iterator[T] => U,
resultHandler: (Int, U) => Unit): Unit = {
val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter)
runJob[T, U](rdd, processFunc, 0 until rdd.partitions.length, resultHandler)
}
/**
* :: DeveloperApi ::
* Run a job that can return approximate results.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param evaluator `ApproximateEvaluator` to receive the partial results
* @param timeout maximum time to wait for the job, in milliseconds
* @return partial result (how partial depends on whether the job was finished before or
* after timeout)
*/
@DeveloperApi
def runApproximateJob[T, U, R](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
evaluator: ApproximateEvaluator[U, R],
timeout: Long): PartialResult[R] = {
assertNotStopped()
val callSite = getCallSite
logInfo("Starting job: " + callSite.shortForm)
val start = System.nanoTime
val cleanedFunc = clean(func)
val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout,
localProperties.get)
logInfo(
"Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s")
result
}
/**
* Submit a job for execution and return a FutureJob holding the result.
*
* @param rdd target RDD to run tasks on
* @param processPartition a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like `first()`
* @param resultHandler callback to pass each result to
* @param resultFunc function to be executed when the result is ready
*/
def submitJob[T, U, R](
rdd: RDD[T],
processPartition: Iterator[T] => U,
partitions: Seq[Int],
resultHandler: (Int, U) => Unit,
resultFunc: => R): SimpleFutureAction[R] =
{
assertNotStopped()
val cleanF = clean(processPartition)
val callSite = getCallSite
val waiter = dagScheduler.submitJob(
rdd,
(context: TaskContext, iter: Iterator[T]) => cleanF(iter),
partitions,
callSite,
resultHandler,
localProperties.get)
new SimpleFutureAction(waiter, resultFunc)
}
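  // Illustrative usage sketch (not part of the original source), assuming `sc: SparkContext`
  // and `lines: RDD[String]`: submit the same per-partition count asynchronously, collect
  // each result through the handler, and obtain all counts when the future completes.
  //   val counts = new Array[Int](lines.partitions.length)
  //   val future: SimpleFutureAction[Seq[Int]] = sc.submitJob[String, Int, Seq[Int]](
  //     lines,
  //     (it: Iterator[String]) => it.size,
  //     lines.partitions.indices,
  //     (index: Int, count: Int) => counts(index) = count,
  //     counts.toSeq)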
/**
* Submit a map stage for execution. This is currently an internal API only, but might be
* promoted to DeveloperApi in the future.
*/
private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C])
: SimpleFutureAction[MapOutputStatistics] = {
assertNotStopped()
val callSite = getCallSite()
var result: MapOutputStatistics = null
val waiter = dagScheduler.submitMapStage(
dependency,
(r: MapOutputStatistics) => { result = r },
callSite,
localProperties.get)
new SimpleFutureAction[MapOutputStatistics](waiter, result)
}
/**
* Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup`
* for more information.
*/
def cancelJobGroup(groupId: String): Unit = {
assertNotStopped()
dagScheduler.cancelJobGroup(groupId)
}
/** Cancel all jobs that have been scheduled or are running. */
def cancelAllJobs(): Unit = {
assertNotStopped()
dagScheduler.cancelAllJobs()
}
/**
* Cancel a given job if it's scheduled or running.
*
* @param jobId the job ID to cancel
* @param reason optional reason for cancellation
* @note Throws `InterruptedException` if the cancel message cannot be sent
*/
def cancelJob(jobId: Int, reason: String): Unit = {
dagScheduler.cancelJob(jobId, Option(reason))
}
/**
* Cancel a given job if it's scheduled or running.
*
* @param jobId the job ID to cancel
* @note Throws `InterruptedException` if the cancel message cannot be sent
*/
def cancelJob(jobId: Int): Unit = {
dagScheduler.cancelJob(jobId, None)
}
/**
* Cancel a given stage and all jobs associated with it.
*
* @param stageId the stage ID to cancel
* @param reason reason for cancellation
* @note Throws `InterruptedException` if the cancel message cannot be sent
*/
def cancelStage(stageId: Int, reason: String): Unit = {
dagScheduler.cancelStage(stageId, Option(reason))
}
/**
* Cancel a given stage and all jobs associated with it.
*
* @param stageId the stage ID to cancel
* @note Throws `InterruptedException` if the cancel message cannot be sent
*/
def cancelStage(stageId: Int): Unit = {
dagScheduler.cancelStage(stageId, None)
}
/**
* Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI
* or through SparkListener.onTaskStart.
*
* @param taskId the task ID to kill. This id uniquely identifies the task attempt.
* @param interruptThread whether to interrupt the thread running the task.
* @param reason the reason for killing the task, which should be a short string. If a task
* is killed multiple times with different reasons, only one reason will be reported.
*
* @return Whether the task was successfully killed.
*/
def killTaskAttempt(
taskId: Long,
interruptThread: Boolean = true,
reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = {
dagScheduler.killTaskAttempt(taskId, interruptThread, reason)
}
/**
* Clean a closure to make it ready to be serialized and sent to tasks
   * (removes unreferenced variables in $outer's, updates REPL variables).
* If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively
* check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt>
* if not.
*
* @param f the closure to clean
* @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability
* @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not
* serializable
* @return the cleaned closure
*/
private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = {
ClosureCleaner.clean(f, checkSerializable)
f
}
/**
* Set the directory under which RDDs are going to be checkpointed.
* @param directory path to the directory where checkpoint files will be stored
* (must be HDFS path if running in cluster)
*/
def setCheckpointDir(directory: String): Unit = {
// If we are running on a cluster, log a warning if the directory is local.
// Otherwise, the driver may attempt to reconstruct the checkpointed RDD from
// its own local file system, which is incorrect because the checkpoint files
// are actually on the executor machines.
if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) {
logWarning("Spark is not running in local mode, therefore the checkpoint directory " +
s"must not be on the local filesystem. Directory '$directory' " +
"appears to be on the local filesystem.")
}
checkpointDir = Option(directory).map { dir =>
val path = new Path(dir, UUID.randomUUID().toString)
val fs = path.getFileSystem(hadoopConfiguration)
fs.mkdirs(path)
fs.getFileStatus(path).getPath.toString
}
}
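  // Illustrative usage (not part of the original source): when running on a cluster the
  // directory should live on a shared filesystem such as HDFS, e.g.
  //   sc.setCheckpointDir("hdfs:///tmp/spark-checkpoints")  // `sc` is an assumed SparkContext
  //   someRdd.checkpoint()  // RDD.checkpoint() will then write into that directory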
def getCheckpointDir: Option[String] = checkpointDir
/** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */
def defaultParallelism: Int = {
assertNotStopped()
taskScheduler.defaultParallelism
}
/**
* Default min number of partitions for Hadoop RDDs when not given by user
* Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2.
* The reasons for this are discussed in https://github.com/mesos/spark/pull/718
*/
def defaultMinPartitions: Int = math.min(defaultParallelism, 2)
private val nextShuffleId = new AtomicInteger(0)
private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement()
private val nextRddId = new AtomicInteger(0)
/** Register a new RDD, returning its RDD ID */
private[spark] def newRddId(): Int = nextRddId.getAndIncrement()
/**
* Registers listeners specified in spark.extraListeners, then starts the listener bus.
* This should be called after all internal listeners have been registered with the listener bus
* (e.g. after the web UI and event logging listeners have been registered).
*/
private def setupAndStartListenerBus(): Unit = {
try {
conf.get(EXTRA_LISTENERS).foreach { classNames =>
val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf)
listeners.foreach { listener =>
listenerBus.addToSharedQueue(listener)
logInfo(s"Registered listener ${listener.getClass().getName()}")
}
}
} catch {
case e: Exception =>
try {
stop()
} finally {
throw new SparkException(s"Exception when registering SparkListener", e)
}
}
listenerBus.start(this, _env.metricsSystem)
_listenerBusStarted = true
}
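  // Illustrative configuration (not part of the original source): extra listeners are supplied
  // as a comma-separated list of class names before the context starts, e.g.
  //   new SparkConf().set("spark.extraListeners", "com.example.MetricsListener")
  // where com.example.MetricsListener is a hypothetical SparkListenerInterface implementation
  // that Utils.loadExtensions can instantiate.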
/** Post the application start event */
private def postApplicationStart(): Unit = {
// Note: this code assumes that the task scheduler has been initialized and has contacted
// the cluster manager to get an application ID (in case the cluster manager provides one).
listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId),
startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls,
schedulerBackend.getDriverAttributes))
_driverLogger.foreach(_.startSync(_hadoopConfiguration))
}
/** Post the application end event */
private def postApplicationEnd(): Unit = {
listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis))
}
/** Post the environment update event once the task scheduler is ready */
private def postEnvironmentUpdate(): Unit = {
if (taskScheduler != null) {
val schedulingMode = getSchedulingMode.toString
val addedJarPaths = addedJars.keys.toSeq
val addedFilePaths = addedFiles.keys.toSeq
val addedArchivePaths = addedArchives.keys.toSeq
val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration,
schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths)
val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails)
listenerBus.post(environmentUpdate)
}
}
/** Reports heartbeat metrics for the driver. */
private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = {
val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager)
executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics))
val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics]
// In the driver, we do not track per-stage metrics, so use a dummy stage for the key
driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics))
val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0)
listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates,
driverUpdates))
}
// In order to prevent multiple SparkContexts from being active at the same time, mark this
// context as having finished construction.
// NOTE: this must be placed at the end of the SparkContext constructor.
SparkContext.setActiveContext(this)
}
/**
* The SparkContext object contains a number of implicit conversions and parameters for use with
* various Spark features.
*/
object SparkContext extends Logging {
private val VALID_LOG_LEVELS =
Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN")
/**
* Lock that guards access to global variables that track SparkContext construction.
*/
private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object()
/**
* The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`.
*
* Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`.
*/
private val activeContext: AtomicReference[SparkContext] =
new AtomicReference[SparkContext](null)
/**
* Points to a partially-constructed SparkContext if another thread is in the SparkContext
* constructor, or `None` if no SparkContext is being constructed.
*
* Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`.
*/
private var contextBeingConstructed: Option[SparkContext] = None
/**
* Called to ensure that no other SparkContext is running in this JVM.
*
* Throws an exception if a running context is detected and logs a warning if another thread is
* constructing a SparkContext. This warning is necessary because the current locking scheme
* prevents us from reliably distinguishing between cases where another context is being
* constructed and cases where another constructor threw an exception.
*/
private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
Option(activeContext.get()).filter(_ ne sc).foreach { ctx =>
val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." +
s"The currently running SparkContext was created at:\\n${ctx.creationSite.longForm}"
throw new SparkException(errMsg)
}
contextBeingConstructed.filter(_ ne sc).foreach { otherContext =>
// Since otherContext might point to a partially-constructed context, guard against
// its creationSite field being null:
val otherContextCreationSite =
Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location")
val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" +
" constructor). This may indicate an error, since only one SparkContext should be" +
" running in this JVM (see SPARK-2243)." +
s" The other SparkContext was created at:\\n$otherContextCreationSite"
logWarning(warnMsg)
}
}
}
/**
* Called to ensure that SparkContext is created or accessed only on the Driver.
*
* Throws an exception if a SparkContext is about to be created in executors.
*/
private def assertOnDriver(): Unit = {
if (Utils.isInRunningSparkTask) {
// we're accessing it during task execution, fail.
throw new IllegalStateException(
"SparkContext should only be created and accessed on the driver.")
}
}
/**
* This function may be used to get or instantiate a SparkContext and register it as a
* singleton object. Because we can only have one active SparkContext per JVM,
* this is useful when applications may wish to share a SparkContext.
*
   * @param config `SparkConf` that will be used for initialization of the `SparkContext`
* @return current `SparkContext` (or a new one if it wasn't created before the function call)
*/
def getOrCreate(config: SparkConf): SparkContext = {
// Synchronize to ensure that multiple create requests don't trigger an exception
// from assertNoOtherContextIsRunning within setActiveContext
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
if (activeContext.get() == null) {
setActiveContext(new SparkContext(config))
} else {
if (config.getAll.nonEmpty) {
logWarning("Using an existing SparkContext; some configuration may not take effect.")
}
}
activeContext.get()
}
}
/**
* This function may be used to get or instantiate a SparkContext and register it as a
* singleton object. Because we can only have one active SparkContext per JVM,
* this is useful when applications may wish to share a SparkContext.
*
* This method allows not passing a SparkConf (useful if just retrieving).
*
   * @return current `SparkContext` (or a new one if it wasn't created before the function call)
*/
def getOrCreate(): SparkContext = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
if (activeContext.get() == null) {
setActiveContext(new SparkContext())
}
activeContext.get()
}
}
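  // Illustrative usage (not part of the original source): share one context across call sites.
  //   val conf = new SparkConf().setAppName("example-app").setMaster("local[2]")
  //   val sc1 = SparkContext.getOrCreate(conf)  // creates the context on the first call
  //   val sc2 = SparkContext.getOrCreate()      // subsequent calls return the same instance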
/** Return the current active [[SparkContext]] if any. */
private[spark] def getActive: Option[SparkContext] = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
Option(activeContext.get())
}
}
/**
* Called at the beginning of the SparkContext constructor to ensure that no SparkContext is
* running. Throws an exception if a running context is detected and logs a warning if another
* thread is constructing a SparkContext. This warning is necessary because the current locking
* scheme prevents us from reliably distinguishing between cases where another context is being
* constructed and cases where another constructor threw an exception.
*/
private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
assertNoOtherContextIsRunning(sc)
contextBeingConstructed = Some(sc)
}
}
/**
* Called at the end of the SparkContext constructor to ensure that no other SparkContext has
* raced with this constructor and started.
*/
private[spark] def setActiveContext(sc: SparkContext): Unit = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
assertNoOtherContextIsRunning(sc)
contextBeingConstructed = None
activeContext.set(sc)
}
}
/**
* Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's
* also called in unit tests to prevent a flood of warnings from test suites that don't / can't
* properly clean up their SparkContexts.
*/
private[spark] def clearActiveContext(): Unit = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
activeContext.set(null)
}
}
private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description"
private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id"
private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel"
private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool"
private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope"
private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride"
/**
* Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was
* changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see
* SPARK-6716 for more details).
*/
private[spark] val DRIVER_IDENTIFIER = "driver"
private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T])
: ArrayWritable = {
def anyToWritable[U <: Writable](u: U): Writable = u
new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]],
arr.map(x => anyToWritable(x)).toArray)
}
/**
* Find the JAR from which a given class was loaded, to make it easy for users to pass
* their JARs to SparkContext.
*
* @param cls class that should be inside of the jar
* @return jar that contains the Class, `None` if not found
*/
def jarOfClass(cls: Class[_]): Option[String] = {
val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class")
if (uri != null) {
val uriStr = uri.toString
if (uriStr.startsWith("jar:file:")) {
// URI will be of the form "jar:file:/path/foo.jar!/package/cls.class",
// so pull out the /path/foo.jar
Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!')))
} else {
None
}
} else {
None
}
}
/**
* Find the JAR that contains the class of a particular object, to make it easy for users
* to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in
* your driver program.
*
* @param obj reference to an instance which class should be inside of the jar
* @return jar that contains the class of the instance, `None` if not found
*/
def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass)
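  // Illustrative usage (not part of the original source): locate the application JAR so it
  // can be shipped to executors when building a SparkConf by hand.
  //   val appJar: Option[String] = SparkContext.jarOfObject(this)
  //   appJar.foreach(jar => conf.setJars(Seq(jar)))  // `conf` is an assumed SparkConf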
/**
* Creates a modified version of a SparkConf with the parameters that can be passed separately
* to SparkContext, to make it easier to write SparkContext's constructors. This ignores
* parameters that are passed as the default value of null, instead of throwing an exception
* like SparkConf would.
*/
private[spark] def updatedConf(
conf: SparkConf,
master: String,
appName: String,
sparkHome: String = null,
jars: Seq[String] = Nil,
environment: Map[String, String] = Map()): SparkConf =
{
val res = conf.clone()
res.setMaster(master)
res.setAppName(appName)
if (sparkHome != null) {
res.setSparkHome(sparkHome)
}
if (jars != null && !jars.isEmpty) {
res.setJars(jars)
}
res.setExecutorEnv(environment.toSeq)
res
}
/**
* The number of cores available to the driver to use for tasks such as I/O with Netty
*/
private[spark] def numDriverCores(master: String): Int = {
numDriverCores(master, null)
}
/**
* The number of cores available to the driver to use for tasks such as I/O with Netty
*/
private[spark] def numDriverCores(master: String, conf: SparkConf): Int = {
def convertToInt(threads: String): Int = {
if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt
}
master match {
case "local" => 1
case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads)
case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads)
case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) =>
if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") {
conf.getInt(DRIVER_CORES.key, 0)
} else {
0
}
case _ => 0 // Either driver is not being used, or its core count will be interpolated later
}
}
/**
* Create a task scheduler based on a given master URL.
* Return a 2-tuple of the scheduler backend and the task scheduler.
*/
private def createTaskScheduler(
sc: SparkContext,
master: String): (SchedulerBackend, TaskScheduler) = {
import SparkMasterRegex._
// When running locally, don't try to re-execute tasks on failure.
val MAX_LOCAL_TASK_FAILURES = 1
// Ensure that default executor's resources satisfies one or more tasks requirement.
// This function is for cluster managers that don't set the executor cores config, for
    // others it's checked in ResourceProfile.
def checkResourcesPerTask(executorCores: Int): Unit = {
val taskCores = sc.conf.get(CPUS_PER_TASK)
if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) {
validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores)
}
val defaultProf = sc.resourceProfileManager.defaultResourceProfile
ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores))
}
master match {
case "local" =>
checkResourcesPerTask(1)
val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true)
val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1)
scheduler.initialize(backend)
(backend, scheduler)
case LOCAL_N_REGEX(threads) =>
def localCpuCount: Int = Runtime.getRuntime.availableProcessors()
// local[*] estimates the number of cores on the machine; local[N] uses exactly N threads.
val threadCount = if (threads == "*") localCpuCount else threads.toInt
if (threadCount <= 0) {
throw new SparkException(s"Asked to run locally with $threadCount threads")
}
checkResourcesPerTask(threadCount)
val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true)
val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount)
scheduler.initialize(backend)
(backend, scheduler)
case LOCAL_N_FAILURES_REGEX(threads, maxFailures) =>
def localCpuCount: Int = Runtime.getRuntime.availableProcessors()
// local[*, M] means the number of cores on the computer with M failures
// local[N, M] means exactly N threads with M failures
val threadCount = if (threads == "*") localCpuCount else threads.toInt
checkResourcesPerTask(threadCount)
val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true)
val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount)
scheduler.initialize(backend)
(backend, scheduler)
case SPARK_REGEX(sparkUrl) =>
val scheduler = new TaskSchedulerImpl(sc)
val masterUrls = sparkUrl.split(",").map("spark://" + _)
val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls)
scheduler.initialize(backend)
(backend, scheduler)
case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) =>
checkResourcesPerTask(coresPerWorker.toInt)
// Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang.
val memoryPerWorkerInt = memoryPerWorker.toInt
if (sc.executorMemory > memoryPerWorkerInt) {
throw new SparkException(
"Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format(
memoryPerWorkerInt, sc.executorMemory))
}
      // For local-cluster mode, default SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED to false
      // because this mode is intended for testing, where all executors run on the same host.
      // If host-local reading were enabled here, remote fetching would rarely be exercised,
      // and most unit tests would have to set this config to false explicitly (despite the
      // fact that remote fetching is much more frequent in production).
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false)
val scheduler = new TaskSchedulerImpl(sc)
val localCluster = LocalSparkCluster(
numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf)
val masterUrls = localCluster.start()
val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls)
scheduler.initialize(backend)
backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => {
localCluster.stop()
}
(backend, scheduler)
case masterUrl =>
val cm = getClusterManager(masterUrl) match {
case Some(clusterMgr) => clusterMgr
case None => throw new SparkException("Could not parse Master URL: '" + master + "'")
}
try {
val scheduler = cm.createTaskScheduler(sc, masterUrl)
val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler)
cm.initialize(scheduler, backend)
(backend, scheduler)
} catch {
case se: SparkException => throw se
case NonFatal(e) =>
throw new SparkException("External scheduler cannot be instantiated", e)
}
}
}
private def getClusterManager(url: String): Option[ExternalClusterManager] = {
val loader = Utils.getContextOrSparkClassLoader
val serviceLoaders =
ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url))
if (serviceLoaders.size > 1) {
throw new SparkException(
s"Multiple external cluster managers registered for the url $url: $serviceLoaders")
}
serviceLoaders.headOption
}
/**
* This is a helper function to complete the missing S3A magic committer configurations
* based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled`
*/
private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = {
val magicCommitterConfs = conf
.getAllWithPrefix("spark.hadoop.fs.s3a.bucket.")
.filter(_._1.endsWith(".committer.magic.enabled"))
.filter(_._2.equalsIgnoreCase("true"))
if (magicCommitterConfs.nonEmpty) {
// Try to enable S3 magic committer if missing
conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true")
if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) {
conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic")
conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a",
"org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory")
conf.setIfMissing("spark.sql.parquet.output.committer.class",
"org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter")
conf.setIfMissing("spark.sql.sources.commitProtocolClass",
"org.apache.spark.internal.io.cloud.PathOutputCommitProtocol")
}
}
}
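  // Illustrative trigger (not part of the original source): a single per-bucket setting such as
  //   spark.hadoop.fs.s3a.bucket.my-bucket.committer.magic.enabled=true   // bucket name is hypothetical
  // causes the committer name, committer factory and SQL commit protocol defaults above to be
  // filled in, unless the user has already set them explicitly.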
/**
* SPARK-36796: This is a helper function to supplement `--add-opens` options to
* `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`.
*/
private def supplementJavaModuleOptions(conf: SparkConf): Unit = {
def supplement(key: OptionalConfigEntry[String]): Unit = {
val v = conf.get(key) match {
case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts"
case None => JavaModuleOptions.defaultModuleOptions()
}
conf.set(key.key, v)
}
supplement(DRIVER_JAVA_OPTIONS)
supplement(EXECUTOR_JAVA_OPTIONS)
}
}
/**
* A collection of regexes for extracting information from the master string.
*/
private object SparkMasterRegex {
// Regular expression used for local[N] and local[*] master formats
val LOCAL_N_REGEX = """local\\[([0-9]+|\\*)\\]""".r
// Regular expression for local[N, maxRetries], used in tests with failing tasks
val LOCAL_N_FAILURES_REGEX = """local\\[([0-9]+|\\*)\\s*,\\s*([0-9]+)\\]""".r
// Regular expression for simulating a Spark cluster of [N, cores, memory] locally
val LOCAL_CLUSTER_REGEX = """local-cluster\\[\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*]""".r
// Regular expression for connecting to Spark deploy clusters
val SPARK_REGEX = """spark://(.*)""".r
// Regular expression for connecting to kubernetes clusters
val KUBERNETES_REGEX = """k8s://(.*)""".r
}
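// Illustrative master strings (not part of the original source) matched by the patterns above:
//   "local[4]"                  -> LOCAL_N_REGEX          (4 threads)
//   "local[*, 3]"               -> LOCAL_N_FAILURES_REGEX (all cores, up to 3 task failures)
//   "local-cluster[2, 1, 1024]" -> LOCAL_CLUSTER_REGEX    (2 workers, 1 core and 1024 MiB each)
//   "spark://host:7077"         -> SPARK_REGEX
//   "k8s://https://host:6443"   -> KUBERNETES_REGEX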
/**
* A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable`
* class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the
* conversion.
* The getter for the writable class takes a `ClassTag[T]` in case this is a generic object
* that doesn't know the type of `T` when it is created. This sounds strange but is necessary to
* support converting subclasses of `Writable` to themselves (`writableWritableConverter()`).
*/
private[spark] class WritableConverter[T](
val writableClass: ClassTag[T] => Class[_ <: Writable],
val convert: Writable => T)
extends Serializable
object WritableConverter {
// Helper objects for converting common types to Writable
private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T)
: WritableConverter[T] = {
val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]]
new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W]))
}
// The following implicit functions were in SparkContext before 1.3 and users had to
// `import SparkContext._` to enable them. Now we move them here to make the compiler find
// them automatically. However, we still keep the old functions in SparkContext for backward
// compatibility and forward to the following functions directly.
// The following implicit declarations have been added on top of the very similar ones
// below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta
// expansion of zero-arg methods and thus won't match a no-arg method where it expects
// an implicit that is a function of no args.
implicit val intWritableConverterFn: () => WritableConverter[Int] =
() => simpleWritableConverter[Int, IntWritable](_.get)
implicit val longWritableConverterFn: () => WritableConverter[Long] =
() => simpleWritableConverter[Long, LongWritable](_.get)
implicit val doubleWritableConverterFn: () => WritableConverter[Double] =
() => simpleWritableConverter[Double, DoubleWritable](_.get)
implicit val floatWritableConverterFn: () => WritableConverter[Float] =
() => simpleWritableConverter[Float, FloatWritable](_.get)
implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] =
() => simpleWritableConverter[Boolean, BooleanWritable](_.get)
implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = {
() => simpleWritableConverter[Array[Byte], BytesWritable] { bw =>
      // getBytes returns an array that is longer than the data to be returned
Arrays.copyOfRange(bw.getBytes, 0, bw.getLength)
}
}
implicit val stringWritableConverterFn: () => WritableConverter[String] =
() => simpleWritableConverter[String, Text](_.toString)
implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] =
() => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])
// These implicits remain included for backwards-compatibility. They fulfill the
// same role as those above.
implicit def intWritableConverter(): WritableConverter[Int] =
simpleWritableConverter[Int, IntWritable](_.get)
implicit def longWritableConverter(): WritableConverter[Long] =
simpleWritableConverter[Long, LongWritable](_.get)
implicit def doubleWritableConverter(): WritableConverter[Double] =
simpleWritableConverter[Double, DoubleWritable](_.get)
implicit def floatWritableConverter(): WritableConverter[Float] =
simpleWritableConverter[Float, FloatWritable](_.get)
implicit def booleanWritableConverter(): WritableConverter[Boolean] =
simpleWritableConverter[Boolean, BooleanWritable](_.get)
implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = {
simpleWritableConverter[Array[Byte], BytesWritable] { bw =>
      // getBytes returns an array that is longer than the data to be returned
Arrays.copyOfRange(bw.getBytes, 0, bw.getLength)
}
}
implicit def stringWritableConverter(): WritableConverter[String] =
simpleWritableConverter[String, Text](_.toString)
implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] =
new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])
}
/**
* A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable`
* class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the
* conversion.
* The `Writable` class will be used in `SequenceFileRDDFunctions`.
*/
private[spark] class WritableFactory[T](
val writableClass: ClassTag[T] => Class[_ <: Writable],
val convert: T => Writable) extends Serializable
object WritableFactory {
private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W)
: WritableFactory[T] = {
val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]]
new WritableFactory[T](_ => writableClass, convert)
}
implicit def intWritableFactory: WritableFactory[Int] =
simpleWritableFactory(new IntWritable(_))
implicit def longWritableFactory: WritableFactory[Long] =
simpleWritableFactory(new LongWritable(_))
implicit def floatWritableFactory: WritableFactory[Float] =
simpleWritableFactory(new FloatWritable(_))
implicit def doubleWritableFactory: WritableFactory[Double] =
simpleWritableFactory(new DoubleWritable(_))
implicit def booleanWritableFactory: WritableFactory[Boolean] =
simpleWritableFactory(new BooleanWritable(_))
implicit def bytesWritableFactory: WritableFactory[Array[Byte]] =
simpleWritableFactory(new BytesWritable(_))
implicit def stringWritableFactory: WritableFactory[String] =
simpleWritableFactory(new Text(_))
implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] =
simpleWritableFactory(w => w)
}
|
vinodkc/spark
|
core/src/main/scala/org/apache/spark/SparkContext.scala
|
Scala
|
apache-2.0
| 131,049
|
package model
import com.wordnik.swagger.annotations.{ ApiModelProperty, ApiModel }
import spray.json.DefaultJsonProtocol
import scala.annotation.meta.field
/**
* Created by gneotux on 16/07/15.
*/
@ApiModel(description = "An Company entity")
case class Company(
@(ApiModelProperty @field)(value = "unique identifier for the company")
id: Int,
@(ApiModelProperty @field)(value = "name of the company")
name: String,
@(ApiModelProperty @field)(value = "email for the contact of the company")
email: String,
@(ApiModelProperty @field)(value = "phone number for the contact of the company")
phone: Option[String] = None,
@(ApiModelProperty@field)(value = "company's description")
description: Option[String] = None,
@(ApiModelProperty@field)(value = "company's website")
website: Option[String] = None,
@(ApiModelProperty@field)(value = "company's logo url")
logoUrl: Option[String] = None
)
object Company extends DefaultJsonProtocol{
implicit val companyFormat = jsonFormat7(Company.apply)
}
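// Illustrative usage (not part of the original source): with companyFormat in implicit scope,
// a Company round-trips to and from JSON via spray-json, e.g.
//   import spray.json._
//   val json = Company(1, "Acme", "info@acme.com").toJson
//   val back = json.convertTo[Company]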
|
Gneotux/pfc
|
src/main/scala/model/Company.scala
|
Scala
|
apache-2.0
| 1,031
|
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.initrepository
import java.net.URL
import play.api.libs.json.JsValue
import play.api.libs.ws.ning.{NingAsyncHttpClientConfigBuilder, NingWSClient, NingWSClientConfig}
import play.api.libs.ws.{WSAuthScheme, WSRequest}
class HttpTransport(username: String, password: String) {
private val ws = new NingWSClient(new NingAsyncHttpClientConfigBuilder(new NingWSClientConfig()).build())
def close() = {
ws.close()
Log.debug("closing http client")
}
private def expandQueryParam(param: String) = {
val pair = param.split("=")
pair.head -> pair.last
}
private def applyBody(body: Option[JsValue])(req: WSRequest): WSRequest =
body
.map { b =>
req.withBody(b)
}
.getOrElse(req)
private def applyQueryParams(url: URL)(req: WSRequest): WSRequest =
Option(url.getQuery) match {
case Some(query: String) => req.withQueryString(query.split("&") map expandQueryParam: _*)
case _ => req
}
def buildJsonCall(method: String, url: URL, body: Option[JsValue] = None): WSRequest = {
val urlWithoutQuery = url.toString.split('?').head
val req = ws
.url(urlWithoutQuery)
.withMethod(method)
.withFollowRedirects(false)
Function.chain(
Seq(
applyBody(body) _,
applyQueryParams(url) _
))(req)
}
def buildJsonCallWithAuth(method: String, url: URL, body: Option[JsValue] = None): WSRequest =
buildJsonCall(method, url, body).withAuth(username, password, WSAuthScheme.BASIC)
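  // Illustrative usage sketch (not part of the original source): build an authenticated GET
  // against a hypothetical endpoint and execute it, yielding a Future[WSResponse].
  //   val transport = new HttpTransport("user", "token")
  //   val response = transport
  //     .buildJsonCallWithAuth("GET", new URL("https://example.org/api/items?limit=10"))
  //     .execute()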
}
|
hmrc/init-repository
|
src/main/scala/uk/gov/hmrc/initrepository/HttpTransport.scala
|
Scala
|
apache-2.0
| 2,152
|
package org.bitcoins.script.control
import org.bitcoins.script.arithmetic.OP_ADD
import org.bitcoins.script.bitwise.OP_EQUAL
import org.bitcoins.script.constant.{OP_2, OP_1, OP_0}
import org.scalatest.{MustMatchers, FlatSpec}
/**
* Created by chris on 1/6/16.
*/
class ControlOperationsTest extends FlatSpec with MustMatchers {
"ControlOperations" must "define an OP_IF" in {
OP_IF.opCode must be (99)
}
it must "define an OP_NOTIF" in {
OP_NOTIF.opCode must be (100)
}
it must "define an OP_ELSE" in {
OP_ELSE.opCode must be (103)
}
it must "define an OP_ENDIF" in {
OP_ENDIF.opCode must be (104)
}
it must "define an OP_VERIFY" in {
OP_VERIFY.opCode must be (105)
}
it must "define an OP_RETURN" in {
OP_RETURN.opCode must be (106)
}
}
|
Christewart/scalacoin
|
src/test/scala/org/bitcoins/script/control/ControlOperationsTest.scala
|
Scala
|
mit
| 799
|
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.la
import cc.factorie._
import cc.factorie.util._
trait Tensor1 extends Tensor {
tensor1 =>
def dim1: Int
def activeDomain1: IntSeq = activeDomain
def activeDomain: IntSeq
def numDimensions: Int = 1
def activeDomains = Array(activeDomain1)
def dimensions = Array(dim1)
override def dimensionsMatch(t:Tensor): Boolean = t match {
case t:Tensor1 => t.dim1 == dim1
case _ => false
}
override def ensureDimensionsMatch(t:Tensor): Unit = t match {
case t:Tensor1 => require(t.dim1 == dim1)
case _ => throw new Error("Tensor ranks do not match.")
}
// FIXME: should "activeDomains" be a "def" there or a "val"?
def reshape(dim: Array[Int]) : Tensor = {
assert(dim.fold(1)((a,b) => a*b) == dim1)
val self = this
new Tensor with ReadOnlyTensor with SparseDoubleSeq {
def foreachActiveElement(f: (Int, Double) => Unit) = self.foreachActiveElement(f)
def activeDomainSize = self.activeDomainSize
def dimensions = dim
def activeDomain = tensor1.activeDomain
def apply(i: Int) = tensor1(i)
def length = tensor1.length
def isDense = tensor1.isDense
def numDimensions = dimensions.length
def dot(s: DoubleSeq) = self.dot(s)
def activeDomains = dimensions.map(d => new RangeIntSeq(0, d)).toArray
def copy: Tensor = throw new Error("Method copy not defined on class "+getClass.getName)
def blankCopy: Tensor = throw new Error("Method blankCopy not defined on class "+getClass.getName)
}
}
override def *(f: Double): Tensor1 = super.*(f).asInstanceOf[Tensor1]
override def /(f: Double): Tensor1 = super./(f).asInstanceOf[Tensor1]
def +(t: Tensor1): Tensor1 = super.+(t).asInstanceOf[Tensor1]
def -(t: Tensor1): Tensor1 = super.-(t).asInstanceOf[Tensor1]
// TODO: * could be either dot or outer. Resolve 1xN vs Nx1 status of Tensor1
// I think it should mean * since that is consistent with matrix-vector being "*" -luke
// def *(t: Tensor1): Double = this dot t
//... or it could be Hadamard product
def *(t: Tensor2): Tensor1 = t.leftMultiply(this)
@inline final def length: Int = dim1
override def copy: Tensor1 = throw new Error("Method copy not defined on class "+getClass.getName)
override def blankCopy: Tensor1 = throw new Error("Method blankCopy not defined on class "+getClass.getName)
override def stringPrefix = "Tensor1"
}
object Tensor1 {
def apply(values:Double*): DenseTensor1 = new DenseTensor1(values.toArray)
}
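// Illustrative usage (not part of the original source): build small dense tensors and combine them.
//   val a = Tensor1(1.0, 2.0, 3.0)   // DenseTensor1 with dim1 = 3
//   val b = Tensor1(0.5, 0.5, 0.5)
//   val s = a dot b                  // 3.0
//   val c = a + b                    // element-wise sum, still a Tensor1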
trait DenseTensorLike1 extends Tensor1 with DenseTensor {
//def activeDomain = new RangeIntSeq(0, dim1)
//override def activeDomain = activeDomain1
override def dot(t:DoubleSeq): Double = t match {
//case t:SingletonBinaryTensor => apply(t.singleIndex)
//case t:SingletonTensor => apply(t.singleIndex) * t.singleValue
//case t:DenseTensorLike1 => Tensor.dot(this, t)
case t:SparseBinaryTensorLike1 => t dot this
case t:SparseIndexedTensor1 => t dot this
case t:DoubleSeq => super.dot(t)
}
override def +=(t:DoubleSeq, f:Double): Unit = t match {
//case t:SingletonBinaryTensorLike1 => __values(t.singleIndex) += 1.0
//case t:SingletonTensor1 => __values(t.singleIndex) += t.singleValue
//case t:SparseBinaryTensorLike1 => t.=+(_values, f)
case t:SparseIndexedTensor1 => t.=+(_values, f)
case t:DoubleSeq => super.+=(t, f)
}
}
class DenseTensor1(val dim1:Int) extends DenseTensorLike1 {
def this(t:DoubleSeq) = { this(t.length); this := t }
def this(a:Array[Double]) = { this(a.length); this := a }
def this(dim1:Int, fillValue:Double) = { this(dim1); java.util.Arrays.fill(_values, fillValue) }
override def copy: DenseTensor1 = { val c = new DenseTensor1(dim1); System.arraycopy(_values, 0, c._values, 0, length); c }
override def blankCopy: DenseTensor1 = new DenseTensor1(dim1)
  // TODO I added it, but I'm not sure whether we should have this method, or whether we should implement it in the Tensor trait.
// See also comments about "def *" in Tensor1 above. -akm
/** Return the Hadamard product */
def *(t:DenseTensor1): DenseTensor1 = {
val result = this.copy // TODO We should arrange to get rid of this cast.
val a = result.asArray
val b = t.asArray
val len = length; var i = 0; while (i < len) { a(i) *= b(i); i += 1 }
result
}
}
// TODO Consider something like the following for Scala 2.10:
// implicit class DenseTensor1(override val asArray:Array[Double]) extends DenseTensorLike1 {
// _setArray(asArray)
// }
class GrowableDenseTensor1(initialSize:Int) extends { private var _dim1 = initialSize } with DenseTensorLike1 {
def dim1: Int = _dim1
override def apply(index:Int):Double = if (index < _valuesSize) _values(index) else 0.0
override def ensureDimensionsMatch(t:Tensor): Unit = t match {
case t:Tensor1 => ensureDimensions(t.dim1)
case _ => super.ensureDimensionsMatch(t)
}
def ensureDimensions(d1:Int): Unit = {
if (d1 > _dim1) {
if (d1 > _valuesSize) {
val newSize = math.max(_valuesSize * 2, d1)
val oldValues = _values
_resetValues(newSize) // allocates a new array of size newSize
Array.copy(oldValues, 0, _values, 0, oldValues.size)
if (defaultValue != 0.0) java.util.Arrays.fill(_values, oldValues.size, newSize, defaultValue)
}
_dim1 = d1
}
}
// Currently these two are the only methods that support capacity expansion
override def +=(index:Int, incr:Double): Unit = {
ensureDimensions(index+1)
super.+=(index, incr)
}
override def +=(t:DoubleSeq, f:Double): Unit = t match {
case t:SingletonBinaryTensor1 => +=(t.singleIndex, f)
case t:SingletonTensor1 => +=(t.singleIndex, f * t.singleValue)
case t:SparseBinaryTensorLike1 => { ensureDimensions(t.maxIndex+1); t.=+(_values, f) }
case t:DenseTensorLike1 => { ensureDimensions(t.length); super.+=(t, f) }
case t:SparseIndexedTensor1 => { ensureDimensions(t.length); super.+=(t, f) }
case t:UniformTensor1 => { ensureDimensions(t.length); super.+=(t, f) } //val len = length; val u = t.uniformValue * f; var i = 0; while (i < len) { __values(i) += u; i += 1 }
case _ => {t.foreachActiveElement((i, d) => +=(i,d*f))}
}
override def copy: GrowableDenseTensor1 = { val c = new GrowableDenseTensor1(_dim1); c := this; c }
override def blankCopy: GrowableDenseTensor1 = new GrowableDenseTensor1(_dim1)
}
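// Illustrative usage (not part of the original source): capacity grows as higher indices are touched.
//   val g = new GrowableDenseTensor1(2)
//   g.+=(0, 1.0)    // within the initial size
//   g.+=(10, 2.0)   // triggers ensureDimensions(11), so dim1 becomes 11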
class ProxyGrowableDenseTensor1(val sizeProxy:Iterable[Any]) extends GrowableDenseTensor1(sizeProxy.size) {
override def dim1 = math.max(super.dim1, sizeProxy.size)
override def copy: ProxyGrowableDenseTensor1 = { val c = new ProxyGrowableDenseTensor1(sizeProxy); c := this; c }
override def blankCopy: GrowableDenseTensor1 = new ProxyGrowableDenseTensor1(sizeProxy)
}
/** A Tensor representation of a single scalar (Double) value */
class ScalarTensor(var singleValue:Double) extends Tensor1 with DenseDoubleSeq {
def forallActiveElements(f: (Int, Double) => Boolean) = forallElements(f)
def dim1 = 1
def activeDomainSize = activeDomain.size
def activeDomain = new SingletonIntSeq(0)
def isDense = false
def update(i: Int, v: Double) = if (i == 0) { singleValue = v} else throw new Error
def dot(s: DoubleSeq) = if (s.length == 1) { singleValue*s(0)} else throw new Error
def +=(i: Int, v: Double) = if (i == 0) { singleValue += v} else throw new Error
def +=(t: Tensor, v: Double) = if (t.length == 1) { singleValue += v*t(0)} else throw new Error
def zero() = singleValue = 0
def apply(i:Int): Double = if (i == 0) singleValue else throw new Error
}
/** A one-dimensional one-hot Tensor. */
class SingletonTensor1(val dim1:Int, val singleIndex:Int, val singleValue:Double) extends SingletonIndexedTensor with Tensor1 {
def activeDomain = new SingletonIntSeq(singleIndex)
}
/** A one-dimensional one-hot Tensor with hot value 1.0. */
trait SingletonBinaryTensorLike1 extends Tensor1 with SingletonBinaryTensor {
def activeDomain = new SingletonIntSeq(singleIndex)
}
/** A one-dimensional one-hot Tensor with hot value 1.0. */
class SingletonBinaryTensor1(val dim1:Int, var singleIndex:Int) extends SingletonBinaryTensorLike1 {
override def copy: SingletonBinaryTensor1 = new SingletonBinaryTensor1(dim1, singleIndex)
}
/** A one-dimensional one-hot Tensor with hot value 1.0. */
class GrowableSingletonBinaryTensor1(val sizeProxy:Iterable[Any], var singleIndex:Int) extends SingletonBinaryTensorLike1 {
def dim1 = sizeProxy.size
}
/** A Tensor1 of arbitrary fixed length whose value at all indices is uniformValue. */
class UniformTensor1(val dim1:Int, var uniformValue:Double) extends Tensor1 with UniformTensor {
def activeDomain = new RangeIntSeq(0, dim1)
override def copy = new UniformTensor1(dim1, uniformValue)
override def +(t:Tensor): Tensor = t match {
case t:UniformTensor1 => new UniformTensor1(dim1, uniformValue + t.uniformValue)
case t:Tensor1 => new DenseTensor1(dim1, uniformValue) + t
}
override def *=(d: Double) = uniformValue *= d
}
/** A Tensor1 of arbitrary fixed length containing all 1.0. */
class UnaryTensor1(dim1:Int) extends UniformTensor1(dim1, 1.0) {
override def copy = new UnaryTensor1(dim1)
}
/** A Tensor1 of mutable increasing length whose value at all indices is uniformValue. */
class GrowableUniformTensor1(val sizeProxy:Iterable[Any], val uniformValue:Double) extends UniformTensor with Tensor1 {
def activeDomain = new RangeIntSeq(0, dim1)
//def activeDomain = activeDomain1
def dim1 = sizeProxy.size
override def copy = new GrowableUniformTensor1(sizeProxy, uniformValue)
}
trait SparseBinaryTensorLike1 extends Tensor1 with ArraySparseBinaryTensor { }
class SparseBinaryTensor1(val dim1:Int) extends SparseBinaryTensorLike1 {
def this(t:Tensor) = { this(t.length); throw new Error("Not yet implemented.") }
override def blankCopy: SparseBinaryTensor1 = new SparseBinaryTensor1(dim1)
}
class GrowableSparseBinaryTensor1(val sizeProxy:Iterable[Any]) extends SparseBinaryTensorLike1 {
def dim1: Int = sizeProxy.size
override def blankCopy: GrowableSparseBinaryTensor1 = new GrowableSparseBinaryTensor1(sizeProxy)
override def copy = {
val newT = new GrowableSparseBinaryTensor1(sizeProxy)
this.foreachActiveElement((i, v) => newT(i) = v)
newT
}
}
// Just aliases
class SparseTensor1(dim1:Int) extends SparseIndexedTensor1(dim1)
class GrowableSparseTensor1(sizeProxy:Iterable[Any]) extends GrowableSparseIndexedTensor1(sizeProxy)
trait SparseHashTensorLike extends Tensor with SparseDoubleSeq {
self =>
def isDense = false
var default:Double = 0.0
private val h = new scala.collection.mutable.HashMap[Int,Double] { override def default(index:Int) = self.default }
def apply(index:Int) = h(index)
override def update(index:Int, value:Double) = {
assert(index < length, "index %d should be less than length %d".format(index, length))
if(value == default) h.remove(index)
else h(index) = value
}
override def activeElements = h.iterator
override def activeDomainSize = h.size
def activeDomain: IntSeq = new SeqIntSeq(h.keys.toIndexedSeq) // TODO This is currently really inefficient
//def activeDomain = activeDomain1
override def foreachActiveElement(f: (Int,Double)=>Unit): Unit = h.foreach(t => f(t._1, t._2))
override def +=(index:Int, incr:Double): Unit = {
assert(index < length, "index %d should be less than length %d".format(index, length))
val newCt = h(index) + incr
if (newCt == 0.0)
h.remove(index)
else
h(index) = newCt
}
override def zero(): Unit = h.clear()
override def dot(v:DoubleSeq): Double = v match {
case t:SparseBinaryTensor1 => t dot this
case v:TensorTimesScalar => v dot this
case v:SingletonBinaryTensor1 => v dot this
case v:SingletonTensor1 => v dot this
case sv:SparseHashTensorLike => {
var result = 0.0
if (v.size > this.size) activeElements.foreach({case(index,value) => result += sv(index) * value})
else sv.activeElements.foreach({case(index,value) => result += h(index) * value})
result
}
case dv:DoubleSeq => {
var result = 0.0
h.iterator.foreach({case(index,value) => result += dv(index) * value})
result
}
}
def +=(v:Tensor1): Unit = v.foreachActiveElement({case(index,value) => +=(index, value)}) //h.update(index, h(index) + value)})
override def +=(s:Double): Unit = {
default += s
h.keys.foreach(index => +=(index, s)) //h.update(index, h(index) + s))
}
}
/** A Tensor1 that may contain mostly zeros, with a few arbitrary non-zeros, represented compactly in memory,
implemented as a HashMap from Int indices to Double values.
@author Andrew McCallum */
class SparseHashTensor1(val dim1:Int) extends SparseHashTensorLike with Tensor1
/** Growable Version of SparseHashTensor
@author Dirk Weissenborn */
class GrowableSparseHashTensor1(val sizeProxy:Iterable[Any]) extends SparseHashTensorLike with Tensor1 {
def dim1 = sizeProxy.size
}
trait Tensor1ElementIterator extends DoubleSeqIterator with Iterator[Tensor1ElementIterator] {
def index: Int
def value: Double
}
class SparseIndexedTensor1(val dim1:Int) extends Tensor1 with ArraySparseIndexedTensor {
def activeElements1: Tensor1ElementIterator = {
_makeReadable()
    new Tensor1ElementIterator { // Must not change _indices and _values during iteration!
var i = 0
def hasNext = i < _unsafeActiveDomainSize
def index = _indices(i-1)
def value = _values(i-1)
def next() = { i += 1; this }
}
}
override def blankCopy: SparseIndexedTensor1 = new SparseIndexedTensor1(dim1)
override def copy: SparseIndexedTensor1 = { val t = new SparseIndexedTensor1(dim1); this.copyInto(t); t }
}
class GrowableSparseIndexedTensor1(val sizeProxy:Iterable[Any]) extends Tensor1 with ArraySparseIndexedTensor {
def dim1 = sizeProxy.size
def activeElements1: Tensor1ElementIterator = {
_makeReadable()
    new Tensor1ElementIterator { // Must not change _indices and _values during iteration!
var i = 0
def hasNext = i < _unsafeActiveDomainSize
def index = _indices(i-1)
def value = _values(i-1)
def next() = { i += 1; this }
}
}
override def blankCopy: GrowableSparseIndexedTensor1 = new GrowableSparseIndexedTensor1(sizeProxy)
override def copy: GrowableSparseIndexedTensor1 = { val t = new GrowableSparseIndexedTensor1(sizeProxy); this.copyInto(t); t }
}
//// TODO Pull this out into SparseIndexedTensor
//class SparseIndexedTensor1b(len:Int) extends Tensor1 {
// def this(sizeProxy:Iterable[Any]) = { this(-1); _sizeProxy = sizeProxy }
// def isDense = false
// private val _length: Int = len
// private var _sizeProxy: Iterable[Any] = null
// private var __values: Array[Double] = new Array[Double](4)
// private var __indexs: Array[Int] = new Array[Int](4) // the indices, in order corresponding to _values
// private var _positions: Array[Int] = null // a dense array containing the index into _indices and _values; not yet implemented
// private var _npos = 0 // the number of positions in _values and _indices that are actually being used
// private var _sorted = 0 // The number of positions in _values & _indices where indices are sorted; if _sorted == _npos then ready for use
// private def setCapacity(cap:Int): Unit = {
// assert(cap >= _npos)
// val newInd = new Array[Int](cap)
// val newVal = new Array[Double](cap)
// System.arraycopy(__indexs, 0, newInd, 0, _npos)
// System.arraycopy(__values, 0, newVal, 0, _npos)
// __indexs = newInd; __values = newVal
// }
// private def ensureCapacity(cap:Int): Unit = if (__indexs.length < cap) setCapacity(math.max(cap, __indexs.length + __indexs.length/2))
// def _values = __values
// def _indices = __indexs
// def trim: Unit = setCapacity(_npos)
// def dim1: Int = if (_length < 0) _sizeProxy.size else _length
// override def activeDomainSize: Int = { makeReadable; _npos }
// def activeDomain: IntSeq = { makeReadable ; new TruncatedArrayIntSeq(__indexs, _npos) } // TODO Consider making more efficient
// //def activeDomain = activeDomain1
// override def foreachActiveElement(f:(Int,Double)=>Unit): Unit = { var i = 0; while (i < _npos) { f(__indexs(i), __values(i)); i += 1 } }
// override def activeElements: Iterator[(Int,Double)] = {
// makeReadable
// new Iterator[(Int,Double)] { // Must not change _indexs and _values during iteration!
// var i = 0
// def hasNext = i < _npos
// def next = { i += 1 ; (__indexs(i-1), __values(i-1)) }
// }
// }
// override def zero(): Unit = _npos = 0
// override def sum: Double = { var s = 0.0; var i = 0; while (i < _npos) { s += __values(i); i += 1 }; s }
//
// /** Return the position at which index occurs, or -1 if index does not occur. */
// def position(index:Int): Int = {
// makeReadable
// var i = 0; var ii = 0
// while (i < _npos) { ii = __indexs(i); if (ii == index) return i else if (ii > index) return -1; i += 1 }
// //while (i < _npos) { if (_indexs(i) == index) return i; i += 1 }
// -1
// }
// def position(index:Int, start:Int): Int = { // Just linear search for now; consider binary search with memory of last position
// makeReadable
// var i = start; var ii = 0
// while (i < _npos) { ii = __indexs(i); if (ii == index) return i else if (ii > index) return -1; i += 1 }
// -1
// }
//
// def apply(index:Int): Double = {
// // makeReadable is called in this.position
// val pos = position(index)
// if (pos < 0) 0.0 else __values(pos)
// }
//
// override def dot(v:DoubleSeq): Double = {
// makeReadable
// v match {
// case v:SingletonBinaryTensor1 => apply(v.singleIndex)
// case v:SingletonTensor1 => apply(v.singleIndex) * v.singleValue
// case v:SparseIndexedTensor1b => {
// val v1 = if (this._npos < v._npos) this else v
// val v2 = if (v._npos< this._npos) v else this
// var i = 0; var j = -1; var j2 = 0
// var result = 0.0
// while (i < v1._npos) {
// j2 = v2.position(v1.__indexs(i), j+1)
// if (j2 >= 0) { result += v1.__values(i) * v2.__values(j2); j = j2 }
// i += 1
// }
// result
// }
// case v:DoubleSeq => { var result = 0.0; var p = 0; while (p < _npos) { result += v(__indexs(p)) * __values(p); p += 1 }; result }
// }
// }
//
// // Consider using bit shifting and only one array for this!
// // How many bits are in the mantissa of a Double? Enough to also keep the index?
//
// // Sort _indexs & _values between start and end; does not modify positions outside that range.
// // Return the number of duplicate indices.
// @inline private def sort(start:Int, end:Int): Int = {
// throw new Error("Not yet implemented")
// var cp = start
// while (cp < end) {
// val ci = __indexs(cp)
// val cv = __values(cp)
// var i = cp - 1
// while (i >= 0 && __indexs(i) >= ci) {
// val tmpi =
// i -= 1
// }
// }
// 0
// }
//
// override def toString = "SparseIndexedTensor1 npos="+_npos+" sorted="+_sorted+" ind="+__indexs.mkString(",")+" val="+__values.mkString(",")
//
// @inline private def makeReadable: Unit = {
// var cp = _sorted // "current position", the position next to be placed into sorted order
// while (cp < _npos) {
// //println("cp="+cp)
// val ci = __indexs(cp) // "current index", the index next to be placed into sorted order.
// val cv = __values(cp) // "current value"
// var i = _sorted - 1
// //println("i="+i)
// // Find the position at which the current index/value belongs
// while (i >= 0 && __indexs(i) >= ci) i -= 1
// i += 1
// // Put it there, shifting to make room if necessary
// //println("Placing at position "+i)
// if (__indexs(i) == ci) { if (i != cp) __values(i) += cv else _sorted += 1 }
// else insert(i, ci, cv, incrementNpos=false, incrementSorted=true)
// //println("sorted="+_sorted)
// cp += 1
// }
// _npos = _sorted
// if (_npos * 1.5 > __values.length) trim
// }
//
// // Caller is responsible for making sure there is enough capacity
// @inline private def insert(position:Int, index:Int, value:Double, incrementNpos:Boolean, incrementSorted:Boolean): Unit = {
// if (_npos - position > 0) {
// System.arraycopy(__values, position, __values, position+1, _sorted-position)
// System.arraycopy(__indexs, position, __indexs, position+1, _sorted-position)
// }
// __indexs(position) = index
// __values(position) = value
// if (incrementNpos) _npos += 1
// if (incrementSorted) _sorted += 1
// }
//
// override def update(index:Int, value:Double): Unit = {
// val p = position(index)
// if (p >= 0) __values(p) = value
// else +=(index, value)
// }
// // Efficiently support multiple sequential additions
// override def +=(index:Int, incr:Double): Unit = {
// ensureCapacity(_npos+1)
// __indexs(_npos) = index
// __values(_npos) = incr
// _npos += 1
// }
//
// override def +=(s:Double): Unit = throw new Error("Method +=(Double) not defined on class "+getClass.getName)
// override def +=(t:DoubleSeq, f:Double): Unit = t match {
// case t:SingletonBinaryTensorLike1 => +=(t.singleIndex, f)
// case t:SingletonTensor1 => +=(t.singleIndex, f * t.singleValue)
// case t:SparseBinaryTensorLike1 => { val a = t.asIntArray; val len = a.length; var i = 0; while (i < len) { +=(a(i), f); i += 1 }}
// case t:SparseIndexedTensor1b => { val len = t._npos; var i = 0; while (i < len) { +=(t.__indexs(i), f * t.__values(i)); i += 1 }}
// case t:DenseTensor1 => { val l = t.length; var i = 0; while (i < l) { val v = t(i); if (v != 0.0) +=(i, f * v); i += 1 }}
// case t: TensorTimesScalar => this += (t.tensor, f * t.scalar)
// case _ => super.+=(t, f)
// }
// override def =+(a:Array[Double], offset:Int, f:Double): Unit = { var i = 0; while (i < _npos) { a(__indexs(i)+offset) += f * __values(i); i += 1 }}
//
// override def clone: SparseIndexedTensor1b = {
// val v: SparseIndexedTensor1b = if (_sizeProxy eq null) new SparseIndexedTensor1b(_length) else new SparseIndexedTensor1b(_sizeProxy)
// makeReadable
// v._npos = _npos
// v._sorted = _sorted
// v.__values = __values.clone
// v.__indexs = __indexs.clone
// // TODO Deal with _positions
// v
// }
//
//}
|
iesl/fuse_ttl
|
src/factorie-factorie_2.11-1.1/src/main/scala/cc/factorie/la/Tensor1.scala
|
Scala
|
apache-2.0
| 23,200
|
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2003-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala
package math
import java.util.Comparator
import scala.language.{implicitConversions, higherKinds}
/** Ordering is a trait whose instances each represent a strategy for sorting
* instances of a type.
*
* Ordering's companion object defines many implicit objects to deal with
* subtypes of AnyVal (e.g. Int, Double), String, and others.
*
* To sort instances by one or more member variables, you can take advantage
* of these built-in orderings using Ordering.by and Ordering.on:
*
* {{{
* import scala.util.Sorting
* val pairs = Array(("a", 5, 2), ("c", 3, 1), ("b", 1, 3))
*
* // sort by 2nd element
* Sorting.quickSort(pairs)(Ordering.by[(String, Int, Int), Int](_._2))
*
* // sort by the 3rd element, then 1st
* Sorting.quickSort(pairs)(Ordering[(Int, String)].on(x => (x._3, x._1)))
* }}}
*
* An Ordering[T] is implemented by specifying compare(a:T, b:T), which
* decides how to order two instances a and b. Instances of Ordering[T] can be
* used by things like scala.util.Sorting to sort collections like Array[T].
*
* For example:
*
* {{{
* import scala.util.Sorting
*
* case class Person(name:String, age:Int)
* val people = Array(Person("bob", 30), Person("ann", 32), Person("carl", 19))
*
* // sort by age
* object AgeOrdering extends Ordering[Person] {
* def compare(a:Person, b:Person) = a.age compare b.age
* }
* Sorting.quickSort(people)(AgeOrdering)
* }}}
*
* This trait and scala.math.Ordered both provide this same functionality, but
* in different ways. A type T can be given a single way to order itself by
* extending Ordered. Using Ordering, this same type may be sorted in many
* other ways. Ordered and Ordering both provide implicits allowing them to be
* used interchangeably.
*
* You can import scala.math.Ordering.Implicits to gain access to other
* implicit orderings.
*
* @author Geoffrey Washburn
* @version 0.9.5, 2008-04-15
* @since 2.7
* @see [[scala.math.Ordered]], [[scala.util.Sorting]]
*/
@annotation.implicitNotFound(msg = "No implicit Ordering defined for ${T}.")
trait Ordering[T] extends Comparator[T] with PartialOrdering[T] with Serializable {
outer =>
/** Returns whether a comparison between `x` and `y` is defined, and if so
* the result of `compare(x, y)`.
*/
def tryCompare(x: T, y: T) = Some(compare(x, y))
/** Returns an integer whose sign communicates how x compares to y.
*
* The result sign has the following meaning:
*
* - negative if x < y
* - positive if x > y
* - zero otherwise (if x == y)
*/
def compare(x: T, y: T): Int
/** Return true if `x` <= `y` in the ordering. */
override def lteq(x: T, y: T): Boolean = compare(x, y) <= 0
/** Return true if `x` >= `y` in the ordering. */
override def gteq(x: T, y: T): Boolean = compare(x, y) >= 0
/** Return true if `x` < `y` in the ordering. */
override def lt(x: T, y: T): Boolean = compare(x, y) < 0
/** Return true if `x` > `y` in the ordering. */
override def gt(x: T, y: T): Boolean = compare(x, y) > 0
/** Return true if `x` == `y` in the ordering. */
override def equiv(x: T, y: T): Boolean = compare(x, y) == 0
/** Return `x` if `x` >= `y`, otherwise `y`. */
def max(x: T, y: T): T = if (gteq(x, y)) x else y
/** Return `x` if `x` <= `y`, otherwise `y`. */
def min(x: T, y: T): T = if (lteq(x, y)) x else y
/** Return the opposite ordering of this one. */
override def reverse: Ordering[T] = new Ordering[T] {
override def reverse = outer
def compare(x: T, y: T) = outer.compare(y, x)
}
/** Given f, a function from U into T, creates an Ordering[U] whose compare
* function is equivalent to:
*
* {{{
* def compare(x:U, y:U) = Ordering[T].compare(f(x), f(y))
* }}}
*/
def on[U](f: U => T): Ordering[U] = new Ordering[U] {
def compare(x: U, y: U) = outer.compare(f(x), f(y))
}
/** This inner class defines comparison operators available for `T`. */
class Ops(lhs: T) {
def <(rhs: T) = lt(lhs, rhs)
def <=(rhs: T) = lteq(lhs, rhs)
def >(rhs: T) = gt(lhs, rhs)
def >=(rhs: T) = gteq(lhs, rhs)
def equiv(rhs: T) = Ordering.this.equiv(lhs, rhs)
def max(rhs: T): T = Ordering.this.max(lhs, rhs)
def min(rhs: T): T = Ordering.this.min(lhs, rhs)
}
/** This implicit method augments `T` with the comparison operators defined
* in `scala.math.Ordering.Ops`.
*/
implicit def mkOrderingOps(lhs: T): Ops = new Ops(lhs)
}
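// Hedged usage sketch, not part of the library source: a hand-rolled Ordering only has
// to supply `compare`; lt/gt/min/max come for free and `reverse` flips the direction.
// The names below are hypothetical examples.
private[math] object OrderingTraitSketch {
  val byLength: Ordering[String] = new Ordering[String] {
    def compare(x: String, y: String): Int = Integer.compare(x.length, y.length)
  }
  val words = List("bbb", "a", "cc")
  val shortestFirst = words.sorted(byLength)          // List("a", "cc", "bbb")
  val longestFirst  = words.sorted(byLength.reverse)  // List("bbb", "cc", "a")
}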
trait LowPriorityOrderingImplicits {
/** This would conflict with all the nice implicit Orderings
* available, but thanks to the magic of prioritized implicits
* via subclassing we can make `Ordered[A] => Ordering[A]` only
* turn up if nothing else works. Since `Ordered[A]` extends
* `Comparable[A]` anyway, we can throw in some Java interop too.
*/
implicit def ordered[A <% Comparable[A]]: Ordering[A] = new Ordering[A] {
def compare(x: A, y: A): Int = x compareTo y
}
implicit def comparatorToOrdering[A](implicit cmp: Comparator[A]): Ordering[A] = new Ordering[A] {
def compare(x: A, y: A) = cmp.compare(x, y)
}
}
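// Hedged sketch, not part of the library source: java.math.BigInteger implements
// Comparable but has no dedicated instance in object Ordering, so it is the
// lower-priority `ordered` conversion above that supplies its Ordering.
private[math] object LowPriorityOrderingSketch {
  val bigIntegerOrdering: Ordering[java.math.BigInteger] =
    implicitly[Ordering[java.math.BigInteger]]
}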
/** This is the companion object for the [[scala.math.Ordering]] trait.
*
* It contains many implicit orderings as well as methods to construct
* new orderings.
*/
object Ordering extends LowPriorityOrderingImplicits {
def apply[T](implicit ord: Ordering[T]) = ord
trait ExtraImplicits {
/** Not in the standard scope due to the potential for divergence:
* For instance `implicitly[Ordering[Any]]` diverges in its presence.
*/
implicit def seqDerivedOrdering[CC[X] <: scala.collection.Seq[X], T](implicit ord: Ordering[T]): Ordering[CC[T]] =
new Ordering[CC[T]] {
def compare(x: CC[T], y: CC[T]): Int = {
val xe = x.iterator
val ye = y.iterator
while (xe.hasNext && ye.hasNext) {
val res = ord.compare(xe.next(), ye.next())
if (res != 0) return res
}
Ordering.Boolean.compare(xe.hasNext, ye.hasNext)
}
}
/** This implicit creates a conversion from any value for which an
* implicit `Ordering` exists to the class which creates infix operations.
* With it imported, you can write methods as follows:
*
* {{{
* def lessThan[T: Ordering](x: T, y: T) = x < y
* }}}
*/
implicit def infixOrderingOps[T](x: T)(implicit ord: Ordering[T]): Ordering[T]#Ops = new ord.Ops(x)
}
/** An object containing implicits which are not in the default scope. */
object Implicits extends ExtraImplicits { }
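// Hedged sketch, not part of the library source: importing Ordering.Implicits brings the
// derived sequence ordering and the infix comparison operators into scope, which the
// default scope deliberately omits (see the divergence note above).
private[math] object ImplicitsUsageSketch {
  import Ordering.Implicits._
  def lexicographicallySmaller(a: List[Int], b: List[Int]): Boolean = a < b
}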
/** Construct an Ordering[T] given a function `lt`. */
def fromLessThan[T](cmp: (T, T) => Boolean): Ordering[T] = new Ordering[T] {
def compare(x: T, y: T) = if (cmp(x, y)) -1 else if (cmp(y, x)) 1 else 0
// overrides to avoid multiple comparisons
override def lt(x: T, y: T): Boolean = cmp(x, y)
override def gt(x: T, y: T): Boolean = cmp(y, x)
override def gteq(x: T, y: T): Boolean = !cmp(x, y)
override def lteq(x: T, y: T): Boolean = !cmp(y, x)
}
/** Given f, a function from T into S, creates an Ordering[T] whose compare
* function is equivalent to:
*
* {{{
* def compare(x:T, y:T) = Ordering[S].compare(f(x), f(y))
* }}}
*
* This function is an analogue to Ordering.on where the Ordering[S]
* parameter is passed implicitly.
*/
def by[T, S](f: T => S)(implicit ord: Ordering[S]): Ordering[T] =
fromLessThan((x, y) => ord.lt(f(x), f(y)))
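// Hedged sketch, not part of the library source: `by` lifts a key extractor into an
// Ordering; here hypothetical (name, score) pairs are ordered by their Int score,
// equivalent to Ordering[Int] applied `on(_._2)`.
private[math] val byScoreSketch: Ordering[(String, Int)] = Ordering.by[(String, Int), Int](_._2)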
trait UnitOrdering extends Ordering[Unit] {
def compare(x: Unit, y: Unit) = 0
}
implicit object Unit extends UnitOrdering
trait BooleanOrdering extends Ordering[Boolean] {
def compare(x: Boolean, y: Boolean) = java.lang.Boolean.compare(x, y)
}
implicit object Boolean extends BooleanOrdering
trait ByteOrdering extends Ordering[Byte] {
def compare(x: Byte, y: Byte) = java.lang.Byte.compare(x, y)
}
implicit object Byte extends ByteOrdering
trait CharOrdering extends Ordering[Char] {
def compare(x: Char, y: Char) = java.lang.Character.compare(x, y)
}
implicit object Char extends CharOrdering
trait ShortOrdering extends Ordering[Short] {
def compare(x: Short, y: Short) = java.lang.Short.compare(x, y)
}
implicit object Short extends ShortOrdering
trait IntOrdering extends Ordering[Int] {
def compare(x: Int, y: Int) = java.lang.Integer.compare(x, y)
}
implicit object Int extends IntOrdering
trait LongOrdering extends Ordering[Long] {
def compare(x: Long, y: Long) = java.lang.Long.compare(x, y)
}
implicit object Long extends LongOrdering
trait FloatOrdering extends Ordering[Float] {
outer =>
def compare(x: Float, y: Float) = java.lang.Float.compare(x, y)
override def lteq(x: Float, y: Float): Boolean = x <= y
override def gteq(x: Float, y: Float): Boolean = x >= y
override def lt(x: Float, y: Float): Boolean = x < y
override def gt(x: Float, y: Float): Boolean = x > y
override def equiv(x: Float, y: Float): Boolean = x == y
override def max(x: Float, y: Float): Float = math.max(x, y)
override def min(x: Float, y: Float): Float = math.min(x, y)
override def reverse: Ordering[Float] = new FloatOrdering {
override def reverse = outer
override def compare(x: Float, y: Float) = outer.compare(y, x)
override def lteq(x: Float, y: Float): Boolean = outer.lteq(y, x)
override def gteq(x: Float, y: Float): Boolean = outer.gteq(y, x)
override def lt(x: Float, y: Float): Boolean = outer.lt(y, x)
override def gt(x: Float, y: Float): Boolean = outer.gt(y, x)
override def min(x: Float, y: Float): Float = outer.max(x, y)
override def max(x: Float, y: Float): Float = outer.min(x, y)
}
}
implicit object Float extends FloatOrdering
trait DoubleOrdering extends Ordering[Double] {
outer =>
def compare(x: Double, y: Double) = java.lang.Double.compare(x, y)
override def lteq(x: Double, y: Double): Boolean = x <= y
override def gteq(x: Double, y: Double): Boolean = x >= y
override def lt(x: Double, y: Double): Boolean = x < y
override def gt(x: Double, y: Double): Boolean = x > y
override def equiv(x: Double, y: Double): Boolean = x == y
override def max(x: Double, y: Double): Double = math.max(x, y)
override def min(x: Double, y: Double): Double = math.min(x, y)
override def reverse: Ordering[Double] = new DoubleOrdering {
override def reverse = outer
override def compare(x: Double, y: Double) = outer.compare(y, x)
override def lteq(x: Double, y: Double): Boolean = outer.lteq(y, x)
override def gteq(x: Double, y: Double): Boolean = outer.gteq(y, x)
override def lt(x: Double, y: Double): Boolean = outer.lt(y, x)
override def gt(x: Double, y: Double): Boolean = outer.gt(y, x)
override def min(x: Double, y: Double): Double = outer.max(x, y)
override def max(x: Double, y: Double): Double = outer.min(x, y)
}
}
implicit object Double extends DoubleOrdering
trait BigIntOrdering extends Ordering[BigInt] {
def compare(x: BigInt, y: BigInt) = x.compare(y)
}
implicit object BigInt extends BigIntOrdering
trait BigDecimalOrdering extends Ordering[BigDecimal] {
def compare(x: BigDecimal, y: BigDecimal) = x.compare(y)
}
implicit object BigDecimal extends BigDecimalOrdering
trait StringOrdering extends Ordering[String] {
def compare(x: String, y: String) = x.compareTo(y)
}
implicit object String extends StringOrdering
trait OptionOrdering[T] extends Ordering[Option[T]] {
def optionOrdering: Ordering[T]
def compare(x: Option[T], y: Option[T]) = (x, y) match {
case (None, None) => 0
case (None, _) => -1
case (_, None) => 1
case (Some(x), Some(y)) => optionOrdering.compare(x, y)
}
}
implicit def Option[T](implicit ord: Ordering[T]): Ordering[Option[T]] =
new OptionOrdering[T] { val optionOrdering = ord }
implicit def Iterable[T](implicit ord: Ordering[T]): Ordering[Iterable[T]] =
new Ordering[Iterable[T]] {
def compare(x: Iterable[T], y: Iterable[T]): Int = {
val xe = x.iterator
val ye = y.iterator
while (xe.hasNext && ye.hasNext) {
val res = ord.compare(xe.next(), ye.next())
if (res != 0) return res
}
Boolean.compare(xe.hasNext, ye.hasNext)
}
}
implicit def Tuple2[T1, T2](implicit ord1: Ordering[T1], ord2: Ordering[T2]): Ordering[(T1, T2)] =
new Ordering[(T1, T2)]{
def compare(x: (T1, T2), y: (T1, T2)): Int = {
val compare1 = ord1.compare(x._1, y._1)
if (compare1 != 0) return compare1
val compare2 = ord2.compare(x._2, y._2)
if (compare2 != 0) return compare2
0
}
}
implicit def Tuple3[T1, T2, T3](implicit ord1: Ordering[T1], ord2: Ordering[T2], ord3: Ordering[T3]) : Ordering[(T1, T2, T3)] =
new Ordering[(T1, T2, T3)]{
def compare(x: (T1, T2, T3), y: (T1, T2, T3)): Int = {
val compare1 = ord1.compare(x._1, y._1)
if (compare1 != 0) return compare1
val compare2 = ord2.compare(x._2, y._2)
if (compare2 != 0) return compare2
val compare3 = ord3.compare(x._3, y._3)
if (compare3 != 0) return compare3
0
}
}
implicit def Tuple4[T1, T2, T3, T4](implicit ord1: Ordering[T1], ord2: Ordering[T2], ord3: Ordering[T3], ord4: Ordering[T4]) : Ordering[(T1, T2, T3, T4)] =
new Ordering[(T1, T2, T3, T4)]{
def compare(x: (T1, T2, T3, T4), y: (T1, T2, T3, T4)): Int = {
val compare1 = ord1.compare(x._1, y._1)
if (compare1 != 0) return compare1
val compare2 = ord2.compare(x._2, y._2)
if (compare2 != 0) return compare2
val compare3 = ord3.compare(x._3, y._3)
if (compare3 != 0) return compare3
val compare4 = ord4.compare(x._4, y._4)
if (compare4 != 0) return compare4
0
}
}
implicit def Tuple5[T1, T2, T3, T4, T5](implicit ord1: Ordering[T1], ord2: Ordering[T2], ord3: Ordering[T3], ord4: Ordering[T4], ord5: Ordering[T5]): Ordering[(T1, T2, T3, T4, T5)] =
new Ordering[(T1, T2, T3, T4, T5)]{
def compare(x: (T1, T2, T3, T4, T5), y: Tuple5[T1, T2, T3, T4, T5]): Int = {
val compare1 = ord1.compare(x._1, y._1)
if (compare1 != 0) return compare1
val compare2 = ord2.compare(x._2, y._2)
if (compare2 != 0) return compare2
val compare3 = ord3.compare(x._3, y._3)
if (compare3 != 0) return compare3
val compare4 = ord4.compare(x._4, y._4)
if (compare4 != 0) return compare4
val compare5 = ord5.compare(x._5, y._5)
if (compare5 != 0) return compare5
0
}
}
implicit def Tuple6[T1, T2, T3, T4, T5, T6](implicit ord1: Ordering[T1], ord2: Ordering[T2], ord3: Ordering[T3], ord4: Ordering[T4], ord5: Ordering[T5], ord6: Ordering[T6]): Ordering[(T1, T2, T3, T4, T5, T6)] =
new Ordering[(T1, T2, T3, T4, T5, T6)]{
def compare(x: (T1, T2, T3, T4, T5, T6), y: (T1, T2, T3, T4, T5, T6)): Int = {
val compare1 = ord1.compare(x._1, y._1)
if (compare1 != 0) return compare1
val compare2 = ord2.compare(x._2, y._2)
if (compare2 != 0) return compare2
val compare3 = ord3.compare(x._3, y._3)
if (compare3 != 0) return compare3
val compare4 = ord4.compare(x._4, y._4)
if (compare4 != 0) return compare4
val compare5 = ord5.compare(x._5, y._5)
if (compare5 != 0) return compare5
val compare6 = ord6.compare(x._6, y._6)
if (compare6 != 0) return compare6
0
}
}
implicit def Tuple7[T1, T2, T3, T4, T5, T6, T7](implicit ord1: Ordering[T1], ord2: Ordering[T2], ord3: Ordering[T3], ord4: Ordering[T4], ord5: Ordering[T5], ord6: Ordering[T6], ord7: Ordering[T7]): Ordering[(T1, T2, T3, T4, T5, T6, T7)] =
new Ordering[(T1, T2, T3, T4, T5, T6, T7)]{
def compare(x: (T1, T2, T3, T4, T5, T6, T7), y: (T1, T2, T3, T4, T5, T6, T7)): Int = {
val compare1 = ord1.compare(x._1, y._1)
if (compare1 != 0) return compare1
val compare2 = ord2.compare(x._2, y._2)
if (compare2 != 0) return compare2
val compare3 = ord3.compare(x._3, y._3)
if (compare3 != 0) return compare3
val compare4 = ord4.compare(x._4, y._4)
if (compare4 != 0) return compare4
val compare5 = ord5.compare(x._5, y._5)
if (compare5 != 0) return compare5
val compare6 = ord6.compare(x._6, y._6)
if (compare6 != 0) return compare6
val compare7 = ord7.compare(x._7, y._7)
if (compare7 != 0) return compare7
0
}
}
implicit def Tuple8[T1, T2, T3, T4, T5, T6, T7, T8](implicit ord1: Ordering[T1], ord2: Ordering[T2], ord3: Ordering[T3], ord4: Ordering[T4], ord5: Ordering[T5], ord6: Ordering[T6], ord7: Ordering[T7], ord8: Ordering[T8]): Ordering[(T1, T2, T3, T4, T5, T6, T7, T8)] =
new Ordering[(T1, T2, T3, T4, T5, T6, T7, T8)]{
def compare(x: (T1, T2, T3, T4, T5, T6, T7, T8), y: (T1, T2, T3, T4, T5, T6, T7, T8)): Int = {
val compare1 = ord1.compare(x._1, y._1)
if (compare1 != 0) return compare1
val compare2 = ord2.compare(x._2, y._2)
if (compare2 != 0) return compare2
val compare3 = ord3.compare(x._3, y._3)
if (compare3 != 0) return compare3
val compare4 = ord4.compare(x._4, y._4)
if (compare4 != 0) return compare4
val compare5 = ord5.compare(x._5, y._5)
if (compare5 != 0) return compare5
val compare6 = ord6.compare(x._6, y._6)
if (compare6 != 0) return compare6
val compare7 = ord7.compare(x._7, y._7)
if (compare7 != 0) return compare7
val compare8 = ord8.compare(x._8, y._8)
if (compare8 != 0) return compare8
0
}
}
implicit def Tuple9[T1, T2, T3, T4, T5, T6, T7, T8, T9](implicit ord1: Ordering[T1], ord2: Ordering[T2], ord3: Ordering[T3], ord4: Ordering[T4], ord5: Ordering[T5], ord6: Ordering[T6], ord7: Ordering[T7], ord8 : Ordering[T8], ord9: Ordering[T9]): Ordering[(T1, T2, T3, T4, T5, T6, T7, T8, T9)] =
new Ordering[(T1, T2, T3, T4, T5, T6, T7, T8, T9)]{
def compare(x: (T1, T2, T3, T4, T5, T6, T7, T8, T9), y: (T1, T2, T3, T4, T5, T6, T7, T8, T9)): Int = {
val compare1 = ord1.compare(x._1, y._1)
if (compare1 != 0) return compare1
val compare2 = ord2.compare(x._2, y._2)
if (compare2 != 0) return compare2
val compare3 = ord3.compare(x._3, y._3)
if (compare3 != 0) return compare3
val compare4 = ord4.compare(x._4, y._4)
if (compare4 != 0) return compare4
val compare5 = ord5.compare(x._5, y._5)
if (compare5 != 0) return compare5
val compare6 = ord6.compare(x._6, y._6)
if (compare6 != 0) return compare6
val compare7 = ord7.compare(x._7, y._7)
if (compare7 != 0) return compare7
val compare8 = ord8.compare(x._8, y._8)
if (compare8 != 0) return compare8
val compare9 = ord9.compare(x._9, y._9)
if (compare9 != 0) return compare9
0
}
}
}
|
felixmulder/scala
|
src/library/scala/math/Ordering.scala
|
Scala
|
bsd-3-clause
| 19,852
|
package com.example.pingpong
import android.view.{Gravity, ViewGroup, LayoutInflater}
import android.os.Bundle
import android.widget.{FrameLayout, Button}
import android.view.ViewGroup.LayoutParams._
import macroid._
import macroid.FullDsl._
import macroid.contrib.TextTweaks
import macroid.Ui
import macroid.akka.AkkaFragment
import scala.concurrent.ExecutionContext.Implicits.global
/** Styles for our widgets */
object Styles {
// how the racket looks
def racket(implicit appCtx: AppContext) =
hide + disable +
text("SMASH") +
TextTweaks.large +
lp[FrameLayout](WRAP_CONTENT, WRAP_CONTENT, Gravity.CENTER)
}
/** Effects for our widgets */
object Effects {
// make a glorious fade
def appear =
fadeIn(600) +
enable
// disappear with style
def disappear =
disable ++
fadeOut(600) ++
delay(600)
}
/** Our UI fragment */
class RacketFragment extends AkkaFragment with Contexts[AkkaFragment] {
// get actor name from arguments
lazy val actorName = getArguments.getString("name")
// actor for this fragment
lazy val actor = Some(actorSystem.actorSelection(s"/user/$actorName"))
// a slot for the racket button
var racket = slot[Button]
// trigger the fadeIn effect
def receive =
racket <~ Effects.appear
// smash the ball
def smash =
// wait until the racket disappears
(racket <~~ Effects.disappear) ~~
// tell the actor to smash
Ui(actor.foreach(_ ! RacketActor.Smash))
override def onCreateView(inflater: LayoutInflater, container: ViewGroup, savedInstanceState: Bundle) = getUi {
l[FrameLayout](
w[Button] <~ wire(racket) <~ Styles.racket <~ On.click(smash)
)
}
}
|
macroid/macroid-akka-pingpong
|
src/main/scala/com/example/pingpong/RacketFragment.scala
|
Scala
|
apache-2.0
| 1,679
|
package net.revenj.server
case class CommandResultDescription[TFormat](
requestID: String,
result: CommandResult[TFormat],
start: Long) {
val duration: Long = (System.nanoTime - start) / 1000
}
|
ngs-doo/revenj
|
scala/revenj-akka/src/main/scala/net/revenj/server/CommandResultDescription.scala
|
Scala
|
bsd-3-clause
| 211
|
import sbt._
object Globals {
val name = "akka-rabbit"
val scalaVersion = "2.10.4"
val crossScalaVersions = Seq("2.10.4", "2.11.1")
val jvmVersion = "1.7"
val homepage = Some(url("http://www.coiney.com"))
val startYear = Some(2014)
val summary = "An asynchronous scala client for RabbitMQ, based on Akka."
val description = "An asynchronous scala client for RabbitMQ, based on Akka."
val maintainer = "pjan <pjan@coiney.com>"
val license = Some("BSD")
val organizationName = "Coiney Inc."
val organization = "com.coiney"
val organizationHomepage = Some(url("http://coiney.com"))
val sourceUrl = "https://github.com/Coiney/akka-rabbit"
val scmUrl = "git@github.com:Coiney/akka-rabbit.git"
val scmConnection = "scm:git:git@github.com:Coiney/akka-rabbit.git"
val serviceDaemonUser = "admin"
val serviceDaemonGroup = "admin"
val baseCredentials: Seq[Credentials] = Seq[Credentials](
Credentials(Path.userHome / ".ivy2" / ".credentials_coiney_snapshots"),
Credentials(Path.userHome / ".ivy2" / ".credentials_coiney_release")
)
val snapshotRepo = Some("snapshots" at "http://archives.coiney.com:8888/repository/snapshots/")
val pomDevelopers =
<id>pjan</id><name>pjan vandaele</name><url>http://pjan.io</url>;
val pomLicense =
<licenses>
<license>
<name>The BSD 3-Clause License</name>
<url>http://opensource.org/licenses/BSD-3-Clause</url>
<distribution>repo</distribution>
</license>
</licenses>;
}
|
Coiney/akka-rabbit
|
project/Globals.scala
|
Scala
|
bsd-3-clause
| 1,678
|
package pcap.streams
import org.scalatest._
import scodec.bits.BitVector
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import pcap.codec.Codecs.WithHeaderDecoder
import scodec.bits.ByteVector
import akka.util.ByteString
import akka.stream.scaladsl.Source
import scala.collection.immutable.Seq
import java.io.File
import akka.stream.scaladsl.Sink
import akka.stream.Materializer
import scala.concurrent.ExecutionContext
import scodec.protocols.ip.Port
import scodec.codecs._
import scodec.Decoder
import pcap.data.StreamKey
import scodec.bits._
/**
* @author rsearle
*/
class SingleBinaryStructuredStreamSpec extends FlatSpec with Matchers {
val captured = ByteString(ByteVector.fromBase64(
"""1MOyoQIABAAAAAAAAAAAAP//AAABAAAAXQqlVfvBBABCAAAAQgAAAAAZ0S1zOZCxHIOdkQgARQAA
NHcpQACABlbrCgoMMgoKDGrzg2Gpjws21AAAAACAAiAAB1QAAAIEBbQBAwMIAQEEAl0KpVUuwwQA
QgAAAEIAAACQsRyDnZEAGdEtczkIAEUAADQAAEAAQAYOFQoKDGoKCgwyYanzg0yD33qPCzbVgBIW
0OR1AAACBAW0AQEEAgEDAwddCqVVX8MEADYAAAA2AAAAABnRLXM5kLEcg52RCABFAAAodypAAIAG
VvYKCgwyCgoMavODYamPCzbVTIPfe1AQAQA7GAAAXQqlVRPKBABCAAAAQgAAAAAZ0S1zOZCxHIOd
kQgARQAANHcrQACABlbpCgoMMgoKDGrzg2Gpjws21UyD33tQGAEAD3MAAAUBAAgAACWAAAgBAF0K
pVVgywQAPAAAADwAAACQsRyDnZEAGdEtczkIAEUAACiewUAAQAZvXwoKDGoKCgwyYanzg0yD33uP
CzbhUBAALjveAAAAAAAAAABdCqVVju4GADwAAAA8AAAAkLEcg52RABnRLXM5CABFAAAunsJAAEAG
b1gKCgxqCgoMMmGp84NMg997jws24VAYAC4kowAACgEAAg0qXQqlVXtJBwA6AAAAOgAAAAAZ0S1z
OZCxHIOdkQgARQAALHdBQACABlbbCgoMMgoKDGrzg2Gpjws24UyD34FQGAEAOPgAAAICAABdCqVV
50oHADwAAAA8AAAAkLEcg52RABnRLXM5CABFAAAonsNAAEAGb10KCgxqCgoMMmGp84NMg9+Bjws2
5VAQAC471AAAAAAAAAAAXQqlVT9OBwA8AAAAPAAAAJCxHIOdkQAZ0S1zOQgARQAALJ7EQABABm9Y
CgoMagoKDDJhqfODTIPfgY8LNuVQGAAuLMYAAA8CAAAAAF0KpVVOYQcAPgAAAD4AAAAAGdEtczmQ
sRyDnZEIAEUAADB3RkAAgAZW0goKDDIKCgxq84NhqY8LNuVMg9+FUBgBACyzAAAGAwAEAGQH0F0K
pVU2ZwcASwAAAEsAAACQsRyDnZEAGdEtczkIAEUAAD2exUAAQAZvRgoKDGoKCgwyYanzg0yD34WP
CzbtUBgALnxaAAABAwARZmx1c2ggcng9MTAwOjIwMDBdCqVV2LQIADoAAAA6AAAAABnRLXM5kLEc
g52RCABFAAAseNlAAIAGVUMKCgwyCgoMavODYamPCzbtTIPfmlAYAQAv0QAACwQAAF0KpVW3tggA
PAAAADwAAACQsRyDnZEAGdEtczkIAEUAACyexkAAQAZvVgoKDGoKCgwyYanzg0yD35qPCzbxUBgA
LiyfAAAPBAAAAABdCqVVr7cIADwAAAA8AAAAkLEcg52RABnRLXM5CABFAAAonsdAAEAGb1kKCgxq
CgoMMmGp84NMg9+ejws28VARAC47qgAAAAAAAAAAXQqlVcS3CAA2AAAANgAAAAAZ0S1zOZCxHIOd
kQgARQAAKHjaQACABlVGCgoMMgoKDGrzg2Gpjws28UyD359QEAEAOtgAAF0KpVVjuAgANgAAADYA
AAAAGdEtczmQsRyDnZEIAEUAACh420AAgAZVRQoKDDIKCgxq84NhqY8LNvFMg9+fUBEBADrXAABd
CqVVsrkIADwAAAA8AAAAkLEcg52RABnRLXM5CABFAAAoAABAAEAGDiEKCgxqCgoMMmGp84NMg9+f
jws28lAQAC47qQAAAAAAAAAA""").get.toByteBuffer)
val packet = uint8 ~ uint(8) ~ variableSizeBytes(uint16, bytes)
val transaction = packet ~ packet
val decoder = vector(transaction)
"structured" should "atomic extract" in {
implicit val system = ActorSystem("Sys")
import system.dispatcher
implicit val materializer = ActorMaterializer()
Source.single(captured)
.transform(() => ByteStringDecoderStage(new WithHeaderDecoder))
.collect { case data: pcap.data.v4.TCP => data }
.groupBy(2,_.stream)
.fold((pcap.data.v4.nullTCP,ByteVector.empty))((pair,t) => (t,pair._2 ++ t.bytes))
.map { r => (r._1.stream, decoder.decodeValue(r._2.bits).require) }
.mergeSubstreams
.runWith(Sink.head)
.onComplete { t =>
val r = t.get._2
t.get._1.toString should be("Key(25001,62339,10.10.12.106,10.10.12.50)")
r.size should be(4)
r(0) should be((((5, 1), hex"0000258000080100"), ((10, 1), hex"0d2a")))
system.shutdown
}
system.awaitTermination()
}
it should "stream packets" in {
implicit val system = ActorSystem("Sys")
import system.dispatcher
implicit val materializer = ActorMaterializer()
val f = Source.single(captured)
.transform(() => ByteStringDecoderStage(new WithHeaderDecoder))
.collect { case data: pcap.data.v4.TCP if !data.bytes.isEmpty => data }
.groupBy(2,_.stream)
.map { r => (r.stream, packet.decodeValue(r.bytes.bits).require) }
.mergeSubstreams
.runWith(Sink.fold(List[(StreamKey, ((Int, Int), ByteVector))]())((l, v) => l :+ v))
.onComplete { t =>
system.shutdown
val r = t.get
r.size should be(8)
r(0)._2 should be(((5, 1), hex"0000258000080100"))
r(1)._2 should be(((10, 1), hex"0d2a"))
}
system.awaitTermination()
}
//Vector(((5,1),ByteVector(8 bytes, 0x0000258000080100)), ((10,1),ByteVector(2 bytes, 0x0d2a)), ((2,2),ByteVector(empty)), ((15,2),ByteVector(empty)), ((6,3),ByteVector(4 bytes, 0x006407d0)), ((1,3),ByteVector(17 bytes, 0x666c7573682072783d3130303a32303030)), ((11,4),ByteVector(empty)), ((15,4),ByteVector(empty)))
}
|
searler/pcap
|
src/test/scala/pcap/streams/SingleBinaryStructuredStreamSpec.scala
|
Scala
|
apache-2.0
| 4,688
|
package tuner.gui
import scala.swing.Button
import scala.swing.BoxPanel
import scala.swing.Orientation
import scala.swing.Publisher
import scala.swing.TextField
import scala.swing.event.ButtonClicked
import scala.swing.event.ValueChanged
/**
* Element for selecting either a file or directory path
*
* Sends ValueChanged when a new file is selected
*/
class PathPanel extends BoxPanel(Orientation.Horizontal) with Publisher {
val filenameField = new TextField
val chooseButton = new Button("Browse…")
var title = ""
var fileSelector = FileChooser.loadFile _
contents += filenameField
contents += chooseButton
// set up the events system
listenTo(filenameField)
listenTo(chooseButton)
reactions += {
case ButtonClicked(_) =>
val ttl = title
fileSelector(ttl) foreach {filename =>
filenameField.text = filename
}
case ValueChanged(`filenameField`) =>
publish(new ValueChanged(this))
}
def path : String = filenameField.text
def file : java.io.File = new java.io.File(path)
def validPath : Boolean = file.exists
}
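// Hedged usage sketch, not part of the original file: a Reactor can subscribe to a
// PathPanel and react once a valid file has been chosen or typed in. The names below
// are hypothetical.
private object PathPanelUsageSketch extends scala.swing.Reactor {
  val pathPanel = new PathPanel
  pathPanel.title = "Choose a data file"
  listenTo(pathPanel)
  reactions += {
    case ValueChanged(`pathPanel`) if pathPanel.validPath =>
      println(s"Selected: ${pathPanel.path}")
  }
}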
|
gabysbrain/tuner
|
src/main/scala/tuner/gui/PathPanel.scala
|
Scala
|
mit
| 1,099
|
package info.siddhuw.factors
import java.text.SimpleDateFormat
import com.github.tototoshi.csv.CSVReader
import com.typesafe.scalalogging.LazyLogging
import info.siddhuw._
import info.siddhuw.factors.ResultType.ResultType
import info.siddhuw.factors.utils.ClubPerformanceUtils
/**
* This factor works as follows.:
*
* It calculates the performance of the clubs that each of the team members belongs to, weighting the performance
* by the number of team members who play for that particular club.
*
* This class expects:
* * a teamsheet for each team that has the following fields: Player,Club,League (League has to be within parameters)
* * a results file with the same name as the league, in the football-data.co.uk format, sorted by match date
*
* Created by siddhuwarrier on 27/07/2014.
*/
class ClubPerformanceFactor(numGamesToConsider: Int = 14) extends PerformanceFactor with LazyLogging {
val CLUB_FIELD = "Club"
val LEAGUE_FIELD = "League"
val HOME_TEAM_FIELD = "HomeTeam"
val AWAY_TEAM_FIELD = "AwayTeam"
val DATE_FIELD = "Date"
val DATE_FORMAT = new SimpleDateFormat("dd/MM/yy")
val FT_HOME_GOALS_FIELD = "FTHG"
val FT_AWAY_GOALS_FIELD = "FTAG"
override def getProbabilities(teamOneName: String, teamTwoName: String): Probability = {
val teamOneSquad = CSVReader.open(getFileFromClasspath(teamOneName + ".csv")).allWithHeaders()
val teamTwoSquad = CSVReader.open(getFileFromClasspath(teamTwoName + ".csv")).allWithHeaders()
val teamOneHistoricRes = calculateHistoricalResults(teamOneName, teamOneSquad)
val teamTwoHistoricRes = calculateHistoricalResults(teamTwoName, teamTwoSquad)
val prob = getFromHistoricalResults(teamOneHistoricRes, teamTwoHistoricRes, numGamesToConsider * 2)
logger.debug("Probability given club performance: {}", prob)
prob
}
private def calculateHistoricalResults(teamName: String, squad: List[Map[String, String]]): HistoricResults = {
val playersPerClub = getPlayersPerClub(squad)
logger.debug("For team " + teamName + ", players to club mapping: " + playersPerClub)
val clubToLeagueMapping = getClubToLeagueMapping(squad)
val numWins = getResultsFor(playersPerClub, clubToLeagueMapping, squad.size, ResultType.WIN)
val numLosses = getResultsFor(playersPerClub, clubToLeagueMapping, squad.size, ResultType.LOSS)
val numDraws = getResultsFor(playersPerClub, clubToLeagueMapping, squad.size, ResultType.DRAW)
HistoricResults(teamName, numWins, numLosses, numDraws)
}
private def getResultsFor(playersPerClub: Map[String, Int], clubToLeagueMapping: Map[String, String],
squadSize: Int, resultType: ResultType): Double = {
playersPerClub.map {
case (club, numPlayers) =>
val numResults = resultType match {
case ResultType.WIN =>
ClubPerformanceUtils.getClubWins(club, clubToLeagueMapping(club), numGamesToConsider)
case ResultType.LOSS =>
ClubPerformanceUtils.getClubLosses(club, clubToLeagueMapping(club), numGamesToConsider)
case _ =>
ClubPerformanceUtils.getClubDraws(club, clubToLeagueMapping(club), numGamesToConsider)
}
(numPlayers.toDouble / squadSize) * numResults.toDouble
}.sum
}
private def getPlayersPerClub(squad: List[Map[String, String]]): Map[String, Int] = {
squad.groupBy(_.get(CLUB_FIELD)).map {
case (k, v) => k.get -> v.size
}
}
private def getClubToLeagueMapping(squad: List[Map[String, String]]): Map[String, String] = {
squad.groupBy(_.get(CLUB_FIELD)).map {
case (k, v) => k.get -> v.map(_.get(LEAGUE_FIELD).get).head
}
}
}
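// Hedged worked example of the weighting rule implemented by getResultsFor above, using
// hypothetical clubs and counts rather than anything read from a teamsheet or results file.
private object ClubWeightingSketch {
  val squadSize = 20
  // club -> (players in the squad from this club, club wins over the considered window)
  val clubWins = Map("ClubA" -> (5, 8), "ClubB" -> (15, 4))
  // (5/20) * 8 + (15/20) * 4 = 2.0 + 3.0 = 5.0 weighted wins
  val weightedWins: Double = clubWins.values.map {
    case (numPlayers, wins) => (numPlayers.toDouble / squadSize) * wins
  }.sum
}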
|
siddhuwarrier/intl-odds-calculator
|
src/main/scala/info/siddhuw/factors/ClubPerformanceFactor.scala
|
Scala
|
apache-2.0
| 3,662
|
/*
* Copyright (c) $date.year. Webtrends (http://www.webtrends.com)
* @author $user on $date.get('MM/dd/yyyy hh:mm a')
*/
package com.webtrends.service
import com.webtrends.harness.app.Harness
import org.specs2.mutable.Specification
class ${service-name}Integration extends Specification {
//TODO TestHarness needs to be rebuilt for mocking the harness correctly
//Harness.startActorSystem
"${service-name} " should {
" be able to run as a full integration" in {
while (true) {
Thread.sleep(100)
}
success
}
}
/*step {
Harness.shutdownActorSystem(false) {
System.exit(0)
}
}*/
}
|
Crashfreak/wookiee
|
archetypes/wookiee-service-archetype/src/main/resources/archetype-resources/src/test/scala/com/webtrends/service/__service-name__Integration.scala
|
Scala
|
apache-2.0
| 648
|
package modules.text.term.en
import modules.ner.en.EnglishNamedEntityRecognizerInGlossary
import modules.text.term.MultiLingualTermNormalizerInGlossary
import us.feliscat.m17n.English
import us.feliscat.ner.NamedEntity
import us.feliscat.text.StringOption
/**
* <pre>
* Created on 2017/02/09.
* </pre>
*
* @author K.Sakamoto
*/
object EnglishTermNormalizerInGlossary extends MultiLingualTermNormalizerInGlossary with English {
override def recognize(textOpt: StringOption): Seq[NamedEntity] = {
EnglishNamedEntityRecognizerInGlossary.recognize(textOpt)
}
}
|
ktr-skmt/FelisCatusZero-multilingual
|
src/main/scala/modules/text/term/en/EnglishTermNormalizerInGlossary.scala
|
Scala
|
apache-2.0
| 579
|
package it.unich.jandom.utils.numberext
import breeze.math.Field
import breeze.storage.Zero
import breeze.linalg._
import breeze.linalg.operators._
import org.netlib.util.intW
import it.unich.jandom.utils.breeze.countNonZero
import breeze.linalg.support.CanTraverseValues
import breeze.linalg.support.CanTraverseValues.ValuesVisitor
case class ModZ(n: Int) extends Ordered[ModZ] {
val p = 3
def *(b: ModZ) = ModZ((n*b.n) % p)
def /(b: ModZ) = ModZ((n/b.n) % p)
def +(b: ModZ) = ModZ((n+b.n) % p)
def -(b: ModZ) = ModZ((n-b.n) % p)
def pow(b: ModZ) = {
var res = 1
for (i <- 0 until b.n) res *= n
ModZ(res % p)
}
def compare(b: ModZ) = n - b.n
def isInfinite = false
def isPosInfinity = n == p-1
def isNegInfinity = n == 0
def isInfinity = isPosInfinity || isNegInfinity
def max(b: ModZ) = ModZ(n max b.n)
def min(b: ModZ) = ModZ(n min b.n)
def toDouble: Double = n.toDouble
}
object ModZ { outer =>
val p = 3
val zero = ModZ(0)
val one = ModZ(1)
val NegativeInfinity = zero
val PositiveInfinity = ModZ(p - 1)
def apply(d: Double): ModZ = ModZ( d.toInt % p )
implicit object scalar extends Field[ModZ] {
def zero = outer.zero
def one = outer.one
def ==(a: ModZ, b:ModZ) = a == b
def !=(a: ModZ, b:ModZ) = a != b
def +(a: ModZ, b: ModZ) = a + b
def -(a: ModZ, b: ModZ) = a - b
def *(a: ModZ, b: ModZ) = a * b
def /(a: ModZ, b: ModZ) = a / b
def %(a: ModZ, b: ModZ) = outer.zero
def pow(a: ModZ, b: ModZ) = a pow b
implicit val normImpl: norm.Impl[ModZ, Double] = new norm.Impl[ModZ, Double] {
def apply(v: ModZ): Double = v.n.toDouble
}
}
trait ModZNumeric extends Numeric[ModZ] {
def plus(x: ModZ, y: ModZ): ModZ = x + y
def minus(x: ModZ, y: ModZ): ModZ = x - y
def times(x: ModZ, y: ModZ): ModZ = x * y
def negate(x: ModZ): ModZ = -x
def fromInt(x: Int): ModZ = ModZ(x % p)
def toInt(x: ModZ): Int = x.n
def toLong(x: ModZ): Long = x.n.toLong
def toFloat(x: ModZ): Float = x.n.toFloat
def toDouble(x: ModZ): Double = x.n.toDouble
}
trait ModZFractional extends ModZNumeric with Fractional[ModZ] {
def div(x: ModZ, y: ModZ): ModZ = x / y
}
trait ModZOrdering extends Ordering[ModZ] {
override def compare(a : ModZ, b : ModZ) = a.n - b.n
}
implicit object ModZFractional extends ModZFractional with ModZOrdering
implicit object MulMM extends OpMulMatrix.Impl2[ModZ,ModZ,ModZ]
{ def apply(a : ModZ, b : ModZ) = a * b}
implicit object MulDM extends OpDiv.Impl2[Double,ModZ,ModZ]
{ def apply(a : Double, b : ModZ) = ModZ(a.toInt % p) * b }
implicit object ModZZero extends Zero[ModZ] {
val zero = outer.zero
}
implicit def dv_s_Op_ModZ_OpMulMatrix: OpMulMatrix.Impl2[DenseVector[ModZ], ModZ, DenseVector[ModZ]] =
new OpMulMatrix.Impl2[DenseVector[ModZ], ModZ, DenseVector[ModZ]] {
def apply(a: DenseVector[ModZ], b: ModZ): DenseVector[ModZ] = {
val ad = a.data
var aoff = a.offset
val result = DenseVector.zeros[ModZ](a.length)
val rd = result.data
var i = 0
while (i < a.length) {
rd(i) = ad(aoff) * b
aoff += a.stride
i += 1
}
result
}
implicitly[BinaryRegistry[Vector[ModZ], ModZ, OpMulMatrix.type, Vector[ModZ]]].register(this)
}
implicit object implOpSolveMatrixBy_DRR_DRR_eq_DRR
extends OpSolveMatrixBy.Impl2[DenseMatrix[ModZ], DenseMatrix[ModZ], DenseMatrix[ModZ]] {
def LUSolve(X: DenseMatrix[ModZ], A: DenseMatrix[ModZ]) = {
var perm = (0 until A.rows).toArray
for (i <- 0 until A.rows) {
val optPivot = (i until A.rows) find { p => A(perm(p), perm(i)) != ModZ.zero }
val pivotRow = optPivot.getOrElse(throw new MatrixSingularException())
val tmp = perm(i)
perm(i) = perm(pivotRow)
perm(pivotRow) = tmp
val pivot = A(perm(i), perm(i))
for (j <- i + 1 until A.rows) {
val coeff = A(perm(j),perm(i)) / pivot
A(perm(j), ::) -= A(perm(i), ::) * coeff
X(perm(j), ::) -= X(perm(i), ::) * coeff
}
}
for (i <- A.rows - 1 to (0, -1)) {
X(perm(i), ::) /= A(perm(i), perm(i))
for (j <- i - 1 to (0, -1)) {
X(perm(j), ::) -= X(perm(i), ::) * A(perm(j), perm(i))
}
}
}
override def apply(A: DenseMatrix[ModZ], V: DenseMatrix[ModZ]): DenseMatrix[ModZ] = {
require(A.rows == V.rows, "Non-conformant matrix sizes")
if (A.size == 0) {
DenseMatrix.zeros[ModZ](0, 0)
} else if (A.rows == A.cols) {
val X = DenseMatrix.zeros[ModZ](V.rows, V.cols)
val Y = DenseMatrix.zeros[ModZ](A.rows, A.cols)
// square: LUSolve
X := V
Y := A
LUSolve(X, Y)
X
} else
???
}
}
implicit object implOpSolveMatrixBy_DMR_DVR_eq_DVR
extends OpSolveMatrixBy.Impl2[DenseMatrix[ModZ], DenseVector[ModZ], DenseVector[ModZ]] {
override def apply(a: DenseMatrix[ModZ], b: DenseVector[ModZ]): DenseVector[ModZ] = {
val rv: DenseMatrix[ModZ] = a \\ new DenseMatrix[ModZ](b.size, 1, b.data, b.offset, b.stride, true)
new DenseVector[ModZ](rv.data)
}
}
implicit def countFromTraverseModZ[T](implicit traverse: CanTraverseValues[T, ModZ]): countNonZero.Impl[T, Int] = {
new countNonZero.Impl[T, Int] {
def apply(t: T): Int = {
var count: Int = 0
traverse.traverse(t, new ValuesVisitor[ModZ] {
def visit(a: ModZ) = { if (a != ModZ.zero) count += 1 }
def zeros(count: Int, zeroValue: ModZ) {}
})
count
}
}
}
}
|
rubino22/JDBeta
|
core/src/main/scala/it/unich/jandom/utils/numberext/ModInteger.scala
|
Scala
|
lgpl-3.0
| 5,688
|
package mimir.util
import java.io.{Reader,File}
import org.jline.terminal.{Terminal,TerminalBuilder}
import org.jline.reader.{LineReader,LineReaderBuilder,EndOfFileException,UserInterruptException}
import com.typesafe.scalalogging.LazyLogging
class LineReaderInputSource(
terminal: Terminal,
historyFile: String = LineReaderInputSource.defaultHistoryFile,
prompt: String = "mimir> "
)
extends Reader
with LazyLogging
{
val input: LineReader =
LineReaderBuilder.
builder().
terminal(terminal).
variable(LineReader.HISTORY_FILE, historyFile).
build()
var pos: Int = 1;
var curr: String = "";
def close() = input.getTerminal.close
def read(cbuf: Array[Char], offset: Int, len: Int): Int =
{
try {
var i:Int = 0;
logger.debug(s"being asked for $len characters")
while(i < len){
while(pos >= curr.length){
if(i > 0){ logger.debug(s"returning $i characters"); return i; }
curr = input.readLine(prompt)
if(curr == null){ logger.debug("Reached end"); return -1; }
logger.debug(s"Read: '$curr'")
pos = 0;
}
cbuf(i+offset) = curr.charAt(pos);
i += 1; pos += 1;
}
logger.debug(s"Full! Returning $i characters")
return i;
} catch {
case _ : EndOfFileException => return -1;
case _ : UserInterruptException => System.exit(0); return -1;
}
}
}
object LineReaderInputSource
{
val defaultHistoryFile = System.getProperty("user.home") + File.separator + ".mimir_history"
}
|
UBOdin/mimir
|
src/main/scala/mimir/util/LineReaderInputSource.scala
|
Scala
|
apache-2.0
| 1,561
|
package util
import jp.sf.amateras.scalatra.forms._
import org.scalatra.i18n.Messages
trait Validations {
/**
* Constraint for the identifier such as user name, repository name or page name.
*/
def identifier: Constraint = new Constraint(){
override def validate(name: String, value: String, messages: Messages): Option[String] =
if(!value.matches("^[a-zA-Z0-9\\\\-_.]+$")){
Some(s"${name} contains invalid character.")
} else if(value.startsWith("_") || value.startsWith("-")){
Some(s"${name} starts with invalid character.")
} else {
None
}
}
def color = pattern("#[0-9a-fA-F]{6}")
/**
* ValueType for the java.util.Date property.
*/
def date(constraints: Constraint*): SingleValueType[java.util.Date] =
new SingleValueType[java.util.Date]((pattern("\\\\d{4}-\\\\d{2}-\\\\d{2}") +: constraints): _*){
def convert(value: String, messages: Messages): java.util.Date = new java.text.SimpleDateFormat("yyyy-MM-dd").parse(value)
}
}
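// Hedged sketch, not part of the original file: the identifier constraint above first
// restricts names to letters, digits, '-', '_' and '.' via the regex, then separately
// rejects a leading '-' or '_'. The sample values are hypothetical.
private[util] object IdentifierConstraintSketch {
  private val allowed = "^[a-zA-Z0-9\\\\-_.]+$"
  val passesRegex = Seq("my-repo_1.0", "WikiPage9").forall(_.matches(allowed))
  val failsRegex  = Seq("has space", "semi;colon").exists(!_.matches(allowed))
}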
|
unixcrh/gitbucket
|
src/main/scala/util/Validations.scala
|
Scala
|
apache-2.0
| 1,018
|
package mimir.demo
import java.io._
import org.specs2.reporter.LineLogger
import org.specs2.specification.core.{Fragment,Fragments}
import mimir.test._
import mimir.util._
import mimir.algebra.ID
object TimeSeqScenarios
extends SQLTestSpecification("TimeSeq")
{
sequential
"The Trivial Time Series" should {
"Load correctly" >> {
update("LOAD 'test/data/seq.csv'")
ok
}
"run limit queries" >> {
query("select T, A, B from seq limit 20"){ _.toSeq must have size(20) }
}
"run order-by limit queries" >> {
query("select T, A, B from seq order by t limit 20"){
_.toSeq.reverse.head(ID("T")).asLong must beEqualTo(20)
}
query("select T, A, B from seq order by t desc limit 20"){
_.toSeq.reverse.head(ID("T")).asLong must beEqualTo(9980)
}
}
"generate categories correctly" >> {
query("""
select T, A, B,
case when a is not null then 'A'
when b is not null then 'B'
else 'C'
end as cat from seq limit 20
""") { _.map { _(3).asString }.toSet must contain("A", "B", "C") }
}
}
}
|
UBOdin/mimir
|
src/test/scala/mimir/demo/TimeSeqScenarios.scala
|
Scala
|
apache-2.0
| 1,155
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.plans.logical.LocalRelation
import org.apache.spark.sql.types.{LongType, StringType, TypeCollection}
class ExpressionTypeCheckingSuite extends SparkFunSuite {
val testRelation = LocalRelation(
'intField.int,
'stringField.string,
'booleanField.boolean,
'decimalField.decimal(8, 0),
'arrayField.array(StringType),
'mapField.map(StringType, LongType))
def assertError(expr: Expression, errorMessage: String): Unit = {
val e = intercept[AnalysisException] {
assertSuccess(expr)
}
assert(e.getMessage.contains(
s"cannot resolve '${expr.sql}' due to data type mismatch:"))
assert(e.getMessage.contains(errorMessage))
}
def assertSuccess(expr: Expression): Unit = {
val analyzed = testRelation.select(expr.as("c")).analyze
SimpleAnalyzer.checkAnalysis(analyzed)
}
def assertErrorForDifferingTypes(expr: Expression): Unit = {
assertError(expr,
s"differing types in '${expr.sql}'")
}
test("check types for unary arithmetic") {
assertError(BitwiseNot('stringField), "requires integral type")
}
test("check types for binary arithmetic") {
// We will cast String to Double for binary arithmetic
assertSuccess(Add('intField, 'stringField))
assertSuccess(Subtract('intField, 'stringField))
assertSuccess(Multiply('intField, 'stringField))
assertSuccess(Divide('intField, 'stringField))
assertSuccess(Remainder('intField, 'stringField))
// checkAnalysis(BitwiseAnd('intField, 'stringField))
assertErrorForDifferingTypes(Add('intField, 'booleanField))
assertErrorForDifferingTypes(Subtract('intField, 'booleanField))
assertErrorForDifferingTypes(Multiply('intField, 'booleanField))
assertErrorForDifferingTypes(Divide('intField, 'booleanField))
assertErrorForDifferingTypes(Remainder('intField, 'booleanField))
assertErrorForDifferingTypes(BitwiseAnd('intField, 'booleanField))
assertErrorForDifferingTypes(BitwiseOr('intField, 'booleanField))
assertErrorForDifferingTypes(BitwiseXor('intField, 'booleanField))
assertError(Add('booleanField, 'booleanField), "requires (numeric or calendarinterval) type")
assertError(Subtract('booleanField, 'booleanField),
"requires (numeric or calendarinterval) type")
assertError(Multiply('booleanField, 'booleanField), "requires numeric type")
assertError(Divide('booleanField, 'booleanField), "requires (double or decimal) type")
assertError(Remainder('booleanField, 'booleanField), "requires numeric type")
assertError(BitwiseAnd('booleanField, 'booleanField), "requires integral type")
assertError(BitwiseOr('booleanField, 'booleanField), "requires integral type")
assertError(BitwiseXor('booleanField, 'booleanField), "requires integral type")
}
test("check types for predicates") {
// We will cast String to Double for binary comparison
assertSuccess(EqualTo('intField, 'stringField))
assertSuccess(EqualNullSafe('intField, 'stringField))
assertSuccess(LessThan('intField, 'stringField))
assertSuccess(LessThanOrEqual('intField, 'stringField))
assertSuccess(GreaterThan('intField, 'stringField))
assertSuccess(GreaterThanOrEqual('intField, 'stringField))
// We will transform EqualTo with numeric and boolean types to CaseKeyWhen
assertSuccess(EqualTo('intField, 'booleanField))
assertSuccess(EqualNullSafe('intField, 'booleanField))
assertErrorForDifferingTypes(EqualTo('intField, 'mapField))
assertErrorForDifferingTypes(EqualNullSafe('intField, 'mapField))
assertErrorForDifferingTypes(LessThan('intField, 'booleanField))
assertErrorForDifferingTypes(LessThanOrEqual('intField, 'booleanField))
assertErrorForDifferingTypes(GreaterThan('intField, 'booleanField))
assertErrorForDifferingTypes(GreaterThanOrEqual('intField, 'booleanField))
assertError(EqualTo('mapField, 'mapField), "Cannot use map type in EqualTo")
assertError(EqualNullSafe('mapField, 'mapField), "Cannot use map type in EqualNullSafe")
assertError(LessThan('mapField, 'mapField),
s"requires ${TypeCollection.Ordered.simpleString} type")
assertError(LessThanOrEqual('mapField, 'mapField),
s"requires ${TypeCollection.Ordered.simpleString} type")
assertError(GreaterThan('mapField, 'mapField),
s"requires ${TypeCollection.Ordered.simpleString} type")
assertError(GreaterThanOrEqual('mapField, 'mapField),
s"requires ${TypeCollection.Ordered.simpleString} type")
assertError(If('intField, 'stringField, 'stringField),
"type of predicate expression in If should be boolean")
assertErrorForDifferingTypes(If('booleanField, 'intField, 'booleanField))
assertError(
CaseWhen(Seq(('booleanField.attr, 'intField.attr), ('booleanField.attr, 'mapField.attr))),
"THEN and ELSE expressions should all be same type or coercible to a common type")
assertError(
CaseKeyWhen('intField, Seq('intField, 'stringField, 'intField, 'mapField)),
"THEN and ELSE expressions should all be same type or coercible to a common type")
assertError(
CaseWhen(Seq(('booleanField.attr, 'intField.attr), ('intField.attr, 'intField.attr))),
"WHEN expressions in CaseWhen should all be boolean type")
}
test("check types for aggregates") {
// We use AggregateFunction directly here because the error will be thrown from it
// instead of from AggregateExpression, which is the wrapper of an AggregateFunction.
// We will cast String to Double for sum and average
assertSuccess(Sum('stringField))
assertSuccess(Average('stringField))
assertSuccess(Min('arrayField))
assertError(Min('mapField), "min does not support ordering on type")
assertError(Max('mapField), "max does not support ordering on type")
assertError(Sum('booleanField), "function sum requires numeric type")
assertError(Average('booleanField), "function average requires numeric type")
}
test("check types for others") {
assertError(CreateArray(Seq('intField, 'booleanField)),
"input to function array should all be the same type")
assertError(Coalesce(Seq('intField, 'booleanField)),
"input to function coalesce should all be the same type")
assertError(Coalesce(Nil), "input to function coalesce cannot be empty")
assertError(new Murmur3Hash(Nil), "function hash requires at least one argument")
assertError(Explode('intField),
"input to function explode should be array or map type")
assertError(PosExplode('intField),
"input to function explode should be array or map type")
}
test("check types for CreateNamedStruct") {
assertError(
CreateNamedStruct(Seq("a", "b", 2.0)), "even number of arguments")
assertError(
CreateNamedStruct(Seq(1, "a", "b", 2.0)),
"Only foldable StringType expressions are allowed to appear at odd position")
assertError(
CreateNamedStruct(Seq('a.string.at(0), "a", "b", 2.0)),
"Only foldable StringType expressions are allowed to appear at odd position")
assertError(
CreateNamedStruct(Seq(Literal.create(null, StringType), "a")),
"Field name should not be null")
}
test("check types for CreateMap") {
assertError(CreateMap(Seq("a", "b", 2.0)), "even number of arguments")
assertError(
CreateMap(Seq('intField, 'stringField, 'booleanField, 'stringField)),
"keys of function map should all be the same type")
assertError(
CreateMap(Seq('stringField, 'intField, 'stringField, 'booleanField)),
"values of function map should all be the same type")
}
test("check types for ROUND/BROUND") {
assertSuccess(Round(Literal(null), Literal(null)))
assertSuccess(Round('intField, Literal(1)))
assertError(Round('intField, 'intField), "Only foldable Expression is allowed")
assertError(Round('intField, 'booleanField), "requires int type")
assertError(Round('intField, 'mapField), "requires int type")
assertError(Round('booleanField, 'intField), "requires numeric type")
assertSuccess(BRound(Literal(null), Literal(null)))
assertSuccess(BRound('intField, Literal(1)))
assertError(BRound('intField, 'intField), "Only foldable Expression is allowed")
assertError(BRound('intField, 'booleanField), "requires int type")
assertError(BRound('intField, 'mapField), "requires int type")
assertError(BRound('booleanField, 'intField), "requires numeric type")
}
test("check types for Greatest/Least") {
for (operator <- Seq[(Seq[Expression] => Expression)](Greatest, Least)) {
assertError(operator(Seq('booleanField)), "requires at least 2 arguments")
assertError(operator(Seq('intField, 'stringField)), "should all have the same type")
assertError(operator(Seq('mapField, 'mapField)), "does not support ordering")
}
}
}
|
liutang123/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ExpressionTypeCheckingSuite.scala
|
Scala
|
apache-2.0
| 10,020
|
package org.example
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkConf
import org.apache.spark.streaming._
import org.apache.spark.streaming.twitter._
import org.apache.spark.streaming.StreamingContext._
import org.atilika.kuromoji._
import org.atilika.kuromoji.Tokenizer._
import java.util.regex._
import org.elasticsearch.spark._
import org.elasticsearch.spark.rdd.EsSpark
object Handlespark extends App {
System.setProperty("twitter4j.oauth.consumerKey", "xhTNNX6UZeQaBZvq5RvnAT45z")
System.setProperty("twitter4j.oauth.consumerSecret", "ja2thM49xgafcWRZnK0FKE9snr4WmiMwsFxTRBgkWtSJEz8hYP")
System.setProperty("twitter4j.oauth.accessToken", "4991401-9YDQRLKNV1LBqdPpIq0uhzeuhilx6IO2pGicFMhDNy")
System.setProperty("twitter4j.oauth.accessTokenSecret", "zazWgKUFg5eqbiXJVJX0rI3ZtUMLL5ENYVbZOJOixJSPF")
val conf = new SparkConf().setAppName("Simple Application")
.setMaster("local[12]")
val sc = new SparkContext(conf)
val ssc = new StreamingContext(sc, Seconds(60)) // 60-second slide interval
val words = Array("iphone6", "nicovideo")
val stream = TwitterUtils.createStream(ssc, None, words)
// Process the tweets fetched from Twitter
val tweetStream = stream.flatMap(status => {
val tokenizer : Tokenizer = Tokenizer.builder().build() // kuromoji morphological analyzer
val features : scala.collection.mutable.ArrayBuffer[String] = new collection.mutable.ArrayBuffer[String]() // container holding the analysis results
var tweetText : String = status.getText() // get the tweet body
val japanese_pattern : Pattern = Pattern.compile("[\\\\u3040-\\\\u309F]+") // regex: "does the text contain hiragana?"
if(japanese_pattern.matcher(tweetText).find()) {
// Remove unwanted strings
tweetText = tweetText.replaceAll("http(s*)://(.*)/", "").replaceAll("\\\\uff57", "") // the full-width "w" just gets in the way
// Analyze the tweet body
val tokens : java.util.List[Token] = tokenizer.tokenize(tweetText) // morphological analysis
val pattern : Pattern = Pattern.compile("^[a-zA-Z]+$|^[0-9]+$") // regex: "is it purely alphanumeric?"
for(index <- 0 to tokens.size()-1) { // for each morpheme...
val token = tokens.get(index)
val matcher : Matcher = pattern.matcher(token.getSurfaceForm())
// Look for words that are at least 3 characters long and not purely alphanumeric
if(token.getSurfaceForm().length() >= 3 && !matcher.find()) {
// Record the analysis result for morphemes that match the condition
features += (token.getSurfaceForm() + "-" + token.getAllFeatures())
}
}
}
(features)
})
})
// Aggregate
val topCounts60 = tweetStream.map((_, 1) // pair each word with a "1" so occurrences can be counted
).reduceByKeyAndWindow(_+_, Seconds(60*60) // gather the words falling inside the window (60*60 sec)
).map{case (topic, count) => (count, topic) // tally the occurrence count per word
}.transform(_.sortByKey(false)) // sort
// Output
topCounts60.foreachRDD(rdd => {
// Take the 20 most frequent words
val topList = rdd.take(20)
// Print to the command line
println("\\nPopular topics in last 60*60 seconds (%s words):".format(rdd.count()))
topList.foreach{case (count, tag) => println("%s (%s tweets)".format(tag, count))}
})
// Start Spark Streaming to run the processing defined above!
ssc.start()
ssc.awaitTermination()
}
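// Hedged note (an assumption about intent, not from the original author): with a 60 s
// batch interval and a 60*60 s window, every batch re-emits counts over the trailing
// hour, so a tweet keeps contributing to successive outputs until it ages out. The same
// count-and-rank pattern on a plain collection, for reference:
private object WindowCountSketch {
  def topN(words: Seq[String], n: Int): Seq[(Int, String)] =
    words.groupBy(identity).toSeq.map { case (w, ws) => (ws.size, w) }.sortBy(-_._1).take(n)
}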
|
j138/handle-spark
|
src/main/scala/org/example/Handlespark.scala
|
Scala
|
apache-2.0
| 3,585
|